mirror of
				https://github.com/paboyle/Grid.git
				synced 2025-10-25 10:09:34 +01:00 
			
		
		
		
	Compare commits
	
		
			5 Commits
		
	
	
		
			feature/sy
			...
			sycl-linki
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | 6d25badce5 | ||
|  | adcc97cbbb | ||
|  | bc8c5fb16c | ||
|  | 845d757bb0 | ||
|  | 0d5470c363 | 
| @@ -37,9 +37,7 @@ directory | |||||||
| #endif | #endif | ||||||
|  |  | ||||||
|  //disables and intel compiler specific warning (in json.hpp) |  //disables and intel compiler specific warning (in json.hpp) | ||||||
| #ifdef __ICC |  | ||||||
| #pragma warning disable 488   | #pragma warning disable 488   | ||||||
| #endif |  | ||||||
|  |  | ||||||
| #ifdef __NVCC__ | #ifdef __NVCC__ | ||||||
|  //disables nvcc specific warning in json.hpp |  //disables nvcc specific warning in json.hpp | ||||||
|   | |||||||
| @@ -54,7 +54,7 @@ Version.h: version-cache | |||||||
| include Make.inc | include Make.inc | ||||||
| include Eigen.inc | include Eigen.inc | ||||||
|  |  | ||||||
| #extra_sources+=$(ZWILS_FERMION_FILES) | extra_sources+=$(ZWILS_FERMION_FILES) | ||||||
| extra_sources+=$(WILS_FERMION_FILES) | extra_sources+=$(WILS_FERMION_FILES) | ||||||
| extra_sources+=$(STAG_FERMION_FILES) | extra_sources+=$(STAG_FERMION_FILES) | ||||||
| if BUILD_GPARITY | if BUILD_GPARITY | ||||||
|   | |||||||
| @@ -31,7 +31,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | |||||||
| #ifndef  GRID_ALGORITHM_COARSENED_MATRIX_H | #ifndef  GRID_ALGORITHM_COARSENED_MATRIX_H | ||||||
| #define  GRID_ALGORITHM_COARSENED_MATRIX_H | #define  GRID_ALGORITHM_COARSENED_MATRIX_H | ||||||
|  |  | ||||||
| #include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No) |  | ||||||
|  |  | ||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
| @@ -60,14 +59,12 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner, | |||||||
| class Geometry { | class Geometry { | ||||||
| public: | public: | ||||||
|   int npoint; |   int npoint; | ||||||
|   int base; |  | ||||||
|   std::vector<int> directions   ; |   std::vector<int> directions   ; | ||||||
|   std::vector<int> displacements; |   std::vector<int> displacements; | ||||||
|   std::vector<int> points_dagger; |  | ||||||
|  |  | ||||||
|   Geometry(int _d)  { |   Geometry(int _d)  { | ||||||
|      |      | ||||||
|     base = (_d==5) ? 1:0; |     int base = (_d==5) ? 1:0; | ||||||
|  |  | ||||||
|     // make coarse grid stencil for 4d , not 5d |     // make coarse grid stencil for 4d , not 5d | ||||||
|     if ( _d==5 ) _d=4; |     if ( _d==5 ) _d=4; | ||||||
| @@ -75,51 +72,16 @@ public: | |||||||
|     npoint = 2*_d+1; |     npoint = 2*_d+1; | ||||||
|     directions.resize(npoint); |     directions.resize(npoint); | ||||||
|     displacements.resize(npoint); |     displacements.resize(npoint); | ||||||
|     points_dagger.resize(npoint); |  | ||||||
|     for(int d=0;d<_d;d++){ |     for(int d=0;d<_d;d++){ | ||||||
|       directions[d   ] = d+base; |       directions[d   ] = d+base; | ||||||
|       directions[d+_d] = d+base; |       directions[d+_d] = d+base; | ||||||
|       displacements[d  ] = +1; |       displacements[d  ] = +1; | ||||||
|       displacements[d+_d]= -1; |       displacements[d+_d]= -1; | ||||||
|       points_dagger[d   ] = d+_d; |  | ||||||
|       points_dagger[d+_d] = d; |  | ||||||
|     } |     } | ||||||
|     directions   [2*_d]=0; |     directions   [2*_d]=0; | ||||||
|     displacements[2*_d]=0; |     displacements[2*_d]=0; | ||||||
|     points_dagger[2*_d]=2*_d; |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   int point(int dir, int disp) { |  | ||||||
|     assert(disp == -1 || disp == 0 || disp == 1); |  | ||||||
|     assert(base+0 <= dir && dir < base+4); |  | ||||||
|  |  | ||||||
|     // directions faster index = new indexing |  | ||||||
|     // 4d (base = 0): |  | ||||||
|     // point 0  1  2  3  4  5  6  7  8 |  | ||||||
|     // dir   0  1  2  3  0  1  2  3  0 |  | ||||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 |  | ||||||
|     // 5d (base = 1): |  | ||||||
|     // point 0  1  2  3  4  5  6  7  8 |  | ||||||
|     // dir   1  2  3  4  1  2  3  4  0 |  | ||||||
|     // disp +1 +1 +1 +1 -1 -1 -1 -1  0 |  | ||||||
|  |  | ||||||
|     // displacements faster index = old indexing |  | ||||||
|     // 4d (base = 0): |  | ||||||
|     // point 0  1  2  3  4  5  6  7  8 |  | ||||||
|     // dir   0  0  1  1  2  2  3  3  0 |  | ||||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 |  | ||||||
|     // 5d (base = 1): |  | ||||||
|     // point 0  1  2  3  4  5  6  7  8 |  | ||||||
|     // dir   1  1  2  2  3  3  4  4  0 |  | ||||||
|     // disp +1 -1 +1 -1 +1 -1 +1 -1  0 |  | ||||||
|  |  | ||||||
|     if(dir == 0 and disp == 0) |  | ||||||
|       return 8; |  | ||||||
|     else // New indexing |  | ||||||
|       return (1 - disp) / 2 * 4 + dir - base; |  | ||||||
|     // else // Old indexing |  | ||||||
|     //   return (4 * (dir - base) + 1 - disp) / 2; |  | ||||||
|   } |  | ||||||
| }; | }; | ||||||
|    |    | ||||||
| template<class Fobj,class CComplex,int nbasis> | template<class Fobj,class CComplex,int nbasis> | ||||||
| @@ -296,7 +258,7 @@ public: | |||||||
| // Fine Object == (per site) type of fine field | // Fine Object == (per site) type of fine field | ||||||
| // nbasis      == number of deflation vectors | // nbasis      == number of deflation vectors | ||||||
| template<class Fobj,class CComplex,int nbasis> | template<class Fobj,class CComplex,int nbasis> | ||||||
| class CoarsenedMatrix : public CheckerBoardedSparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  { | class CoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > >  { | ||||||
| public: | public: | ||||||
|      |      | ||||||
|   typedef iVector<CComplex,nbasis >           siteVector; |   typedef iVector<CComplex,nbasis >           siteVector; | ||||||
| @@ -306,59 +268,33 @@ public: | |||||||
|   typedef iMatrix<CComplex,nbasis >  Cobj; |   typedef iMatrix<CComplex,nbasis >  Cobj; | ||||||
|   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field |   typedef Lattice< CComplex >   CoarseScalar; // used for inner products on fine field | ||||||
|   typedef Lattice<Fobj >        FineField; |   typedef Lattice<Fobj >        FineField; | ||||||
|   typedef CoarseVector FermionField; |  | ||||||
|  |  | ||||||
|   // enrich interface, use default implementation as in FermionOperator /////// |  | ||||||
|   void Dminus(CoarseVector const& in, CoarseVector& out) { out = in; } |  | ||||||
|   void DminusDag(CoarseVector const& in, CoarseVector& out) { out = in; } |  | ||||||
|   void ImportPhysicalFermionSource(CoarseVector const& input, CoarseVector& imported) { imported = input; } |  | ||||||
|   void ImportUnphysicalFermion(CoarseVector const& input, CoarseVector& imported) { imported = input; } |  | ||||||
|   void ExportPhysicalFermionSolution(CoarseVector const& solution, CoarseVector& exported) { exported = solution; }; |  | ||||||
|   void ExportPhysicalFermionSource(CoarseVector const& solution, CoarseVector& exported) { exported = solution; }; |  | ||||||
|  |  | ||||||
|   //////////////////// |   //////////////////// | ||||||
|   // Data members |   // Data members | ||||||
|   //////////////////// |   //////////////////// | ||||||
|   Geometry         geom; |   Geometry         geom; | ||||||
|   GridBase *       _grid;  |   GridBase *       _grid;  | ||||||
|   GridBase*        _cbgrid; |  | ||||||
|   int hermitian; |   int hermitian; | ||||||
|  |  | ||||||
|   CartesianStencil<siteVector,siteVector,int> Stencil;  |   CartesianStencil<siteVector,siteVector,int> Stencil;  | ||||||
|   CartesianStencil<siteVector,siteVector,int> StencilEven; |  | ||||||
|   CartesianStencil<siteVector,siteVector,int> StencilOdd; |  | ||||||
|  |  | ||||||
|   std::vector<CoarseMatrix> A; |   std::vector<CoarseMatrix> A; | ||||||
|   std::vector<CoarseMatrix> Aeven; |      | ||||||
|   std::vector<CoarseMatrix> Aodd; |  | ||||||
|  |  | ||||||
|   CoarseMatrix AselfInv; |  | ||||||
|   CoarseMatrix AselfInvEven; |  | ||||||
|   CoarseMatrix AselfInvOdd; |  | ||||||
|  |  | ||||||
|   Vector<RealD> dag_factor; |  | ||||||
|  |  | ||||||
|   /////////////////////// |   /////////////////////// | ||||||
|   // Interface |   // Interface | ||||||
|   /////////////////////// |   /////////////////////// | ||||||
|   GridBase * Grid(void)         { return _grid; };   // this is all the linalg routines need to know |   GridBase * Grid(void)         { return _grid; };   // this is all the linalg routines need to know | ||||||
|   GridBase * RedBlackGrid()     { return _cbgrid; }; |  | ||||||
|  |  | ||||||
|   int ConstEE() { return 0; } |  | ||||||
|  |  | ||||||
|   void M (const CoarseVector &in, CoarseVector &out) |   void M (const CoarseVector &in, CoarseVector &out) | ||||||
|   { |   { | ||||||
|     conformable(_grid,in.Grid()); |     conformable(_grid,in.Grid()); | ||||||
|     conformable(in.Grid(),out.Grid()); |     conformable(in.Grid(),out.Grid()); | ||||||
|     out.Checkerboard() = in.Checkerboard(); |  | ||||||
|  |  | ||||||
|     SimpleCompressor<siteVector> compressor; |     SimpleCompressor<siteVector> compressor; | ||||||
|  |  | ||||||
|     Stencil.HaloExchange(in,compressor); |     Stencil.HaloExchange(in,compressor); | ||||||
|     autoView( in_v , in, AcceleratorRead); |     autoView( in_v , in, AcceleratorRead); | ||||||
|     autoView( out_v , out, AcceleratorWrite); |     autoView( out_v , out, AcceleratorWrite); | ||||||
|     autoView( Stencil_v  , Stencil, AcceleratorRead); |  | ||||||
|     auto& geom_v = geom; |  | ||||||
|     typedef LatticeView<Cobj> Aview; |     typedef LatticeView<Cobj> Aview; | ||||||
|        |        | ||||||
|     Vector<Aview> AcceleratorViewContainer; |     Vector<Aview> AcceleratorViewContainer; | ||||||
| @@ -380,14 +316,14 @@ public: | |||||||
|       int ptype; |       int ptype; | ||||||
|       StencilEntry *SE; |       StencilEntry *SE; | ||||||
|  |  | ||||||
|       for(int point=0;point<geom_v.npoint;point++){ |       for(int point=0;point<geom.npoint;point++){ | ||||||
|  |  | ||||||
| 	SE=Stencil_v.GetEntry(ptype,point,ss); | 	SE=Stencil.GetEntry(ptype,point,ss); | ||||||
| 	   | 	   | ||||||
| 	if(SE->_is_local) {  | 	if(SE->_is_local) {  | ||||||
| 	  nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); | 	  nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); | ||||||
| 	} else { | 	} else { | ||||||
| 	  nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]); | 	  nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]); | ||||||
| 	} | 	} | ||||||
| 	acceleratorSynchronise(); | 	acceleratorSynchronise(); | ||||||
|  |  | ||||||
| @@ -408,72 +344,12 @@ public: | |||||||
|       return M(in,out); |       return M(in,out); | ||||||
|     } else { |     } else { | ||||||
|       // corresponds to Galerkin coarsening |       // corresponds to Galerkin coarsening | ||||||
|       return MdagNonHermitian(in, out); |       CoarseVector tmp(Grid()); | ||||||
|  |       G5C(tmp, in);  | ||||||
|  |       M(tmp, out); | ||||||
|  |       G5C(out, out); | ||||||
|     } |     } | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
|   void MdagNonHermitian(const CoarseVector &in, CoarseVector &out) |  | ||||||
|   { |  | ||||||
|     conformable(_grid,in.Grid()); |  | ||||||
|     conformable(in.Grid(),out.Grid()); |  | ||||||
|     out.Checkerboard() = in.Checkerboard(); |  | ||||||
|  |  | ||||||
|     SimpleCompressor<siteVector> compressor; |  | ||||||
|  |  | ||||||
|     Stencil.HaloExchange(in,compressor); |  | ||||||
|     autoView( in_v , in, AcceleratorRead); |  | ||||||
|     autoView( out_v , out, AcceleratorWrite); |  | ||||||
|     autoView( Stencil_v  , Stencil, AcceleratorRead); |  | ||||||
|     auto& geom_v = geom; |  | ||||||
|     typedef LatticeView<Cobj> Aview; |  | ||||||
|  |  | ||||||
|     Vector<Aview> AcceleratorViewContainer; |  | ||||||
|  |  | ||||||
|     for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer.push_back(A[p].View(AcceleratorRead)); |  | ||||||
|     Aview *Aview_p = & AcceleratorViewContainer[0]; |  | ||||||
|  |  | ||||||
|     const int Nsimd = CComplex::Nsimd(); |  | ||||||
|     typedef decltype(coalescedRead(in_v[0])) calcVector; |  | ||||||
|     typedef decltype(coalescedRead(in_v[0](0))) calcComplex; |  | ||||||
|  |  | ||||||
|     int osites=Grid()->oSites(); |  | ||||||
|  |  | ||||||
|     Vector<int> points(geom.npoint, 0); |  | ||||||
|     for(int p=0; p<geom.npoint; p++) |  | ||||||
|       points[p] = geom.points_dagger[p]; |  | ||||||
|  |  | ||||||
|     RealD* dag_factor_p = &dag_factor[0]; |  | ||||||
|  |  | ||||||
|     accelerator_for(sss, Grid()->oSites()*nbasis, Nsimd, { |  | ||||||
|       int ss = sss/nbasis; |  | ||||||
|       int b  = sss%nbasis; |  | ||||||
|       calcComplex res = Zero(); |  | ||||||
|       calcVector nbr; |  | ||||||
|       int ptype; |  | ||||||
|       StencilEntry *SE; |  | ||||||
|  |  | ||||||
|       for(int p=0;p<geom_v.npoint;p++){ |  | ||||||
|         int point = points[p]; |  | ||||||
|  |  | ||||||
| 	SE=Stencil_v.GetEntry(ptype,point,ss); |  | ||||||
|  |  | ||||||
| 	if(SE->_is_local) { |  | ||||||
| 	  nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); |  | ||||||
| 	} else { |  | ||||||
| 	  nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]); |  | ||||||
| 	} |  | ||||||
| 	acceleratorSynchronise(); |  | ||||||
|  |  | ||||||
| 	for(int bb=0;bb<nbasis;bb++) { |  | ||||||
| 	  res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb); |  | ||||||
| 	} |  | ||||||
|       } |  | ||||||
|       coalescedWrite(out_v[ss](b),res); |  | ||||||
|       }); |  | ||||||
|  |  | ||||||
|     for(int p=0;p<geom.npoint;p++) AcceleratorViewContainer[p].ViewClose(); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MdirComms(const CoarseVector &in) |   void MdirComms(const CoarseVector &in) | ||||||
|   { |   { | ||||||
|     SimpleCompressor<siteVector> compressor; |     SimpleCompressor<siteVector> compressor; | ||||||
| @@ -483,7 +359,6 @@ public: | |||||||
|   { |   { | ||||||
|     conformable(_grid,in.Grid()); |     conformable(_grid,in.Grid()); | ||||||
|     conformable(_grid,out.Grid()); |     conformable(_grid,out.Grid()); | ||||||
|     out.Checkerboard() = in.Checkerboard(); |  | ||||||
|  |  | ||||||
|     typedef LatticeView<Cobj> Aview; |     typedef LatticeView<Cobj> Aview; | ||||||
|     Vector<Aview> AcceleratorViewContainer; |     Vector<Aview> AcceleratorViewContainer; | ||||||
| @@ -492,7 +367,6 @@ public: | |||||||
|  |  | ||||||
|     autoView( out_v , out, AcceleratorWrite); |     autoView( out_v , out, AcceleratorWrite); | ||||||
|     autoView( in_v  , in, AcceleratorRead); |     autoView( in_v  , in, AcceleratorRead); | ||||||
|     autoView( Stencil_v  , Stencil, AcceleratorRead); |  | ||||||
|  |  | ||||||
|     const int Nsimd = CComplex::Nsimd(); |     const int Nsimd = CComplex::Nsimd(); | ||||||
|     typedef decltype(coalescedRead(in_v[0])) calcVector; |     typedef decltype(coalescedRead(in_v[0])) calcVector; | ||||||
| @@ -506,12 +380,12 @@ public: | |||||||
|       int ptype; |       int ptype; | ||||||
|       StencilEntry *SE; |       StencilEntry *SE; | ||||||
|  |  | ||||||
|       SE=Stencil_v.GetEntry(ptype,point,ss); |       SE=Stencil.GetEntry(ptype,point,ss); | ||||||
| 	   | 	   | ||||||
|       if(SE->_is_local) {  |       if(SE->_is_local) {  | ||||||
| 	nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); | 	nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); | ||||||
|       } else { |       } else { | ||||||
| 	nbr = coalescedRead(Stencil_v.CommBuf()[SE->_offset]); | 	nbr = coalescedRead(Stencil.CommBuf()[SE->_offset]); | ||||||
|       } |       } | ||||||
|       acceleratorSynchronise(); |       acceleratorSynchronise(); | ||||||
|  |  | ||||||
| @@ -539,7 +413,34 @@ public: | |||||||
|  |  | ||||||
|     this->MdirComms(in); |     this->MdirComms(in); | ||||||
|  |  | ||||||
|     MdirCalc(in,out,geom.point(dir,disp)); |     int ndim = in.Grid()->Nd(); | ||||||
|  |  | ||||||
|  |     ////////////// | ||||||
|  |     // 4D action like wilson | ||||||
|  |     // 0+ => 0  | ||||||
|  |     // 0- => 1 | ||||||
|  |     // 1+ => 2  | ||||||
|  |     // 1- => 3 | ||||||
|  |     // etc.. | ||||||
|  |     ////////////// | ||||||
|  |     // 5D action like DWF | ||||||
|  |     // 1+ => 0  | ||||||
|  |     // 1- => 1 | ||||||
|  |     // 2+ => 2  | ||||||
|  |     // 2- => 3 | ||||||
|  |     // etc.. | ||||||
|  |     auto point = [dir, disp, ndim](){ | ||||||
|  |       if(dir == 0 and disp == 0) | ||||||
|  | 	return 8; | ||||||
|  |       else if ( ndim==4 ) {  | ||||||
|  | 	return (4 * dir + 1 - disp) / 2; | ||||||
|  |       } else {  | ||||||
|  | 	return (4 * (dir-1) + 1 - disp) / 2; | ||||||
|  |       } | ||||||
|  |     }(); | ||||||
|  |  | ||||||
|  |     MdirCalc(in,out,point); | ||||||
|  |  | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
|   void Mdiag(const CoarseVector &in, CoarseVector &out) |   void Mdiag(const CoarseVector &in, CoarseVector &out) | ||||||
| @@ -548,296 +449,23 @@ public: | |||||||
|     MdirCalc(in, out, point); // No comms |     MdirCalc(in, out, point); // No comms | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
|   void Mooee(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     MooeeInternal(in, out, DaggerNo, InverseNo); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MooeeInv(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     MooeeInternal(in, out, DaggerNo, InverseYes); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MooeeDag(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     MooeeInternal(in, out, DaggerYes, InverseNo); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MooeeInvDag(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     MooeeInternal(in, out, DaggerYes, InverseYes); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void Meooe(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     if(in.Checkerboard() == Odd) { |  | ||||||
|       DhopEO(in, out, DaggerNo); |  | ||||||
|     } else { |  | ||||||
|       DhopOE(in, out, DaggerNo); |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MeooeDag(const CoarseVector &in, CoarseVector &out) { |  | ||||||
|     if(in.Checkerboard() == Odd) { |  | ||||||
|       DhopEO(in, out, DaggerYes); |  | ||||||
|     } else { |  | ||||||
|       DhopOE(in, out, DaggerYes); |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void Dhop(const CoarseVector &in, CoarseVector &out, int dag) { |  | ||||||
|     conformable(in.Grid(), _grid); // verifies full grid |  | ||||||
|     conformable(in.Grid(), out.Grid()); |  | ||||||
|  |  | ||||||
|     out.Checkerboard() = in.Checkerboard(); |  | ||||||
|  |  | ||||||
|     DhopInternal(Stencil, A, in, out, dag); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void DhopOE(const CoarseVector &in, CoarseVector &out, int dag) { |  | ||||||
|     conformable(in.Grid(), _cbgrid);    // verifies half grid |  | ||||||
|     conformable(in.Grid(), out.Grid()); // drops the cb check |  | ||||||
|  |  | ||||||
|     assert(in.Checkerboard() == Even); |  | ||||||
|     out.Checkerboard() = Odd; |  | ||||||
|  |  | ||||||
|     DhopInternal(StencilEven, Aodd, in, out, dag); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void DhopEO(const CoarseVector &in, CoarseVector &out, int dag) { |  | ||||||
|     conformable(in.Grid(), _cbgrid);    // verifies half grid |  | ||||||
|     conformable(in.Grid(), out.Grid()); // drops the cb check |  | ||||||
|  |  | ||||||
|     assert(in.Checkerboard() == Odd); |  | ||||||
|     out.Checkerboard() = Even; |  | ||||||
|  |  | ||||||
|     DhopInternal(StencilOdd, Aeven, in, out, dag); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void MooeeInternal(const CoarseVector &in, CoarseVector &out, int dag, int inv) { |  | ||||||
|     out.Checkerboard() = in.Checkerboard(); |  | ||||||
|     assert(in.Checkerboard() == Odd || in.Checkerboard() == Even); |  | ||||||
|  |  | ||||||
|     CoarseMatrix *Aself = nullptr; |  | ||||||
|     if(in.Grid()->_isCheckerBoarded) { |  | ||||||
|       if(in.Checkerboard() == Odd) { |  | ||||||
|         Aself = (inv) ? &AselfInvOdd : &Aodd[geom.npoint-1]; |  | ||||||
|         DselfInternal(StencilOdd, *Aself, in, out, dag); |  | ||||||
|       } else { |  | ||||||
|         Aself = (inv) ? &AselfInvEven : &Aeven[geom.npoint-1]; |  | ||||||
|         DselfInternal(StencilEven, *Aself, in, out, dag); |  | ||||||
|       } |  | ||||||
|     } else { |  | ||||||
|       Aself = (inv) ? &AselfInv : &A[geom.npoint-1]; |  | ||||||
|       DselfInternal(Stencil, *Aself, in, out, dag); |  | ||||||
|     } |  | ||||||
|     assert(Aself != nullptr); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void DselfInternal(CartesianStencil<siteVector,siteVector,int> &st, CoarseMatrix &a, |  | ||||||
|                        const CoarseVector &in, CoarseVector &out, int dag) { |  | ||||||
|     int point = geom.npoint-1; |  | ||||||
|     autoView( out_v, out, AcceleratorWrite); |  | ||||||
|     autoView( in_v,  in,  AcceleratorRead); |  | ||||||
|     autoView( st_v,  st,  AcceleratorRead); |  | ||||||
|     autoView( a_v,   a,   AcceleratorRead); |  | ||||||
|  |  | ||||||
|     const int Nsimd = CComplex::Nsimd(); |  | ||||||
|     typedef decltype(coalescedRead(in_v[0])) calcVector; |  | ||||||
|     typedef decltype(coalescedRead(in_v[0](0))) calcComplex; |  | ||||||
|  |  | ||||||
|     RealD* dag_factor_p = &dag_factor[0]; |  | ||||||
|  |  | ||||||
|     if(dag) { |  | ||||||
|       accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, { |  | ||||||
|         int ss = sss/nbasis; |  | ||||||
|         int b  = sss%nbasis; |  | ||||||
|         calcComplex res = Zero(); |  | ||||||
|         calcVector nbr; |  | ||||||
|         int ptype; |  | ||||||
|         StencilEntry *SE; |  | ||||||
|  |  | ||||||
|         SE=st_v.GetEntry(ptype,point,ss); |  | ||||||
|  |  | ||||||
|         if(SE->_is_local) { |  | ||||||
|           nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); |  | ||||||
|         } else { |  | ||||||
|           nbr = coalescedRead(st_v.CommBuf()[SE->_offset]); |  | ||||||
|         } |  | ||||||
|         acceleratorSynchronise(); |  | ||||||
|  |  | ||||||
|         for(int bb=0;bb<nbasis;bb++) { |  | ||||||
|           res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(a_v[ss](b,bb))*nbr(bb); |  | ||||||
|         } |  | ||||||
|         coalescedWrite(out_v[ss](b),res); |  | ||||||
|       }); |  | ||||||
|     } else { |  | ||||||
|       accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, { |  | ||||||
|         int ss = sss/nbasis; |  | ||||||
|         int b  = sss%nbasis; |  | ||||||
|         calcComplex res = Zero(); |  | ||||||
|         calcVector nbr; |  | ||||||
|         int ptype; |  | ||||||
|         StencilEntry *SE; |  | ||||||
|  |  | ||||||
|         SE=st_v.GetEntry(ptype,point,ss); |  | ||||||
|  |  | ||||||
|         if(SE->_is_local) { |  | ||||||
|           nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); |  | ||||||
|         } else { |  | ||||||
|           nbr = coalescedRead(st_v.CommBuf()[SE->_offset]); |  | ||||||
|         } |  | ||||||
|         acceleratorSynchronise(); |  | ||||||
|  |  | ||||||
|         for(int bb=0;bb<nbasis;bb++) { |  | ||||||
|           res = res + coalescedRead(a_v[ss](b,bb))*nbr(bb); |  | ||||||
|         } |  | ||||||
|         coalescedWrite(out_v[ss](b),res); |  | ||||||
|       }); |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void DhopInternal(CartesianStencil<siteVector,siteVector,int> &st, std::vector<CoarseMatrix> &a, |  | ||||||
|                     const CoarseVector &in, CoarseVector &out, int dag) { |  | ||||||
|     SimpleCompressor<siteVector> compressor; |  | ||||||
|  |  | ||||||
|     st.HaloExchange(in,compressor); |  | ||||||
|     autoView( in_v,  in,  AcceleratorRead); |  | ||||||
|     autoView( out_v, out, AcceleratorWrite); |  | ||||||
|     autoView( st_v , st,  AcceleratorRead); |  | ||||||
|     typedef LatticeView<Cobj> Aview; |  | ||||||
|  |  | ||||||
|     // determine in what order we need the points |  | ||||||
|     int npoint = geom.npoint-1; |  | ||||||
|     Vector<int> points(npoint, 0); |  | ||||||
|     for(int p=0; p<npoint; p++) |  | ||||||
|       points[p] = (dag && !hermitian) ? geom.points_dagger[p] : p; |  | ||||||
|  |  | ||||||
|     Vector<Aview> AcceleratorViewContainer; |  | ||||||
|     for(int p=0;p<npoint;p++) AcceleratorViewContainer.push_back(a[p].View(AcceleratorRead)); |  | ||||||
|     Aview *Aview_p = & AcceleratorViewContainer[0]; |  | ||||||
|  |  | ||||||
|     const int Nsimd = CComplex::Nsimd(); |  | ||||||
|     typedef decltype(coalescedRead(in_v[0])) calcVector; |  | ||||||
|     typedef decltype(coalescedRead(in_v[0](0))) calcComplex; |  | ||||||
|  |  | ||||||
|     RealD* dag_factor_p = &dag_factor[0]; |  | ||||||
|  |  | ||||||
|     if(dag) { |  | ||||||
|       accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, { |  | ||||||
|         int ss = sss/nbasis; |  | ||||||
|         int b  = sss%nbasis; |  | ||||||
|         calcComplex res = Zero(); |  | ||||||
|         calcVector nbr; |  | ||||||
|         int ptype; |  | ||||||
|         StencilEntry *SE; |  | ||||||
|  |  | ||||||
|         for(int p=0;p<npoint;p++){ |  | ||||||
|           int point = points[p]; |  | ||||||
|           SE=st_v.GetEntry(ptype,point,ss); |  | ||||||
|  |  | ||||||
|           if(SE->_is_local) { |  | ||||||
|             nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); |  | ||||||
|           } else { |  | ||||||
|             nbr = coalescedRead(st_v.CommBuf()[SE->_offset]); |  | ||||||
|           } |  | ||||||
|           acceleratorSynchronise(); |  | ||||||
|  |  | ||||||
|           for(int bb=0;bb<nbasis;bb++) { |  | ||||||
|             res = res + dag_factor_p[b*nbasis+bb]*coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb); |  | ||||||
|           } |  | ||||||
|         } |  | ||||||
|         coalescedWrite(out_v[ss](b),res); |  | ||||||
|       }); |  | ||||||
|     } else { |  | ||||||
|       accelerator_for(sss, in.Grid()->oSites()*nbasis, Nsimd, { |  | ||||||
|         int ss = sss/nbasis; |  | ||||||
|         int b  = sss%nbasis; |  | ||||||
|         calcComplex res = Zero(); |  | ||||||
|         calcVector nbr; |  | ||||||
|         int ptype; |  | ||||||
|         StencilEntry *SE; |  | ||||||
|  |  | ||||||
|         for(int p=0;p<npoint;p++){ |  | ||||||
|           int point = points[p]; |  | ||||||
|           SE=st_v.GetEntry(ptype,point,ss); |  | ||||||
|  |  | ||||||
|           if(SE->_is_local) { |  | ||||||
|             nbr = coalescedReadPermute(in_v[SE->_offset],ptype,SE->_permute); |  | ||||||
|           } else { |  | ||||||
|             nbr = coalescedRead(st_v.CommBuf()[SE->_offset]); |  | ||||||
|           } |  | ||||||
|           acceleratorSynchronise(); |  | ||||||
|  |  | ||||||
|           for(int bb=0;bb<nbasis;bb++) { |  | ||||||
|             res = res + coalescedRead(Aview_p[point][ss](b,bb))*nbr(bb); |  | ||||||
|           } |  | ||||||
|         } |  | ||||||
|         coalescedWrite(out_v[ss](b),res); |  | ||||||
|       }); |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     for(int p=0;p<npoint;p++) AcceleratorViewContainer[p].ViewClose(); |  | ||||||
|   } |  | ||||||
|    |    | ||||||
|   CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) 	: |  CoarsenedMatrix(GridCartesian &CoarseGrid, int hermitian_=0) 	:  | ||||||
|  |  | ||||||
|     _grid(&CoarseGrid), |     _grid(&CoarseGrid), | ||||||
|     _cbgrid(new GridRedBlackCartesian(&CoarseGrid)), |  | ||||||
|     geom(CoarseGrid._ndimension), |     geom(CoarseGrid._ndimension), | ||||||
|     hermitian(hermitian_), |     hermitian(hermitian_), | ||||||
|     Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0), |     Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0), | ||||||
|     StencilEven(_cbgrid,geom.npoint,Even,geom.directions,geom.displacements,0), |       A(geom.npoint,&CoarseGrid) | ||||||
|     StencilOdd(_cbgrid,geom.npoint,Odd,geom.directions,geom.displacements,0), |  | ||||||
|     A(geom.npoint,&CoarseGrid), |  | ||||||
|     Aeven(geom.npoint,_cbgrid), |  | ||||||
|     Aodd(geom.npoint,_cbgrid), |  | ||||||
|     AselfInv(&CoarseGrid), |  | ||||||
|     AselfInvEven(_cbgrid), |  | ||||||
|     AselfInvOdd(_cbgrid), |  | ||||||
|     dag_factor(nbasis*nbasis) |  | ||||||
|   { |   { | ||||||
|     fillFactor(); |  | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
|   CoarsenedMatrix(GridCartesian &CoarseGrid, GridRedBlackCartesian &CoarseRBGrid, int hermitian_=0) 	: |  | ||||||
|  |  | ||||||
|     _grid(&CoarseGrid), |  | ||||||
|     _cbgrid(&CoarseRBGrid), |  | ||||||
|     geom(CoarseGrid._ndimension), |  | ||||||
|     hermitian(hermitian_), |  | ||||||
|     Stencil(&CoarseGrid,geom.npoint,Even,geom.directions,geom.displacements,0), |  | ||||||
|     StencilEven(&CoarseRBGrid,geom.npoint,Even,geom.directions,geom.displacements,0), |  | ||||||
|     StencilOdd(&CoarseRBGrid,geom.npoint,Odd,geom.directions,geom.displacements,0), |  | ||||||
|     A(geom.npoint,&CoarseGrid), |  | ||||||
|     Aeven(geom.npoint,&CoarseRBGrid), |  | ||||||
|     Aodd(geom.npoint,&CoarseRBGrid), |  | ||||||
|     AselfInv(&CoarseGrid), |  | ||||||
|     AselfInvEven(&CoarseRBGrid), |  | ||||||
|     AselfInvOdd(&CoarseRBGrid), |  | ||||||
|     dag_factor(nbasis*nbasis) |  | ||||||
|   { |  | ||||||
|     fillFactor(); |  | ||||||
|   }; |  | ||||||
|  |  | ||||||
|   void fillFactor() { |  | ||||||
|     Eigen::MatrixXd dag_factor_eigen = Eigen::MatrixXd::Ones(nbasis, nbasis); |  | ||||||
|     if(!hermitian) { |  | ||||||
|       const int nb = nbasis/2; |  | ||||||
|       dag_factor_eigen.block(0,nb,nb,nb) *= -1.0; |  | ||||||
|       dag_factor_eigen.block(nb,0,nb,nb) *= -1.0; |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     // GPU readable prefactor |  | ||||||
|     thread_for(i, nbasis*nbasis, { |  | ||||||
|       int j = i/nbasis; |  | ||||||
|       int k = i%nbasis; |  | ||||||
|       dag_factor[i] = dag_factor_eigen(j, k); |  | ||||||
|     }); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop, |   void CoarsenOperator(GridBase *FineGrid,LinearOperatorBase<Lattice<Fobj> > &linop, | ||||||
| 		       Aggregation<Fobj,CComplex,nbasis> & Subspace) | 		       Aggregation<Fobj,CComplex,nbasis> & Subspace) | ||||||
|   { |   { | ||||||
|     typedef Lattice<typename Fobj::tensor_reduced> FineComplexField; |     typedef Lattice<typename Fobj::tensor_reduced> FineComplexField; | ||||||
|     typedef typename Fobj::scalar_type scalar_type; |     typedef typename Fobj::scalar_type scalar_type; | ||||||
|  |  | ||||||
|     std::cout << GridLogMessage<< "CoarsenMatrix "<< std::endl; |  | ||||||
|  |  | ||||||
|     FineComplexField one(FineGrid); one=scalar_type(1.0,0.0); |     FineComplexField one(FineGrid); one=scalar_type(1.0,0.0); | ||||||
|     FineComplexField zero(FineGrid); zero=scalar_type(0.0,0.0); |     FineComplexField zero(FineGrid); zero=scalar_type(0.0,0.0); | ||||||
|  |  | ||||||
| @@ -868,13 +496,11 @@ public: | |||||||
|  |  | ||||||
|     CoarseScalar InnerProd(Grid());  |     CoarseScalar InnerProd(Grid());  | ||||||
|  |  | ||||||
|     std::cout << GridLogMessage<< "CoarsenMatrix Orthog "<< std::endl; |  | ||||||
|     // Orthogonalise the subblocks over the basis |     // Orthogonalise the subblocks over the basis | ||||||
|     blockOrthogonalise(InnerProd,Subspace.subspace); |     blockOrthogonalise(InnerProd,Subspace.subspace); | ||||||
|  |  | ||||||
|     // Compute the matrix elements of linop between this orthonormal |     // Compute the matrix elements of linop between this orthonormal | ||||||
|     // set of vectors. |     // set of vectors. | ||||||
|     std::cout << GridLogMessage<< "CoarsenMatrix masks "<< std::endl; |  | ||||||
|     int self_stencil=-1; |     int self_stencil=-1; | ||||||
|     for(int p=0;p<geom.npoint;p++) |     for(int p=0;p<geom.npoint;p++) | ||||||
|     {  |     {  | ||||||
| @@ -913,7 +539,7 @@ public: | |||||||
|  |  | ||||||
|       phi=Subspace.subspace[i]; |       phi=Subspace.subspace[i]; | ||||||
|  |  | ||||||
|       std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl; |       //      std::cout << GridLogMessage<< "CoarsenMatrix vector "<<i << std::endl; | ||||||
|       linop.OpDirAll(phi,Mphi_p); |       linop.OpDirAll(phi,Mphi_p); | ||||||
|       linop.OpDiag  (phi,Mphi_p[geom.npoint-1]); |       linop.OpDiag  (phi,Mphi_p[geom.npoint-1]); | ||||||
|  |  | ||||||
| @@ -942,18 +568,6 @@ public: | |||||||
| 	    autoView( A_self  , A[self_stencil], AcceleratorWrite); | 	    autoView( A_self  , A[self_stencil], AcceleratorWrite); | ||||||
|  |  | ||||||
| 	    accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_p[ss](j,i),oZProj_v(ss)); }); | 	    accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_p[ss](j,i),oZProj_v(ss)); }); | ||||||
| 	    if ( hermitian && (disp==-1) ) { |  | ||||||
| 	      for(int pp=0;pp<geom.npoint;pp++){// Find the opposite link and set <j|A|i> = <i|A|j>* |  | ||||||
| 		int dirp   = geom.directions[pp]; |  | ||||||
| 		int dispp  = geom.displacements[pp]; |  | ||||||
| 		if ( (dirp==dir) && (dispp==1) ){ |  | ||||||
| 		  auto sft = conjugate(Cshift(oZProj,dir,1)); |  | ||||||
| 		  autoView( sft_v    ,  sft  , AcceleratorWrite); |  | ||||||
| 		  autoView( A_pp     ,  A[pp], AcceleratorWrite); |  | ||||||
| 		  accelerator_for(ss, Grid()->oSites(), Fobj::Nsimd(),{ coalescedWrite(A_pp[ss](i,j),sft_v(ss)); }); |  | ||||||
| 		} |  | ||||||
| 	      } |  | ||||||
| 	    } |  | ||||||
|  |  | ||||||
| 	  } | 	  } | ||||||
| 	} | 	} | ||||||
| @@ -992,54 +606,28 @@ public: | |||||||
|     } |     } | ||||||
|     if(hermitian) { |     if(hermitian) { | ||||||
|       std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl; |       std::cout << GridLogMessage << " ForceHermitian, new code "<<std::endl; | ||||||
|  |       ForceHermitian(); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     InvertSelfStencilLink(); std::cout << GridLogMessage << "Coarse self link inverted" << std::endl; |  | ||||||
|     FillHalfCbs(); std::cout << GridLogMessage << "Coarse half checkerboards filled" << std::endl; |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   void InvertSelfStencilLink() { |   void ForceHermitian(void) { | ||||||
|     std::cout << GridLogDebug << "CoarsenedMatrix::InvertSelfStencilLink" << std::endl; |     CoarseMatrix Diff  (Grid()); | ||||||
|     int localVolume = Grid()->lSites(); |     for(int p=0;p<geom.npoint;p++){ | ||||||
|  |       int dir   = geom.directions[p]; | ||||||
|     typedef typename Cobj::scalar_object scalar_object; |       int disp  = geom.displacements[p]; | ||||||
|  |       if(disp==-1) { | ||||||
|     autoView(Aself_v,    A[geom.npoint-1], CpuRead); | 	// Find the opposite link | ||||||
|     autoView(AselfInv_v, AselfInv,         CpuWrite); | 	for(int pp=0;pp<geom.npoint;pp++){ | ||||||
|     thread_for(site, localVolume, { // NOTE: Not able to bring this to GPU because of Eigen + peek/poke | 	  int dirp   = geom.directions[pp]; | ||||||
|       Eigen::MatrixXcd selfLinkEigen    = Eigen::MatrixXcd::Zero(nbasis, nbasis); | 	  int dispp  = geom.displacements[pp]; | ||||||
|       Eigen::MatrixXcd selfLinkInvEigen = Eigen::MatrixXcd::Zero(nbasis, nbasis); | 	  if ( (dirp==dir) && (dispp==1) ){ | ||||||
|  | 	    //	    Diff = adj(Cshift(A[p],dir,1)) - A[pp];  | ||||||
|       scalar_object selfLink    = Zero(); | 	    //	    std::cout << GridLogMessage<<" Replacing stencil leg "<<pp<<" with leg "<<p<< " diff "<<norm2(Diff) <<std::endl; | ||||||
|       scalar_object selfLinkInv = Zero(); | 	    A[pp] = adj(Cshift(A[p],dir,1)); | ||||||
|  | 	  } | ||||||
|       Coordinate lcoor; | 	} | ||||||
|  |       } | ||||||
|       Grid()->LocalIndexToLocalCoor(site, lcoor); |  | ||||||
|       peekLocalSite(selfLink, Aself_v, lcoor); |  | ||||||
|  |  | ||||||
|       for (int i = 0; i < nbasis; ++i) |  | ||||||
|         for (int j = 0; j < nbasis; ++j) |  | ||||||
|           selfLinkEigen(i, j) = static_cast<ComplexD>(TensorRemove(selfLink(i, j))); |  | ||||||
|  |  | ||||||
|       selfLinkInvEigen = selfLinkEigen.inverse(); |  | ||||||
|  |  | ||||||
|       for(int i = 0; i < nbasis; ++i) |  | ||||||
|         for(int j = 0; j < nbasis; ++j) |  | ||||||
|           selfLinkInv(i, j) = selfLinkInvEigen(i, j); |  | ||||||
|  |  | ||||||
|       pokeLocalSite(selfLinkInv, AselfInv_v, lcoor); |  | ||||||
|     }); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   void FillHalfCbs() { |  | ||||||
|     std::cout << GridLogDebug << "CoarsenedMatrix::FillHalfCbs" << std::endl; |  | ||||||
|     for(int p = 0; p < geom.npoint; ++p) { |  | ||||||
|       pickCheckerboard(Even, Aeven[p], A[p]); |  | ||||||
|       pickCheckerboard(Odd, Aodd[p], A[p]); |  | ||||||
|     } |     } | ||||||
|     pickCheckerboard(Even, AselfInvEven, AselfInv); |  | ||||||
|     pickCheckerboard(Odd, AselfInvOdd, AselfInv); |  | ||||||
|   } |   } | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -173,8 +173,7 @@ template<class T> using cshiftAllocator = devAllocator<T>; | |||||||
| template<class T> using cshiftAllocator = std::allocator<T>; | template<class T> using cshiftAllocator = std::allocator<T>; | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
| template<class T> using Vector        = std::vector<T,uvmAllocator<T> >;            | template<class T> using Vector     = std::vector<T,uvmAllocator<T> >;            | ||||||
| template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;            |  | ||||||
| template<class T> using commVector = std::vector<T,devAllocator<T> >; | template<class T> using commVector = std::vector<T,devAllocator<T> >; | ||||||
| template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >; | template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -34,6 +34,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|  |  | ||||||
| // Move control to configure.ac and Config.h? | // Move control to configure.ac and Config.h? | ||||||
|  |  | ||||||
|  | #define ALLOCATION_CACHE | ||||||
|  | #define GRID_ALLOC_ALIGN (2*1024*1024) | ||||||
| #define GRID_ALLOC_SMALL_LIMIT (4096) | #define GRID_ALLOC_SMALL_LIMIT (4096) | ||||||
|  |  | ||||||
| /*Pinning pages is costly*/ | /*Pinning pages is costly*/ | ||||||
|   | |||||||
| @@ -1,12 +1,11 @@ | |||||||
| #include <Grid/GridCore.h> | #include <Grid/GridCore.h> | ||||||
|  |  | ||||||
| #ifndef GRID_UVM | #ifndef GRID_UVM | ||||||
|  |  | ||||||
| #warning "Using explicit device memory copies" | #warning "Using explicit device memory copies" | ||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
| //define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout); |  | ||||||
| #define dprintf(...) | #define dprintf(...) | ||||||
|  |  | ||||||
|  |  | ||||||
| //////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////// | ||||||
| // For caching copies of data on device | // For caching copies of data on device | ||||||
| //////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////// | ||||||
| @@ -104,7 +103,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache) | |||||||
|   /////////////////////////////////////////////////////////// |   /////////////////////////////////////////////////////////// | ||||||
|   assert(AccCache.state!=Empty); |   assert(AccCache.state!=Empty); | ||||||
|    |    | ||||||
|    dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  |   //  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  | ||||||
|   assert(AccCache.accLock==0); |   assert(AccCache.accLock==0); | ||||||
|   assert(AccCache.cpuLock==0); |   assert(AccCache.cpuLock==0); | ||||||
|   assert(AccCache.CpuPtr!=(uint64_t)NULL); |   assert(AccCache.CpuPtr!=(uint64_t)NULL); | ||||||
| @@ -112,7 +111,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache) | |||||||
|     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes); |     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes); | ||||||
|     DeviceBytes   -=AccCache.bytes; |     DeviceBytes   -=AccCache.bytes; | ||||||
|     LRUremove(AccCache); |     LRUremove(AccCache); | ||||||
|     dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);   |     //    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);   | ||||||
|   } |   } | ||||||
|   uint64_t CpuPtr = AccCache.CpuPtr; |   uint64_t CpuPtr = AccCache.CpuPtr; | ||||||
|   EntryErase(CpuPtr); |   EntryErase(CpuPtr); | ||||||
| @@ -126,7 +125,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache) | |||||||
|   /////////////////////////////////////////////////////////////////////////// |   /////////////////////////////////////////////////////////////////////////// | ||||||
|   assert(AccCache.state!=Empty); |   assert(AccCache.state!=Empty); | ||||||
|    |    | ||||||
|   dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  |   //  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);  | ||||||
|   assert(AccCache.accLock==0); |   assert(AccCache.accLock==0); | ||||||
|   assert(AccCache.cpuLock==0); |   assert(AccCache.cpuLock==0); | ||||||
|   if(AccCache.state==AccDirty) { |   if(AccCache.state==AccDirty) { | ||||||
| @@ -137,7 +136,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache) | |||||||
|     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes); |     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes); | ||||||
|     DeviceBytes   -=AccCache.bytes; |     DeviceBytes   -=AccCache.bytes; | ||||||
|     LRUremove(AccCache); |     LRUremove(AccCache); | ||||||
|     dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);   |     //    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);   | ||||||
|   } |   } | ||||||
|   uint64_t CpuPtr = AccCache.CpuPtr; |   uint64_t CpuPtr = AccCache.CpuPtr; | ||||||
|   EntryErase(CpuPtr); |   EntryErase(CpuPtr); | ||||||
| @@ -150,7 +149,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache) | |||||||
|   assert(AccCache.AccPtr!=(uint64_t)NULL); |   assert(AccCache.AccPtr!=(uint64_t)NULL); | ||||||
|   assert(AccCache.CpuPtr!=(uint64_t)NULL); |   assert(AccCache.CpuPtr!=(uint64_t)NULL); | ||||||
|   acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes); |   acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes); | ||||||
|   dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); |   //  dprintf("MemoryManager: Flush  %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||||
|   DeviceToHostBytes+=AccCache.bytes; |   DeviceToHostBytes+=AccCache.bytes; | ||||||
|   DeviceToHostXfer++; |   DeviceToHostXfer++; | ||||||
|   AccCache.state=Consistent; |   AccCache.state=Consistent; | ||||||
| @@ -165,7 +164,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache) | |||||||
|     AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes); |     AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes); | ||||||
|     DeviceBytes+=AccCache.bytes; |     DeviceBytes+=AccCache.bytes; | ||||||
|   } |   } | ||||||
|   dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); |   //  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout); | ||||||
|   acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes); |   acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes); | ||||||
|   HostToDeviceBytes+=AccCache.bytes; |   HostToDeviceBytes+=AccCache.bytes; | ||||||
|   HostToDeviceXfer++; |   HostToDeviceXfer++; | ||||||
| @@ -228,24 +227,18 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod | |||||||
|   // Find if present, otherwise get or force an empty |   // Find if present, otherwise get or force an empty | ||||||
|   //////////////////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////////////////// | ||||||
|   if ( EntryPresent(CpuPtr)==0 ){ |   if ( EntryPresent(CpuPtr)==0 ){ | ||||||
|  |     EvictVictims(bytes); | ||||||
|     EntryCreate(CpuPtr,bytes,mode,hint); |     EntryCreate(CpuPtr,bytes,mode,hint); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   auto AccCacheIterator = EntryLookup(CpuPtr); |   auto AccCacheIterator = EntryLookup(CpuPtr); | ||||||
|   auto & AccCache = AccCacheIterator->second; |   auto & AccCache = AccCacheIterator->second; | ||||||
|   if (!AccCache.AccPtr) { |    | ||||||
|     EvictVictims(bytes);  |  | ||||||
|   }  |  | ||||||
|   assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)); |   assert((mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)); | ||||||
|  |  | ||||||
|   assert(AccCache.cpuLock==0);  // Programming error |   assert(AccCache.cpuLock==0);  // Programming error | ||||||
|  |  | ||||||
|   if(AccCache.state!=Empty) { |   if(AccCache.state!=Empty) { | ||||||
|     dprintf("ViewOpen found entry %llx %llx : %lld %lld\n", |  | ||||||
| 		    (uint64_t)AccCache.CpuPtr, |  | ||||||
| 		    (uint64_t)CpuPtr, |  | ||||||
| 		    (uint64_t)AccCache.bytes, |  | ||||||
| 		    (uint64_t)bytes); |  | ||||||
|     assert(AccCache.CpuPtr == CpuPtr); |     assert(AccCache.CpuPtr == CpuPtr); | ||||||
|     assert(AccCache.bytes  ==bytes); |     assert(AccCache.bytes  ==bytes); | ||||||
|   } |   } | ||||||
| @@ -292,21 +285,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod | |||||||
|       AccCache.state  = Consistent; // CpuDirty + AccRead => Consistent |       AccCache.state  = Consistent; // CpuDirty + AccRead => Consistent | ||||||
|     } |     } | ||||||
|     AccCache.accLock++; |     AccCache.accLock++; | ||||||
|     dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock); |     //    printf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock); | ||||||
|   } else if(AccCache.state==Consistent) { |   } else if(AccCache.state==Consistent) { | ||||||
|     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)) |     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)) | ||||||
|       AccCache.state  = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty |       AccCache.state  = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty | ||||||
|     else |     else | ||||||
|       AccCache.state  = Consistent; // Consistent + AccRead => Consistent |       AccCache.state  = Consistent; // Consistent + AccRead => Consistent | ||||||
|     AccCache.accLock++; |     AccCache.accLock++; | ||||||
|     dprintf("Consistent entry into device accLock %d\n",AccCache.accLock); |     //    printf("Consistent entry into device accLock %d\n",AccCache.accLock); | ||||||
|   } else if(AccCache.state==AccDirty) { |   } else if(AccCache.state==AccDirty) { | ||||||
|     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)) |     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard)) | ||||||
|       AccCache.state  = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty |       AccCache.state  = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty | ||||||
|     else |     else | ||||||
|       AccCache.state  = AccDirty; // AccDirty + AccRead => AccDirty |       AccCache.state  = AccDirty; // AccDirty + AccRead => AccDirty | ||||||
|     AccCache.accLock++; |     AccCache.accLock++; | ||||||
|     dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock); |     //    printf("AccDirty entry into device accLock %d\n",AccCache.accLock); | ||||||
|   } else { |   } else { | ||||||
|     assert(0); |     assert(0); | ||||||
|   } |   } | ||||||
| @@ -368,16 +361,13 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V | |||||||
|   // Find if present, otherwise get or force an empty |   // Find if present, otherwise get or force an empty | ||||||
|   //////////////////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////////////////// | ||||||
|   if ( EntryPresent(CpuPtr)==0 ){ |   if ( EntryPresent(CpuPtr)==0 ){ | ||||||
|  |     EvictVictims(bytes); | ||||||
|     EntryCreate(CpuPtr,bytes,mode,transient); |     EntryCreate(CpuPtr,bytes,mode,transient); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   auto AccCacheIterator = EntryLookup(CpuPtr); |   auto AccCacheIterator = EntryLookup(CpuPtr); | ||||||
|   auto & AccCache = AccCacheIterator->second; |   auto & AccCache = AccCacheIterator->second; | ||||||
|  |    | ||||||
|   if (!AccCache.AccPtr) { |  | ||||||
|      EvictVictims(bytes); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   assert((mode==CpuRead)||(mode==CpuWrite)); |   assert((mode==CpuRead)||(mode==CpuWrite)); | ||||||
|   assert(AccCache.accLock==0);  // Programming error |   assert(AccCache.accLock==0);  // Programming error | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,6 +1,7 @@ | |||||||
| #include <Grid/GridCore.h> | #include <Grid/GridCore.h> | ||||||
| #ifdef GRID_UVM | #ifdef GRID_UVM | ||||||
|  |  | ||||||
|  | #warning "Grid is assuming unified virtual memory address space" | ||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
| ///////////////////////////////////////////////////////////////////////////////// | ///////////////////////////////////////////////////////////////////////////////// | ||||||
| // View management is 1:1 address space mapping | // View management is 1:1 address space mapping | ||||||
|   | |||||||
| @@ -36,7 +36,7 @@ static const int CbBlack=1; | |||||||
| static const int Even   =CbRed; | static const int Even   =CbRed; | ||||||
| static const int Odd    =CbBlack; | static const int Odd    =CbBlack; | ||||||
|  |  | ||||||
| accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex,const Coordinate &rdim,const Coordinate &chk_dim_msk) | accelerator_inline int RedBlackCheckerBoardFromOindex (int oindex, Coordinate &rdim, Coordinate &chk_dim_msk) | ||||||
| { | { | ||||||
|   int nd=rdim.size(); |   int nd=rdim.size(); | ||||||
|   Coordinate coor(nd); |   Coordinate coor(nd); | ||||||
|   | |||||||
| @@ -1,3 +1,4 @@ | |||||||
|  |  | ||||||
| /************************************************************************************* | /************************************************************************************* | ||||||
|  |  | ||||||
|     Grid physics library, www.github.com/paboyle/Grid  |     Grid physics library, www.github.com/paboyle/Grid  | ||||||
| @@ -107,8 +108,6 @@ public: | |||||||
|   //////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////// | ||||||
|   // Reduction |   // Reduction | ||||||
|   //////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////// | ||||||
|   void GlobalMax(RealD &); |  | ||||||
|   void GlobalMax(RealF &); |  | ||||||
|   void GlobalSum(RealF &); |   void GlobalSum(RealF &); | ||||||
|   void GlobalSumVector(RealF *,int N); |   void GlobalSumVector(RealF *,int N); | ||||||
|   void GlobalSum(RealD &); |   void GlobalSum(RealD &); | ||||||
|   | |||||||
| @@ -275,16 +275,6 @@ void CartesianCommunicator::GlobalXOR(uint64_t &u){ | |||||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); |   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); | ||||||
|   assert(ierr==0); |   assert(ierr==0); | ||||||
| } | } | ||||||
| void CartesianCommunicator::GlobalMax(float &f) |  | ||||||
| { |  | ||||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_MAX,communicator); |  | ||||||
|   assert(ierr==0); |  | ||||||
| } |  | ||||||
| void CartesianCommunicator::GlobalMax(double &d) |  | ||||||
| { |  | ||||||
|   int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_MAX,communicator); |  | ||||||
|   assert(ierr==0); |  | ||||||
| } |  | ||||||
| void CartesianCommunicator::GlobalSum(float &f){ | void CartesianCommunicator::GlobalSum(float &f){ | ||||||
|   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); |   int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); | ||||||
|   assert(ierr==0); |   assert(ierr==0); | ||||||
|   | |||||||
| @@ -67,8 +67,6 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors) | |||||||
|  |  | ||||||
| CartesianCommunicator::~CartesianCommunicator(){} | CartesianCommunicator::~CartesianCommunicator(){} | ||||||
|  |  | ||||||
| void CartesianCommunicator::GlobalMax(float &){} |  | ||||||
| void CartesianCommunicator::GlobalMax(double &){} |  | ||||||
| void CartesianCommunicator::GlobalSum(float &){} | void CartesianCommunicator::GlobalSum(float &){} | ||||||
| void CartesianCommunicator::GlobalSumVector(float *,int N){} | void CartesianCommunicator::GlobalSumVector(float *,int N){} | ||||||
| void CartesianCommunicator::GlobalSum(double &){} | void CartesianCommunicator::GlobalSum(double &){} | ||||||
|   | |||||||
| @@ -102,7 +102,7 @@ public: | |||||||
|   /////////////////////////////////////////////////// |   /////////////////////////////////////////////////// | ||||||
|   static void SharedMemoryAllocate(uint64_t bytes, int flags); |   static void SharedMemoryAllocate(uint64_t bytes, int flags); | ||||||
|   static void SharedMemoryFree(void); |   static void SharedMemoryFree(void); | ||||||
|   static void SharedMemoryCopy(void *dest,void *src,size_t bytes); |   static void SharedMemoryCopy(void *dest,const void *src,size_t bytes); | ||||||
|   static void SharedMemoryZero(void *dest,size_t bytes); |   static void SharedMemoryZero(void *dest,size_t bytes); | ||||||
|  |  | ||||||
| }; | }; | ||||||
|   | |||||||
| @@ -715,7 +715,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes) | |||||||
|   bzero(dest,bytes); |   bzero(dest,bytes); | ||||||
| #endif | #endif | ||||||
| } | } | ||||||
| void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes) | void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes) | ||||||
| { | { | ||||||
| #ifdef GRID_CUDA | #ifdef GRID_CUDA | ||||||
|   cudaMemcpy(dest,src,bytes,cudaMemcpyDefault); |   cudaMemcpy(dest,src,bytes,cudaMemcpyDefault); | ||||||
|   | |||||||
| @@ -29,7 +29,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | |||||||
| #include <Grid/GridCore.h> | #include <Grid/GridCore.h> | ||||||
|  |  | ||||||
| NAMESPACE_BEGIN(Grid);  | NAMESPACE_BEGIN(Grid);  | ||||||
| #define header "SharedMemoryNone: " |  | ||||||
|  |  | ||||||
| /*Construct from an MPI communicator*/ | /*Construct from an MPI communicator*/ | ||||||
| void GlobalSharedMemory::Init(Grid_MPI_Comm comm) | void GlobalSharedMemory::Init(Grid_MPI_Comm comm) | ||||||
| @@ -56,38 +55,6 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M | |||||||
| //////////////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////////////// | ||||||
| // Hugetlbfs mapping intended, use anonymous mmap | // Hugetlbfs mapping intended, use anonymous mmap | ||||||
| //////////////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////////////// | ||||||
| #if 1 |  | ||||||
| void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) |  | ||||||
| { |  | ||||||
|   std::cout << header "SharedMemoryAllocate "<< bytes<< " GPU implementation "<<std::endl; |  | ||||||
|   void * ShmCommBuf ;  |  | ||||||
|   assert(_ShmSetup==1); |  | ||||||
|   assert(_ShmAlloc==0); |  | ||||||
|  |  | ||||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// |  | ||||||
|   // Each MPI rank should allocate our own buffer |  | ||||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// |  | ||||||
|   ShmCommBuf = acceleratorAllocDevice(bytes); |  | ||||||
|  |  | ||||||
|   if (ShmCommBuf == (void *)NULL ) { |  | ||||||
|     std::cerr << " SharedMemoryNone.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl; |  | ||||||
|     exit(EXIT_FAILURE);   |  | ||||||
|   } |  | ||||||
|   if ( WorldRank == 0 ){ |  | ||||||
|     std::cout << WorldRank << header " SharedMemoryNone.cc acceleratorAllocDevice "<< bytes  |  | ||||||
| 	      << "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl; |  | ||||||
|   } |  | ||||||
|   SharedMemoryZero(ShmCommBuf,bytes); |  | ||||||
|  |  | ||||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// |  | ||||||
|   // Loop over ranks/gpu's on our node |  | ||||||
|   /////////////////////////////////////////////////////////////////////////////////////////////////////////// |  | ||||||
|   WorldShmCommBufs[0] = ShmCommBuf; |  | ||||||
|  |  | ||||||
|   _ShmAllocBytes=bytes; |  | ||||||
|   _ShmAlloc=1; |  | ||||||
| } |  | ||||||
| #else |  | ||||||
| void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | ||||||
| { | { | ||||||
|   void * ShmCommBuf ;  |   void * ShmCommBuf ;  | ||||||
| @@ -116,15 +83,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) | |||||||
|   _ShmAllocBytes=bytes; |   _ShmAllocBytes=bytes; | ||||||
|   _ShmAlloc=1; |   _ShmAlloc=1; | ||||||
| }; | }; | ||||||
| #endif |  | ||||||
| void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes) |  | ||||||
| { |  | ||||||
|   acceleratorMemSet(dest,0,bytes); |  | ||||||
| } |  | ||||||
| void GlobalSharedMemory::SharedMemoryCopy(void *dest,void *src,size_t bytes) |  | ||||||
| { |  | ||||||
|   acceleratorCopyToDevice(src,dest,bytes); |  | ||||||
| } |  | ||||||
| //////////////////////////////////////////////////////// | //////////////////////////////////////////////////////// | ||||||
| // Global shared functionality finished | // Global shared functionality finished | ||||||
| // Now move to per communicator functionality | // Now move to per communicator functionality | ||||||
|   | |||||||
| @@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm) | |||||||
|     basis_v.push_back(basis[k].View(AcceleratorWrite)); |     basis_v.push_back(basis[k].View(AcceleratorWrite)); | ||||||
|   } |   } | ||||||
|  |  | ||||||
| #if ( (!defined(GRID_CUDA)) ) | #if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) ) | ||||||
|   int max_threads = thread_max(); |   int max_threads = thread_max(); | ||||||
|   Vector < vobj > Bt(Nm * max_threads); |   Vector < vobj > Bt(Nm * max_threads); | ||||||
|   thread_region |   thread_region | ||||||
| @@ -164,8 +164,7 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in | |||||||
|   auto basis_vp=& basis_v[0]; |   auto basis_vp=& basis_v[0]; | ||||||
|   autoView(result_v,result,AcceleratorWrite); |   autoView(result_v,result,AcceleratorWrite); | ||||||
|   accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{ |   accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{ | ||||||
|     vobj zzz=Zero(); |     auto B=coalescedRead(zz); | ||||||
|     auto B=coalescedRead(zzz); |  | ||||||
|     for(int k=k0; k<k1; ++k){ |     for(int k=k0; k<k1; ++k){ | ||||||
|       B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]); |       B +=Qt_j[k] * coalescedRead(basis_vp[k][ss]); | ||||||
|     } |     } | ||||||
|   | |||||||
| @@ -96,34 +96,8 @@ inline typename vobj::scalar_objectD sumD_cpu(const vobj *arg, Integer osites) | |||||||
|   ssobj ret = ssum; |   ssobj ret = ssum; | ||||||
|   return ret; |   return ret; | ||||||
| } | } | ||||||
| /* |  | ||||||
| Threaded max, don't use for now |  | ||||||
| template<class Double> |  | ||||||
| inline Double max(const Double *arg, Integer osites) |  | ||||||
| { |  | ||||||
|   //  const int Nsimd = vobj::Nsimd(); |  | ||||||
|   const int nthread = GridThread::GetThreads(); |  | ||||||
|  |  | ||||||
|   std::vector<Double> maxarray(nthread); |  | ||||||
|    |  | ||||||
|   thread_for(thr,nthread, { |  | ||||||
|     int nwork, mywork, myoff; |  | ||||||
|     nwork = osites; |  | ||||||
|     GridThread::GetWork(nwork,thr,mywork,myoff); |  | ||||||
|     Double max=arg[0]; |  | ||||||
|     for(int ss=myoff;ss<mywork+myoff; ss++){ |  | ||||||
|       if( arg[ss] > max ) max = arg[ss]; |  | ||||||
|     } |  | ||||||
|     maxarray[thr]=max; |  | ||||||
|   }); |  | ||||||
|    |  | ||||||
|   Double tmax=maxarray[0]; |  | ||||||
|   for(int i=0;i<nthread;i++){ |  | ||||||
|     if (maxarray[i]>tmax) tmax = maxarray[i]; |  | ||||||
|   }  |  | ||||||
|   return tmax; |  | ||||||
| } |  | ||||||
| */ |  | ||||||
| template<class vobj> | template<class vobj> | ||||||
| inline typename vobj::scalar_object sum(const vobj *arg, Integer osites) | inline typename vobj::scalar_object sum(const vobj *arg, Integer osites) | ||||||
| { | { | ||||||
| @@ -167,32 +141,6 @@ template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){ | |||||||
|   return real(nrm);  |   return real(nrm);  | ||||||
| } | } | ||||||
|  |  | ||||||
| //The global maximum of the site norm2 |  | ||||||
| template<class vobj> inline RealD maxLocalNorm2(const Lattice<vobj> &arg) |  | ||||||
| { |  | ||||||
|   typedef typename vobj::tensor_reduced vscalar;  //iScalar<iScalar<.... <vPODtype> > > |  | ||||||
|   typedef typename vscalar::scalar_object  scalar;   //iScalar<iScalar<.... <PODtype> > > |  | ||||||
|  |  | ||||||
|   Lattice<vscalar> inner = localNorm2(arg); |  | ||||||
|  |  | ||||||
|   auto grid = arg.Grid(); |  | ||||||
|  |  | ||||||
|   RealD max; |  | ||||||
|   for(int l=0;l<grid->lSites();l++){ |  | ||||||
|     Coordinate coor; |  | ||||||
|     scalar val; |  | ||||||
|     RealD r; |  | ||||||
|     grid->LocalIndexToLocalCoor(l,coor); |  | ||||||
|     peekLocalSite(val,inner,coor); |  | ||||||
|     r=real(TensorRemove(val)); |  | ||||||
|     if( (l==0) || (r>max)){ |  | ||||||
|       max=r; |  | ||||||
|     } |  | ||||||
|   } |  | ||||||
|   grid->GlobalMax(max); |  | ||||||
|   return max; |  | ||||||
| } |  | ||||||
|  |  | ||||||
| // Double inner product | // Double inner product | ||||||
| template<class vobj> | template<class vobj> | ||||||
| inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right) | inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right) | ||||||
|   | |||||||
| @@ -127,11 +127,6 @@ accelerator_inline void convertType(T1 & out, const iScalar<T2> & in) { | |||||||
|   convertType(out,in._internal); |   convertType(out,in._internal); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<typename T1, typename std::enable_if<!isGridScalar<T1>::value, T1>::type* = nullptr> |  | ||||||
| accelerator_inline void convertType(T1 & out, const iScalar<T1> & in) { |  | ||||||
|   convertType(out,in._internal); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| template<typename T1,typename T2> | template<typename T1,typename T2> | ||||||
| accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) { | accelerator_inline void convertType(iScalar<T1> & out, const T2 & in) { | ||||||
|   convertType(out._internal,in); |   convertType(out._internal,in); | ||||||
|   | |||||||
| @@ -67,13 +67,8 @@ public: | |||||||
|   accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; } |   accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; } | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
| #if 1 |   accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; }; | ||||||
|   //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; }; |   accelerator_inline vobj       & operator[](size_t i)       { return this->_odata[i]; }; | ||||||
|   accelerator_inline vobj       & operator[](size_t i) const { return this->_odata[i]; }; |  | ||||||
| #else |  | ||||||
|   //  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; }; |  | ||||||
|   //  accelerator_inline vobj       & operator[](size_t i)       { return this->_odata[i]; }; |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
|   accelerator_inline uint64_t begin(void) const { return 0;}; |   accelerator_inline uint64_t begin(void) const { return 0;}; | ||||||
|   accelerator_inline uint64_t end(void)   const { return this->_odata_size; }; |   accelerator_inline uint64_t end(void)   const { return this->_odata_size; }; | ||||||
|   | |||||||
| @@ -123,7 +123,7 @@ assert(GRID_FIELD_NORM_CALC(FieldNormMetaData_, n2ck) < 1.0e-5); | |||||||
|  //////////////////////////////////////////////////////////// |  //////////////////////////////////////////////////////////// | ||||||
|  // Helper to fill out metadata |  // Helper to fill out metadata | ||||||
|  //////////////////////////////////////////////////////////// |  //////////////////////////////////////////////////////////// | ||||||
| template<class vobj> void ScidacMetaData(Lattice<vobj> & field, |  template<class vobj> void ScidacMetaData(Lattice<vobj> & field, | ||||||
| 					  FieldMetaData &header, | 					  FieldMetaData &header, | ||||||
| 					  scidacRecord & _scidacRecord, | 					  scidacRecord & _scidacRecord, | ||||||
| 					  scidacFile   & _scidacFile)  | 					  scidacFile   & _scidacFile)  | ||||||
| @@ -619,12 +619,12 @@ class IldgWriter : public ScidacWriter { | |||||||
|   // Don't require scidac records EXCEPT checksum |   // Don't require scidac records EXCEPT checksum | ||||||
|   // Use Grid MetaData object if present. |   // Use Grid MetaData object if present. | ||||||
|   //////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////// | ||||||
|   template <class stats = PeriodicGaugeStatistics> |   template <class vsimd> | ||||||
|   void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,int sequence,std::string LFN,std::string description)  |   void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,int sequence,std::string LFN,std::string description)  | ||||||
|   { |   { | ||||||
|     GridBase * grid = Umu.Grid(); |     GridBase * grid = Umu.Grid(); | ||||||
|     typedef Lattice<vLorentzColourMatrixD> GaugeField; |     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||||
|     typedef vLorentzColourMatrixD vobj; |     typedef iLorentzColourMatrix<vsimd> vobj; | ||||||
|     typedef typename vobj::scalar_object sobj; |     typedef typename vobj::scalar_object sobj; | ||||||
|  |  | ||||||
|     //////////////////////////////////////// |     //////////////////////////////////////// | ||||||
| @@ -636,9 +636,6 @@ class IldgWriter : public ScidacWriter { | |||||||
|  |  | ||||||
|     ScidacMetaData(Umu,header,_scidacRecord,_scidacFile); |     ScidacMetaData(Umu,header,_scidacRecord,_scidacFile); | ||||||
|  |  | ||||||
|     stats Stats; |  | ||||||
|     Stats(Umu,header); |  | ||||||
|      |  | ||||||
|     std::string format = header.floating_point; |     std::string format = header.floating_point; | ||||||
|     header.ensemble_id    = description; |     header.ensemble_id    = description; | ||||||
|     header.ensemble_label = description; |     header.ensemble_label = description; | ||||||
| @@ -708,10 +705,10 @@ class IldgReader : public GridLimeReader { | |||||||
|   // Else use ILDG MetaData object if present. |   // Else use ILDG MetaData object if present. | ||||||
|   // Else use SciDAC MetaData object if present. |   // Else use SciDAC MetaData object if present. | ||||||
|   //////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////// | ||||||
|   template <class stats = PeriodicGaugeStatistics> |   template <class vsimd> | ||||||
|   void readConfiguration(Lattice<vLorentzColourMatrixD> &Umu, FieldMetaData &FieldMetaData_) { |   void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, FieldMetaData &FieldMetaData_) { | ||||||
|  |  | ||||||
|     typedef Lattice<vLorentzColourMatrixD > GaugeField; |     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||||
|     typedef typename GaugeField::vector_object  vobj; |     typedef typename GaugeField::vector_object  vobj; | ||||||
|     typedef typename vobj::scalar_object sobj; |     typedef typename vobj::scalar_object sobj; | ||||||
|  |  | ||||||
| @@ -924,8 +921,7 @@ class IldgReader : public GridLimeReader { | |||||||
|  |  | ||||||
|     if ( found_FieldMetaData || found_usqcdInfo ) { |     if ( found_FieldMetaData || found_usqcdInfo ) { | ||||||
|       FieldMetaData checker; |       FieldMetaData checker; | ||||||
|       stats Stats; |       GaugeStatistics(Umu,checker); | ||||||
|       Stats(Umu,checker); |  | ||||||
|       assert(fabs(checker.plaquette  - FieldMetaData_.plaquette )<1.0e-5); |       assert(fabs(checker.plaquette  - FieldMetaData_.plaquette )<1.0e-5); | ||||||
|       assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5); |       assert(fabs(checker.link_trace - FieldMetaData_.link_trace)<1.0e-5); | ||||||
|       std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl; |       std::cout << GridLogMessage<<"Plaquette and link trace match " << std::endl; | ||||||
|   | |||||||
| @@ -176,18 +176,29 @@ template<class vobj> inline void PrepareMetaData(Lattice<vobj> & field, FieldMet | |||||||
|   GridMetaData(grid,header);  |   GridMetaData(grid,header);  | ||||||
|   MachineCharacteristics(header); |   MachineCharacteristics(header); | ||||||
| } | } | ||||||
| template<class Impl> | inline void GaugeStatistics(Lattice<vLorentzColourMatrixF> & data,FieldMetaData &header) | ||||||
| class GaugeStatistics |  | ||||||
| { | { | ||||||
| public: |   // How to convert data precision etc... | ||||||
|   void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header) |   header.link_trace=WilsonLoops<PeriodicGimplF>::linkTrace(data); | ||||||
|   { |   header.plaquette =WilsonLoops<PeriodicGimplF>::avgPlaquette(data); | ||||||
|     header.link_trace=WilsonLoops<Impl>::linkTrace(data); | } | ||||||
|     header.plaquette =WilsonLoops<Impl>::avgPlaquette(data); | inline void GaugeStatistics(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header) | ||||||
|   } | { | ||||||
| }; |   // How to convert data precision etc... | ||||||
| typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics; |   header.link_trace=WilsonLoops<PeriodicGimplD>::linkTrace(data); | ||||||
| typedef GaugeStatistics<ConjugateGimplD> ConjugateGaugeStatistics; |   header.plaquette =WilsonLoops<PeriodicGimplD>::avgPlaquette(data); | ||||||
|  | } | ||||||
|  | template<> inline void PrepareMetaData<vLorentzColourMatrixF>(Lattice<vLorentzColourMatrixF> & field, FieldMetaData &header) | ||||||
|  | { | ||||||
|  |     | ||||||
|  |   GridBase *grid = field.Grid(); | ||||||
|  |   std::string format = getFormatString<vLorentzColourMatrixF>(); | ||||||
|  |   header.floating_point = format; | ||||||
|  |   header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac | ||||||
|  |   GridMetaData(grid,header);  | ||||||
|  |   GaugeStatistics(field,header); | ||||||
|  |   MachineCharacteristics(header); | ||||||
|  | } | ||||||
| template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header) | template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzColourMatrixD> & field, FieldMetaData &header) | ||||||
| { | { | ||||||
|   GridBase *grid = field.Grid(); |   GridBase *grid = field.Grid(); | ||||||
| @@ -195,6 +206,7 @@ template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzCo | |||||||
|   header.floating_point = format; |   header.floating_point = format; | ||||||
|   header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac |   header.checksum = 0x0; // Nersc checksum unused in ILDG, Scidac | ||||||
|   GridMetaData(grid,header);  |   GridMetaData(grid,header);  | ||||||
|  |   GaugeStatistics(field,header); | ||||||
|   MachineCharacteristics(header); |   MachineCharacteristics(header); | ||||||
| } | } | ||||||
|  |  | ||||||
|   | |||||||
| @@ -40,8 +40,6 @@ using namespace Grid; | |||||||
| class NerscIO : public BinaryIO {  | class NerscIO : public BinaryIO {  | ||||||
| public: | public: | ||||||
|  |  | ||||||
|   typedef Lattice<vLorentzColourMatrixD> GaugeField; |  | ||||||
|  |  | ||||||
|   static inline void truncate(std::string file){ |   static inline void truncate(std::string file){ | ||||||
|     std::ofstream fout(file,std::ios::out); |     std::ofstream fout(file,std::ios::out); | ||||||
|   } |   } | ||||||
| @@ -131,12 +129,12 @@ public: | |||||||
|   // Now the meat: the object readers |   // Now the meat: the object readers | ||||||
|   ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |   ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||||
|  |  | ||||||
|   template<class GaugeStats=PeriodicGaugeStatistics> |   template<class vsimd> | ||||||
|   static inline void readConfiguration(GaugeField &Umu, |   static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, | ||||||
| 				       FieldMetaData& header, | 				       FieldMetaData& header, | ||||||
| 				       std::string file, | 				       std::string file) | ||||||
| 				       GaugeStats GaugeStatisticsCalculator=GaugeStats()) |  | ||||||
|   { |   { | ||||||
|  |     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||||
|  |  | ||||||
|     GridBase *grid = Umu.Grid(); |     GridBase *grid = Umu.Grid(); | ||||||
|     uint64_t offset = readHeader(file,Umu.Grid(),header); |     uint64_t offset = readHeader(file,Umu.Grid(),header); | ||||||
| @@ -155,23 +153,23 @@ public: | |||||||
|     // munger is a function of <floating point, Real, data_type> |     // munger is a function of <floating point, Real, data_type> | ||||||
|     if ( header.data_type == std::string("4D_SU3_GAUGE") ) { |     if ( header.data_type == std::string("4D_SU3_GAUGE") ) { | ||||||
|       if ( ieee32 || ieee32big ) { |       if ( ieee32 || ieee32big ) { | ||||||
| 	BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F>  | 	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>  | ||||||
| 	  (Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format, | 	  (Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format, | ||||||
| 	   nersc_csum,scidac_csuma,scidac_csumb); | 	   nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|       } |       } | ||||||
|       if ( ieee64 || ieee64big ) { |       if ( ieee64 || ieee64big ) { | ||||||
| 	BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3D>  | 	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>  | ||||||
| 	  (Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format, | 	  (Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format, | ||||||
| 	   nersc_csum,scidac_csuma,scidac_csumb); | 	   nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|       } |       } | ||||||
|     } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) { |     } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) { | ||||||
|       if ( ieee32 || ieee32big ) { |       if ( ieee32 || ieee32big ) { | ||||||
| 	BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF> | 	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF> | ||||||
| 	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format, | 	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format, | ||||||
| 	   nersc_csum,scidac_csuma,scidac_csumb); | 	   nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|       } |       } | ||||||
|       if ( ieee64 || ieee64big ) { |       if ( ieee64 || ieee64big ) { | ||||||
| 	BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixD> | 	BinaryIO::readLatticeObject<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD> | ||||||
| 	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format, | 	  (Umu,file,GaugeSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format, | ||||||
| 	   nersc_csum,scidac_csuma,scidac_csumb); | 	   nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|       } |       } | ||||||
| @@ -179,7 +177,7 @@ public: | |||||||
|       assert(0); |       assert(0); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     GaugeStats Stats; Stats(Umu,clone); |     GaugeStatistics(Umu,clone); | ||||||
|  |  | ||||||
|     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec |     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<nersc_csum<< std::dec | ||||||
| 	     <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl; | 	     <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl; | ||||||
| @@ -205,13 +203,15 @@ public: | |||||||
|     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl; |     std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   template<class GaugeStats=PeriodicGaugeStatistics> |   template<class vsimd> | ||||||
|   static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu, |   static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu, | ||||||
| 					std::string file,  | 					std::string file,  | ||||||
| 					int two_row, | 					int two_row, | ||||||
| 					int bits32) | 					int bits32) | ||||||
|   { |   { | ||||||
|     typedef vLorentzColourMatrixD vobj; |     typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField; | ||||||
|  |  | ||||||
|  |     typedef iLorentzColourMatrix<vsimd> vobj; | ||||||
|     typedef typename vobj::scalar_object sobj; |     typedef typename vobj::scalar_object sobj; | ||||||
|  |  | ||||||
|     FieldMetaData header; |     FieldMetaData header; | ||||||
| @@ -229,7 +229,7 @@ public: | |||||||
|  |  | ||||||
|     GridMetaData(grid,header); |     GridMetaData(grid,header); | ||||||
|     assert(header.nd==4); |     assert(header.nd==4); | ||||||
|     GaugeStats Stats; Stats(Umu,header); |     GaugeStatistics(Umu,header); | ||||||
|     MachineCharacteristics(header); |     MachineCharacteristics(header); | ||||||
|  |  | ||||||
| 	uint64_t offset; | 	uint64_t offset; | ||||||
| @@ -238,19 +238,19 @@ public: | |||||||
|     header.floating_point = std::string("IEEE64BIG"); |     header.floating_point = std::string("IEEE64BIG"); | ||||||
|     header.data_type      = std::string("4D_SU3_GAUGE_3x3"); |     header.data_type      = std::string("4D_SU3_GAUGE_3x3"); | ||||||
|     GaugeSimpleUnmunger<fobj3D,sobj> munge; |     GaugeSimpleUnmunger<fobj3D,sobj> munge; | ||||||
|     if ( grid->IsBoss() ) {  | 	if ( grid->IsBoss() ) {  | ||||||
|       truncate(file); | 	  truncate(file); | ||||||
|       offset = writeHeader(header,file); |     offset = writeHeader(header,file); | ||||||
|     } | 	} | ||||||
|     grid->Broadcast(0,(void *)&offset,sizeof(offset)); | 	grid->Broadcast(0,(void *)&offset,sizeof(offset)); | ||||||
|  |  | ||||||
|     uint32_t nersc_csum,scidac_csuma,scidac_csumb; |     uint32_t nersc_csum,scidac_csuma,scidac_csumb; | ||||||
|     BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point, |     BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point, | ||||||
| 					      nersc_csum,scidac_csuma,scidac_csumb); | 					      nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|     header.checksum = nersc_csum; |     header.checksum = nersc_csum; | ||||||
|     if ( grid->IsBoss() ) {  | 	if ( grid->IsBoss() ) {  | ||||||
|       writeHeader(header,file); |     writeHeader(header,file); | ||||||
|     } | 	} | ||||||
|  |  | ||||||
|     std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum " |     std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum " | ||||||
| 	     <<std::hex<<header.checksum | 	     <<std::hex<<header.checksum | ||||||
|   | |||||||
| @@ -154,7 +154,7 @@ public: | |||||||
|     grid->Barrier(); timer.Stop(); |     grid->Barrier(); timer.Stop(); | ||||||
|     std::cout << Grid::GridLogMessage << "OpenQcdIO::readConfiguration: redistribute overhead " << timer.Elapsed() << std::endl; |     std::cout << Grid::GridLogMessage << "OpenQcdIO::readConfiguration: redistribute overhead " << timer.Elapsed() << std::endl; | ||||||
|  |  | ||||||
|     PeriodicGaugeStatistics Stats; Stats(Umu, clone); |     GaugeStatistics(Umu, clone); | ||||||
|  |  | ||||||
|     RealD plaq_diff = fabs(clone.plaquette - header.plaquette); |     RealD plaq_diff = fabs(clone.plaquette - header.plaquette); | ||||||
|  |  | ||||||
|   | |||||||
| @@ -208,7 +208,7 @@ public: | |||||||
|  |  | ||||||
|     FieldMetaData clone(header); |     FieldMetaData clone(header); | ||||||
|  |  | ||||||
|     PeriodicGaugeStatistics Stats; Stats(Umu, clone); |     GaugeStatistics(Umu, clone); | ||||||
|  |  | ||||||
|     RealD plaq_diff = fabs(clone.plaquette - header.plaquette); |     RealD plaq_diff = fabs(clone.plaquette - header.plaquette); | ||||||
|  |  | ||||||
|   | |||||||
| @@ -80,13 +80,6 @@ template<typename T> struct isSpinor { | |||||||
| template <typename T> using IfSpinor    = Invoke<std::enable_if< isSpinor<T>::value,int> > ; | template <typename T> using IfSpinor    = Invoke<std::enable_if< isSpinor<T>::value,int> > ; | ||||||
| template <typename T> using IfNotSpinor = Invoke<std::enable_if<!isSpinor<T>::value,int> > ; | template <typename T> using IfNotSpinor = Invoke<std::enable_if<!isSpinor<T>::value,int> > ; | ||||||
|  |  | ||||||
| const int CoarseIndex = 4; |  | ||||||
| template<typename T> struct isCoarsened { |  | ||||||
|    static constexpr bool value = (CoarseIndex<=T::TensorLevel); |  | ||||||
| }; |  | ||||||
| template <typename T> using IfCoarsened    = Invoke<std::enable_if< isCoarsened<T>::value,int> > ; |  | ||||||
| template <typename T> using IfNotCoarsened = Invoke<std::enable_if<!isCoarsened<T>::value,int> > ; |  | ||||||
|  |  | ||||||
| // ChrisK very keen to add extra space for Gparity doubling. | // ChrisK very keen to add extra space for Gparity doubling. | ||||||
| // | // | ||||||
| // Also add domain wall index, in a way where Wilson operator  | // Also add domain wall index, in a way where Wilson operator  | ||||||
|   | |||||||
| @@ -88,7 +88,7 @@ public: | |||||||
| 					  const _Spinor &chi,  | 					  const _Spinor &chi,  | ||||||
| 					  int mu,  | 					  int mu,  | ||||||
| 					  StencilEntry *SE, | 					  StencilEntry *SE, | ||||||
| 					  const StencilView &St)  | 					  StencilView &St)  | ||||||
|   { |   { | ||||||
|     int direction = St._directions[mu]; |     int direction = St._directions[mu]; | ||||||
|     int distance  = St._distances[mu]; |     int distance  = St._distances[mu]; | ||||||
| @@ -97,30 +97,42 @@ public: | |||||||
|     Coordinate icoor; |     Coordinate icoor; | ||||||
|  |  | ||||||
| #ifdef GRID_SIMT | #ifdef GRID_SIMT | ||||||
|  |     _Spinor tmp; | ||||||
|  |  | ||||||
|     const int Nsimd =SiteDoubledGaugeField::Nsimd(); |     const int Nsimd =SiteDoubledGaugeField::Nsimd(); | ||||||
|     int s = acceleratorSIMTlane(Nsimd); |     int s = acceleratorSIMTlane(Nsimd); | ||||||
|     St.iCoorFromIindex(icoor,s); |     St.iCoorFromIindex(icoor,s); | ||||||
|  |  | ||||||
|     int mmu = mu % Nd; |     int mmu = mu % Nd; | ||||||
|  |     if ( SE->_around_the_world && St.parameters.twists[mmu] ) { | ||||||
|  |        | ||||||
|  |       int permute_lane = (sl==1)  | ||||||
|  |     	|| ((distance== 1)&&(icoor[direction]==1)) | ||||||
|  | 	|| ((distance==-1)&&(icoor[direction]==0)); | ||||||
|  |  | ||||||
|     auto UU0=coalescedRead(U(0)(mu)); |       if ( permute_lane ) {  | ||||||
|     auto UU1=coalescedRead(U(1)(mu)); | 	tmp(0) = chi(1); | ||||||
|      | 	tmp(1) = chi(0); | ||||||
|     //Decide whether we do a G-parity flavor twist |       } else { | ||||||
|     //Note: this assumes (but does not check) that sl==1 || sl==2 i.e. max 2 SIMD lanes in G-parity dir | 	tmp(0) = chi(0); | ||||||
|     //It also assumes (but does not check) that abs(distance) == 1 | 	tmp(1) = chi(1); | ||||||
|     int permute_lane = (sl==1)  |       } | ||||||
|     || ((distance== 1)&&(icoor[direction]==1)) |  | ||||||
|     || ((distance==-1)&&(icoor[direction]==0)); |  | ||||||
|  |  | ||||||
|     permute_lane = permute_lane && SE->_around_the_world && St.parameters.twists[mmu]; //only if we are going around the world |       auto UU0=coalescedRead(U(0)(mu)); | ||||||
|  |       auto UU1=coalescedRead(U(1)(mu)); | ||||||
|  |  | ||||||
|     //Apply the links |       mult(&phi(0),&UU0,&tmp(0)); | ||||||
|     int f_upper = permute_lane ? 1 : 0; |       mult(&phi(1),&UU1,&tmp(1)); | ||||||
|     int f_lower = !f_upper; |  | ||||||
|  |  | ||||||
|     mult(&phi(0),&UU0,&chi(f_upper)); |     } else { | ||||||
|     mult(&phi(1),&UU1,&chi(f_lower)); |  | ||||||
|  |       auto UU0=coalescedRead(U(0)(mu)); | ||||||
|  |       auto UU1=coalescedRead(U(1)(mu)); | ||||||
|  |  | ||||||
|  |       mult(&phi(0),&UU0,&chi(0)); | ||||||
|  |       mult(&phi(1),&UU1,&chi(1)); | ||||||
|  |  | ||||||
|  |     } | ||||||
|  |  | ||||||
| #else | #else | ||||||
|     typedef _Spinor vobj; |     typedef _Spinor vobj; | ||||||
|   | |||||||
| @@ -85,7 +85,7 @@ class MADWF | |||||||
|       maxiter     =_maxiter; |       maxiter     =_maxiter; | ||||||
|     }; |     }; | ||||||
|     |     | ||||||
|   void operator() (const FermionFieldo &src,FermionFieldo &sol5) |   void operator() (const FermionFieldo &src4,FermionFieldo &sol5) | ||||||
|   { |   { | ||||||
|     std::cout << GridLogMessage<< " ************************************************" << std::endl; |     std::cout << GridLogMessage<< " ************************************************" << std::endl; | ||||||
|     std::cout << GridLogMessage<< "  MADWF-like algorithm                           " << std::endl; |     std::cout << GridLogMessage<< "  MADWF-like algorithm                           " << std::endl; | ||||||
| @@ -114,16 +114,8 @@ class MADWF | |||||||
|     /////////////////////////////////////// |     /////////////////////////////////////// | ||||||
|     //Import source, include Dminus factors |     //Import source, include Dminus factors | ||||||
|     /////////////////////////////////////// |     /////////////////////////////////////// | ||||||
|     GridBase *src_grid = src.Grid(); |     Mato.ImportPhysicalFermionSource(src4,b);  | ||||||
|  |     std::cout << GridLogMessage << " src4 " <<norm2(src4)<<std::endl; | ||||||
|     assert( (src_grid == Mato.GaugeGrid()) || (src_grid == Mato.FermionGrid())); |  | ||||||
|  |  | ||||||
|     if ( src_grid == Mato.GaugeGrid() ) { |  | ||||||
|       Mato.ImportPhysicalFermionSource(src,b); |  | ||||||
|     } else { |  | ||||||
|       b=src; |  | ||||||
|     } |  | ||||||
|     std::cout << GridLogMessage << " src " <<norm2(src)<<std::endl; |  | ||||||
|     std::cout << GridLogMessage << " b    " <<norm2(b)<<std::endl; |     std::cout << GridLogMessage << " b    " <<norm2(b)<<std::endl; | ||||||
|  |  | ||||||
|     defect = b; |     defect = b; | ||||||
|   | |||||||
| @@ -56,12 +56,8 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub | |||||||
| 		 DoubledGaugeField &U, | 		 DoubledGaugeField &U, | ||||||
| 		 const FermionField &in, FermionField &out, int dag, int interior,int exterior); | 		 const FermionField &in, FermionField &out, int dag, int interior,int exterior); | ||||||
|    |    | ||||||
|   void DhopDirKernel(StencilImpl &st, |   void DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf, | ||||||
| 		     const DoubledGaugeFieldView &U, | 		     int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp); | ||||||
| 		     const DoubledGaugeFieldView &UUU, SiteSpinor * buf, |  | ||||||
| 		     int sF, int sU, |  | ||||||
| 		     const FermionFieldView &in, |  | ||||||
| 		     const FermionFieldView &out, int dir,int disp); |  | ||||||
|  protected:     |  protected:     | ||||||
|  |  | ||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
| @@ -69,67 +65,53 @@ template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , pub | |||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
|    template<int Naik>  |    template<int Naik>  | ||||||
|    static accelerator_inline |    static accelerator_inline | ||||||
|    void DhopSiteGeneric(const StencilView &st,  |    void DhopSiteGeneric(StencilView &st,  | ||||||
| 			const DoubledGaugeFieldView &U, | 			DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,  | ||||||
| 			const DoubledGaugeFieldView &UUU,  |  | ||||||
| 			SiteSpinor * buf, int LLs, int sU,  | 			SiteSpinor * buf, int LLs, int sU,  | ||||||
| 			const FermionFieldView &in, | 			const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 			const FermionFieldView &out,int dag); |  | ||||||
|     |     | ||||||
|    template<int Naik> static accelerator_inline |    template<int Naik> static accelerator_inline | ||||||
|    void DhopSiteGenericInt(const StencilView &st,  |    void DhopSiteGenericInt(StencilView &st,  | ||||||
| 			   const DoubledGaugeFieldView &U, | 			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU,  | ||||||
| 			   const DoubledGaugeFieldView &UUU,  |  | ||||||
| 			   SiteSpinor * buf, int LLs, int sU,  | 			   SiteSpinor * buf, int LLs, int sU,  | ||||||
| 			   const FermionFieldView &in, | 			   const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 			   const FermionFieldView &out,int dag); |  | ||||||
|     |     | ||||||
|    template<int Naik> static accelerator_inline |    template<int Naik> static accelerator_inline | ||||||
|    void DhopSiteGenericExt(const StencilView &st,  |    void DhopSiteGenericExt(StencilView &st,  | ||||||
| 			   const DoubledGaugeFieldView &U, | 			   DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
| 			   const DoubledGaugeFieldView &UUU, | 			   SiteSpinor * buf, int LLs, int sU,  | ||||||
|  			   SiteSpinor * buf, int LLs, int sU,  | 			   const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 			   const FermionFieldView &in, |  | ||||||
| 			   const FermionFieldView &out,int dag); |  | ||||||
|  |  | ||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
|    // Nc=3 specific kernels |    // Nc=3 specific kernels | ||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
|     |     | ||||||
|    template<int Naik> static accelerator_inline |    template<int Naik> static accelerator_inline | ||||||
|    void DhopSiteHand(const StencilView &st,  |    void DhopSiteHand(StencilView &st,  | ||||||
| 		     const DoubledGaugeFieldView &U, | 		     DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,  | ||||||
| 		     const DoubledGaugeFieldView &UUU,  | 		     SiteSpinor * buf, int LLs, int sU,  | ||||||
|  		     SiteSpinor * buf, int LLs, int sU,  | 		     const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 		     const FermionFieldView &in, |  | ||||||
| 		     const FermionFieldView &out,int dag); |  | ||||||
|     |     | ||||||
|    template<int Naik> static accelerator_inline |    template<int Naik> static accelerator_inline | ||||||
|    void DhopSiteHandInt(const StencilView &st,  |    void DhopSiteHandInt(StencilView &st,  | ||||||
| 			const DoubledGaugeFieldView &U, | 			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,  | ||||||
| 			const DoubledGaugeFieldView &UUU,  | 			SiteSpinor * buf, int LLs, int sU,  | ||||||
|  			SiteSpinor * buf, int LLs, int sU,  | 			const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 			const FermionFieldView &in, |  | ||||||
| 			const FermionFieldView &out,int dag); |  | ||||||
|     |     | ||||||
|    template<int Naik> static accelerator_inline |    template<int Naik> static accelerator_inline | ||||||
|    void DhopSiteHandExt(const StencilView &st,  |    void DhopSiteHandExt(StencilView &st,  | ||||||
| 			const DoubledGaugeFieldView &U, | 			DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,  | ||||||
| 			const DoubledGaugeFieldView &UUU,  | 			SiteSpinor * buf, int LLs, int sU,  | ||||||
|  			SiteSpinor * buf, int LLs, int sU,  | 			const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 			const FermionFieldView &in, |  | ||||||
| 			const FermionFieldView &out,int dag); |  | ||||||
|  |  | ||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
|    // Asm Nc=3 specific kernels |    // Asm Nc=3 specific kernels | ||||||
|    /////////////////////////////////////////////////////////////////////////////////////// |    /////////////////////////////////////////////////////////////////////////////////////// | ||||||
|     |     | ||||||
|    void DhopSiteAsm(const StencilView &st,  |    void DhopSiteAsm(StencilView &st,  | ||||||
| 		    const DoubledGaugeFieldView &U, | 		    DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU,  | ||||||
| 		    const DoubledGaugeFieldView &UUU,  | 		    SiteSpinor * buf, int LLs, int sU,  | ||||||
|  		    SiteSpinor * buf, int LLs, int sU,  | 		    const FermionFieldView &in, FermionFieldView &out,int dag); | ||||||
| 		    const FermionFieldView &in, |  | ||||||
| 		    const FermionFieldView &out,int dag); |  | ||||||
|    |    | ||||||
| public: | public: | ||||||
|  |  | ||||||
|   | |||||||
| @@ -61,7 +61,7 @@ public: | |||||||
|   typedef typename SiteHalfSpinor::vector_type     vComplexHigh; |   typedef typename SiteHalfSpinor::vector_type     vComplexHigh; | ||||||
|   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh); |   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh); | ||||||
|  |  | ||||||
|   accelerator_inline int CommDatumSize(void) const { |   accelerator_inline int CommDatumSize(void) { | ||||||
|     return sizeof(SiteHalfCommSpinor); |     return sizeof(SiteHalfCommSpinor); | ||||||
|   } |   } | ||||||
|  |  | ||||||
| @@ -69,7 +69,7 @@ public: | |||||||
|   /* Compress includes precision change if mpi data is not same */ |   /* Compress includes precision change if mpi data is not same */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   template<class _SiteHalfSpinor, class _SiteSpinor> |   template<class _SiteHalfSpinor, class _SiteSpinor> | ||||||
|   accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const { |   accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) { | ||||||
|     _SiteHalfSpinor tmp; |     _SiteHalfSpinor tmp; | ||||||
|     projector::Proj(tmp,in,mu,dag); |     projector::Proj(tmp,in,mu,dag); | ||||||
|     vstream(buf[o],tmp); |     vstream(buf[o],tmp); | ||||||
| @@ -81,7 +81,7 @@ public: | |||||||
|   accelerator_inline void Exchange(SiteHalfSpinor *mp, |   accelerator_inline void Exchange(SiteHalfSpinor *mp, | ||||||
| 				   const SiteHalfSpinor * __restrict__ vp0, | 				   const SiteHalfSpinor * __restrict__ vp0, | ||||||
| 				   const SiteHalfSpinor * __restrict__ vp1, | 				   const SiteHalfSpinor * __restrict__ vp1, | ||||||
| 				   Integer type,Integer o) const { | 				   Integer type,Integer o){ | ||||||
|     SiteHalfSpinor tmp1; |     SiteHalfSpinor tmp1; | ||||||
|     SiteHalfSpinor tmp2; |     SiteHalfSpinor tmp2; | ||||||
|     exchange(tmp1,tmp2,vp0[o],vp1[o],type); |     exchange(tmp1,tmp2,vp0[o],vp1[o],type); | ||||||
| @@ -93,7 +93,7 @@ public: | |||||||
|   /* Have a decompression step if mpi data is not same */ |   /* Have a decompression step if mpi data is not same */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   accelerator_inline void Decompress(SiteHalfSpinor * __restrict__ out, |   accelerator_inline void Decompress(SiteHalfSpinor * __restrict__ out, | ||||||
| 				     SiteHalfSpinor * __restrict__ in, Integer o) const {     | 				     SiteHalfSpinor * __restrict__ in, Integer o) {     | ||||||
|     assert(0); |     assert(0); | ||||||
|   } |   } | ||||||
|  |  | ||||||
| @@ -103,7 +103,7 @@ public: | |||||||
|   accelerator_inline void CompressExchange(SiteHalfSpinor * __restrict__ out0, |   accelerator_inline void CompressExchange(SiteHalfSpinor * __restrict__ out0, | ||||||
| 					   SiteHalfSpinor * __restrict__ out1, | 					   SiteHalfSpinor * __restrict__ out1, | ||||||
| 					   const SiteSpinor * __restrict__ in, | 					   const SiteSpinor * __restrict__ in, | ||||||
| 					   Integer j,Integer k, Integer m,Integer type) const | 					   Integer j,Integer k, Integer m,Integer type) | ||||||
|   { |   { | ||||||
|     SiteHalfSpinor temp1, temp2; |     SiteHalfSpinor temp1, temp2; | ||||||
|     SiteHalfSpinor temp3, temp4; |     SiteHalfSpinor temp3, temp4; | ||||||
| @@ -117,7 +117,7 @@ public: | |||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   /* Pass the info to the stencil */ |   /* Pass the info to the stencil */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   accelerator_inline bool DecompressionStep(void) const { return false; } |   accelerator_inline bool DecompressionStep(void) { return false; } | ||||||
|  |  | ||||||
| }; | }; | ||||||
|  |  | ||||||
| @@ -142,7 +142,7 @@ public: | |||||||
|   typedef typename SiteHalfSpinor::vector_type     vComplexHigh; |   typedef typename SiteHalfSpinor::vector_type     vComplexHigh; | ||||||
|   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh); |   constexpr static int Nw=sizeof(SiteHalfSpinor)/sizeof(vComplexHigh); | ||||||
|  |  | ||||||
|   accelerator_inline int CommDatumSize(void) const { |   accelerator_inline int CommDatumSize(void) { | ||||||
|     return sizeof(SiteHalfCommSpinor); |     return sizeof(SiteHalfCommSpinor); | ||||||
|   } |   } | ||||||
|  |  | ||||||
| @@ -150,7 +150,7 @@ public: | |||||||
|   /* Compress includes precision change if mpi data is not same */ |   /* Compress includes precision change if mpi data is not same */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   template<class _SiteHalfSpinor, class _SiteSpinor> |   template<class _SiteHalfSpinor, class _SiteSpinor> | ||||||
|   accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) const { |   accelerator_inline void Compress(_SiteHalfSpinor *buf,Integer o,const _SiteSpinor &in) { | ||||||
|     _SiteHalfSpinor hsp; |     _SiteHalfSpinor hsp; | ||||||
|     SiteHalfCommSpinor *hbuf = (SiteHalfCommSpinor *)buf; |     SiteHalfCommSpinor *hbuf = (SiteHalfCommSpinor *)buf; | ||||||
|     projector::Proj(hsp,in,mu,dag); |     projector::Proj(hsp,in,mu,dag); | ||||||
| @@ -163,7 +163,7 @@ public: | |||||||
|   accelerator_inline void Exchange(SiteHalfSpinor *mp, |   accelerator_inline void Exchange(SiteHalfSpinor *mp, | ||||||
|                        SiteHalfSpinor *vp0, |                        SiteHalfSpinor *vp0, | ||||||
|                        SiteHalfSpinor *vp1, |                        SiteHalfSpinor *vp1, | ||||||
| 		       Integer type,Integer o) const { | 		       Integer type,Integer o){ | ||||||
|     SiteHalfSpinor vt0,vt1; |     SiteHalfSpinor vt0,vt1; | ||||||
|     SiteHalfCommSpinor *vpp0 = (SiteHalfCommSpinor *)vp0; |     SiteHalfCommSpinor *vpp0 = (SiteHalfCommSpinor *)vp0; | ||||||
|     SiteHalfCommSpinor *vpp1 = (SiteHalfCommSpinor *)vp1; |     SiteHalfCommSpinor *vpp1 = (SiteHalfCommSpinor *)vp1; | ||||||
| @@ -175,7 +175,7 @@ public: | |||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   /* Have a decompression step if mpi data is not same */ |   /* Have a decompression step if mpi data is not same */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o) const { |   accelerator_inline void Decompress(SiteHalfSpinor *out, SiteHalfSpinor *in, Integer o){ | ||||||
|     SiteHalfCommSpinor *hin=(SiteHalfCommSpinor *)in; |     SiteHalfCommSpinor *hin=(SiteHalfCommSpinor *)in; | ||||||
|     precisionChange((vComplexHigh *)&out[o],(vComplexLow *)&hin[o],Nw); |     precisionChange((vComplexHigh *)&out[o],(vComplexLow *)&hin[o],Nw); | ||||||
|   } |   } | ||||||
| @@ -186,7 +186,7 @@ public: | |||||||
|   accelerator_inline void CompressExchange(SiteHalfSpinor *out0, |   accelerator_inline void CompressExchange(SiteHalfSpinor *out0, | ||||||
| 			       SiteHalfSpinor *out1, | 			       SiteHalfSpinor *out1, | ||||||
| 			       const SiteSpinor *in, | 			       const SiteSpinor *in, | ||||||
| 			       Integer j,Integer k, Integer m,Integer type) const { | 			       Integer j,Integer k, Integer m,Integer type){ | ||||||
|     SiteHalfSpinor temp1, temp2,temp3,temp4; |     SiteHalfSpinor temp1, temp2,temp3,temp4; | ||||||
|     SiteHalfCommSpinor *hout0 = (SiteHalfCommSpinor *)out0; |     SiteHalfCommSpinor *hout0 = (SiteHalfCommSpinor *)out0; | ||||||
|     SiteHalfCommSpinor *hout1 = (SiteHalfCommSpinor *)out1; |     SiteHalfCommSpinor *hout1 = (SiteHalfCommSpinor *)out1; | ||||||
| @@ -200,7 +200,7 @@ public: | |||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   /* Pass the info to the stencil */ |   /* Pass the info to the stencil */ | ||||||
|   /*****************************************************/ |   /*****************************************************/ | ||||||
|   accelerator_inline bool DecompressionStep(void) const { return true; } |   accelerator_inline bool DecompressionStep(void) { return true; } | ||||||
|  |  | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -95,7 +95,7 @@ public: | |||||||
| 					  const _Spinor &chi, | 					  const _Spinor &chi, | ||||||
| 					  int mu, | 					  int mu, | ||||||
| 					  StencilEntry *SE, | 					  StencilEntry *SE, | ||||||
| 					  const StencilView &St)  | 					  StencilView &St)  | ||||||
|   { |   { | ||||||
|     multLink(phi,U,chi,mu); |     multLink(phi,U,chi,mu); | ||||||
|   } |   } | ||||||
| @@ -106,15 +106,11 @@ public: | |||||||
| 			    const _SpinorField & phi, | 			    const _SpinorField & phi, | ||||||
| 			    int mu) | 			    int mu) | ||||||
|   { |   { | ||||||
|     const int Nsimd = SiteHalfSpinor::Nsimd(); |  | ||||||
|     autoView( out_v, out, AcceleratorWrite); |     autoView( out_v, out, AcceleratorWrite); | ||||||
|     autoView( phi_v, phi, AcceleratorRead); |     autoView( phi_v, phi, AcceleratorRead); | ||||||
|     autoView( Umu_v, Umu, AcceleratorRead); |     autoView( Umu_v, Umu, AcceleratorRead); | ||||||
|     typedef decltype(coalescedRead(out_v[0]))   calcSpinor; |     accelerator_for(sss,out.Grid()->oSites(),1,{ | ||||||
|     accelerator_for(sss,out.Grid()->oSites(),Nsimd,{ | 	multLink(out_v[sss],Umu_v[sss],phi_v[sss],mu); | ||||||
| 	calcSpinor tmp; |  | ||||||
| 	multLink(tmp,Umu_v[sss],phi_v(sss),mu); |  | ||||||
| 	coalescedWrite(out_v[sss],tmp); |  | ||||||
|     }); |     }); | ||||||
|   } |   } | ||||||
| 					    | 					    | ||||||
|   | |||||||
| @@ -49,7 +49,6 @@ public: | |||||||
|  |  | ||||||
|   INHERIT_IMPL_TYPES(Impl); |   INHERIT_IMPL_TYPES(Impl); | ||||||
|   typedef FermionOperator<Impl> Base; |   typedef FermionOperator<Impl> Base; | ||||||
|   typedef AcceleratorVector<int,STENCIL_MAX> StencilVector; |  | ||||||
|     |     | ||||||
| public: | public: | ||||||
|  |  | ||||||
| @@ -69,87 +68,73 @@ public: | |||||||
|  |  | ||||||
| private: | private: | ||||||
|  |  | ||||||
|   static accelerator_inline void DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U, |   static accelerator_inline void DhopDirK(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor * buf, | ||||||
| 					  SiteHalfSpinor * buf, int sF, int sU, | 				   int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dirdisp, int gamma); | ||||||
| 					  const FermionFieldView &in,const FermionFieldView &out, int dirdisp, int gamma); |  | ||||||
|  |  | ||||||
|   static accelerator_inline void DhopDirXp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |   static accelerator_inline void DhopDirXp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |   static accelerator_inline void DhopDirYp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
|   static accelerator_inline void DhopDirYp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |   static accelerator_inline void DhopDirZp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |   static accelerator_inline void DhopDirTp(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
|   static accelerator_inline void DhopDirZp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |   static accelerator_inline void DhopDirXm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |   static accelerator_inline void DhopDirYm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
|   static accelerator_inline void DhopDirTp(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |   static accelerator_inline void DhopDirZm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |   static accelerator_inline void DhopDirTm(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU,const FermionFieldView &in,FermionFieldView &out,int dirdisp); | ||||||
|   static accelerator_inline void DhopDirXm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |  | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |  | ||||||
|   static accelerator_inline void DhopDirYm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |  | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |  | ||||||
|   static accelerator_inline void DhopDirZm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |  | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |  | ||||||
|   static accelerator_inline void DhopDirTm(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf,int sF,int sU, |  | ||||||
| 					   const FermionFieldView &in, const FermionFieldView &out,int dirdisp); |  | ||||||
|        |        | ||||||
|   // Specialised variants |   // Specialised variants | ||||||
|   static accelerator void GenericDhopSite(const StencilView &st, |   static accelerator void GenericDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 					  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |        | ||||||
|         |   static accelerator void GenericDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void GenericDhopSiteDag(const StencilView &st, const  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 						    int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |    | ||||||
|     |   static accelerator void GenericDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 						    int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |        | ||||||
|         |   static accelerator void GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 						int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 						int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |    | ||||||
|     |   static accelerator void GenericDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |        | ||||||
|         |   static accelerator void GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | 						       int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
| 						int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |  | ||||||
|  |  | ||||||
| // Keep Hand unrolled  |   static void AsmDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|   static accelerator void HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p,  SiteDoubledGaugeField *U, SiteHalfSpinor * buf, | 			  int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out); | ||||||
| 					   int sF, int sU, const SiteSpinor *in, SiteSpinor *out); |    | ||||||
|  |   static void AsmDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|  | 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out); | ||||||
|  |    | ||||||
|  |   static void AsmDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|  | 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out); | ||||||
|  |    | ||||||
|  |   static void AsmDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|  | 				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out); | ||||||
|  |    | ||||||
|  |   static void AsmDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|  | 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,FermionFieldView &out); | ||||||
|  |    | ||||||
|  |   static void AsmDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|  | 				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, FermionFieldView &out); | ||||||
|  |  | ||||||
|   static accelerator void HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | // Keep Hand unrolled temporarily   | ||||||
| 				       int sF, int sU, const FermionFieldView &in,const FermionFieldView &out); |   static accelerator void HandDhopSite(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
|     | 				       int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|   static accelerator void HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |  | ||||||
|     |  | ||||||
|   static accelerator void HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |  | ||||||
|    |    | ||||||
|   static accelerator void HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |   static accelerator void HandDhopSiteDag(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); | 					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|    |    | ||||||
|   static accelerator void HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |   static accelerator void HandDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 					  int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); | 					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|     |  | ||||||
|   static accelerator void HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 					     int sF, int sU, const FermionFieldView &in, const FermionFieldView &out); |  | ||||||
|   //AVX 512 ASM |  | ||||||
|   static void AsmDhopSite(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 			  int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out); |  | ||||||
|    |    | ||||||
|   static void AsmDhopSiteDag(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |   static accelerator void HandDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out); | 					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|    |    | ||||||
|   static void AsmDhopSiteInt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |   static accelerator void HandDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out); | 					  int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|    |    | ||||||
|   static void AsmDhopSiteDagInt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |   static accelerator void HandDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, SiteHalfSpinor * buf, | ||||||
| 				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out); | 					     int sF, int sU, const FermionFieldView &in, FermionFieldView &out); | ||||||
|    |  | ||||||
|   static void AsmDhopSiteExt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 			     int sF, int sU, int Ls, int Nsite, const FermionFieldView &in,const FermionFieldView &out); |  | ||||||
|    |  | ||||||
|   static void AsmDhopSiteDagExt(const StencilView &st,  const DoubledGaugeFieldView &U, SiteHalfSpinor * buf, |  | ||||||
| 				int sF, int sU, int Ls, int Nsite, const FermionFieldView &in, const FermionFieldView &out); |  | ||||||
|  |  | ||||||
|  public: |  public: | ||||||
|  WilsonKernels(const ImplParams &p = ImplParams()) : Base(p){}; |  WilsonKernels(const ImplParams &p = ImplParams()) : Base(p){}; | ||||||
| }; | }; | ||||||
|   | |||||||
| @@ -642,7 +642,7 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1, | |||||||
| 						      Current curr_type, | 						      Current curr_type, | ||||||
| 						      unsigned int mu) | 						      unsigned int mu) | ||||||
| { | { | ||||||
| #if (!defined(GRID_HIP)) | #if (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) | ||||||
|   Gamma::Algebra Gmu [] = { |   Gamma::Algebra Gmu [] = { | ||||||
|     Gamma::Algebra::GammaX, |     Gamma::Algebra::GammaX, | ||||||
|     Gamma::Algebra::GammaY, |     Gamma::Algebra::GammaY, | ||||||
| @@ -826,7 +826,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in, | |||||||
|   } |   } | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
| #if (!defined(GRID_HIP)) | #if (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) | ||||||
|   int tshift = (mu == Nd-1) ? 1 : 0; |   int tshift = (mu == Nd-1) ? 1 : 0; | ||||||
|   //////////////////////////////////////////////// |   //////////////////////////////////////////////// | ||||||
|   // GENERAL CAYLEY CASE |   // GENERAL CAYLEY CASE | ||||||
|   | |||||||
| @@ -618,13 +618,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | |||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
| template <class Impl> | template <class Impl> | ||||||
| void StaggeredKernels<Impl>::DhopSiteAsm(const StencilView &st, | void StaggeredKernels<Impl>::DhopSiteAsm(StencilView &st, | ||||||
| 					 const DoubledGaugeFieldView &U, | 					 DoubledGaugeFieldView &U, | ||||||
| 					 const DoubledGaugeFieldView &UUU, | 					 DoubledGaugeFieldView &UUU, | ||||||
|  					 SiteSpinor *buf, int sF, | 					 SiteSpinor *buf, int sF, | ||||||
| 					 int sU, | 					 int sU, const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 					 const FermionFieldView &in, |  | ||||||
| 					 const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| }; | }; | ||||||
| @@ -685,13 +683,11 @@ void StaggeredKernels<Impl>::DhopSiteAsm(const StencilView &st, | |||||||
|  |  | ||||||
|   // This is the single precision 5th direction vectorised kernel |   // This is the single precision 5th direction vectorised kernel | ||||||
| #include <Grid/simd/Intel512single.h> | #include <Grid/simd/Intel512single.h> | ||||||
| template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(const StencilView &st, | template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilView &st, | ||||||
| 								    const DoubledGaugeFieldView &U, | 								    DoubledGaugeFieldView &U, | ||||||
| 								    const DoubledGaugeFieldView &UUU, | 								    DoubledGaugeFieldView &UUU, | ||||||
|  								    SiteSpinor *buf, int sF, | 								    SiteSpinor *buf, int sF, | ||||||
| 								    int sU, | 								    int sU, const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 								    const FermionFieldView &in, |  | ||||||
| 								    const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
| #ifdef AVX512 | #ifdef AVX512 | ||||||
|   uint64_t gauge0,gauge1,gauge2,gauge3; |   uint64_t gauge0,gauge1,gauge2,gauge3; | ||||||
| @@ -742,13 +738,11 @@ template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(const Stenci | |||||||
| } | } | ||||||
|  |  | ||||||
| #include <Grid/simd/Intel512double.h> | #include <Grid/simd/Intel512double.h> | ||||||
| template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(const StencilView &st,  | template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilView &st,  | ||||||
| 								    const DoubledGaugeFieldView &U, | 								    DoubledGaugeFieldView &U, | ||||||
| 								    const DoubledGaugeFieldView &UUU, | 								    DoubledGaugeFieldView &UUU, | ||||||
|  								    SiteSpinor *buf, int sF, | 								    SiteSpinor *buf, int sF, | ||||||
| 								    int sU, | 								    int sU, const FermionFieldView &in, FermionFieldView &out, int dag)  | ||||||
| 								    const FermionFieldView &in, |  | ||||||
| 								    const FermionFieldView &out, int dag)  |  | ||||||
| { | { | ||||||
| #ifdef AVX512 | #ifdef AVX512 | ||||||
|   uint64_t gauge0,gauge1,gauge2,gauge3; |   uint64_t gauge0,gauge1,gauge2,gauge3; | ||||||
| @@ -830,13 +824,11 @@ template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(const Stenci | |||||||
|   // This is the single precision 5th direction vectorised kernel |   // This is the single precision 5th direction vectorised kernel | ||||||
|  |  | ||||||
| #include <Grid/simd/Intel512single.h> | #include <Grid/simd/Intel512single.h> | ||||||
| template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(const StencilView &st,  | template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilView &st,  | ||||||
| 							       const DoubledGaugeFieldView &U, | 							       DoubledGaugeFieldView &U, | ||||||
| 							       const DoubledGaugeFieldView &UUU, | 							       DoubledGaugeFieldView &UUU, | ||||||
|  							       SiteSpinor *buf, int sF, | 							       SiteSpinor *buf, int sF, | ||||||
| 							       int sU, | 							       int sU, const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 							       const FermionFieldView &in, |  | ||||||
| 							       const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
| #ifdef AVX512 | #ifdef AVX512 | ||||||
|   uint64_t gauge0,gauge1,gauge2,gauge3; |   uint64_t gauge0,gauge1,gauge2,gauge3; | ||||||
| @@ -901,13 +893,11 @@ template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(const StencilView | |||||||
| } | } | ||||||
|  |  | ||||||
| #include <Grid/simd/Intel512double.h> | #include <Grid/simd/Intel512double.h> | ||||||
| template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(const StencilView &st,  | template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilView &st,  | ||||||
| 							       const DoubledGaugeFieldView &U, | 							       DoubledGaugeFieldView &U, | ||||||
| 							       const DoubledGaugeFieldView &UUU, | 							       DoubledGaugeFieldView &UUU, | ||||||
|  							       SiteSpinor *buf, int sF, | 							       SiteSpinor *buf, int sF, | ||||||
| 							       int sU, | 							       int sU, const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 							       const FermionFieldView &in, |  | ||||||
| 							       const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
| #ifdef AVX512 | #ifdef AVX512 | ||||||
|   uint64_t gauge0,gauge1,gauge2,gauge3; |   uint64_t gauge0,gauge1,gauge2,gauge3; | ||||||
|   | |||||||
| @@ -147,12 +147,10 @@ NAMESPACE_BEGIN(Grid); | |||||||
|  |  | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteHand(const StencilView &st, | void StaggeredKernels<Impl>::DhopSiteHand(StencilView &st, | ||||||
| 					  const DoubledGaugeFieldView &U, | 					  DoubledGaugeFieldView &U,DoubledGaugeFieldView &UUU, | ||||||
| 					  const DoubledGaugeFieldView &UUU, | 					  SiteSpinor *buf, int sF, int sU,  | ||||||
|  					  SiteSpinor *buf, int sF, int sU,  | 					  const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 					  const FermionFieldView &in, |  | ||||||
| 					  const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
| @@ -224,12 +222,10 @@ void StaggeredKernels<Impl>::DhopSiteHand(const StencilView &st, | |||||||
|  |  | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteHandInt(const StencilView &st,  | void StaggeredKernels<Impl>::DhopSiteHandInt(StencilView &st,  | ||||||
| 					     const DoubledGaugeFieldView &U, | 					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
| 					     const DoubledGaugeFieldView &UUU, | 					     SiteSpinor *buf, int sF, int sU,  | ||||||
|  					     SiteSpinor *buf, int sF, int sU,  | 					     const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 					     const FermionFieldView &in, |  | ||||||
| 					     const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
| @@ -305,12 +301,10 @@ void StaggeredKernels<Impl>::DhopSiteHandInt(const StencilView &st, | |||||||
|  |  | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteHandExt(const StencilView &st, | void StaggeredKernels<Impl>::DhopSiteHandExt(StencilView &st, | ||||||
| 					     const DoubledGaugeFieldView &U, | 					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
| 					     const DoubledGaugeFieldView &UUU, | 					     SiteSpinor *buf, int sF, int sU,  | ||||||
|  					     SiteSpinor *buf, int sF, int sU,  | 					     const FermionFieldView &in, FermionFieldView &out,int dag)  | ||||||
| 					     const FermionFieldView &in, |  | ||||||
| 					     const FermionFieldView &out,int dag)  |  | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
|   | |||||||
| @@ -79,10 +79,10 @@ StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){}; | |||||||
| //////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////// | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteGeneric(const StencilView &st,  | void StaggeredKernels<Impl>::DhopSiteGeneric(StencilView &st,  | ||||||
| 					     const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU, | 					     DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
|  					     SiteSpinor *buf, int sF, int sU,  | 					     SiteSpinor *buf, int sF, int sU,  | ||||||
| 					     const FermionFieldView &in, const FermionFieldView &out, int dag)  | 					     const FermionFieldView &in, FermionFieldView &out, int dag)  | ||||||
| { | { | ||||||
|   const SiteSpinor *chi_p; |   const SiteSpinor *chi_p; | ||||||
|   SiteSpinor chi; |   SiteSpinor chi; | ||||||
| @@ -127,11 +127,10 @@ void StaggeredKernels<Impl>::DhopSiteGeneric(const StencilView &st, | |||||||
|   /////////////////////////////////////////////////// |   /////////////////////////////////////////////////// | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteGenericInt(const StencilView &st,  | void StaggeredKernels<Impl>::DhopSiteGenericInt(StencilView &st,  | ||||||
| 						const DoubledGaugeFieldView &U, const DoubledGaugeFieldView &UUU, | 						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
|  						SiteSpinor *buf, int sF, int sU,  | 						SiteSpinor *buf, int sF, int sU,  | ||||||
| 						const FermionFieldView &in, const FermionFieldView &out,int dag) | 						const FermionFieldView &in, FermionFieldView &out,int dag) { | ||||||
| { |  | ||||||
|   const SiteSpinor *chi_p; |   const SiteSpinor *chi_p; | ||||||
|   SiteSpinor chi; |   SiteSpinor chi; | ||||||
|   SiteSpinor Uchi; |   SiteSpinor Uchi; | ||||||
| @@ -176,13 +175,10 @@ void StaggeredKernels<Impl>::DhopSiteGenericInt(const StencilView &st, | |||||||
|   /////////////////////////////////////////////////// |   /////////////////////////////////////////////////// | ||||||
| template <class Impl> | template <class Impl> | ||||||
| template <int Naik> accelerator_inline | template <int Naik> accelerator_inline | ||||||
| void StaggeredKernels<Impl>::DhopSiteGenericExt(const StencilView &st,  | void StaggeredKernels<Impl>::DhopSiteGenericExt(StencilView &st,  | ||||||
| 						const DoubledGaugeFieldView &U, | 						DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, | ||||||
| 						const DoubledGaugeFieldView &UUU, | 						SiteSpinor *buf, int sF, int sU, | ||||||
|  						SiteSpinor *buf, int sF, int sU, | 						const FermionFieldView &in, FermionFieldView &out,int dag) { | ||||||
| 						const FermionFieldView &in, |  | ||||||
| 						const FermionFieldView &out,int dag) |  | ||||||
| { |  | ||||||
|   const SiteSpinor *chi_p; |   const SiteSpinor *chi_p; | ||||||
|   //  SiteSpinor chi; |   //  SiteSpinor chi; | ||||||
|   SiteSpinor Uchi; |   SiteSpinor Uchi; | ||||||
| @@ -229,13 +225,8 @@ void StaggeredKernels<Impl>::DhopSiteGenericExt(const StencilView &st, | |||||||
| // Driving / wrapping routine to select right kernel | // Driving / wrapping routine to select right kernel | ||||||
| //////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////// | ||||||
| template <class Impl>  | template <class Impl>  | ||||||
| void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, | void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, DoubledGaugeFieldView &U, DoubledGaugeFieldView &UUU, SiteSpinor * buf, | ||||||
| 					   const DoubledGaugeFieldView &U, | 					   int sF, int sU, const FermionFieldView &in, FermionFieldView &out, int dir,int disp) | ||||||
| 					   const DoubledGaugeFieldView &UUU, |  | ||||||
| 					   SiteSpinor * buf, |  | ||||||
| 					   int sF, int sU, |  | ||||||
| 					   const FermionFieldView &in, |  | ||||||
| 					   const FermionFieldView &out, int dir,int disp) |  | ||||||
| { | { | ||||||
|   // Disp should be either +1,-1,+3,-3 |   // Disp should be either +1,-1,+3,-3 | ||||||
|   // What about "dag" ? |   // What about "dag" ? | ||||||
| @@ -263,8 +254,7 @@ void StaggeredKernels<Impl>::DhopDirKernel(StencilImpl &st, | |||||||
|   }); |   }); | ||||||
|  |  | ||||||
| template <class Impl>  | template <class Impl>  | ||||||
| void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, | void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,  | ||||||
| 					  LebesgueOrder &lo,  |  | ||||||
| 					  DoubledGaugeField &U, DoubledGaugeField &UUU,  | 					  DoubledGaugeField &U, DoubledGaugeField &UUU,  | ||||||
| 					  const FermionField &in, FermionField &out, int dag, int interior,int exterior) | 					  const FermionField &in, FermionField &out, int dag, int interior,int exterior) | ||||||
| { | { | ||||||
|   | |||||||
| @@ -92,16 +92,20 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu) | |||||||
|   int lvol = _Umu.Grid()->lSites(); |   int lvol = _Umu.Grid()->lSites(); | ||||||
|   int DimRep = Impl::Dimension; |   int DimRep = Impl::Dimension; | ||||||
|  |  | ||||||
|  |   Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep); | ||||||
|  |   Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep); | ||||||
|  |  | ||||||
|  |   Coordinate lcoor; | ||||||
|  |   typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero(); | ||||||
|  |  | ||||||
|   { |   { | ||||||
|     autoView(CTv,CloverTerm,CpuRead); |     autoView(CTv,CloverTerm,CpuRead); | ||||||
|     autoView(CTIv,CloverTermInv,CpuWrite); |     autoView(CTIv,CloverTermInv,CpuWrite); | ||||||
|     thread_for(site, lvol, { |     for (int site = 0; site < lvol; site++) { | ||||||
|       Coordinate lcoor; |  | ||||||
|       grid->LocalIndexToLocalCoor(site, lcoor); |       grid->LocalIndexToLocalCoor(site, lcoor); | ||||||
|       Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep); |       EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep); | ||||||
|       Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep); |  | ||||||
|       typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero(); |  | ||||||
|       peekLocalSite(Qx, CTv, lcoor); |       peekLocalSite(Qx, CTv, lcoor); | ||||||
|  |       Qxinv = Zero(); | ||||||
|       //if (csw!=0){ |       //if (csw!=0){ | ||||||
|       for (int j = 0; j < Ns; j++) |       for (int j = 0; j < Ns; j++) | ||||||
| 	for (int k = 0; k < Ns; k++) | 	for (int k = 0; k < Ns; k++) | ||||||
| @@ -122,7 +126,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu) | |||||||
|       //    if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl; |       //    if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl; | ||||||
|       //  } |       //  } | ||||||
|       pokeLocalSite(Qxinv, CTIv, lcoor); |       pokeLocalSite(Qxinv, CTIv, lcoor); | ||||||
|     }); |     } | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   // Separate the even and odd parts |   // Separate the even and odd parts | ||||||
|   | |||||||
| @@ -38,6 +38,9 @@ Author: Nils Meyer  <nils.meyer@ur.de>  Regensburg University | |||||||
| // undefine everything related to kernels | // undefine everything related to kernels | ||||||
| #include <simd/Fujitsu_A64FX_undef.h> | #include <simd/Fujitsu_A64FX_undef.h> | ||||||
|  |  | ||||||
|  | // enable A64FX body | ||||||
|  | #define WILSONKERNELSASMBODYA64FX | ||||||
|  | //#pragma message("A64FX Dslash: WilsonKernelsAsmBodyA64FX.h") | ||||||
|  |  | ||||||
|     /////////////////////////////////////////////////////////// |     /////////////////////////////////////////////////////////// | ||||||
|     // If we are A64FX specialise the single precision routine |     // If we are A64FX specialise the single precision routine | ||||||
| @@ -60,89 +63,119 @@ Author: Nils Meyer  <nils.meyer@ur.de>  Regensburg University | |||||||
| #define INTERIOR_AND_EXTERIOR | #define INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #define INTERIOR | #define INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #define EXTERIOR | #define EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  |  | ||||||
| ///////////////////////////////////////////////////////////////// | ///////////////////////////////////////////////////////////////// | ||||||
| @@ -152,89 +185,119 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV | |||||||
| #define INTERIOR_AND_EXTERIOR | #define INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #define INTERIOR | #define INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #define EXTERIOR | #define EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  |  | ||||||
| // undefine | // undefine | ||||||
| @@ -267,89 +330,119 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFie | |||||||
| #define INTERIOR_AND_EXTERIOR | #define INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #define INTERIOR | #define INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #define EXTERIOR | #define EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| ///////////////////////////////////////////////////////////////// | ///////////////////////////////////////////////////////////////// | ||||||
| // XYZT vectorised, dag Kernel, double | // XYZT vectorised, dag Kernel, double | ||||||
| @@ -358,93 +451,124 @@ WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV | |||||||
| #define INTERIOR_AND_EXTERIOR | #define INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #define INTERIOR | #define INTERIOR | ||||||
| #undef EXTERIOR | #undef EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #undef INTERIOR_AND_EXTERIOR | #undef INTERIOR_AND_EXTERIOR | ||||||
| #undef INTERIOR | #undef INTERIOR | ||||||
| #define EXTERIOR | #define EXTERIOR | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
| #pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2") |  | ||||||
| template<> void | template<> void | ||||||
| WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf, | ||||||
| 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | 						int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
|  | #if defined (WILSONKERNELSASMBODYA64FX) | ||||||
| #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h> | ||||||
|  | #else | ||||||
|  | #include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h> | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| // undefs | // undefs | ||||||
|  | #undef WILSONKERNELSASMBODYA64FX | ||||||
| #include <simd/Fujitsu_A64FX_undef.h> | #include <simd/Fujitsu_A64FX_undef.h> | ||||||
|  |  | ||||||
| #endif //A64FXASM | #endif //A64FXASM | ||||||
|   | |||||||
| @@ -25,11 +25,6 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University | |||||||
|     See the full license in the file "LICENSE" in the top level distribution directory |     See the full license in the file "LICENSE" in the top level distribution directory | ||||||
| *************************************************************************************/ | *************************************************************************************/ | ||||||
| /*  END LEGAL */ | /*  END LEGAL */ | ||||||
|  |  | ||||||
| // GCC 10 messes up SVE instruction scheduling using -O3, but |  | ||||||
| // -O3 -fno-schedule-insns -fno-schedule-insns2 does wonders |  | ||||||
| // performance now is better than armclang 20.2 |  | ||||||
|  |  | ||||||
| #ifdef KERNEL_DAG | #ifdef KERNEL_DAG | ||||||
| #define DIR0_PROJ    XP_PROJ | #define DIR0_PROJ    XP_PROJ | ||||||
| #define DIR1_PROJ    YP_PROJ | #define DIR1_PROJ    YP_PROJ | ||||||
| @@ -102,7 +97,7 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University | |||||||
|     PROJ;							                        \ |     PROJ;							                        \ | ||||||
|     MAYBEPERM(PERMUTE_DIR,perm);					        \ |     MAYBEPERM(PERMUTE_DIR,perm);					        \ | ||||||
|       } else {								                \ |       } else {								                \ | ||||||
| 	  LOAD_CHI(base);							                \ | 	LOAD_CHI(base);							                \ | ||||||
|       }									                    \ |       }									                    \ | ||||||
|       base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\ |       base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\ | ||||||
|     MULT_2SPIN_1(Dir);					                    \ |     MULT_2SPIN_1(Dir);					                    \ | ||||||
| @@ -115,11 +110,6 @@ Author:  Nils Meyer  <nils.meyer@ur.de>  Regensburg University | |||||||
|     }                                                       \ |     }                                                       \ | ||||||
|     RECON;								                    \ |     RECON;								                    \ | ||||||
|  |  | ||||||
| /* |  | ||||||
| NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty |  | ||||||
|     though I expected that it would improve on performance |  | ||||||
| */ |  | ||||||
|  |  | ||||||
| #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \ | #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \ | ||||||
|   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \ |   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \ | ||||||
|   PREFETCH1_CHIMU(base);						            \ |   PREFETCH1_CHIMU(base);						            \ | ||||||
| @@ -136,63 +126,73 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|  |  | ||||||
| #define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | #define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | ||||||
|       basep = st.GetPFInfo(nent,plocal); nent++;			\ |       basep = st.GetPFInfo(nent,plocal); nent++;			\ | ||||||
|       if ( local ) {							\ |       if ( local ) {							            \ | ||||||
|   LOAD_CHIMU(base);                                       \ |     LOAD_CHIMU(base);                                       \ | ||||||
|   LOAD_TABLE(PERMUTE_DIR);                                \ |     LOAD_TABLE(PERMUTE_DIR);                                \ | ||||||
|   PROJ;							                        \ |     PROJ;							                        \ | ||||||
|   MAYBEPERM(PERMUTE_DIR,perm);					        \ |     MAYBEPERM(PERMUTE_DIR,perm);					        \ | ||||||
|       }else if ( st.same_node[Dir] ) {LOAD_CHI(base);}			\ |       }else if ( st.same_node[Dir] ) {LOAD_CHI(base);}	    \ | ||||||
|       if ( local || st.same_node[Dir] ) {				\ |       base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\ | ||||||
|   MULT_2SPIN_1(Dir);					                    \ |       if ( local || st.same_node[Dir] ) {				    \ | ||||||
|   MULT_2SPIN_2;					                        \ |     MULT_2SPIN_1(Dir);					                    \ | ||||||
|   RECON;								\ |     PREFETCH_CHIMU(base);                                   \ | ||||||
|       }									\ |     /* PREFETCH_GAUGE_L1(NxtDir); */                        \ | ||||||
|   base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++;	\ |     MULT_2SPIN_2;					                        \ | ||||||
|   PREFETCH_CHIMU(base);						\ |     if (s == 0) {                                           \ | ||||||
|   PREFETCH_CHIMU_L2(basep);                               \ |        if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \ | ||||||
|  |     }                                                       \ | ||||||
|  |     RECON;								                    \ | ||||||
|  |     PREFETCH_CHIMU_L2(basep);                               \ | ||||||
|  |       } else { PREFETCH_CHIMU(base); }								                    \ | ||||||
|  |  | ||||||
| #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | ||||||
|   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\ |   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\ | ||||||
|   PREFETCH1_CHIMU(base);						\ |   PREFETCH1_CHIMU(base);						\ | ||||||
|   { ZERO_PSI; }								\ |  | ||||||
|   ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) |   ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) | ||||||
|  |  | ||||||
| #define RESULT(base,basep) SAVE_RESULT(base,basep); | #define RESULT(base,basep) SAVE_RESULT(base,basep); | ||||||
|  |  | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
| //////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////// | ||||||
| // Post comms kernel | // Post comms kernel | ||||||
| //////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////// | ||||||
| #ifdef EXTERIOR | #ifdef EXTERIOR | ||||||
|  |  | ||||||
|  |  | ||||||
| #define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | #define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | ||||||
|   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\ |   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \ | ||||||
|   if((!local)&&(!st.same_node[Dir]) ) {					\ |   if((!local)&&(!st.same_node[Dir]) ) {					    \ | ||||||
|     LOAD_CHI(base);							\ |     LOAD_CHI(base);							                \ | ||||||
|     MULT_2SPIN_1(Dir);					                    \ |     MULT_2SPIN_1(Dir);					                    \ | ||||||
|  |     PREFETCH_CHIMU(base);                                   \ | ||||||
|  |     /* PREFETCH_GAUGE_L1(NxtDir); */                        \ | ||||||
|     MULT_2SPIN_2;					                        \ |     MULT_2SPIN_2;					                        \ | ||||||
|     RECON;								\ |     if (s == 0) {                                           \ | ||||||
|     nmu++;								\ |       if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \ | ||||||
|  |     }                                                       \ | ||||||
|  |     RECON;								                    \ | ||||||
|  |     nmu++;								                    \ | ||||||
|   } |   } | ||||||
|  |  | ||||||
| #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)			\ | #define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)	    \ | ||||||
|   nmu=0;								\ |   nmu=0;								                    \ | ||||||
|   { ZERO_PSI;}								\ |   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;\ | ||||||
|   base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;		\ |   if((!local)&&(!st.same_node[Dir]) ) {					    \ | ||||||
|   if((!local)&&(!st.same_node[Dir]) ) {					\ |     LOAD_CHI(base);							                \ | ||||||
|     LOAD_CHI(base);							\ |  | ||||||
|     MULT_2SPIN_1(Dir);					                    \ |     MULT_2SPIN_1(Dir);					                    \ | ||||||
|  |     PREFETCH_CHIMU(base);                                   \ | ||||||
|  |     /* PREFETCH_GAUGE_L1(NxtDir); */                        \ | ||||||
|     MULT_2SPIN_2;					                        \ |     MULT_2SPIN_2;					                        \ | ||||||
|     RECON;								\ |     if (s == 0) {                                           \ | ||||||
|     nmu++;								\ |       if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \ | ||||||
|  |     }                                                       \ | ||||||
|  |     RECON;								                    \ | ||||||
|  |     nmu++;								                    \ | ||||||
|   } |   } | ||||||
|  |  | ||||||
| #define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);} | #define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);} | ||||||
|  |  | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|  |  | ||||||
| { | { | ||||||
|   int nmu; |   int nmu; | ||||||
|   int local,perm, ptype; |   int local,perm, ptype; | ||||||
| @@ -209,6 +209,7 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|     int ssn=ssU+1;     if(ssn>=nmax) ssn=0; |     int ssn=ssU+1;     if(ssn>=nmax) ssn=0; | ||||||
|     //    int sUn=lo.Reorder(ssn); |     //    int sUn=lo.Reorder(ssn); | ||||||
|     int sUn=ssn; |     int sUn=ssn; | ||||||
|  |     LOCK_GAUGE(0); | ||||||
| #else | #else | ||||||
|     int sU =ssU; |     int sU =ssU; | ||||||
|     int ssn=ssU+1;     if(ssn>=nmax) ssn=0; |     int ssn=ssU+1;     if(ssn>=nmax) ssn=0; | ||||||
| @@ -294,11 +295,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|       std::cout << "----------------------------------------------------" << std::endl; |       std::cout << "----------------------------------------------------" << std::endl; | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|       // DC ZVA test |  | ||||||
|       // { uint64_t basestore = (uint64_t)&out[ss]; |  | ||||||
|       //   PREFETCH_RESULT_L2_STORE(basestore); } |  | ||||||
|  |  | ||||||
|  |  | ||||||
|       ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON); |       ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON); | ||||||
|  |  | ||||||
| #ifdef SHOW | #ifdef SHOW | ||||||
| @@ -312,11 +308,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|       std::cout << "----------------------------------------------------" << std::endl; |       std::cout << "----------------------------------------------------" << std::endl; | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|       // DC ZVA test |  | ||||||
|       //{ uint64_t basestore = (uint64_t)&out[ss]; |  | ||||||
|       //  PREFETCH_RESULT_L2_STORE(basestore); } |  | ||||||
|  |  | ||||||
|  |  | ||||||
|       ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON); |       ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON); | ||||||
|  |  | ||||||
| #ifdef SHOW | #ifdef SHOW | ||||||
| @@ -330,11 +321,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|       std::cout << "----------------------------------------------------" << std::endl; |       std::cout << "----------------------------------------------------" << std::endl; | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|       // DC ZVA test |  | ||||||
|       //{ uint64_t basestore = (uint64_t)&out[ss]; |  | ||||||
|       //  PREFETCH_RESULT_L2_STORE(basestore); } |  | ||||||
|  |  | ||||||
|  |  | ||||||
|       ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON); |       ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON); | ||||||
|  |  | ||||||
| #ifdef SHOW | #ifdef SHOW | ||||||
| @@ -355,7 +341,6 @@ NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty | |||||||
|       base = (uint64_t) &out[ss]; |       base = (uint64_t) &out[ss]; | ||||||
|       basep= st.GetPFInfo(nent,plocal); ent++; |       basep= st.GetPFInfo(nent,plocal); ent++; | ||||||
|       basep = (uint64_t) &out[ssn]; |       basep = (uint64_t) &out[ssn]; | ||||||
|       //PREFETCH_RESULT_L1_STORE(base); |  | ||||||
|       RESULT(base,basep); |       RESULT(base,basep); | ||||||
|  |  | ||||||
| #ifdef SHOW | #ifdef SHOW | ||||||
|   | |||||||
| @@ -38,46 +38,46 @@ NAMESPACE_BEGIN(Grid); | |||||||
|  |  | ||||||
| /////////////////////////////////////////////////////////// | /////////////////////////////////////////////////////////// | ||||||
| // Default to no assembler implementation | // Default to no assembler implementation | ||||||
| // Will specialise to AVX512 if available | // Will specialise to  | ||||||
| /////////////////////////////////////////////////////////// | /////////////////////////////////////////////////////////// | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 				  int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 				  int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 				     int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> void  | template<class Impl> void  | ||||||
| WilsonKernels<Impl >::AsmDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | WilsonKernels<Impl >::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, const FermionFieldView &out) | 					int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   assert(0); |   assert(0); | ||||||
| } | } | ||||||
|   | |||||||
| @@ -646,14 +646,9 @@ NAMESPACE_BEGIN(Grid); | |||||||
|   HAND_RESULT_EXT(ss,F) |   HAND_RESULT_EXT(ss,F) | ||||||
|  |  | ||||||
| #define HAND_SPECIALISE_GPARITY(IMPL)					\ | #define HAND_SPECIALISE_GPARITY(IMPL)					\ | ||||||
|   template<> accelerator_inline void					\ |  | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteSycl(StencilVector st_perm, StencilEntry *st_p, \ |  | ||||||
| 					SiteDoubledGaugeField *U, SiteHalfSpinor * buf, \ |  | ||||||
| 					int sF, int sU, const SiteSpinor *in, SiteSpinor *out) {} \ |  | ||||||
|   									\ |  | ||||||
|   template<> accelerator_inline void						\ |   template<> accelerator_inline void						\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ |   WilsonKernels<IMPL>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ | ||||||
| 				    int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 				    int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
| @@ -668,8 +663,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|   }									\ |   }									\ | ||||||
| 									\ | 									\ | ||||||
|   template<> accelerator_inline void						\ |   template<> accelerator_inline void						\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ |   WilsonKernels<IMPL>::HandDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ | ||||||
| 				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
| @@ -684,8 +679,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|   }									\ |   }									\ | ||||||
| 									\ | 									\ | ||||||
|   template<> accelerator_inline void						\ |   template<> accelerator_inline void						\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ |   WilsonKernels<IMPL>::HandDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ | ||||||
| 				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
| @@ -700,8 +695,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|   }									\ |   }									\ | ||||||
| 									\ | 									\ | ||||||
|   template<> accelerator_inline void						\ |   template<> accelerator_inline void						\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ |   WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ | ||||||
| 					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
| @@ -716,8 +711,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|   }									\ |   }									\ | ||||||
| 									\ | 									\ | ||||||
|   template<> accelerator_inline void							\ |   template<> accelerator_inline void							\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ |   WilsonKernels<IMPL>::HandDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, \ | ||||||
| 				       int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 				       int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
| @@ -733,8 +728,8 @@ NAMESPACE_BEGIN(Grid); | |||||||
|     HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \ |     HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \ | ||||||
|   }									\ |   }									\ | ||||||
|   template<> accelerator_inline void						\ |   template<> accelerator_inline void						\ | ||||||
|   WilsonKernels<IMPL>::HandDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ |   WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, \ | ||||||
| 					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) \ | 					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) \ | ||||||
|   {									\ |   {									\ | ||||||
|     typedef IMPL Impl;							\ |     typedef IMPL Impl;							\ | ||||||
|     typedef typename Simd::scalar_type S;				\ |     typedef typename Simd::scalar_type S;				\ | ||||||
|   | |||||||
| @@ -496,8 +496,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk> | |||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
| template<class Impl> accelerator_inline void  | template<class Impl> accelerator_inline void  | ||||||
| WilsonKernels<Impl>::HandDhopSite(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | ||||||
| 				  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 				  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
| // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
| @@ -520,8 +520,8 @@ WilsonKernels<Impl>::HandDhopSite(const StencilView &st, const DoubledGaugeField | |||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl>  accelerator_inline | template<class Impl>  accelerator_inline | ||||||
| void WilsonKernels<Impl>::HandDhopSiteDag(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
| @@ -543,8 +543,8 @@ void WilsonKernels<Impl>::HandDhopSiteDag(const StencilView &st,const DoubledGau | |||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl>  accelerator_inline void  | template<class Impl>  accelerator_inline void  | ||||||
| WilsonKernels<Impl>::HandDhopSiteInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | ||||||
| 					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
| // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
| @@ -567,8 +567,8 @@ WilsonKernels<Impl>::HandDhopSiteInt(const StencilView &st,const DoubledGaugeFie | |||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl> accelerator_inline | template<class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::HandDhopSiteDagInt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 						  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
| @@ -590,8 +590,8 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(const StencilView &st,const Doubled | |||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl>  accelerator_inline void  | template<class Impl>  accelerator_inline void  | ||||||
| WilsonKernels<Impl>::HandDhopSiteExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor  *buf, | ||||||
| 					  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 					  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
| // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
| @@ -615,8 +615,8 @@ WilsonKernels<Impl>::HandDhopSiteExt(const StencilView &st,const DoubledGaugeFie | |||||||
| } | } | ||||||
|  |  | ||||||
| template<class Impl>  accelerator_inline | template<class Impl>  accelerator_inline | ||||||
| void WilsonKernels<Impl>::HandDhopSiteDagExt(const StencilView &st,const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf, | ||||||
| 						  int ss,int sU,const FermionFieldView &in, const FermionFieldView &out) | 						  int ss,int sU,const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef typename Simd::scalar_type S; |   typedef typename Simd::scalar_type S; | ||||||
|   typedef typename Simd::vector_type V; |   typedef typename Simd::vector_type V; | ||||||
| @@ -682,4 +682,3 @@ NAMESPACE_END(Grid); | |||||||
| #undef HAND_RESULT | #undef HAND_RESULT | ||||||
| #undef HAND_RESULT_INT | #undef HAND_RESULT_INT | ||||||
| #undef HAND_RESULT_EXT | #undef HAND_RESULT_EXT | ||||||
| #undef HAND_DECLARATIONS |  | ||||||
|   | |||||||
| @@ -1,598 +0,0 @@ | |||||||
|    /************************************************************************************* |  | ||||||
|  |  | ||||||
|     Grid physics library, www.github.com/paboyle/Grid  |  | ||||||
|  |  | ||||||
|     Source file: ./lib/qcd/action/fermion/WilsonKernelsHand.cc |  | ||||||
|  |  | ||||||
|     Copyright (C) 2015 |  | ||||||
|  |  | ||||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> |  | ||||||
| Author: paboyle <paboyle@ph.ed.ac.uk> |  | ||||||
|  |  | ||||||
|     This program is free software; you can redistribute it and/or modify |  | ||||||
|     it under the terms of the GNU General Public License as published by |  | ||||||
|     the Free Software Foundation; either version 2 of the License, or |  | ||||||
|     (at your option) any later version. |  | ||||||
|  |  | ||||||
|     This program is distributed in the hope that it will be useful, |  | ||||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
|     GNU General Public License for more details. |  | ||||||
|  |  | ||||||
|     You should have received a copy of the GNU General Public License along |  | ||||||
|     with this program; if not, write to the Free Software Foundation, Inc., |  | ||||||
|     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |  | ||||||
|  |  | ||||||
|     See the full license in the file "LICENSE" in the top level distribution directory |  | ||||||
|     *************************************************************************************/ |  | ||||||
|     /*  END LEGAL */ |  | ||||||
|  |  | ||||||
| #pragma once |  | ||||||
|  |  | ||||||
| #include <Grid/qcd/action/fermion/FermionCore.h> |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #undef LOAD_CHIMU   |  | ||||||
| #undef LOAD_CHI  |  | ||||||
| #undef MULT_2SPIN |  | ||||||
| #undef PERMUTE_DIR |  | ||||||
| #undef XP_PROJ   |  | ||||||
| #undef YP_PROJ   |  | ||||||
| #undef ZP_PROJ   |  | ||||||
| #undef TP_PROJ   |  | ||||||
| #undef XM_PROJ   |  | ||||||
| #undef YM_PROJ   |  | ||||||
| #undef ZM_PROJ   |  | ||||||
| #undef TM_PROJ   |  | ||||||
| #undef XP_RECON  |  | ||||||
| #undef XP_RECON_ACCUM  |  | ||||||
| #undef XM_RECON  |  | ||||||
| #undef XM_RECON_ACCUM  |  | ||||||
| #undef YP_RECON_ACCUM  |  | ||||||
| #undef YM_RECON_ACCUM  |  | ||||||
| #undef ZP_RECON_ACCUM  |  | ||||||
| #undef ZM_RECON_ACCUM  |  | ||||||
| #undef TP_RECON_ACCUM  |  | ||||||
| #undef TM_RECON_ACCUM  |  | ||||||
| #undef ZERO_RESULT				  |  | ||||||
| #undef Chimu_00 |  | ||||||
| #undef Chimu_01 |  | ||||||
| #undef Chimu_02 |  | ||||||
| #undef Chimu_10 |  | ||||||
| #undef Chimu_11 |  | ||||||
| #undef Chimu_12 |  | ||||||
| #undef Chimu_20 |  | ||||||
| #undef Chimu_21 |  | ||||||
| #undef Chimu_22 |  | ||||||
| #undef Chimu_30 |  | ||||||
| #undef Chimu_31 |  | ||||||
| #undef Chimu_32 |  | ||||||
| #undef HAND_STENCIL_LEG |  | ||||||
| #undef HAND_STENCIL_LEG_INT |  | ||||||
| #undef HAND_STENCIL_LEG_EXT |  | ||||||
| #undef HAND_RESULT |  | ||||||
| #undef HAND_RESULT_INT |  | ||||||
| #undef HAND_RESULT_EXT |  | ||||||
|  |  | ||||||
| #define REGISTER |  | ||||||
|  |  | ||||||
| #ifdef GRID_SIMT |  | ||||||
| #define LOAD_CHIMU(ptype)		\ |  | ||||||
|   {const SiteSpinor & ref (in[offset]);	\ |  | ||||||
|     Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm);	\ |  | ||||||
|     Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm);	\ |  | ||||||
|     Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm);	\ |  | ||||||
|     Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm);	\ |  | ||||||
|     Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm);	\ |  | ||||||
|     Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm);	\ |  | ||||||
|     Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm);	\ |  | ||||||
|     Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm);	\ |  | ||||||
|     Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm);	\ |  | ||||||
|     Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm);	\ |  | ||||||
|     Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm);	\ |  | ||||||
|     Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm);	} |  | ||||||
|  |  | ||||||
| #define PERMUTE_DIR(dir) ; |  | ||||||
| #else |  | ||||||
| #define LOAD_CHIMU(ptype)		\ |  | ||||||
|   {const SiteSpinor & ref (in[offset]);	\ |  | ||||||
|     Chimu_00=coalescedRead(ref()(0)(0));	\ |  | ||||||
|     Chimu_01=coalescedRead(ref()(0)(1));	\ |  | ||||||
|     Chimu_02=coalescedRead(ref()(0)(2));	\ |  | ||||||
|     Chimu_10=coalescedRead(ref()(1)(0));	\ |  | ||||||
|     Chimu_11=coalescedRead(ref()(1)(1));	\ |  | ||||||
|     Chimu_12=coalescedRead(ref()(1)(2));	\ |  | ||||||
|     Chimu_20=coalescedRead(ref()(2)(0));	\ |  | ||||||
|     Chimu_21=coalescedRead(ref()(2)(1));	\ |  | ||||||
|     Chimu_22=coalescedRead(ref()(2)(2));	\ |  | ||||||
|     Chimu_30=coalescedRead(ref()(3)(0));	\ |  | ||||||
|     Chimu_31=coalescedRead(ref()(3)(1));	\ |  | ||||||
|     Chimu_32=coalescedRead(ref()(3)(2));	} |  | ||||||
|  |  | ||||||
| #define PERMUTE_DIR(dir)			\ |  | ||||||
|   permute##dir(Chi_00,Chi_00);	\ |  | ||||||
|       permute##dir(Chi_01,Chi_01);\ |  | ||||||
|       permute##dir(Chi_02,Chi_02);\ |  | ||||||
|       permute##dir(Chi_10,Chi_10);	\ |  | ||||||
|       permute##dir(Chi_11,Chi_11);\ |  | ||||||
|       permute##dir(Chi_12,Chi_12); |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
| #define MULT_2SPIN(A)\ |  | ||||||
|   {auto & ref(U[sU](A));					\ |  | ||||||
|   U_00=coalescedRead(ref()(0,0));				\ |  | ||||||
|   U_10=coalescedRead(ref()(1,0));					\ |  | ||||||
|   U_20=coalescedRead(ref()(2,0));					\ |  | ||||||
|   U_01=coalescedRead(ref()(0,1));					\ |  | ||||||
|   U_11=coalescedRead(ref()(1,1));					\ |  | ||||||
|   U_21=coalescedRead(ref()(2,1));					\ |  | ||||||
|     UChi_00 = U_00*Chi_00;					\ |  | ||||||
|     UChi_10 = U_00*Chi_10;					\ |  | ||||||
|     UChi_01 = U_10*Chi_00;					\ |  | ||||||
|     UChi_11 = U_10*Chi_10;					\ |  | ||||||
|     UChi_02 = U_20*Chi_00;					\ |  | ||||||
|     UChi_12 = U_20*Chi_10;					\ |  | ||||||
|     UChi_00+= U_01*Chi_01;					\ |  | ||||||
|     UChi_10+= U_01*Chi_11;					\ |  | ||||||
|     UChi_01+= U_11*Chi_01;					\ |  | ||||||
|     UChi_11+= U_11*Chi_11;					\ |  | ||||||
|     UChi_02+= U_21*Chi_01;					\ |  | ||||||
|     UChi_12+= U_21*Chi_11;					\ |  | ||||||
|     U_00=coalescedRead(ref()(0,2));				\ |  | ||||||
|     U_10=coalescedRead(ref()(1,2));				\ |  | ||||||
|     U_20=coalescedRead(ref()(2,2));				\ |  | ||||||
|     UChi_00+= U_00*Chi_02;					\ |  | ||||||
|     UChi_10+= U_00*Chi_12;					\ |  | ||||||
|     UChi_01+= U_10*Chi_02;					\ |  | ||||||
|     UChi_11+= U_10*Chi_12;					\ |  | ||||||
|     UChi_02+= U_20*Chi_02;					\ |  | ||||||
|     UChi_12+= U_20*Chi_12;} |  | ||||||
|  |  | ||||||
| #define LOAD_CHI				\ |  | ||||||
|   {const SiteHalfSpinor &ref(buf[offset]);	\ |  | ||||||
|     Chi_00 = coalescedRead(ref()(0)(0));	\ |  | ||||||
|     Chi_01 = coalescedRead(ref()(0)(1));	\ |  | ||||||
|     Chi_02 = coalescedRead(ref()(0)(2));	\ |  | ||||||
|     Chi_10 = coalescedRead(ref()(1)(0));	\ |  | ||||||
|     Chi_11 = coalescedRead(ref()(1)(1));	\ |  | ||||||
|     Chi_12 = coalescedRead(ref()(1)(2));} |  | ||||||
|  |  | ||||||
| //      hspin(0)=fspin(0)+timesI(fspin(3)); |  | ||||||
| //      hspin(1)=fspin(1)+timesI(fspin(2)); |  | ||||||
| #define XP_PROJ \ |  | ||||||
|     Chi_00 = Chimu_00+timesI(Chimu_30);\ |  | ||||||
|     Chi_01 = Chimu_01+timesI(Chimu_31);\ |  | ||||||
|     Chi_02 = Chimu_02+timesI(Chimu_32);\ |  | ||||||
|     Chi_10 = Chimu_10+timesI(Chimu_20);\ |  | ||||||
|     Chi_11 = Chimu_11+timesI(Chimu_21);\ |  | ||||||
|     Chi_12 = Chimu_12+timesI(Chimu_22); |  | ||||||
|  |  | ||||||
| #define YP_PROJ \ |  | ||||||
|     Chi_00 = Chimu_00-Chimu_30;\ |  | ||||||
|     Chi_01 = Chimu_01-Chimu_31;\ |  | ||||||
|     Chi_02 = Chimu_02-Chimu_32;\ |  | ||||||
|     Chi_10 = Chimu_10+Chimu_20;\ |  | ||||||
|     Chi_11 = Chimu_11+Chimu_21;\ |  | ||||||
|     Chi_12 = Chimu_12+Chimu_22; |  | ||||||
|  |  | ||||||
| #define ZP_PROJ \ |  | ||||||
|   Chi_00 = Chimu_00+timesI(Chimu_20);		\ |  | ||||||
|   Chi_01 = Chimu_01+timesI(Chimu_21);		\ |  | ||||||
|   Chi_02 = Chimu_02+timesI(Chimu_22);		\ |  | ||||||
|   Chi_10 = Chimu_10-timesI(Chimu_30);		\ |  | ||||||
|   Chi_11 = Chimu_11-timesI(Chimu_31);		\ |  | ||||||
|   Chi_12 = Chimu_12-timesI(Chimu_32); |  | ||||||
|  |  | ||||||
| #define TP_PROJ \ |  | ||||||
|   Chi_00 = Chimu_00+Chimu_20;		\ |  | ||||||
|   Chi_01 = Chimu_01+Chimu_21;		\ |  | ||||||
|   Chi_02 = Chimu_02+Chimu_22;		\ |  | ||||||
|   Chi_10 = Chimu_10+Chimu_30;		\ |  | ||||||
|   Chi_11 = Chimu_11+Chimu_31;		\ |  | ||||||
|   Chi_12 = Chimu_12+Chimu_32; |  | ||||||
|  |  | ||||||
|  |  | ||||||
| //      hspin(0)=fspin(0)-timesI(fspin(3)); |  | ||||||
| //      hspin(1)=fspin(1)-timesI(fspin(2)); |  | ||||||
| #define XM_PROJ \ |  | ||||||
|     Chi_00 = Chimu_00-timesI(Chimu_30);\ |  | ||||||
|     Chi_01 = Chimu_01-timesI(Chimu_31);\ |  | ||||||
|     Chi_02 = Chimu_02-timesI(Chimu_32);\ |  | ||||||
|     Chi_10 = Chimu_10-timesI(Chimu_20);\ |  | ||||||
|     Chi_11 = Chimu_11-timesI(Chimu_21);\ |  | ||||||
|     Chi_12 = Chimu_12-timesI(Chimu_22); |  | ||||||
|  |  | ||||||
| #define YM_PROJ \ |  | ||||||
|     Chi_00 = Chimu_00+Chimu_30;\ |  | ||||||
|     Chi_01 = Chimu_01+Chimu_31;\ |  | ||||||
|     Chi_02 = Chimu_02+Chimu_32;\ |  | ||||||
|     Chi_10 = Chimu_10-Chimu_20;\ |  | ||||||
|     Chi_11 = Chimu_11-Chimu_21;\ |  | ||||||
|     Chi_12 = Chimu_12-Chimu_22; |  | ||||||
|  |  | ||||||
| #define ZM_PROJ \ |  | ||||||
|   Chi_00 = Chimu_00-timesI(Chimu_20);		\ |  | ||||||
|   Chi_01 = Chimu_01-timesI(Chimu_21);		\ |  | ||||||
|   Chi_02 = Chimu_02-timesI(Chimu_22);		\ |  | ||||||
|   Chi_10 = Chimu_10+timesI(Chimu_30);		\ |  | ||||||
|   Chi_11 = Chimu_11+timesI(Chimu_31);		\ |  | ||||||
|   Chi_12 = Chimu_12+timesI(Chimu_32); |  | ||||||
|  |  | ||||||
| #define TM_PROJ \ |  | ||||||
|   Chi_00 = Chimu_00-Chimu_20;		\ |  | ||||||
|   Chi_01 = Chimu_01-Chimu_21;		\ |  | ||||||
|   Chi_02 = Chimu_02-Chimu_22;		\ |  | ||||||
|   Chi_10 = Chimu_10-Chimu_30;		\ |  | ||||||
|   Chi_11 = Chimu_11-Chimu_31;		\ |  | ||||||
|   Chi_12 = Chimu_12-Chimu_32; |  | ||||||
|  |  | ||||||
| //      fspin(0)=hspin(0); |  | ||||||
| //      fspin(1)=hspin(1); |  | ||||||
| //      fspin(2)=timesMinusI(hspin(1)); |  | ||||||
| //      fspin(3)=timesMinusI(hspin(0)); |  | ||||||
| #define XP_RECON\ |  | ||||||
|   result_00 = UChi_00;\ |  | ||||||
|   result_01 = UChi_01;\ |  | ||||||
|   result_02 = UChi_02;\ |  | ||||||
|   result_10 = UChi_10;\ |  | ||||||
|   result_11 = UChi_11;\ |  | ||||||
|   result_12 = UChi_12;\ |  | ||||||
|   result_20 = timesMinusI(UChi_10);\ |  | ||||||
|   result_21 = timesMinusI(UChi_11);\ |  | ||||||
|   result_22 = timesMinusI(UChi_12);\ |  | ||||||
|   result_30 = timesMinusI(UChi_00);\ |  | ||||||
|   result_31 = timesMinusI(UChi_01);\ |  | ||||||
|   result_32 = timesMinusI(UChi_02); |  | ||||||
|  |  | ||||||
| #define XP_RECON_ACCUM\ |  | ||||||
|   result_00+=UChi_00;\ |  | ||||||
|   result_01+=UChi_01;\ |  | ||||||
|   result_02+=UChi_02;\ |  | ||||||
|   result_10+=UChi_10;\ |  | ||||||
|   result_11+=UChi_11;\ |  | ||||||
|   result_12+=UChi_12;\ |  | ||||||
|   result_20-=timesI(UChi_10);\ |  | ||||||
|   result_21-=timesI(UChi_11);\ |  | ||||||
|   result_22-=timesI(UChi_12);\ |  | ||||||
|   result_30-=timesI(UChi_00);\ |  | ||||||
|   result_31-=timesI(UChi_01);\ |  | ||||||
|   result_32-=timesI(UChi_02); |  | ||||||
|  |  | ||||||
| #define XM_RECON\ |  | ||||||
|   result_00 = UChi_00;\ |  | ||||||
|   result_01 = UChi_01;\ |  | ||||||
|   result_02 = UChi_02;\ |  | ||||||
|   result_10 = UChi_10;\ |  | ||||||
|   result_11 = UChi_11;\ |  | ||||||
|   result_12 = UChi_12;\ |  | ||||||
|   result_20 = timesI(UChi_10);\ |  | ||||||
|   result_21 = timesI(UChi_11);\ |  | ||||||
|   result_22 = timesI(UChi_12);\ |  | ||||||
|   result_30 = timesI(UChi_00);\ |  | ||||||
|   result_31 = timesI(UChi_01);\ |  | ||||||
|   result_32 = timesI(UChi_02); |  | ||||||
|  |  | ||||||
| #define XM_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20+= timesI(UChi_10);\ |  | ||||||
|   result_21+= timesI(UChi_11);\ |  | ||||||
|   result_22+= timesI(UChi_12);\ |  | ||||||
|   result_30+= timesI(UChi_00);\ |  | ||||||
|   result_31+= timesI(UChi_01);\ |  | ||||||
|   result_32+= timesI(UChi_02); |  | ||||||
|  |  | ||||||
| #define YP_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20+= UChi_10;\ |  | ||||||
|   result_21+= UChi_11;\ |  | ||||||
|   result_22+= UChi_12;\ |  | ||||||
|   result_30-= UChi_00;\ |  | ||||||
|   result_31-= UChi_01;\ |  | ||||||
|   result_32-= UChi_02; |  | ||||||
|  |  | ||||||
| #define YM_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20-= UChi_10;\ |  | ||||||
|   result_21-= UChi_11;\ |  | ||||||
|   result_22-= UChi_12;\ |  | ||||||
|   result_30+= UChi_00;\ |  | ||||||
|   result_31+= UChi_01;\ |  | ||||||
|   result_32+= UChi_02; |  | ||||||
|  |  | ||||||
| #define ZP_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20-= timesI(UChi_00);			\ |  | ||||||
|   result_21-= timesI(UChi_01);			\ |  | ||||||
|   result_22-= timesI(UChi_02);			\ |  | ||||||
|   result_30+= timesI(UChi_10);			\ |  | ||||||
|   result_31+= timesI(UChi_11);			\ |  | ||||||
|   result_32+= timesI(UChi_12); |  | ||||||
|  |  | ||||||
| #define ZM_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20+= timesI(UChi_00);			\ |  | ||||||
|   result_21+= timesI(UChi_01);			\ |  | ||||||
|   result_22+= timesI(UChi_02);			\ |  | ||||||
|   result_30-= timesI(UChi_10);			\ |  | ||||||
|   result_31-= timesI(UChi_11);			\ |  | ||||||
|   result_32-= timesI(UChi_12); |  | ||||||
|  |  | ||||||
| #define TP_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20+= UChi_00;			\ |  | ||||||
|   result_21+= UChi_01;			\ |  | ||||||
|   result_22+= UChi_02;			\ |  | ||||||
|   result_30+= UChi_10;			\ |  | ||||||
|   result_31+= UChi_11;			\ |  | ||||||
|   result_32+= UChi_12; |  | ||||||
|  |  | ||||||
| #define TM_RECON_ACCUM\ |  | ||||||
|   result_00+= UChi_00;\ |  | ||||||
|   result_01+= UChi_01;\ |  | ||||||
|   result_02+= UChi_02;\ |  | ||||||
|   result_10+= UChi_10;\ |  | ||||||
|   result_11+= UChi_11;\ |  | ||||||
|   result_12+= UChi_12;\ |  | ||||||
|   result_20-= UChi_00;	\ |  | ||||||
|   result_21-= UChi_01;	\ |  | ||||||
|   result_22-= UChi_02;	\ |  | ||||||
|   result_30-= UChi_10;	\ |  | ||||||
|   result_31-= UChi_11;	\ |  | ||||||
|   result_32-= UChi_12; |  | ||||||
|  |  | ||||||
| #define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON)	\ |  | ||||||
|   SE=&st_p[DIR+8*ss];			\ |  | ||||||
|   ptype=st_perm[DIR];			\ |  | ||||||
|   offset = SE->_offset;				\ |  | ||||||
|   local  = SE->_is_local;			\ |  | ||||||
|   perm   = SE->_permute;			\ |  | ||||||
|   if ( local ) {				\ |  | ||||||
|     LOAD_CHIMU(PERM);				\ |  | ||||||
|     PROJ;					\ |  | ||||||
|     if ( perm) {				\ |  | ||||||
|       PERMUTE_DIR(PERM);			\ |  | ||||||
|     }						\ |  | ||||||
|   } else {					\ |  | ||||||
|     LOAD_CHI;					\ |  | ||||||
|   }						\ |  | ||||||
|   MULT_2SPIN(DIR);				\ |  | ||||||
|   RECON;					 |  | ||||||
|  |  | ||||||
| #define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON)	\ |  | ||||||
|   SE=&st_p[DIR+8*ss];			\ |  | ||||||
|   ptype=st_perm[DIR];			\ |  | ||||||
|   offset = SE->_offset;				\ |  | ||||||
|   local  = SE->_is_local;			\ |  | ||||||
|   perm   = SE->_permute;			\ |  | ||||||
|   LOAD_CHIMU(PERM);				\ |  | ||||||
|   PROJ;						\ |  | ||||||
|   MULT_2SPIN(DIR);				\ |  | ||||||
|   RECON;					 |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON)	\ |  | ||||||
|   SE=&st_p[DIR+8*ss];					\ |  | ||||||
|   ptype=st_perm[DIR];					\ |  | ||||||
|   offset = SE->_offset;				\ |  | ||||||
|   local  = SE->_is_local;			\ |  | ||||||
|   perm   = SE->_permute;			\ |  | ||||||
|   if ( local ) {				\ |  | ||||||
|     LOAD_CHIMU;					\ |  | ||||||
|     PROJ;					\ |  | ||||||
|     if ( perm) {				\ |  | ||||||
|       PERMUTE_DIR(PERM);			\ |  | ||||||
|     }						\ |  | ||||||
|   } else if ( st.same_node[DIR] ) {		\ |  | ||||||
|     LOAD_CHI;					\ |  | ||||||
|   }						\ |  | ||||||
|   if (local || st.same_node[DIR] ) {		\ |  | ||||||
|     MULT_2SPIN(DIR);				\ |  | ||||||
|     RECON;					\ |  | ||||||
|   } |  | ||||||
|  |  | ||||||
| #define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON)	\ |  | ||||||
|   SE=st.GetEntry(ptype,DIR,ss);			\ |  | ||||||
|   offset = SE->_offset;				\ |  | ||||||
|   if((!SE->_is_local)&&(!st.same_node[DIR]) ) {	\ |  | ||||||
|     LOAD_CHI;					\ |  | ||||||
|     MULT_2SPIN(DIR);				\ |  | ||||||
|     RECON;					\ |  | ||||||
|     nmu++;					\ |  | ||||||
|   } |  | ||||||
|  |  | ||||||
| #define HAND_RESULT(ss)				\ |  | ||||||
|   {						\ |  | ||||||
|     SiteSpinor & ref (out[ss]);			\ |  | ||||||
|     coalescedWrite(ref()(0)(0),result_00);		\ |  | ||||||
|     coalescedWrite(ref()(0)(1),result_01);		\ |  | ||||||
|     coalescedWrite(ref()(0)(2),result_02);		\ |  | ||||||
|     coalescedWrite(ref()(1)(0),result_10);		\ |  | ||||||
|     coalescedWrite(ref()(1)(1),result_11);		\ |  | ||||||
|     coalescedWrite(ref()(1)(2),result_12);		\ |  | ||||||
|     coalescedWrite(ref()(2)(0),result_20);		\ |  | ||||||
|     coalescedWrite(ref()(2)(1),result_21);		\ |  | ||||||
|     coalescedWrite(ref()(2)(2),result_22);		\ |  | ||||||
|     coalescedWrite(ref()(3)(0),result_30);		\ |  | ||||||
|     coalescedWrite(ref()(3)(1),result_31);		\ |  | ||||||
|     coalescedWrite(ref()(3)(2),result_32);		\ |  | ||||||
|   } |  | ||||||
|  |  | ||||||
| #define HAND_RESULT_EXT(ss)			\ |  | ||||||
|   if (nmu){					\ |  | ||||||
|     SiteSpinor & ref (out[ss]);		\ |  | ||||||
|     ref()(0)(0)+=result_00;		\ |  | ||||||
|     ref()(0)(1)+=result_01;		\ |  | ||||||
|     ref()(0)(2)+=result_02;		\ |  | ||||||
|     ref()(1)(0)+=result_10;		\ |  | ||||||
|     ref()(1)(1)+=result_11;		\ |  | ||||||
|     ref()(1)(2)+=result_12;		\ |  | ||||||
|     ref()(2)(0)+=result_20;		\ |  | ||||||
|     ref()(2)(1)+=result_21;		\ |  | ||||||
|     ref()(2)(2)+=result_22;		\ |  | ||||||
|     ref()(3)(0)+=result_30;		\ |  | ||||||
|     ref()(3)(1)+=result_31;		\ |  | ||||||
|     ref()(3)(2)+=result_32;		\ |  | ||||||
|   } |  | ||||||
|  |  | ||||||
| #define HAND_DECLARATIONS(Simd)			\ |  | ||||||
|   Simd result_00;				\ |  | ||||||
|   Simd result_01;				\ |  | ||||||
|   Simd result_02;				\ |  | ||||||
|   Simd result_10;				\ |  | ||||||
|   Simd result_11;				\ |  | ||||||
|   Simd result_12;				\ |  | ||||||
|   Simd result_20;				\ |  | ||||||
|   Simd result_21;				\ |  | ||||||
|   Simd result_22;				\ |  | ||||||
|   Simd result_30;				\ |  | ||||||
|   Simd result_31;				\ |  | ||||||
|   Simd result_32;				\ |  | ||||||
|   Simd Chi_00;					\ |  | ||||||
|   Simd Chi_01;					\ |  | ||||||
|   Simd Chi_02;					\ |  | ||||||
|   Simd Chi_10;					\ |  | ||||||
|   Simd Chi_11;					\ |  | ||||||
|   Simd Chi_12;					\ |  | ||||||
|   Simd UChi_00;					\ |  | ||||||
|   Simd UChi_01;					\ |  | ||||||
|   Simd UChi_02;					\ |  | ||||||
|   Simd UChi_10;					\ |  | ||||||
|   Simd UChi_11;					\ |  | ||||||
|   Simd UChi_12;					\ |  | ||||||
|   Simd U_00;					\ |  | ||||||
|   Simd U_10;					\ |  | ||||||
|   Simd U_20;					\ |  | ||||||
|   Simd U_01;					\ |  | ||||||
|   Simd U_11;					\ |  | ||||||
|   Simd U_21; |  | ||||||
|  |  | ||||||
| #define ZERO_RESULT				\ |  | ||||||
|   result_00=Zero();				\ |  | ||||||
|   result_01=Zero();				\ |  | ||||||
|   result_02=Zero();				\ |  | ||||||
|   result_10=Zero();				\ |  | ||||||
|   result_11=Zero();				\ |  | ||||||
|   result_12=Zero();				\ |  | ||||||
|   result_20=Zero();				\ |  | ||||||
|   result_21=Zero();				\ |  | ||||||
|   result_22=Zero();				\ |  | ||||||
|   result_30=Zero();				\ |  | ||||||
|   result_31=Zero();				\ |  | ||||||
|   result_32=Zero();			 |  | ||||||
|  |  | ||||||
| #define Chimu_00 Chi_00 |  | ||||||
| #define Chimu_01 Chi_01 |  | ||||||
| #define Chimu_02 Chi_02 |  | ||||||
| #define Chimu_10 Chi_10 |  | ||||||
| #define Chimu_11 Chi_11 |  | ||||||
| #define Chimu_12 Chi_12 |  | ||||||
| #define Chimu_20 UChi_00 |  | ||||||
| #define Chimu_21 UChi_01 |  | ||||||
| #define Chimu_22 UChi_02 |  | ||||||
| #define Chimu_30 UChi_10 |  | ||||||
| #define Chimu_31 UChi_11 |  | ||||||
| #define Chimu_32 UChi_12 |  | ||||||
|  |  | ||||||
| NAMESPACE_BEGIN(Grid); |  | ||||||
|  |  | ||||||
| template<class Impl> accelerator_inline void  |  | ||||||
| WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p, SiteDoubledGaugeField *U,SiteHalfSpinor  *buf, |  | ||||||
| 				      int ss,int sU,const SiteSpinor *in, SiteSpinor *out) |  | ||||||
| { |  | ||||||
| // T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc... |  | ||||||
|   typedef typename Simd::scalar_type S; |  | ||||||
|   typedef typename Simd::vector_type V; |  | ||||||
|   typedef iSinglet<Simd> vCplx; |  | ||||||
|   //  typedef decltype( coalescedRead( vCplx()()() )) Simt; |  | ||||||
|   typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt; |  | ||||||
|  |  | ||||||
|   HAND_DECLARATIONS(Simt); |  | ||||||
|  |  | ||||||
|   int offset,local,perm, ptype; |  | ||||||
|   StencilEntry *SE; |  | ||||||
|   HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON); |  | ||||||
|   HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM); |  | ||||||
|   HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM); |  | ||||||
|   HAND_RESULT(ss); |  | ||||||
| } |  | ||||||
|  |  | ||||||
| ////////////// Wilson ; uses this implementation ///////////////////// |  | ||||||
|  |  | ||||||
| NAMESPACE_END(Grid); |  | ||||||
| #undef LOAD_CHIMU   |  | ||||||
| #undef LOAD_CHI  |  | ||||||
| #undef MULT_2SPIN |  | ||||||
| #undef PERMUTE_DIR |  | ||||||
| #undef XP_PROJ   |  | ||||||
| #undef YP_PROJ   |  | ||||||
| #undef ZP_PROJ   |  | ||||||
| #undef TP_PROJ   |  | ||||||
| #undef XM_PROJ   |  | ||||||
| #undef YM_PROJ   |  | ||||||
| #undef ZM_PROJ   |  | ||||||
| #undef TM_PROJ   |  | ||||||
| #undef XP_RECON  |  | ||||||
| #undef XP_RECON_ACCUM  |  | ||||||
| #undef XM_RECON  |  | ||||||
| #undef XM_RECON_ACCUM  |  | ||||||
| #undef YP_RECON_ACCUM  |  | ||||||
| #undef YM_RECON_ACCUM  |  | ||||||
| #undef ZP_RECON_ACCUM  |  | ||||||
| #undef ZM_RECON_ACCUM  |  | ||||||
| #undef TP_RECON_ACCUM  |  | ||||||
| #undef TM_RECON_ACCUM  |  | ||||||
| #undef ZERO_RESULT				  |  | ||||||
| #undef Chimu_00 |  | ||||||
| #undef Chimu_01 |  | ||||||
| #undef Chimu_02 |  | ||||||
| #undef Chimu_10 |  | ||||||
| #undef Chimu_11 |  | ||||||
| #undef Chimu_12 |  | ||||||
| #undef Chimu_20 |  | ||||||
| #undef Chimu_21 |  | ||||||
| #undef Chimu_22 |  | ||||||
| #undef Chimu_30 |  | ||||||
| #undef Chimu_31 |  | ||||||
| #undef Chimu_32 |  | ||||||
| #undef HAND_STENCIL_LEG |  | ||||||
| #undef HAND_STENCIL_LEG_INT |  | ||||||
| #undef HAND_STENCIL_LEG_EXT |  | ||||||
| #undef HAND_RESULT |  | ||||||
| #undef HAND_RESULT_INT |  | ||||||
| #undef HAND_RESULT_EXT |  | ||||||
| #undef HAND_DECLARATIONS |  | ||||||
| @@ -115,9 +115,9 @@ accelerator_inline void get_stencil(StencilEntry * mem, StencilEntry &chip) | |||||||
|   // All legs kernels ; comms then compute |   // All legs kernels ; comms then compute | ||||||
|   //////////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////////// | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSiteDag(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, | ||||||
|  					     SiteHalfSpinor *buf, int sF, | 					     SiteHalfSpinor *buf, int sF, | ||||||
| 					     int sU, const FermionFieldView &in, const FermionFieldView &out) | 					     int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0]))   calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0]))   calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0])) calcSpinor; |   typedef decltype(coalescedRead(in[0])) calcSpinor; | ||||||
| @@ -141,9 +141,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDag(const StencilView &st, const Double | |||||||
| }; | }; | ||||||
|  |  | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSite(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSite(StencilView &st, DoubledGaugeFieldView &U, | ||||||
|  					  SiteHalfSpinor *buf, int sF, | 					  SiteHalfSpinor *buf, int sF, | ||||||
| 					  int sU, const FermionFieldView &in, const FermionFieldView &out) | 					  int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -170,9 +170,9 @@ void WilsonKernels<Impl>::GenericDhopSite(const StencilView &st, const DoubledGa | |||||||
|   // Interior kernels |   // Interior kernels | ||||||
|   //////////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////////// | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSiteDagInt(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSiteDagInt(StencilView &st,  DoubledGaugeFieldView &U, | ||||||
|  						SiteHalfSpinor *buf, int sF, | 						SiteHalfSpinor *buf, int sF, | ||||||
| 						int sU, const FermionFieldView &in, const FermionFieldView &out) | 						int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -198,9 +198,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDagInt(const StencilView &st, const Dou | |||||||
| }; | }; | ||||||
|  |  | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSiteInt(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSiteInt(StencilView &st,  DoubledGaugeFieldView &U, | ||||||
| 					     SiteHalfSpinor *buf, int sF, | 							 SiteHalfSpinor *buf, int sF, | ||||||
| 					     int sU, const FermionFieldView &in, const FermionFieldView &out) | 							 int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -228,9 +228,9 @@ void WilsonKernels<Impl>::GenericDhopSiteInt(const StencilView &st, const Double | |||||||
| // Exterior kernels | // Exterior kernels | ||||||
| //////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////// | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSiteDagExt(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSiteDagExt(StencilView &st,  DoubledGaugeFieldView &U, | ||||||
|  						SiteHalfSpinor *buf, int sF, | 						SiteHalfSpinor *buf, int sF, | ||||||
| 						int sU, const FermionFieldView &in, const FermionFieldView &out) | 						int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -259,9 +259,9 @@ void WilsonKernels<Impl>::GenericDhopSiteDagExt(const StencilView &st, const Dou | |||||||
| }; | }; | ||||||
|  |  | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::GenericDhopSiteExt(const StencilView &st, const DoubledGaugeFieldView &U, | void WilsonKernels<Impl>::GenericDhopSiteExt(StencilView &st,  DoubledGaugeFieldView &U, | ||||||
|  					     SiteHalfSpinor *buf, int sF, | 					     SiteHalfSpinor *buf, int sF, | ||||||
| 					     int sU, const FermionFieldView &in, const FermionFieldView &out) | 					     int sU, const FermionFieldView &in, FermionFieldView &out) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -291,8 +291,8 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(const StencilView &st, const Double | |||||||
|  |  | ||||||
| #define DhopDirMacro(Dir,spProj,spRecon)	\ | #define DhopDirMacro(Dir,spProj,spRecon)	\ | ||||||
|   template <class Impl> accelerator_inline				\ |   template <class Impl> accelerator_inline				\ | ||||||
|   void WilsonKernels<Impl>::DhopDir##Dir(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \ |   void WilsonKernels<Impl>::DhopDir##Dir(StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, \ | ||||||
| 					 int sU, const FermionFieldView &in, const FermionFieldView &out, int dir) \ | 					 int sU, const FermionFieldView &in, FermionFieldView &out, int dir) \ | ||||||
|   {									\ |   {									\ | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;		\ |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor;		\ | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor;			\ |   typedef decltype(coalescedRead(in[0]))  calcSpinor;			\ | ||||||
| @@ -302,8 +302,8 @@ void WilsonKernels<Impl>::GenericDhopSiteExt(const StencilView &st, const Double | |||||||
|   StencilEntry *SE;							\ |   StencilEntry *SE;							\ | ||||||
|   int ptype;								\ |   int ptype;								\ | ||||||
|   const int Nsimd = SiteHalfSpinor::Nsimd();				\ |   const int Nsimd = SiteHalfSpinor::Nsimd();				\ | ||||||
|   const int lane=acceleratorSIMTlane(Nsimd);				\ |   const int lane=acceleratorSIMTlane(Nsimd);					\ | ||||||
|   									\ | 									\ | ||||||
|   SE = st.GetEntry(ptype, dir, sF);					\ |   SE = st.GetEntry(ptype, dir, sF);					\ | ||||||
|   GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,spRecon);				\ |   GENERIC_DHOPDIR_LEG_BODY(Dir,spProj,spRecon);				\ | ||||||
|   coalescedWrite(out[sF], result,lane);					\ |   coalescedWrite(out[sF], result,lane);					\ | ||||||
| @@ -319,8 +319,8 @@ DhopDirMacro(Zm,spProjZm,spReconZm); | |||||||
| DhopDirMacro(Tm,spProjTm,spReconTm); | DhopDirMacro(Tm,spProjTm,spReconTm); | ||||||
|  |  | ||||||
| template <class Impl> accelerator_inline | template <class Impl> accelerator_inline | ||||||
| void WilsonKernels<Impl>::DhopDirK(const StencilView &st, const DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, | void WilsonKernels<Impl>::DhopDirK( StencilView &st, DoubledGaugeFieldView &U,SiteHalfSpinor *buf, int sF, | ||||||
| 				   int sU, const FermionFieldView &in, const FermionFieldView &out, int dir, int gamma) | 				    int sU, const FermionFieldView &in, FermionFieldView &out, int dir, int gamma) | ||||||
| { | { | ||||||
|   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; |   typedef decltype(coalescedRead(buf[0])) calcHalfSpinor; | ||||||
|   typedef decltype(coalescedRead(in[0]))  calcSpinor; |   typedef decltype(coalescedRead(in[0]))  calcSpinor; | ||||||
| @@ -345,8 +345,8 @@ void WilsonKernels<Impl>::DhopDirK(const StencilView &st, const DoubledGaugeFiel | |||||||
| } | } | ||||||
|  |  | ||||||
| template <class Impl> | template <class Impl> | ||||||
| void WilsonKernels<Impl>::DhopDirAll(StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls, | void WilsonKernels<Impl>::DhopDirAll( StencilImpl &st, DoubledGaugeField &U,SiteHalfSpinor *buf, int Ls, | ||||||
| 				     int Nsite, const FermionField &in, std::vector<FermionField> &out) | 				      int Nsite, const FermionField &in, std::vector<FermionField> &out) | ||||||
| { | { | ||||||
|    autoView(U_v  ,U,AcceleratorRead); |    autoView(U_v  ,U,AcceleratorRead); | ||||||
|    autoView(in_v ,in,AcceleratorRead); |    autoView(in_v ,in,AcceleratorRead); | ||||||
| @@ -424,20 +424,6 @@ void WilsonKernels<Impl>::DhopDirKernel( StencilImpl &st, DoubledGaugeField &U,S | |||||||
|       WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v);		\ |       WilsonKernels<Impl>::A(st_v,U_v,buf,sF,sU,in_v,out_v);		\ | ||||||
|   }); |   }); | ||||||
|  |  | ||||||
| #define KERNEL_CALL_TMP(A) \ |  | ||||||
|   const uint64_t    NN = Nsite*Ls;					\ |  | ||||||
|   auto U_p = & U_v[0];							\ |  | ||||||
|   auto in_p = & in_v[0];						\ |  | ||||||
|   auto out_p = & out_v[0];						\ |  | ||||||
|   auto st_p = st_v._entries_p;						\ |  | ||||||
|   auto st_perm = st_v._permute_type;					\ |  | ||||||
|   accelerator_forNB( ss, NN, Simd::Nsimd(), {				\ |  | ||||||
|       int sF = ss;							\ |  | ||||||
|       int sU = ss/Ls;							\ |  | ||||||
|       WilsonKernels<Impl>::A(st_perm,st_p,U_p,buf,sF,sU,in_p,out_p);	\ |  | ||||||
|     });									\ |  | ||||||
|   accelerator_barrier(); |  | ||||||
|  |  | ||||||
| #define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier(); | #define KERNEL_CALL(A) KERNEL_CALLNB(A); accelerator_barrier(); | ||||||
|  |  | ||||||
| #define ASM_CALL(A)							\ | #define ASM_CALL(A)							\ | ||||||
| @@ -460,8 +446,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st,  DoubledGaugeField | |||||||
|    if( interior && exterior ) { |    if( interior && exterior ) { | ||||||
|      if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSite); return;} |      if (Opt == WilsonKernelsStatic::OptGeneric    ) { KERNEL_CALL(GenericDhopSite); return;} | ||||||
| #ifndef GRID_CUDA | #ifndef GRID_CUDA | ||||||
|      if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_TMP(HandDhopSiteSycl);    return; } |      if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;} | ||||||
|      //     if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite);    return;} |  | ||||||
|      if (Opt == WilsonKernelsStatic::OptInlineAsm  ) {  ASM_CALL(AsmDhopSite);    return;} |      if (Opt == WilsonKernelsStatic::OptInlineAsm  ) {  ASM_CALL(AsmDhopSite);    return;} | ||||||
| #endif | #endif | ||||||
|    } else if( interior ) { |    } else if( interior ) { | ||||||
|   | |||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -32,7 +32,6 @@ directory | |||||||
| #include <Grid/qcd/action/fermion/FermionCore.h> | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
| #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
| #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
| #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementationSycl.h> |  | ||||||
|  |  | ||||||
| #ifndef AVX512 | #ifndef AVX512 | ||||||
| #ifndef QPX | #ifndef QPX | ||||||
|   | |||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1 +0,0 @@ | |||||||
| ../WilsonKernelsInstantiation.cc.master |  | ||||||
| @@ -0,0 +1,51 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  | Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  | Source file: ./lib/qcd/action/fermion/WilsonKernels.cc | ||||||
|  |  | ||||||
|  | Copyright (C) 2015, 2020 | ||||||
|  |  | ||||||
|  | Author: Peter Boyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> | ||||||
|  | Author: paboyle <paboyle@ph.ed.ac.uk> | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> Regensburg University | ||||||
|  |  | ||||||
|  | This program is free software; you can redistribute it and/or modify | ||||||
|  | it under the terms of the GNU General Public License as published by | ||||||
|  | the Free Software Foundation; either version 2 of the License, or | ||||||
|  | (at your option) any later version. | ||||||
|  |  | ||||||
|  | This program is distributed in the hope that it will be useful, | ||||||
|  | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  | GNU General Public License for more details. | ||||||
|  |  | ||||||
|  | You should have received a copy of the GNU General Public License along | ||||||
|  | with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  | See the full license in the file "LICENSE" in the top level distribution | ||||||
|  | directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #include <Grid/qcd/action/fermion/FermionCore.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h> | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h> | ||||||
|  |  | ||||||
|  | #ifndef AVX512 | ||||||
|  | #ifndef QPX | ||||||
|  | #ifndef A64FX | ||||||
|  | #ifndef A64FXFIXEDSIZE | ||||||
|  | #include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h> | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  | #endif | ||||||
|  |  | ||||||
|  | NAMESPACE_BEGIN(Grid); | ||||||
|  |  | ||||||
|  | #include "impl.h" | ||||||
|  | template class WilsonKernels<IMPLEMENTATION>; | ||||||
|  |  | ||||||
|  | NAMESPACE_END(Grid); | ||||||
| @@ -1,38 +0,0 @@ | |||||||
| /************************************************************************************* |  | ||||||
|  |  | ||||||
| Grid physics library, www.github.com/paboyle/Grid |  | ||||||
|  |  | ||||||
| Source file: ./lib/qcd/action/gauge/Gauge.cc |  | ||||||
|  |  | ||||||
| Copyright (C) 2020 |  | ||||||
|  |  | ||||||
| Author: Peter Boyle <paboyle@ph.ed.ac.uk> |  | ||||||
| Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local> |  | ||||||
| Author: paboyle <paboyle@ph.ed.ac.uk> |  | ||||||
|  |  | ||||||
| This program is free software; you can redistribute it and/or modify |  | ||||||
| it under the terms of the GNU General Public License as published by |  | ||||||
| the Free Software Foundation; either version 2 of the License, or |  | ||||||
| (at your option) any later version. |  | ||||||
|  |  | ||||||
| This program is distributed in the hope that it will be useful, |  | ||||||
| but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| GNU General Public License for more details. |  | ||||||
|  |  | ||||||
| You should have received a copy of the GNU General Public License along |  | ||||||
| with this program; if not, write to the Free Software Foundation, Inc., |  | ||||||
| 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |  | ||||||
|  |  | ||||||
| See the full license in the file "LICENSE" in the top level distribution |  | ||||||
| directory |  | ||||||
| *************************************************************************************/ |  | ||||||
| /*  END LEGAL */ |  | ||||||
| #include <Grid/qcd/action/fermion/FermionCore.h> |  | ||||||
|  |  | ||||||
| NAMESPACE_BEGIN(Grid); |  | ||||||
|  |  | ||||||
| std::vector<int> ConjugateGaugeImplBase::_conjDirs; |  | ||||||
|  |  | ||||||
| NAMESPACE_END(Grid); |  | ||||||
|  |  | ||||||
| @@ -154,10 +154,6 @@ public: | |||||||
|     return Hsum.real(); |     return Hsum.real(); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline void Project(Field &U) { |  | ||||||
|     ProjectSUn(U); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { |   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { | ||||||
|     SU<Nc>::HotConfiguration(pRNG, U); |     SU<Nc>::HotConfiguration(pRNG, U); | ||||||
|   } |   } | ||||||
|   | |||||||
| @@ -59,14 +59,14 @@ public: | |||||||
|   } |   } | ||||||
|   static inline GaugeLinkField |   static inline GaugeLinkField | ||||||
|   CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) { |   CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) { | ||||||
|     return PeriodicBC::CovShiftIdentityBackward(Link, mu); |     return Cshift(adj(Link), mu, -1); | ||||||
|   } |   } | ||||||
|   static inline GaugeLinkField |   static inline GaugeLinkField | ||||||
|   CovShiftIdentityForward(const GaugeLinkField &Link, int mu) { |   CovShiftIdentityForward(const GaugeLinkField &Link, int mu) { | ||||||
|     return PeriodicBC::CovShiftIdentityForward(Link,mu); |     return Link; | ||||||
|   } |   } | ||||||
|   static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) { |   static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) { | ||||||
|     return PeriodicBC::ShiftStaple(Link,mu); |     return Cshift(Link, mu, 1); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline bool isPeriodicGaugeField(void) { return true; } |   static inline bool isPeriodicGaugeField(void) { return true; } | ||||||
| @@ -74,13 +74,7 @@ public: | |||||||
|  |  | ||||||
| // Composition with smeared link, bc's etc.. probably need multiple inheritance | // Composition with smeared link, bc's etc.. probably need multiple inheritance | ||||||
| // Variable precision "S" and variable Nc | // Variable precision "S" and variable Nc | ||||||
| class ConjugateGaugeImplBase { | template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes { | ||||||
| protected: |  | ||||||
|   static std::vector<int> _conjDirs; |  | ||||||
| }; |  | ||||||
|  |  | ||||||
|   template <class GimplTypes> class ConjugateGaugeImpl : public GimplTypes, ConjugateGaugeImplBase { |  | ||||||
| private: |  | ||||||
| public: | public: | ||||||
|   INHERIT_GIMPL_TYPES(GimplTypes); |   INHERIT_GIMPL_TYPES(GimplTypes); | ||||||
|  |  | ||||||
| @@ -90,56 +84,47 @@ public: | |||||||
|   //////////////////////////////////////////////////////////////////////////////////////////////////////////// |   //////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||||
|   template <class covariant> |   template <class covariant> | ||||||
|   static Lattice<covariant> CovShiftForward(const GaugeLinkField &Link, int mu, |   static Lattice<covariant> CovShiftForward(const GaugeLinkField &Link, int mu, | ||||||
|                                             const Lattice<covariant> &field) |                                             const Lattice<covariant> &field) { | ||||||
|   { |     return ConjugateBC::CovShiftForward(Link, mu, field); | ||||||
|     assert(_conjDirs.size() == Nd); |  | ||||||
|     if(_conjDirs[mu])  |  | ||||||
|       return ConjugateBC::CovShiftForward(Link, mu, field); |  | ||||||
|     else |  | ||||||
|       return PeriodicBC::CovShiftForward(Link, mu, field); |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   template <class covariant> |   template <class covariant> | ||||||
|   static Lattice<covariant> CovShiftBackward(const GaugeLinkField &Link, int mu, |   static Lattice<covariant> CovShiftBackward(const GaugeLinkField &Link, int mu, | ||||||
|                                              const Lattice<covariant> &field) |                                              const Lattice<covariant> &field) { | ||||||
|   { |     return ConjugateBC::CovShiftBackward(Link, mu, field); | ||||||
|     assert(_conjDirs.size() == Nd); |  | ||||||
|     if(_conjDirs[mu])  |  | ||||||
|       return ConjugateBC::CovShiftBackward(Link, mu, field); |  | ||||||
|     else  |  | ||||||
|       return PeriodicBC::CovShiftBackward(Link, mu, field); |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline GaugeLinkField |   static inline GaugeLinkField | ||||||
|   CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) |   CovShiftIdentityBackward(const GaugeLinkField &Link, int mu) { | ||||||
|   { |     GridBase *grid = Link.Grid(); | ||||||
|     assert(_conjDirs.size() == Nd); |     int Lmu = grid->GlobalDimensions()[mu] - 1; | ||||||
|     if(_conjDirs[mu])  |  | ||||||
|       return ConjugateBC::CovShiftIdentityBackward(Link, mu); |     Lattice<iScalar<vInteger>> coor(grid); | ||||||
|     else  |     LatticeCoordinate(coor, mu); | ||||||
|       return PeriodicBC::CovShiftIdentityBackward(Link, mu); |  | ||||||
|  |     GaugeLinkField tmp(grid); | ||||||
|  |     tmp = adj(Link); | ||||||
|  |     tmp = where(coor == Lmu, conjugate(tmp), tmp); | ||||||
|  |     return Cshift(tmp, mu, -1); // moves towards positive mu | ||||||
|   } |   } | ||||||
|   static inline GaugeLinkField |   static inline GaugeLinkField | ||||||
|   CovShiftIdentityForward(const GaugeLinkField &Link, int mu) |   CovShiftIdentityForward(const GaugeLinkField &Link, int mu) { | ||||||
|   { |     return Link; | ||||||
|     assert(_conjDirs.size() == Nd); |  | ||||||
|     if(_conjDirs[mu])  |  | ||||||
|       return ConjugateBC::CovShiftIdentityForward(Link,mu); |  | ||||||
|     else |  | ||||||
|       return PeriodicBC::CovShiftIdentityForward(Link,mu); |  | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) |   static inline GaugeLinkField ShiftStaple(const GaugeLinkField &Link, int mu) { | ||||||
|   { |     GridBase *grid = Link.Grid(); | ||||||
|     assert(_conjDirs.size() == Nd); |     int Lmu = grid->GlobalDimensions()[mu] - 1; | ||||||
|     if(_conjDirs[mu])  |  | ||||||
|       return ConjugateBC::ShiftStaple(Link,mu); |     Lattice<iScalar<vInteger>> coor(grid); | ||||||
|     else      |     LatticeCoordinate(coor, mu); | ||||||
|       return PeriodicBC::ShiftStaple(Link,mu); |  | ||||||
|  |     GaugeLinkField tmp(grid); | ||||||
|  |     tmp = Cshift(Link, mu, 1); | ||||||
|  |     tmp = where(coor == Lmu, conjugate(tmp), tmp); | ||||||
|  |     return tmp; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline void       setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; } |  | ||||||
|   static inline std::vector<int> getDirections(void) { return _conjDirs; } |  | ||||||
|   static inline bool isPeriodicGaugeField(void) { return false; } |   static inline bool isPeriodicGaugeField(void) { return false; } | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -54,10 +54,6 @@ public: | |||||||
|   static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) { |   static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) { | ||||||
|     U = 1.0; |     U = 1.0; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline void Project(Field &U) { |  | ||||||
|     return; |  | ||||||
|   } |  | ||||||
|      |      | ||||||
|   static void MomentumSpacePropagator(Field &out, RealD m) |   static void MomentumSpacePropagator(Field &out, RealD m) | ||||||
|   { |   { | ||||||
| @@ -238,10 +234,6 @@ public: | |||||||
| #endif //USE_FFT_ACCELERATION | #endif //USE_FFT_ACCELERATION | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   static inline void Project(Field &U) { |  | ||||||
|     return; |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { |   static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { | ||||||
|     Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U); |     Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U); | ||||||
|   } |   } | ||||||
|   | |||||||
| @@ -159,13 +159,6 @@ private: | |||||||
|       Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U, |       Resources.GetCheckPointer()->CheckpointRestore(Parameters.StartTrajectory, U, | ||||||
| 						     Resources.GetSerialRNG(), | 						     Resources.GetSerialRNG(), | ||||||
| 						     Resources.GetParallelRNG()); | 						     Resources.GetParallelRNG()); | ||||||
|     } else { |  | ||||||
|       // others |  | ||||||
|       std::cout << GridLogError << "Unrecognized StartingType\n"; |  | ||||||
|       std::cout |  | ||||||
| 	<< GridLogError |  | ||||||
| 	<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n"; |  | ||||||
|       exit(1); |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     Smearing.set_Field(U); |     Smearing.set_Field(U); | ||||||
|   | |||||||
| @@ -95,7 +95,7 @@ private: | |||||||
|  |  | ||||||
|   typedef typename IntegratorType::Field Field; |   typedef typename IntegratorType::Field Field; | ||||||
|   typedef std::vector< HmcObservable<Field> * > ObsListType; |   typedef std::vector< HmcObservable<Field> * > ObsListType; | ||||||
|  |    | ||||||
|   //pass these from the resource manager |   //pass these from the resource manager | ||||||
|   GridSerialRNG &sRNG;    |   GridSerialRNG &sRNG;    | ||||||
|   GridParallelRNG &pRNG;  |   GridParallelRNG &pRNG;  | ||||||
|   | |||||||
| @@ -74,7 +74,7 @@ public: | |||||||
|       conf_file = os.str(); |       conf_file = os.str(); | ||||||
|     } |     } | ||||||
|   }  |   }  | ||||||
|   virtual ~BaseHmcCheckpointer(){}; |  | ||||||
|   void check_filename(const std::string &filename){ |   void check_filename(const std::string &filename){ | ||||||
|     std::ifstream f(filename.c_str()); |     std::ifstream f(filename.c_str()); | ||||||
|     if(!f.good()){ |     if(!f.good()){ | ||||||
| @@ -82,6 +82,7 @@ public: | |||||||
|       abort(); |       abort(); | ||||||
|     }; |     }; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   virtual void initialize(const CheckpointerParameters &Params) = 0; |   virtual void initialize(const CheckpointerParameters &Params) = 0; | ||||||
|  |  | ||||||
|   virtual void CheckpointRestore(int traj, typename Impl::Field &U, |   virtual void CheckpointRestore(int traj, typename Impl::Field &U, | ||||||
|   | |||||||
| @@ -45,7 +45,6 @@ private: | |||||||
|  |  | ||||||
| public: | public: | ||||||
|   INHERIT_GIMPL_TYPES(Implementation); |   INHERIT_GIMPL_TYPES(Implementation); | ||||||
|   typedef GaugeStatistics<Implementation> GaugeStats; |  | ||||||
|  |  | ||||||
|   ILDGHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); } |   ILDGHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); } | ||||||
|  |  | ||||||
| @@ -79,7 +78,7 @@ public: | |||||||
|       BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb); |       BinaryIO::writeRNG(sRNG, pRNG, rng, 0,nersc_csum,scidac_csuma,scidac_csumb); | ||||||
|       IldgWriter _IldgWriter(grid->IsBoss()); |       IldgWriter _IldgWriter(grid->IsBoss()); | ||||||
|       _IldgWriter.open(config); |       _IldgWriter.open(config); | ||||||
|       _IldgWriter.writeConfiguration<GaugeStats>(U, traj, config, config); |       _IldgWriter.writeConfiguration(U, traj, config, config); | ||||||
|       _IldgWriter.close(); |       _IldgWriter.close(); | ||||||
|  |  | ||||||
|       std::cout << GridLogMessage << "Written ILDG Configuration on " << config |       std::cout << GridLogMessage << "Written ILDG Configuration on " << config | ||||||
| @@ -106,7 +105,7 @@ public: | |||||||
|     FieldMetaData header; |     FieldMetaData header; | ||||||
|     IldgReader _IldgReader; |     IldgReader _IldgReader; | ||||||
|     _IldgReader.open(config); |     _IldgReader.open(config); | ||||||
|     _IldgReader.readConfiguration<GaugeStats>(U,header);  // format from the header |     _IldgReader.readConfiguration(U,header);  // format from the header | ||||||
|     _IldgReader.close(); |     _IldgReader.close(); | ||||||
|  |  | ||||||
|     std::cout << GridLogMessage << "Read ILDG Configuration from " << config |     std::cout << GridLogMessage << "Read ILDG Configuration from " << config | ||||||
|   | |||||||
| @@ -43,8 +43,7 @@ private: | |||||||
|  |  | ||||||
| public: | public: | ||||||
|   INHERIT_GIMPL_TYPES(Gimpl);  // only for gauge configurations |   INHERIT_GIMPL_TYPES(Gimpl);  // only for gauge configurations | ||||||
|   typedef GaugeStatistics<Gimpl> GaugeStats; |  | ||||||
|    |  | ||||||
|   NerscHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); } |   NerscHmcCheckpointer(const CheckpointerParameters &Params_) { initialize(Params_); } | ||||||
|  |  | ||||||
|   void initialize(const CheckpointerParameters &Params_) { |   void initialize(const CheckpointerParameters &Params_) { | ||||||
| @@ -61,7 +60,7 @@ public: | |||||||
|       int precision32 = 1; |       int precision32 = 1; | ||||||
|       int tworow = 0; |       int tworow = 0; | ||||||
|       NerscIO::writeRNGState(sRNG, pRNG, rng); |       NerscIO::writeRNGState(sRNG, pRNG, rng); | ||||||
|       NerscIO::writeConfiguration<GaugeStats>(U, config, tworow, precision32); |       NerscIO::writeConfiguration(U, config, tworow, precision32); | ||||||
|     } |     } | ||||||
|   }; |   }; | ||||||
|  |  | ||||||
| @@ -75,7 +74,7 @@ public: | |||||||
|  |  | ||||||
|     FieldMetaData header; |     FieldMetaData header; | ||||||
|     NerscIO::readRNGState(sRNG, pRNG, header, rng); |     NerscIO::readRNGState(sRNG, pRNG, header, rng); | ||||||
|     NerscIO::readConfiguration<GaugeStats>(U, header, config); |     NerscIO::readConfiguration(U, header, config); | ||||||
|   }; |   }; | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -313,8 +313,6 @@ public: | |||||||
|       std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl; |       std::cout << GridLogIntegrator << " times[" << level << "]= " << t_P[level] << " " << t_U << std::endl; | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     FieldImplementation::Project(U); |  | ||||||
|  |  | ||||||
|     // and that we indeed got to the end of the trajectory |     // and that we indeed got to the end of the trajectory | ||||||
|     assert(fabs(t_U - Params.trajL) < 1.0e-6); |     assert(fabs(t_U - Params.trajL) < 1.0e-6); | ||||||
|  |  | ||||||
|   | |||||||
| @@ -99,7 +99,7 @@ public: | |||||||
|   virtual Prod* getPtr() = 0; |   virtual Prod* getPtr() = 0; | ||||||
|  |  | ||||||
|   // add a getReference?  |   // add a getReference?  | ||||||
|   virtual ~HMCModuleBase(){}; |    | ||||||
|   virtual void print_parameters(){};  // default to nothing |   virtual void print_parameters(){};  // default to nothing | ||||||
| }; | }; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -128,6 +128,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProjTm (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProjTm (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   hspin(0)=fspin(0)-fspin(2); |   hspin(0)=fspin(0)-fspin(2); | ||||||
|   hspin(1)=fspin(1)-fspin(3); |   hspin(1)=fspin(1)-fspin(3); | ||||||
| } | } | ||||||
| @@ -137,50 +138,40 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
|  *  0 0 -1  0 |  *  0 0 -1  0 | ||||||
|  *  0 0  0 -1 |  *  0 0  0 -1 | ||||||
|  */ |  */ | ||||||
|  |  | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   hspin(0)=fspin(0); |   hspin(0)=fspin(0); | ||||||
|   hspin(1)=fspin(1); |   hspin(1)=fspin(1); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Nhs> &hspin,const iVector<vtype,Ns> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   hspin(0)=fspin(2); |   hspin(0)=fspin(2); | ||||||
|   hspin(1)=fspin(3); |   hspin(1)=fspin(3); | ||||||
| } | } | ||||||
|    |    | ||||||
|  | //  template<class vtype> accelerator_inline void fspProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5p (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   rfspin(0)=fspin(0); |   rfspin(0)=fspin(0); | ||||||
|   rfspin(1)=fspin(1); |   rfspin(1)=fspin(1); | ||||||
|   rfspin(2)=Zero(); |   rfspin(2)=Zero(); | ||||||
|   rfspin(3)=Zero(); |   rfspin(3)=Zero(); | ||||||
| } | } | ||||||
|  | //  template<class vtype> accelerator_inline void fspProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spProj5m (iVector<vtype,Ns> &rfspin,const iVector<vtype,Ns> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   rfspin(0)=Zero(); |   rfspin(0)=Zero(); | ||||||
|   rfspin(1)=Zero(); |   rfspin(1)=Zero(); | ||||||
|   rfspin(2)=fspin(2); |   rfspin(2)=fspin(2); | ||||||
|   rfspin(3)=fspin(3); |   rfspin(3)=fspin(3); | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin) |  | ||||||
| { |  | ||||||
|   const int hN = N>>1; |  | ||||||
|   for(int s=0;s<hN;s++){ |  | ||||||
|     rfspin(s)=fspin(s); |  | ||||||
|     rfspin(s+hN)=Zero(); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &rfspin,const iVector<vtype,N> &fspin) |  | ||||||
| { |  | ||||||
|   const int hN = N>>1; |  | ||||||
|   for(int s=0;s<hN;s++){ |  | ||||||
|     rfspin(s)=Zero(); |  | ||||||
|     rfspin(s+hN)=fspin(s+hN); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||||
| // Reconstruction routines to move back again to four spin | // Reconstruction routines to move back again to four spin | ||||||
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | //////////////////////////////////////////////////////////////////////////////////////////////////////////////// | ||||||
| @@ -192,6 +183,7 @@ template<class vtype,int N,IfCoarsened<iVector<vtype,N> > = 0> accelerator_inlin | |||||||
|  */ |  */ | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=timesMinusI(hspin(1)); |   fspin(2)=timesMinusI(hspin(1)); | ||||||
| @@ -199,6 +191,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=timesI(hspin(1)); |   fspin(2)=timesI(hspin(1)); | ||||||
| @@ -206,6 +199,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)-=timesI(hspin(1)); |   fspin(2)-=timesI(hspin(1)); | ||||||
| @@ -213,6 +207,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconXm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)+=timesI(hspin(1)); |   fspin(2)+=timesI(hspin(1)); | ||||||
| @@ -226,6 +221,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
|  |  | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)= hspin(1); |   fspin(2)= hspin(1); | ||||||
| @@ -233,6 +229,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=-hspin(1); |   fspin(2)=-hspin(1); | ||||||
| @@ -240,6 +237,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)+=hspin(1); |   fspin(2)+=hspin(1); | ||||||
| @@ -247,6 +245,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconYm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)-=hspin(1); |   fspin(2)-=hspin(1); | ||||||
| @@ -261,6 +260,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
|  */ |  */ | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=timesMinusI(hspin(0)); |   fspin(2)=timesMinusI(hspin(0)); | ||||||
| @@ -268,6 +268,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=     timesI(hspin(0)); |   fspin(2)=     timesI(hspin(0)); | ||||||
| @@ -275,6 +276,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)-=timesI(hspin(0)); |   fspin(2)-=timesI(hspin(0)); | ||||||
| @@ -282,6 +284,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconZm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)+=timesI(hspin(0)); |   fspin(2)+=timesI(hspin(0)); | ||||||
| @@ -295,6 +298,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
|  */ |  */ | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=hspin(0); |   fspin(2)=hspin(0); | ||||||
| @@ -302,6 +306,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0); |   fspin(0)=hspin(0); | ||||||
|   fspin(1)=hspin(1); |   fspin(1)=hspin(1); | ||||||
|   fspin(2)=-hspin(0); |   fspin(2)=-hspin(0); | ||||||
| @@ -309,6 +314,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTp (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)+=hspin(0); |   fspin(2)+=hspin(0); | ||||||
| @@ -316,6 +322,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumReconTm (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0); |   fspin(0)+=hspin(0); | ||||||
|   fspin(1)+=hspin(1); |   fspin(1)+=hspin(1); | ||||||
|   fspin(2)-=hspin(0); |   fspin(2)-=hspin(0); | ||||||
| @@ -329,6 +336,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
|  */ |  */ | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=hspin(0)+hspin(0); // add is lower latency than mul |   fspin(0)=hspin(0)+hspin(0); // add is lower latency than mul | ||||||
|   fspin(1)=hspin(1)+hspin(1); // probably no measurable diffence though |   fspin(1)=hspin(1)+hspin(1); // probably no measurable diffence though | ||||||
|   fspin(2)=Zero(); |   fspin(2)=Zero(); | ||||||
| @@ -336,6 +344,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5m (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void spRecon5m (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)=Zero(); |   fspin(0)=Zero(); | ||||||
|   fspin(1)=Zero(); |   fspin(1)=Zero(); | ||||||
|   fspin(2)=hspin(0)+hspin(0); |   fspin(2)=hspin(0)+hspin(0); | ||||||
| @@ -343,6 +352,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void s | |||||||
| } | } | ||||||
| template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void accumRecon5p (iVector<vtype,Ns> &fspin,const iVector<vtype,Nhs> &hspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,Ns>,SpinorIndex>::value,iVector<vtype,Ns> >::type *SFINAE; | ||||||
|   fspin(0)+=hspin(0)+hspin(0); |   fspin(0)+=hspin(0)+hspin(0); | ||||||
|   fspin(1)+=hspin(1)+hspin(1); |   fspin(1)+=hspin(1)+hspin(1); | ||||||
| } | } | ||||||
| @@ -362,6 +372,7 @@ template<class vtype,IfSpinor<iVector<vtype,Ns> > = 0> accelerator_inline void a | |||||||
| ////////// | ////////// | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjXp(hspin._internal[i],fspin._internal[i]); |     spProjXp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| @@ -415,21 +426,26 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconXp (iM | |||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| //////// | //////// | ||||||
| // Xm | // Xm | ||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjXm(hspin._internal,fspin._internal); |   spProjXm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjXm(hspin._internal[i],fspin._internal[i]); |     spProjXm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjXm(hspin._internal[i][j],fspin._internal[i][j]); |       spProjXm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -439,16 +455,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjXm (iMatri | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconXm(hspin._internal,fspin._internal); |   spReconXm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconXm(hspin._internal[i],fspin._internal[i]); |     spReconXm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconXm(hspin._internal[i][j],fspin._internal[i][j]); |       spReconXm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -457,37 +476,45 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconXm (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconXm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconXm(hspin._internal,fspin._internal); |   accumReconXm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconXm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconXm(hspin._internal[i],fspin._internal[i]); |     accumReconXm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconXm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconXm(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconXm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| //////// | //////// | ||||||
| // Yp | // Yp | ||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjYp(hspin._internal,fspin._internal); |   spProjYp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjYp(hspin._internal[i],fspin._internal[i]); |     spProjYp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjYp(hspin._internal[i][j],fspin._internal[i][j]); |       spProjYp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -497,16 +524,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjYp (iMatri | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconYp(hspin._internal,fspin._internal); |   spReconYp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconYp(hspin._internal[i],fspin._internal[i]); |     spReconYp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconYp(hspin._internal[i][j],fspin._internal[i][j]); |       spReconYp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -515,55 +545,66 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYp (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconYp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconYp(hspin._internal,fspin._internal); |   accumReconYp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconYp(hspin._internal[i],fspin._internal[i]); |     accumReconYp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconYp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconYp(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconYp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| //////// | //////// | ||||||
| // Ym | // Ym | ||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjYm(hspin._internal,fspin._internal); |   spProjYm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjYm(hspin._internal[i],fspin._internal[i]); |     spProjYm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjYm(hspin._internal[i][j],fspin._internal[i][j]); |       spProjYm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconYm(hspin._internal,fspin._internal); |   spReconYm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,const iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconYm(hspin._internal[i],fspin._internal[i]); |     spReconYm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconYm(hspin._internal[i][j],fspin._internal[i][j]); |       spReconYm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -572,16 +613,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconYm (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconYm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconYm(hspin._internal,fspin._internal); |   accumReconYm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconYm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconYm(hspin._internal[i],fspin._internal[i]); |     accumReconYm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconYm(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconYm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -594,57 +638,66 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconYm (iM | |||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjZp(hspin._internal,fspin._internal); |   spProjZp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjZp(hspin._internal[i],fspin._internal[i]); |     spProjZp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjZp(hspin._internal[i][j],fspin._internal[i][j]); |       spProjZp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconZp(hspin._internal,fspin._internal); |   spReconZp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconZp(hspin._internal[i],fspin._internal[i]); |     spReconZp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconZp(hspin._internal[i][j],fspin._internal[i][j]); |       spReconZp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconZp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconZp(hspin._internal,fspin._internal); |   accumReconZp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconZp(hspin._internal[i],fspin._internal[i]); |     accumReconZp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconZp(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconZp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -653,53 +706,62 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZp (iM | |||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjZm(hspin._internal,fspin._internal); |   spProjZm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjZm(hspin._internal[i],fspin._internal[i]); |     spProjZm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjZm(hspin._internal[i][j],fspin._internal[i][j]); |       spProjZm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconZm(hspin._internal,fspin._internal); |   spReconZm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconZm(hspin._internal[i],fspin._internal[i]); |     spReconZm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconZm(hspin._internal[i][j],fspin._internal[i][j]); |       spReconZm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconZm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconZm(hspin._internal,fspin._internal); |   accumReconZm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconZm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconZm(hspin._internal[i],fspin._internal[i]); |     accumReconZm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconZm(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconZm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -712,35 +774,41 @@ template<class rtype,class vtype,int N> accelerator_inline void accumReconZm (iM | |||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjTp(hspin._internal,fspin._internal); |   spProjTp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjTp(hspin._internal[i],fspin._internal[i]); |     spProjTp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjTp(hspin._internal[i][j],fspin._internal[i][j]); |       spProjTp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconTp (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconTp(hspin._internal,fspin._internal); |   spReconTp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTp (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconTp(hspin._internal[i],fspin._internal[i]); |     spReconTp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconTp(hspin._internal[i][j],fspin._internal[i][j]); |       spReconTp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -749,37 +817,44 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTp (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconTp (iScalar<rtype> &hspin, iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconTp (iScalar<rtype> &hspin, iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconTp(hspin._internal,fspin._internal); |   accumReconTp(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTp (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTp (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconTp(hspin._internal[i],fspin._internal[i]); |     accumReconTp(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconTp (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconTp (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconTp(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconTp(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| //////// | //////// | ||||||
| // Tm | // Tm | ||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype> accelerator_inline void spProjTm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProjTm (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProjTm(hspin._internal,fspin._internal); |   spProjTm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProjTm (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProjTm(hspin._internal[i],fspin._internal[i]); |     spProjTm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProjTm(hspin._internal[i][j],fspin._internal[i][j]); |       spProjTm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -789,16 +864,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spProjTm (iMatri | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spReconTm(hspin._internal,fspin._internal); |   spReconTm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spReconTm(hspin._internal[i],fspin._internal[i]); |     spReconTm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spReconTm(hspin._internal[i][j],fspin._internal[i][j]); |       spReconTm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -807,37 +885,44 @@ template<class rtype,class vtype,int N> accelerator_inline void spReconTm (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumReconTm (iScalar<rtype> &hspin, const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumReconTm(hspin._internal,fspin._internal); |   accumReconTm(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumReconTm (iVector<rtype,N> &hspin, const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumReconTm(hspin._internal[i],fspin._internal[i]); |     accumReconTm(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumReconTm (iMatrix<rtype,N> &hspin, const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumReconTm(hspin._internal[i][j],fspin._internal[i][j]); |       accumReconTm(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|     }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| //////// | //////// | ||||||
| // 5p | // 5p | ||||||
| //////// | //////// | ||||||
| template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProj5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProj5p(hspin._internal,fspin._internal); |   spProj5p(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProj5p(hspin._internal[i],fspin._internal[i]); |     spProj5p(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProj5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProj5p(hspin._internal[i][j],fspin._internal[i][j]); |       spProj5p(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -846,16 +931,19 @@ template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> acce | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spRecon5p(hspin._internal,fspin._internal); |   spRecon5p(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spRecon5p(hspin._internal[i],fspin._internal[i]); |     spRecon5p(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spRecon5p(hspin._internal[i][j],fspin._internal[i][j]); |       spRecon5p(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -864,16 +952,19 @@ template<class rtype,class vtype,int N> accelerator_inline void spRecon5p (iMatr | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumRecon5p (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumRecon5p(hspin._internal,fspin._internal); |   accumRecon5p(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5p (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumRecon5p(hspin._internal[i],fspin._internal[i]); |     accumRecon5p(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumRecon5p(hspin._internal[i][j],fspin._internal[i][j]); |       accumRecon5p(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -881,18 +972,24 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5p (iM | |||||||
| } | } | ||||||
|  |  | ||||||
| // four spinor projectors for chiral proj | // four spinor projectors for chiral proj | ||||||
| template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | //  template<class vtype> accelerator_inline void fspProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | ||||||
|  | template<class vtype> accelerator_inline void spProj5p (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProj5p(hspin._internal,fspin._internal); |   spProj5p(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin) | //  template<class vtype,int N> accelerator_inline void fspProj5p (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin) | ||||||
|  | template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5p (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProj5p(hspin._internal[i],fspin._internal[i]); |     spProj5p(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin) | //  template<class vtype,int N> accelerator_inline void fspProj5p (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin) | ||||||
|  | template<class vtype,int N> accelerator_inline void spProj5p (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProj5p(hspin._internal[i][j],fspin._internal[i][j]); |       spProj5p(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -904,17 +1001,17 @@ template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inli | |||||||
| // 5m | // 5m | ||||||
| //////// | //////// | ||||||
|  |  | ||||||
| template<class rtype,class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spProj5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|   spProj5m(hspin._internal,fspin._internal); |   spProj5m(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<rtype,N> > = 0> accelerator_inline void spProj5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProj5m(hspin._internal[i],fspin._internal[i]); |     spProj5m(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spProj5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
| @@ -924,34 +1021,40 @@ template<class rtype,class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> acce | |||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void spRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void spRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spRecon5m(hspin._internal,fspin._internal); |   spRecon5m(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spRecon5m(hspin._internal[i],fspin._internal[i]); |     spRecon5m(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void spRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void spRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spRecon5m(hspin._internal[i][j],fspin._internal[i][j]); |       spRecon5m(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   }} |     }} | ||||||
| } | } | ||||||
|  |  | ||||||
| template<class rtype,class vtype> accelerator_inline void accumRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | template<class rtype,class vtype> accelerator_inline void accumRecon5m (iScalar<rtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   accumRecon5m(hspin._internal,fspin._internal); |   accumRecon5m(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | template<class rtype,class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void accumRecon5m (iVector<rtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     accumRecon5m(hspin._internal[i],fspin._internal[i]); |     accumRecon5m(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iMatrix<rtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       accumRecon5m(hspin._internal[i][j],fspin._internal[i][j]); |       accumRecon5m(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
| @@ -960,18 +1063,24 @@ template<class rtype,class vtype,int N> accelerator_inline void accumRecon5m (iM | |||||||
|  |  | ||||||
|  |  | ||||||
| // four spinor projectors for chiral proj | // four spinor projectors for chiral proj | ||||||
| template<class vtype,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | //  template<class vtype> accelerator_inline void fspProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | ||||||
|  | template<class vtype> accelerator_inline void spProj5m (iScalar<vtype> &hspin,const iScalar<vtype> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iScalar<vtype>,SpinorIndex>::notvalue,iScalar<vtype> >::type *temp; | ||||||
|   spProj5m(hspin._internal,fspin._internal); |   spProj5m(hspin._internal,fspin._internal); | ||||||
| } | } | ||||||
| template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin) | //  template<class vtype,int N> accelerator_inline void fspProj5m (iVector<vtype,N> &hspin,iVector<vtype,N> &fspin) | ||||||
|  | template<class vtype,int N,IfNotSpinor<iVector<vtype,N> > = 0> accelerator_inline void spProj5m (iVector<vtype,N> &hspin,const iVector<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iVector<vtype,N>,SpinorIndex>::notvalue,iVector<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++) { |   for(int i=0;i<N;i++) { | ||||||
|     spProj5m(hspin._internal[i],fspin._internal[i]); |     spProj5m(hspin._internal[i],fspin._internal[i]); | ||||||
|   } |   } | ||||||
| } | } | ||||||
| template<class vtype,int N,IfNotCoarsened<iScalar<vtype> > = 0> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin) | //  template<class vtype,int N> accelerator_inline void fspProj5m (iMatrix<vtype,N> &hspin,iMatrix<vtype,N> &fspin) | ||||||
|  | template<class vtype,int N> accelerator_inline void spProj5m (iMatrix<vtype,N> &hspin,const iMatrix<vtype,N> &fspin) | ||||||
| { | { | ||||||
|  |   //typename std::enable_if<matchGridTensorIndex<iMatrix<vtype,N>,SpinorIndex>::notvalue,iMatrix<vtype,N> >::type *temp; | ||||||
|   for(int i=0;i<N;i++){  |   for(int i=0;i<N;i++){  | ||||||
|     for(int j=0;j<N;j++){ |     for(int j=0;j<N;j++){ | ||||||
|       spProj5m(hspin._internal[i][j],fspin._internal[i][j]); |       spProj5m(hspin._internal[i][j],fspin._internal[i][j]); | ||||||
|   | |||||||
| @@ -53,24 +53,6 @@ namespace PeriodicBC { | |||||||
|     return Cshift(tmp,mu,-1);// moves towards positive mu |     return Cshift(tmp,mu,-1);// moves towards positive mu | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)  |  | ||||||
|   { |  | ||||||
|     return Cshift(adj(Link), mu, -1); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   CovShiftIdentityForward(const Lattice<gauge> &Link, int mu) |  | ||||||
|   { |  | ||||||
|     return Link; |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   ShiftStaple(const Lattice<gauge> &Link, int mu) |  | ||||||
|   { |  | ||||||
|     return Cshift(Link, mu, 1); |  | ||||||
|   } |  | ||||||
|    |  | ||||||
|   template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr> |   template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr> | ||||||
|     auto  CovShiftForward(const Lattice<gauge> &Link,  |     auto  CovShiftForward(const Lattice<gauge> &Link,  | ||||||
| 			  int mu, | 			  int mu, | ||||||
| @@ -88,7 +70,6 @@ namespace PeriodicBC { | |||||||
|     return CovShiftBackward(Link,mu,arg); |     return CovShiftBackward(Link,mu,arg); | ||||||
|   } |   } | ||||||
|  |  | ||||||
|  |  | ||||||
| } | } | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -158,38 +139,6 @@ namespace ConjugateBC { | |||||||
|     //    std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl; |     //    std::cout<<"Gparity::CovCshiftBackward mu="<<mu<<std::endl; | ||||||
|     return Cshift(tmp,mu,-1);// moves towards positive mu |     return Cshift(tmp,mu,-1);// moves towards positive mu | ||||||
|   } |   } | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu) { |  | ||||||
|     GridBase *grid = Link.Grid(); |  | ||||||
|     int Lmu = grid->GlobalDimensions()[mu] - 1; |  | ||||||
|  |  | ||||||
|     Lattice<iScalar<vInteger>> coor(grid); |  | ||||||
|     LatticeCoordinate(coor, mu); |  | ||||||
|  |  | ||||||
|     Lattice<gauge> tmp(grid); |  | ||||||
|     tmp = adj(Link); |  | ||||||
|     tmp = where(coor == Lmu, conjugate(tmp), tmp); |  | ||||||
|     return Cshift(tmp, mu, -1); // moves towards positive mu |  | ||||||
|   } |  | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   CovShiftIdentityForward(const Lattice<gauge> &Link, int mu) { |  | ||||||
|     return Link; |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   template<class gauge> Lattice<gauge> |  | ||||||
|   ShiftStaple(const Lattice<gauge> &Link, int mu) |  | ||||||
|   { |  | ||||||
|     GridBase *grid = Link.Grid(); |  | ||||||
|     int Lmu = grid->GlobalDimensions()[mu] - 1; |  | ||||||
|  |  | ||||||
|     Lattice<iScalar<vInteger>> coor(grid); |  | ||||||
|     LatticeCoordinate(coor, mu); |  | ||||||
|  |  | ||||||
|     Lattice<gauge> tmp(grid); |  | ||||||
|     tmp = Cshift(Link, mu, 1); |  | ||||||
|     tmp = where(coor == Lmu, conjugate(tmp), tmp); |  | ||||||
|     return tmp; |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr> |   template<class gauge,class Expr,typename std::enable_if<is_lattice_expr<Expr>::value,void>::type * = nullptr> | ||||||
|     auto  CovShiftForward(const Lattice<gauge> &Link,  |     auto  CovShiftForward(const Lattice<gauge> &Link,  | ||||||
|   | |||||||
| @@ -154,8 +154,8 @@ void axpby_ssp_pminus(Lattice<vobj> &z,Coeff a,const Lattice<vobj> &x,Coeff b,co | |||||||
|   accelerator_for(sss,nloop,vobj::Nsimd(),{ |   accelerator_for(sss,nloop,vobj::Nsimd(),{ | ||||||
|     uint64_t ss = sss*Ls; |     uint64_t ss = sss*Ls; | ||||||
|     decltype(coalescedRead(y_v[ss+sp])) tmp; |     decltype(coalescedRead(y_v[ss+sp])) tmp; | ||||||
|     spProj5m(tmp,y_v(ss+sp));  |     spProj5m(tmp,y_v(ss+sp)); | ||||||
|    tmp = a*x_v(ss+s)+b*tmp; |     tmp = a*x_v(ss+s)+b*tmp; | ||||||
|     coalescedWrite(z_v[ss+s],tmp); |     coalescedWrite(z_v[ss+s],tmp); | ||||||
|   }); |   }); | ||||||
| } | } | ||||||
| @@ -188,6 +188,7 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x) | |||||||
|   z.Checkerboard() = x.Checkerboard(); |   z.Checkerboard() = x.Checkerboard(); | ||||||
|   conformable(x,z); |   conformable(x,z); | ||||||
|   int Ls = grid->_rdimensions[0]; |   int Ls = grid->_rdimensions[0]; | ||||||
|  |   Gamma G5(Gamma::Algebra::Gamma5); | ||||||
|   autoView( x_v, x, AcceleratorRead); |   autoView( x_v, x, AcceleratorRead); | ||||||
|   autoView( z_v, z, AcceleratorWrite); |   autoView( z_v, z, AcceleratorWrite); | ||||||
|   uint64_t nloop = grid->oSites()/Ls; |   uint64_t nloop = grid->oSites()/Ls; | ||||||
| @@ -195,13 +196,7 @@ void G5R5(Lattice<vobj> &z,const Lattice<vobj> &x) | |||||||
|     uint64_t ss = sss*Ls; |     uint64_t ss = sss*Ls; | ||||||
|     for(int s=0;s<Ls;s++){ |     for(int s=0;s<Ls;s++){ | ||||||
|       int sp = Ls-1-s; |       int sp = Ls-1-s; | ||||||
|       auto tmp = x_v(ss+s); |       coalescedWrite(z_v[ss+sp],G5*x_v(ss+s)); | ||||||
|       decltype(tmp) tmp_p; |  | ||||||
|       decltype(tmp) tmp_m; |  | ||||||
|       spProj5p(tmp_p,tmp); |  | ||||||
|       spProj5m(tmp_m,tmp); |  | ||||||
|       // Use of spProj5m, 5p captures the coarse space too |  | ||||||
|       coalescedWrite(z_v[ss+sp],tmp_p - tmp_m); |  | ||||||
|     } |     } | ||||||
|   }); |   }); | ||||||
| } | } | ||||||
| @@ -213,20 +208,10 @@ void G5C(Lattice<vobj> &z, const Lattice<vobj> &x) | |||||||
|   z.Checkerboard() = x.Checkerboard(); |   z.Checkerboard() = x.Checkerboard(); | ||||||
|   conformable(x, z); |   conformable(x, z); | ||||||
|  |  | ||||||
|   autoView( x_v, x, AcceleratorRead); |   Gamma G5(Gamma::Algebra::Gamma5); | ||||||
|   autoView( z_v, z, AcceleratorWrite); |   z = G5 * x; | ||||||
|   uint64_t nloop = grid->oSites(); |  | ||||||
|   accelerator_for(ss,nloop,vobj::Nsimd(),{ |  | ||||||
|     auto tmp = x_v(ss); |  | ||||||
|     decltype(tmp) tmp_p; |  | ||||||
|     decltype(tmp) tmp_m; |  | ||||||
|     spProj5p(tmp_p,tmp); |  | ||||||
|     spProj5m(tmp_m,tmp); |  | ||||||
|     coalescedWrite(z_v[ss],tmp_p - tmp_m); |  | ||||||
|   }); |  | ||||||
| } | } | ||||||
|  |  | ||||||
| /* |  | ||||||
| template<class CComplex, int nbasis> | template<class CComplex, int nbasis> | ||||||
| void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, nbasis>> &x) | void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, nbasis>> &x) | ||||||
| { | { | ||||||
| @@ -249,7 +234,6 @@ void G5C(Lattice<iVector<CComplex, nbasis>> &z, const Lattice<iVector<CComplex, | |||||||
|     } |     } | ||||||
|   }); |   }); | ||||||
| } | } | ||||||
| */ |  | ||||||
|  |  | ||||||
| NAMESPACE_END(Grid); | NAMESPACE_END(Grid); | ||||||
|  |  | ||||||
|   | |||||||
| @@ -735,6 +735,7 @@ public: | |||||||
|     } |     } | ||||||
|   } |   } | ||||||
|  |  | ||||||
|  |  | ||||||
|   template <typename GaugeField> |   template <typename GaugeField> | ||||||
|   static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) { |   static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) { | ||||||
|     typedef typename GaugeField::vector_type vector_type; |     typedef typename GaugeField::vector_type vector_type; | ||||||
| @@ -799,88 +800,6 @@ public: | |||||||
|   } |   } | ||||||
| }; | }; | ||||||
|  |  | ||||||
| template<int N> |  | ||||||
| LatticeComplexD Determinant(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu) |  | ||||||
| { |  | ||||||
|   GridBase *grid=Umu.Grid(); |  | ||||||
|   auto lvol = grid->lSites(); |  | ||||||
|   LatticeComplexD ret(grid); |  | ||||||
|  |  | ||||||
|   autoView(Umu_v,Umu,CpuRead); |  | ||||||
|   autoView(ret_v,ret,CpuWrite); |  | ||||||
|   thread_for(site,lvol,{ |  | ||||||
|     Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N); |  | ||||||
|     Coordinate lcoor; |  | ||||||
|     grid->LocalIndexToLocalCoor(site, lcoor); |  | ||||||
|     iScalar<iScalar<iMatrix<ComplexD, N> > > Us; |  | ||||||
|     peekLocalSite(Us, Umu_v, lcoor); |  | ||||||
|     for(int i=0;i<N;i++){ |  | ||||||
|       for(int j=0;j<N;j++){ |  | ||||||
| 	EigenU(i,j) = Us()()(i,j); |  | ||||||
|       }} |  | ||||||
|     ComplexD det = EigenU.determinant(); |  | ||||||
|     pokeLocalSite(det,ret_v,lcoor); |  | ||||||
|   }); |  | ||||||
|   return ret; |  | ||||||
| } |  | ||||||
| template<int N> |  | ||||||
| static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu) |  | ||||||
| { |  | ||||||
|   Umu      = ProjectOnGroup(Umu); |  | ||||||
|   auto det = Determinant(Umu); |  | ||||||
|  |  | ||||||
|   det = conjugate(det); |  | ||||||
|  |  | ||||||
|   for(int i=0;i<N;i++){ |  | ||||||
|     auto element = PeekIndex<ColourIndex>(Umu,N-1,i); |  | ||||||
|     element = element * det; |  | ||||||
|     PokeIndex<ColourIndex>(Umu,element,Nc-1,i); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| template<int N> |  | ||||||
| static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplexD, N> >,Nd> > &U) |  | ||||||
| { |  | ||||||
|   GridBase *grid=U.Grid(); |  | ||||||
|   // Reunitarise |  | ||||||
|   for(int mu=0;mu<Nd;mu++){ |  | ||||||
|     auto Umu = PeekIndex<LorentzIndex>(U,mu); |  | ||||||
|     Umu      = ProjectOnGroup(Umu); |  | ||||||
|     ProjectSUn(Umu); |  | ||||||
|     PokeIndex<LorentzIndex>(U,Umu,mu); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
| // Explicit specialisation for SU(3). |  | ||||||
| // Explicit specialisation for SU(3). |  | ||||||
| static void |  | ||||||
| ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu) |  | ||||||
| { |  | ||||||
|   GridBase *grid=Umu.Grid(); |  | ||||||
|   const int x=0; |  | ||||||
|   const int y=1; |  | ||||||
|   const int z=2; |  | ||||||
|   // Reunitarise |  | ||||||
|   Umu = ProjectOnGroup(Umu); |  | ||||||
|   autoView(Umu_v,Umu,CpuWrite); |  | ||||||
|   thread_for(ss,grid->oSites(),{ |  | ||||||
|       auto cm = Umu_v[ss]; |  | ||||||
|       cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy |  | ||||||
|       cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz |  | ||||||
|       cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx |  | ||||||
|       Umu_v[ss]=cm; |  | ||||||
|   }); |  | ||||||
| } |  | ||||||
| static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U) |  | ||||||
| { |  | ||||||
|   GridBase *grid=U.Grid(); |  | ||||||
|   // Reunitarise |  | ||||||
|   for(int mu=0;mu<Nd;mu++){ |  | ||||||
|     auto Umu = PeekIndex<LorentzIndex>(U,mu); |  | ||||||
|     Umu      = ProjectOnGroup(Umu); |  | ||||||
|     ProjectSU3(Umu); |  | ||||||
|     PokeIndex<LorentzIndex>(U,Umu,mu); |  | ||||||
|   } |  | ||||||
| } |  | ||||||
|  |  | ||||||
| typedef SU<2> SU2; | typedef SU<2> SU2; | ||||||
| typedef SU<3> SU3; | typedef SU<3> SU3; | ||||||
| typedef SU<4> SU4; | typedef SU<4> SU4; | ||||||
|   | |||||||
							
								
								
									
										779
									
								
								Grid/simd/Fujitsu_A64FX_asm_double.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										779
									
								
								Grid/simd/Fujitsu_A64FX_asm_double.h
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,779 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  |     Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  |     Source file: Fujitsu_A64FX_asm_double.h | ||||||
|  |  | ||||||
|  |     Copyright (C) 2020 | ||||||
|  |  | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> | ||||||
|  |  | ||||||
|  |     This program is free software; you can redistribute it and/or modify | ||||||
|  |     it under the terms of the GNU General Public License as published by | ||||||
|  |     the Free Software Foundation; either version 2 of the License, or | ||||||
|  |     (at your option) any later version. | ||||||
|  |  | ||||||
|  |     This program is distributed in the hope that it will be useful, | ||||||
|  |     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  |     GNU General Public License for more details. | ||||||
|  |  | ||||||
|  |     You should have received a copy of the GNU General Public License along | ||||||
|  |     with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  |     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  |     See the full license in the file "LICENSE" in the top level distribution directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXd(base)   | ||||||
|  | #define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXd(A)   | ||||||
|  | #define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)   | ||||||
|  | #define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXd(A)   | ||||||
|  | #define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)   | ||||||
|  | #define PF_GAUGE(A)   | ||||||
|  | #define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(A)   | ||||||
|  | #define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(A)   | ||||||
|  | #define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)   | ||||||
|  | #define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)   | ||||||
|  | #define LOCK_GAUGE(A)   | ||||||
|  | #define UNLOCK_GAUGE(A)   | ||||||
|  | #define MASK_REGS                      DECLARATIONS_A64FXd   | ||||||
|  | #define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)   | ||||||
|  | #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)   | ||||||
|  | #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd   | ||||||
|  | #define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)   | ||||||
|  | #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)   | ||||||
|  | #define XP_PROJ                        XP_PROJ_A64FXd   | ||||||
|  | #define YP_PROJ                        YP_PROJ_A64FXd   | ||||||
|  | #define ZP_PROJ                        ZP_PROJ_A64FXd   | ||||||
|  | #define TP_PROJ                        TP_PROJ_A64FXd   | ||||||
|  | #define XM_PROJ                        XM_PROJ_A64FXd   | ||||||
|  | #define YM_PROJ                        YM_PROJ_A64FXd   | ||||||
|  | #define ZM_PROJ                        ZM_PROJ_A64FXd   | ||||||
|  | #define TM_PROJ                        TM_PROJ_A64FXd   | ||||||
|  | #define XP_RECON                       XP_RECON_A64FXd   | ||||||
|  | #define XM_RECON                       XM_RECON_A64FXd   | ||||||
|  | #define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXd   | ||||||
|  | #define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXd   | ||||||
|  | #define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXd   | ||||||
|  | #define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXd   | ||||||
|  | #define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXd   | ||||||
|  | #define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXd   | ||||||
|  | #define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXd   | ||||||
|  | #define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXd   | ||||||
|  | #define PERMUTE_DIR0                   0   | ||||||
|  | #define PERMUTE_DIR1                   1   | ||||||
|  | #define PERMUTE_DIR2                   2   | ||||||
|  | #define PERMUTE_DIR3                   3   | ||||||
|  | #define PERMUTE                        PERMUTE_A64FXd;   | ||||||
|  | #define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1; } else if (Dir == 2) { LOAD_TABLE2; }   | ||||||
|  | #define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }   | ||||||
|  | // DECLARATIONS | ||||||
|  | #define DECLARATIONS_A64FXd  \ | ||||||
|  |     const uint64_t lut[4][8] = { \ | ||||||
|  |         {4, 5, 6, 7, 0, 1, 2, 3}, \ | ||||||
|  |         {2, 3, 0, 1, 6, 7, 4, 5}, \ | ||||||
|  |         {1, 0, 3, 2, 5, 4, 7, 6}, \ | ||||||
|  |         {0, 1, 2, 4, 5, 6, 7, 8} };\ | ||||||
|  | asm ( \ | ||||||
|  |     "fmov z31.d , 0 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // RESULT | ||||||
|  | #define RESULT_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "str z0, [%[storeptr], -6, mul vl] \n\t" \ | ||||||
|  |     "str z1, [%[storeptr], -5, mul vl] \n\t" \ | ||||||
|  |     "str z2, [%[storeptr], -4, mul vl] \n\t" \ | ||||||
|  |     "str z3, [%[storeptr], -3, mul vl] \n\t" \ | ||||||
|  |     "str z4, [%[storeptr], -2, mul vl] \n\t" \ | ||||||
|  |     "str z5, [%[storeptr], -1, mul vl] \n\t" \ | ||||||
|  |     "str z6, [%[storeptr], 0, mul vl] \n\t" \ | ||||||
|  |     "str z7, [%[storeptr], 1, mul vl] \n\t" \ | ||||||
|  |     "str z8, [%[storeptr], 2, mul vl] \n\t" \ | ||||||
|  |     "str z9, [%[storeptr], 3, mul vl] \n\t" \ | ||||||
|  |     "str z10, [%[storeptr], 4, mul vl] \n\t" \ | ||||||
|  |     "str z11, [%[storeptr], 5, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [storeptr] "r" (base + 2 * 3 * 64) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_CHIMU_L2 (prefetch to L2) | ||||||
|  | #define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_CHIMU_L1 (prefetch to L1) | ||||||
|  | #define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_GAUGE_L2 (prefetch to L2) | ||||||
|  | #define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \ | ||||||
|  | { \ | ||||||
|  |     const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (baseU) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_GAUGE_L1 (prefetch to L1) | ||||||
|  | #define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \ | ||||||
|  | { \ | ||||||
|  |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (baseU) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // LOAD_CHI | ||||||
|  | #define LOAD_CHI_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \ | ||||||
|  |     "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \ | ||||||
|  |     "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // LOAD_CHIMU | ||||||
|  | #define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \ | ||||||
|  |     "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \ | ||||||
|  |     "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \ | ||||||
|  |     "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \ | ||||||
|  |     "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \ | ||||||
|  |     "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \ | ||||||
|  |     "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \ | ||||||
|  |     "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \ | ||||||
|  |     "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base + 2 * 3 * 64) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // LOAD_CHIMU_0213 | ||||||
|  | #define LOAD_CHIMU_0213_A64FXd  \ | ||||||
|  | { \ | ||||||
|  |     const SiteSpinor & ref(in[offset]); \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \ | ||||||
|  |     "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \ | ||||||
|  |     "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \ | ||||||
|  |     "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \ | ||||||
|  |     "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \ | ||||||
|  |     "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \ | ||||||
|  |     "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \ | ||||||
|  |     "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \ | ||||||
|  |     "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (&ref[2][0]) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // LOAD_CHIMU_0312 | ||||||
|  | #define LOAD_CHIMU_0312_A64FXd  \ | ||||||
|  | { \ | ||||||
|  |     const SiteSpinor & ref(in[offset]); \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \ | ||||||
|  |     "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \ | ||||||
|  |     "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \ | ||||||
|  |     "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \ | ||||||
|  |     "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \ | ||||||
|  |     "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \ | ||||||
|  |     "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \ | ||||||
|  |     "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \ | ||||||
|  |     "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (&ref[2][0]) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // LOAD_TABLE0 | ||||||
|  | #define LOAD_TABLE0  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [tableptr] "r" (&lut[0]),[index] "i" (0) \ | ||||||
|  |     : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // LOAD_TABLE1 | ||||||
|  | #define LOAD_TABLE1  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [tableptr] "r" (&lut[0]),[index] "i" (1) \ | ||||||
|  |     : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // LOAD_TABLE2 | ||||||
|  | #define LOAD_TABLE2  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [tableptr] "r" (&lut[0]),[index] "i" (2) \ | ||||||
|  |     : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // LOAD_TABLE3 | ||||||
|  | #define LOAD_TABLE3  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [tableptr] "r" (&lut[0]),[index] "i" (3) \ | ||||||
|  |     : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // PERMUTE | ||||||
|  | #define PERMUTE_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "tbl z12.d, { z12.d }, z30.d \n\t"  \ | ||||||
|  |     "tbl z13.d, { z13.d }, z30.d \n\t"  \ | ||||||
|  |     "tbl z14.d, { z14.d }, z30.d \n\t"  \ | ||||||
|  |     "tbl z15.d, { z15.d }, z30.d \n\t"  \ | ||||||
|  |     "tbl z16.d, { z16.d }, z30.d \n\t"  \ | ||||||
|  |     "tbl z17.d, { z17.d }, z30.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // LOAD_GAUGE | ||||||
|  | #define LOAD_GAUGE  \ | ||||||
|  |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \ | ||||||
|  |     "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \ | ||||||
|  |     "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \ | ||||||
|  |     "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \ | ||||||
|  |     "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (baseU + 2 * 3 * 64) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // MULT_2SPIN | ||||||
|  | #define MULT_2SPIN_1_A64FXd(A)  \ | ||||||
|  | { \ | ||||||
|  |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|  | asm ( \ | ||||||
|  |     "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \ | ||||||
|  |     "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \ | ||||||
|  |     "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \ | ||||||
|  |     "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \ | ||||||
|  |     "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \ | ||||||
|  |     "movprfx z18.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z18.d, p5/m, z24.d, z12.d, 0 \n\t" \ | ||||||
|  |     "movprfx z21.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z24.d, z15.d, 0 \n\t" \ | ||||||
|  |     "movprfx z19.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z25.d, z12.d, 0 \n\t" \ | ||||||
|  |     "movprfx z22.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z25.d, z15.d, 0 \n\t" \ | ||||||
|  |     "movprfx z20.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z26.d, z12.d, 0 \n\t" \ | ||||||
|  |     "movprfx z23.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z26.d, z15.d, 0 \n\t" \ | ||||||
|  |     "fcmla z18.d, p5/m, z24.d, z12.d, 90 \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z24.d, z15.d, 90 \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z25.d, z12.d, 90 \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z25.d, z15.d, 90 \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z26.d, z12.d, 90 \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z26.d, z15.d, 90 \n\t" \ | ||||||
|  |     "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \ | ||||||
|  |     "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \ | ||||||
|  |     "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (baseU + 2 * 3 * 64) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // MULT_2SPIN_BACKEND | ||||||
|  | #define MULT_2SPIN_2_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcmla z18.d, p5/m, z27.d, z13.d, 0 \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z27.d, z16.d, 0 \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z28.d, z13.d, 0 \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z28.d, z16.d, 0 \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z29.d, z13.d, 0 \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z29.d, z16.d, 0 \n\t" \ | ||||||
|  |     "fcmla z18.d, p5/m, z27.d, z13.d, 90 \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z27.d, z16.d, 90 \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z28.d, z13.d, 90 \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z28.d, z16.d, 90 \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z29.d, z13.d, 90 \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z29.d, z16.d, 90 \n\t" \ | ||||||
|  |     "fcmla z18.d, p5/m, z24.d, z14.d, 0 \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z24.d, z17.d, 0 \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z25.d, z14.d, 0 \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z25.d, z17.d, 0 \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z26.d, z14.d, 0 \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z26.d, z17.d, 0 \n\t" \ | ||||||
|  |     "fcmla z18.d, p5/m, z24.d, z14.d, 90 \n\t" \ | ||||||
|  |     "fcmla z21.d, p5/m, z24.d, z17.d, 90 \n\t" \ | ||||||
|  |     "fcmla z19.d, p5/m, z25.d, z14.d, 90 \n\t" \ | ||||||
|  |     "fcmla z22.d, p5/m, z25.d, z17.d, 90 \n\t" \ | ||||||
|  |     "fcmla z20.d, p5/m, z26.d, z14.d, 90 \n\t" \ | ||||||
|  |     "fcmla z23.d, p5/m, z26.d, z17.d, 90 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // XP_PROJ | ||||||
|  | #define XP_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z12.d, p5/m, z12.d, z21.d, 90 \n\t" \ | ||||||
|  |     "fcadd z13.d, p5/m, z13.d, z22.d, 90 \n\t" \ | ||||||
|  |     "fcadd z14.d, p5/m, z14.d, z23.d, 90 \n\t" \ | ||||||
|  |     "fcadd z15.d, p5/m, z15.d, z18.d, 90 \n\t" \ | ||||||
|  |     "fcadd z16.d, p5/m, z16.d, z19.d, 90 \n\t" \ | ||||||
|  |     "fcadd z17.d, p5/m, z17.d, z20.d, 90 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // XP_RECON | ||||||
|  | #define XP_RECON_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "movprfx z6.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \ | ||||||
|  |     "movprfx z7.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \ | ||||||
|  |     "movprfx z8.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \ | ||||||
|  |     "movprfx z9.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \ | ||||||
|  |     "movprfx z10.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \ | ||||||
|  |     "movprfx z11.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \ | ||||||
|  |     "mov z0.d, p5/m, z18.d \n\t" \ | ||||||
|  |     "mov z1.d, p5/m, z19.d \n\t" \ | ||||||
|  |     "mov z2.d, p5/m, z20.d \n\t" \ | ||||||
|  |     "mov z3.d, p5/m, z21.d \n\t" \ | ||||||
|  |     "mov z4.d, p5/m, z22.d \n\t" \ | ||||||
|  |     "mov z5.d, p5/m, z23.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // XP_RECON_ACCUM | ||||||
|  | #define XP_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z18.d, 270 \n\t" \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z19.d, 270 \n\t" \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z20.d, 270 \n\t" \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z21.d, 270 \n\t" \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z22.d, 270 \n\t" \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z23.d, 270 \n\t" \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // YP_PROJ | ||||||
|  | #define YP_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fsub z12.d, p5/m, z12.d, z21.d \n\t" \ | ||||||
|  |     "fsub z13.d, p5/m, z13.d, z22.d \n\t" \ | ||||||
|  |     "fsub z14.d, p5/m, z14.d, z23.d \n\t" \ | ||||||
|  |     "fadd z15.d, p5/m, z15.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z16.d, p5/m, z16.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z17.d, p5/m, z17.d, z20.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // ZP_PROJ | ||||||
|  | #define ZP_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z12.d, p5/m, z12.d, z18.d, 90 \n\t" \ | ||||||
|  |     "fcadd z13.d, p5/m, z13.d, z19.d, 90 \n\t" \ | ||||||
|  |     "fcadd z14.d, p5/m, z14.d, z20.d, 90 \n\t" \ | ||||||
|  |     "fcadd z15.d, p5/m, z15.d, z21.d, 270 \n\t" \ | ||||||
|  |     "fcadd z16.d, p5/m, z16.d, z22.d, 270 \n\t" \ | ||||||
|  |     "fcadd z17.d, p5/m, z17.d, z23.d, 270 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // TP_PROJ | ||||||
|  | #define TP_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z12.d, p5/m, z12.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z13.d, p5/m, z13.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z14.d, p5/m, z14.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z15.d, p5/m, z15.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z16.d, p5/m, z16.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z17.d, p5/m, z17.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // XM_PROJ | ||||||
|  | #define XM_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z12.d, p5/m, z12.d, z21.d, 270 \n\t" \ | ||||||
|  |     "fcadd z13.d, p5/m, z13.d, z22.d, 270 \n\t" \ | ||||||
|  |     "fcadd z14.d, p5/m, z14.d, z23.d, 270 \n\t" \ | ||||||
|  |     "fcadd z15.d, p5/m, z15.d, z18.d, 270 \n\t" \ | ||||||
|  |     "fcadd z16.d, p5/m, z16.d, z19.d, 270 \n\t" \ | ||||||
|  |     "fcadd z17.d, p5/m, z17.d, z20.d, 270 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // XM_RECON | ||||||
|  | #define XM_RECON_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "movprfx z6.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \ | ||||||
|  |     "movprfx z7.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \ | ||||||
|  |     "movprfx z8.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \ | ||||||
|  |     "movprfx z9.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \ | ||||||
|  |     "movprfx z10.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \ | ||||||
|  |     "movprfx z11.d, p5/m, z31.d \n\t" \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \ | ||||||
|  |     "mov z0.d, p5/m, z18.d \n\t" \ | ||||||
|  |     "mov z1.d, p5/m, z19.d \n\t" \ | ||||||
|  |     "mov z2.d, p5/m, z20.d \n\t" \ | ||||||
|  |     "mov z3.d, p5/m, z21.d \n\t" \ | ||||||
|  |     "mov z4.d, p5/m, z22.d \n\t" \ | ||||||
|  |     "mov z5.d, p5/m, z23.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // YM_PROJ | ||||||
|  | #define YM_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z12.d, p5/m, z12.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z13.d, p5/m, z13.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z14.d, p5/m, z14.d, z23.d \n\t"  \ | ||||||
|  |     "fsub z15.d, p5/m, z15.d, z18.d \n\t" \ | ||||||
|  |     "fsub z16.d, p5/m, z16.d, z19.d \n\t" \ | ||||||
|  |     "fsub z17.d, p5/m, z17.d, z20.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // ZM_PROJ | ||||||
|  | #define ZM_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z12.d, p5/m, z12.d, z18.d, 270 \n\t" \ | ||||||
|  |     "fcadd z13.d, p5/m, z13.d, z19.d, 270 \n\t" \ | ||||||
|  |     "fcadd z14.d, p5/m, z14.d, z20.d, 270 \n\t" \ | ||||||
|  |     "fcadd z15.d, p5/m, z15.d, z21.d, 90 \n\t" \ | ||||||
|  |     "fcadd z16.d, p5/m, z16.d, z22.d, 90 \n\t" \ | ||||||
|  |     "fcadd z17.d, p5/m, z17.d, z23.d, 90 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // TM_PROJ | ||||||
|  | #define TM_PROJ_A64FXd  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "fsub z12.d, p5/m, z12.d, z18.d \n\t" \ | ||||||
|  |     "fsub z13.d, p5/m, z13.d, z19.d \n\t" \ | ||||||
|  |     "fsub z14.d, p5/m, z14.d, z20.d \n\t" \ | ||||||
|  |     "fsub z15.d, p5/m, z15.d, z21.d \n\t" \ | ||||||
|  |     "fsub z16.d, p5/m, z16.d, z22.d \n\t" \ | ||||||
|  |     "fsub z17.d, p5/m, z17.d, z23.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // XM_RECON_ACCUM | ||||||
|  | #define XM_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z18.d, 90 \n\t" \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z19.d, 90 \n\t" \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z20.d, 90 \n\t" \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z21.d, 90 \n\t" \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z22.d, 90 \n\t" \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z23.d, 90 \n\t" \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // YP_RECON_ACCUM | ||||||
|  | #define YP_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fsub z9.d, p5/m, z9.d, z18.d \n\t" \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fsub z10.d, p5/m, z10.d, z19.d \n\t" \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fsub z11.d, p5/m, z11.d, z20.d \n\t" \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z6.d, p5/m, z6.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z7.d, p5/m, z7.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     "fadd z8.d, p5/m, z8.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // YM_RECON_ACCUM | ||||||
|  | #define YM_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z9.d, p5/m, z9.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z10.d, p5/m, z10.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z11.d, p5/m, z11.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fsub z6.d, p5/m, z6.d, z21.d \n\t" \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fsub z7.d, p5/m, z7.d, z22.d \n\t" \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     "fsub z8.d, p5/m, z8.d, z23.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // ZP_RECON_ACCUM | ||||||
|  | #define ZP_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z18.d, 270 \n\t" \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z19.d, 270 \n\t" \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z20.d, 270 \n\t" \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z21.d, 90 \n\t" \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z22.d, 90 \n\t" \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z23.d, 90 \n\t" \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // ZM_RECON_ACCUM | ||||||
|  | #define ZM_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fcadd z6.d, p5/m, z6.d, z18.d, 90 \n\t" \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fcadd z7.d, p5/m, z7.d, z19.d, 90 \n\t" \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fcadd z8.d, p5/m, z8.d, z20.d, 90 \n\t" \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fcadd z9.d, p5/m, z9.d, z21.d, 270 \n\t" \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fcadd z10.d, p5/m, z10.d, z22.d, 270 \n\t" \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fcadd z11.d, p5/m, z11.d, z23.d, 270 \n\t" \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // TP_RECON_ACCUM | ||||||
|  | #define TP_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // TM_RECON_ACCUM | ||||||
|  | #define TM_RECON_ACCUM_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z18.d \n\t"  \ | ||||||
|  |     "fsub z6.d, p5/m, z6.d, z18.d \n\t" \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z19.d \n\t"  \ | ||||||
|  |     "fsub z7.d, p5/m, z7.d, z19.d \n\t" \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z20.d \n\t"  \ | ||||||
|  |     "fsub z8.d, p5/m, z8.d, z20.d \n\t" \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z21.d \n\t"  \ | ||||||
|  |     "fsub z9.d, p5/m, z9.d, z21.d \n\t" \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z22.d \n\t"  \ | ||||||
|  |     "fsub z10.d, p5/m, z10.d, z22.d \n\t" \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z23.d \n\t"  \ | ||||||
|  |     "fsub z11.d, p5/m, z11.d, z23.d \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // ZERO_PSI | ||||||
|  | #define ZERO_PSI_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.d \n\t" \ | ||||||
|  |     "fmov z0.d , 0 \n\t" \ | ||||||
|  |     "fmov z1.d , 0 \n\t" \ | ||||||
|  |     "fmov z2.d , 0 \n\t" \ | ||||||
|  |     "fmov z3.d , 0 \n\t" \ | ||||||
|  |     "fmov z4.d , 0 \n\t" \ | ||||||
|  |     "fmov z5.d , 0 \n\t" \ | ||||||
|  |     "fmov z6.d , 0 \n\t" \ | ||||||
|  |     "fmov z7.d , 0 \n\t" \ | ||||||
|  |     "fmov z8.d , 0 \n\t" \ | ||||||
|  |     "fmov z9.d , 0 \n\t" \ | ||||||
|  |     "fmov z10.d , 0 \n\t" \ | ||||||
|  |     "fmov z11.d , 0 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // PREFETCH_RESULT_L2_STORE (prefetch store to L2) | ||||||
|  | #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | ||||||
|  | #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // ADD_RESULT_INTERNAL | ||||||
|  | #define ADD_RESULT_INTERNAL_A64FXd  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.d, p5/m, z0.d, z12.d \n\t"  \ | ||||||
|  |     "fadd z1.d, p5/m, z1.d, z13.d \n\t"  \ | ||||||
|  |     "fadd z2.d, p5/m, z2.d, z14.d \n\t"  \ | ||||||
|  |     "fadd z3.d, p5/m, z3.d, z15.d \n\t"  \ | ||||||
|  |     "fadd z4.d, p5/m, z4.d, z16.d \n\t"  \ | ||||||
|  |     "fadd z5.d, p5/m, z5.d, z17.d \n\t"  \ | ||||||
|  |     "fadd z6.d, p5/m, z6.d, z18.d \n\t"  \ | ||||||
|  |     "fadd z7.d, p5/m, z7.d, z19.d \n\t"  \ | ||||||
|  |     "fadd z8.d, p5/m, z8.d, z20.d \n\t"  \ | ||||||
|  |     "fadd z9.d, p5/m, z9.d, z21.d \n\t"  \ | ||||||
|  |     "fadd z10.d, p5/m, z10.d, z22.d \n\t"  \ | ||||||
|  |     "fadd z11.d, p5/m, z11.d, z23.d \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
							
								
								
									
										779
									
								
								Grid/simd/Fujitsu_A64FX_asm_single.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										779
									
								
								Grid/simd/Fujitsu_A64FX_asm_single.h
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,779 @@ | |||||||
|  | /************************************************************************************* | ||||||
|  |  | ||||||
|  |     Grid physics library, www.github.com/paboyle/Grid | ||||||
|  |  | ||||||
|  |     Source file: Fujitsu_A64FX_asm_single.h | ||||||
|  |  | ||||||
|  |     Copyright (C) 2020 | ||||||
|  |  | ||||||
|  | Author: Nils Meyer <nils.meyer@ur.de> | ||||||
|  |  | ||||||
|  |     This program is free software; you can redistribute it and/or modify | ||||||
|  |     it under the terms of the GNU General Public License as published by | ||||||
|  |     the Free Software Foundation; either version 2 of the License, or | ||||||
|  |     (at your option) any later version. | ||||||
|  |  | ||||||
|  |     This program is distributed in the hope that it will be useful, | ||||||
|  |     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|  |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|  |     GNU General Public License for more details. | ||||||
|  |  | ||||||
|  |     You should have received a copy of the GNU General Public License along | ||||||
|  |     with this program; if not, write to the Free Software Foundation, Inc., | ||||||
|  |     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||||||
|  |  | ||||||
|  |     See the full license in the file "LICENSE" in the top level distribution directory | ||||||
|  | *************************************************************************************/ | ||||||
|  | /*  END LEGAL */ | ||||||
|  | #define LOAD_CHIMU(base)               LOAD_CHIMU_INTERLEAVED_A64FXf(base)   | ||||||
|  | #define PREFETCH_CHIMU_L1(A)           PREFETCH_CHIMU_L1_INTERNAL_A64FXf(A)   | ||||||
|  | #define PREFETCH_GAUGE_L1(A)           PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)   | ||||||
|  | #define PREFETCH_CHIMU_L2(A)           PREFETCH_CHIMU_L2_INTERNAL_A64FXf(A)   | ||||||
|  | #define PREFETCH_GAUGE_L2(A)           PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)   | ||||||
|  | #define PF_GAUGE(A)   | ||||||
|  | #define PREFETCH_RESULT_L2_STORE(A)    PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(A)   | ||||||
|  | #define PREFETCH_RESULT_L1_STORE(A)    PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(A)   | ||||||
|  | #define PREFETCH1_CHIMU(A)             PREFETCH_CHIMU_L1(A)   | ||||||
|  | #define PREFETCH_CHIMU(A)              PREFETCH_CHIMU_L1(A)   | ||||||
|  | #define LOCK_GAUGE(A)   | ||||||
|  | #define UNLOCK_GAUGE(A)   | ||||||
|  | #define MASK_REGS                      DECLARATIONS_A64FXf   | ||||||
|  | #define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)   | ||||||
|  | #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)   | ||||||
|  | #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf   | ||||||
|  | #define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)   | ||||||
|  | #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)   | ||||||
|  | #define XP_PROJ                        XP_PROJ_A64FXf   | ||||||
|  | #define YP_PROJ                        YP_PROJ_A64FXf   | ||||||
|  | #define ZP_PROJ                        ZP_PROJ_A64FXf   | ||||||
|  | #define TP_PROJ                        TP_PROJ_A64FXf   | ||||||
|  | #define XM_PROJ                        XM_PROJ_A64FXf   | ||||||
|  | #define YM_PROJ                        YM_PROJ_A64FXf   | ||||||
|  | #define ZM_PROJ                        ZM_PROJ_A64FXf   | ||||||
|  | #define TM_PROJ                        TM_PROJ_A64FXf   | ||||||
|  | #define XP_RECON                       XP_RECON_A64FXf   | ||||||
|  | #define XM_RECON                       XM_RECON_A64FXf   | ||||||
|  | #define XM_RECON_ACCUM                 XM_RECON_ACCUM_A64FXf   | ||||||
|  | #define YM_RECON_ACCUM                 YM_RECON_ACCUM_A64FXf   | ||||||
|  | #define ZM_RECON_ACCUM                 ZM_RECON_ACCUM_A64FXf   | ||||||
|  | #define TM_RECON_ACCUM                 TM_RECON_ACCUM_A64FXf   | ||||||
|  | #define XP_RECON_ACCUM                 XP_RECON_ACCUM_A64FXf   | ||||||
|  | #define YP_RECON_ACCUM                 YP_RECON_ACCUM_A64FXf   | ||||||
|  | #define ZP_RECON_ACCUM                 ZP_RECON_ACCUM_A64FXf   | ||||||
|  | #define TP_RECON_ACCUM                 TP_RECON_ACCUM_A64FXf   | ||||||
|  | #define PERMUTE_DIR0                   0   | ||||||
|  | #define PERMUTE_DIR1                   1   | ||||||
|  | #define PERMUTE_DIR2                   2   | ||||||
|  | #define PERMUTE_DIR3                   3   | ||||||
|  | #define PERMUTE                        PERMUTE_A64FXf;   | ||||||
|  | #define LOAD_TABLE(Dir)                if (Dir == 0) { LOAD_TABLE0; } else if (Dir == 1) { LOAD_TABLE1 } else if (Dir == 2) { LOAD_TABLE2; } else if (Dir == 3) { LOAD_TABLE3; }   | ||||||
|  | #define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }   | ||||||
|  | // DECLARATIONS | ||||||
|  | #define DECLARATIONS_A64FXf  \ | ||||||
|  |     const uint32_t lut[4][16] = { \ | ||||||
|  |         {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \ | ||||||
|  |         {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \ | ||||||
|  |         {2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \ | ||||||
|  |         {1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \ | ||||||
|  | asm ( \ | ||||||
|  |     "fmov z31.s , 0 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // RESULT | ||||||
|  | #define RESULT_A64FXf(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "str z0, [%[storeptr], -6, mul vl] \n\t" \ | ||||||
|  |     "str z1, [%[storeptr], -5, mul vl] \n\t" \ | ||||||
|  |     "str z2, [%[storeptr], -4, mul vl] \n\t" \ | ||||||
|  |     "str z3, [%[storeptr], -3, mul vl] \n\t" \ | ||||||
|  |     "str z4, [%[storeptr], -2, mul vl] \n\t" \ | ||||||
|  |     "str z5, [%[storeptr], -1, mul vl] \n\t" \ | ||||||
|  |     "str z6, [%[storeptr], 0, mul vl] \n\t" \ | ||||||
|  |     "str z7, [%[storeptr], 1, mul vl] \n\t" \ | ||||||
|  |     "str z8, [%[storeptr], 2, mul vl] \n\t" \ | ||||||
|  |     "str z9, [%[storeptr], 3, mul vl] \n\t" \ | ||||||
|  |     "str z10, [%[storeptr], 4, mul vl] \n\t" \ | ||||||
|  |     "str z11, [%[storeptr], 5, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [storeptr] "r" (base + 2 * 3 * 64) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
// PREFETCH_CHIMU_L2 (prefetch to L2)
// Streaming prefetch (PLDL2STRM) of the spinor at `base` into L2.
// Three prfd instructions at vector offsets 0, 4 and 8 (mul vl) stride
// across the site payload under governing predicate p5.  The blanket
// z0..z31 clobber list keeps the compiler from holding SVE state live
// across the asm statement.
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \
{ \
asm ( \
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_CHIMU_L1 (prefetch to L1)
// Identical access pattern to the L2 variant above, but with the
// PLDL1STRM hint so the data lands in the L1 cache.
#define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \
{ \
asm ( \
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L2 (prefetch to L2)
// Streaming prefetch of a gauge link into L2.  Note this indexes
// U[sUn] (presumably the *next* site, in contrast to sU used by the L1
// variant below — TODO confirm against the kernel driver), and offsets
// the base by 3*3*64 bytes so the -4..28 (mul vl) prfd offsets span the
// full 3x3 complex matrix on either side of the anchor.
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
asm ( \
    "prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 12, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 16, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 20, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 24, mul vl] \n\t" \
    "prfd PLDL2STRM, p5, [%[fetchptr], 28, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// PREFETCH_GAUGE_L1 (prefetch to L1)
// Prefetch of the current-site gauge link U[sU](A) into L1 with the
// PLDL1STRM streaming hint; three prfd at offsets 0, 4, 8 (mul vl).
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
    "prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
    "prfd PLDL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHI
// Load a half-spinor (6 SVE vectors) from `base` into z12..z17 with
// consecutive mul-vl offsets 0..5.  These registers are the "Chi"
// operand consumed by the MULT_2SPIN macros below.
#define LOAD_CHI_A64FXf(base)  \
{ \
asm ( \
    "ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU
// Load a full spinor (12 SVE vectors) into z12..z23.  The anchor is
// base + 2*3*64, so mul-vl offsets -6..5 address the 12 vectors either
// side of it (offset arithmetic assumes 64-byte vectors, i.e. 512-bit
// SVE — consistent with the *64 constants used throughout this file).
// The loads are interleaved (not in register order) — presumably
// scheduled to pair with the consuming arithmetic; do not reorder.
// Also establishes the all-true 32-bit predicate p5 used by the
// subsequent macros.
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \
{ \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (base + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0213
// Load the full spinor in[offset] into z12..z23 in "0213" ordering.
// The pointer is anchored at &ref[2][0] so the negative mul-vl offsets
// reach the earlier spin components — assumes the contiguous SiteSpinor
// layout used elsewhere in this file.  Uses `in` and `offset` from the
// enclosing kernel scope.
#define LOAD_CHIMU_0213_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (&ref[2][0]) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_CHIMU_0312
// Same full-spinor load as LOAD_CHIMU_0213 but with the loads issued in
// the alternative "0312" interleave; register destinations and memory
// offsets are identical, only the issue order differs.
#define LOAD_CHIMU_0312_A64FXf  \
{ \
    const SiteSpinor & ref(in[offset]); \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
    "ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
    "ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
    "ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
    "ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (&ref[2][0]) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// LOAD_TABLE0 .. LOAD_TABLE3
// Each loads permutation-table entry lut[N] (N = 0..3) into z30, the
// index vector consumed by the TBL in PERMUTE below.  The table index
// is an immediate ("i" constraint) mul-vl offset from &lut[0]; `lut`
// must be in scope at the expansion site.
#define LOAD_TABLE0  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (0) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE1
#define LOAD_TABLE1  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (1) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE2
#define LOAD_TABLE2  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (2) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// LOAD_TABLE3
#define LOAD_TABLE3  \
asm ( \
    "ldr z30, [%[tableptr], %[index], mul vl] \n\t" \
    :  \
    : [tableptr] "r" (&lut[0]),[index] "i" (3) \
    : "memory","cc","p5","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
// PERMUTE
// In-register lane permutation of the half-spinor z12..z17: each TBL
// gathers 32-bit elements according to the index vector z30, which must
// have been loaded beforehand via one of the LOAD_TABLEn macros.
#define PERMUTE_A64FXf  \
asm ( \
    "tbl z12.s, { z12.s }, z30.s \n\t"  \
    "tbl z13.s, { z13.s }, z30.s \n\t"  \
    "tbl z14.s, { z14.s }, z30.s \n\t"  \
    "tbl z15.s, { z15.s }, z30.s \n\t"  \
    "tbl z16.s, { z16.s }, z30.s \n\t"  \
    "tbl z17.s, { z17.s }, z30.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
// LOAD_GAUGE
// Load the first two columns (6 vectors) of the gauge link U[sU](A)
// into z24..z29 and set up predicate p5.  The anchor is baseU + 2*3*64,
// matching the negative-offset addressing used in MULT_2SPIN_1 below.
// Note: baseU is declared *outside* the inner braces, so it remains in
// scope for subsequent macros expanded in the same enclosing block.
#define LOAD_GAUGE  \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
{ \
asm ( \
    "ptrue p5.s \n\t" \
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN (part 1)
// First half of the SU(3) multiply of the two half-spinors held in
// z12..z17: loads gauge columns into z24..z29, initialises the result
// accumulators z18..z23 via movprfx from z31 (presumably zero —
// NOTE(review): z31 is set elsewhere, confirm), then issues FCMLA with
// rotations 0 and 90, which together form a full complex
// multiply-accumulate.  Finishes by loading the third gauge row into
// z24..z26 for MULT_2SPIN_2.
#define MULT_2SPIN_1_A64FXf(A)  \
{ \
    const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
asm ( \
    "ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
    "ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
    "ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
    "ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
    "ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
    "ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
    "movprfx z18.s, p5/m, z31.s \n\t" \
    "fcmla z18.s, p5/m, z24.s, z12.s, 0 \n\t" \
    "movprfx z21.s, p5/m, z31.s \n\t" \
    "fcmla z21.s, p5/m, z24.s, z15.s, 0 \n\t" \
    "movprfx z19.s, p5/m, z31.s \n\t" \
    "fcmla z19.s, p5/m, z25.s, z12.s, 0 \n\t" \
    "movprfx z22.s, p5/m, z31.s \n\t" \
    "fcmla z22.s, p5/m, z25.s, z15.s, 0 \n\t" \
    "movprfx z20.s, p5/m, z31.s \n\t" \
    "fcmla z20.s, p5/m, z26.s, z12.s, 0 \n\t" \
    "movprfx z23.s, p5/m, z31.s \n\t" \
    "fcmla z23.s, p5/m, z26.s, z15.s, 0 \n\t" \
    "fcmla z18.s, p5/m, z24.s, z12.s, 90 \n\t" \
    "fcmla z21.s, p5/m, z24.s, z15.s, 90 \n\t" \
    "fcmla z19.s, p5/m, z25.s, z12.s, 90 \n\t" \
    "fcmla z22.s, p5/m, z25.s, z15.s, 90 \n\t" \
    "fcmla z20.s, p5/m, z26.s, z12.s, 90 \n\t" \
    "fcmla z23.s, p5/m, z26.s, z15.s, 90 \n\t" \
    "ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
    "ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
    "ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
    :  \
    : [fetchptr] "r" (baseU + 2 * 3 * 64) \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
); \
}
// MULT_2SPIN_BACKEND (part 2)
// Completes the SU(3) multiply started by MULT_2SPIN_1: accumulates the
// contributions of the second (z27..z29) and third (z24..z26) gauge
// columns against chi components z13/z16 and z14/z17 into the
// accumulators z18..z23.  Each FCMLA pair (rotation 0 then 90)
// implements one full complex multiply-accumulate.
#define MULT_2SPIN_2_A64FXf  \
{ \
asm ( \
    "fcmla z18.s, p5/m, z27.s, z13.s, 0 \n\t" \
    "fcmla z21.s, p5/m, z27.s, z16.s, 0 \n\t" \
    "fcmla z19.s, p5/m, z28.s, z13.s, 0 \n\t" \
    "fcmla z22.s, p5/m, z28.s, z16.s, 0 \n\t" \
    "fcmla z20.s, p5/m, z29.s, z13.s, 0 \n\t" \
    "fcmla z23.s, p5/m, z29.s, z16.s, 0 \n\t" \
    "fcmla z18.s, p5/m, z27.s, z13.s, 90 \n\t" \
    "fcmla z21.s, p5/m, z27.s, z16.s, 90 \n\t" \
    "fcmla z19.s, p5/m, z28.s, z13.s, 90 \n\t" \
    "fcmla z22.s, p5/m, z28.s, z16.s, 90 \n\t" \
    "fcmla z20.s, p5/m, z29.s, z13.s, 90 \n\t" \
    "fcmla z23.s, p5/m, z29.s, z16.s, 90 \n\t" \
    "fcmla z18.s, p5/m, z24.s, z14.s, 0 \n\t" \
    "fcmla z21.s, p5/m, z24.s, z17.s, 0 \n\t" \
    "fcmla z19.s, p5/m, z25.s, z14.s, 0 \n\t" \
    "fcmla z22.s, p5/m, z25.s, z17.s, 0 \n\t" \
    "fcmla z20.s, p5/m, z26.s, z14.s, 0 \n\t" \
    "fcmla z23.s, p5/m, z26.s, z17.s, 0 \n\t" \
    "fcmla z18.s, p5/m, z24.s, z14.s, 90 \n\t" \
    "fcmla z21.s, p5/m, z24.s, z17.s, 90 \n\t" \
    "fcmla z19.s, p5/m, z25.s, z14.s, 90 \n\t" \
    "fcmla z22.s, p5/m, z25.s, z17.s, 90 \n\t" \
    "fcmla z20.s, p5/m, z26.s, z14.s, 90 \n\t" \
    "fcmla z23.s, p5/m, z26.s, z17.s, 90 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XP_PROJ
// Spin projection for the +x direction: FCADD with rotation 90 adds
// i * (z18..z23) into the half-spinor z12..z17 (rot 90 == add i*op,
// per the SVE FCADD definition).
#define XP_PROJ_A64FXf  \
{ \
asm ( \
    "fcadd z12.s, p5/m, z12.s, z21.s, 90 \n\t" \
    "fcadd z13.s, p5/m, z13.s, z22.s, 90 \n\t" \
    "fcadd z14.s, p5/m, z14.s, z23.s, 90 \n\t" \
    "fcadd z15.s, p5/m, z15.s, z18.s, 90 \n\t" \
    "fcadd z16.s, p5/m, z16.s, z19.s, 90 \n\t" \
    "fcadd z17.s, p5/m, z17.s, z20.s, 90 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XP_RECON
// Spin reconstruction for +x: copies the upper components z18..z23 into
// the result registers z0..z5 and builds the lower components z6..z11
// as z31 - i*(z18..z23) (movprfx from z31 — presumably zero, set
// elsewhere — then FCADD rot 270, i.e. subtract i*op).
#define XP_RECON_A64FXf  \
asm ( \
    "movprfx z6.s, p5/m, z31.s \n\t" \
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
    "movprfx z7.s, p5/m, z31.s \n\t" \
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
    "movprfx z8.s, p5/m, z31.s \n\t" \
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
    "movprfx z9.s, p5/m, z31.s \n\t" \
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
    "movprfx z10.s, p5/m, z31.s \n\t" \
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
    "movprfx z11.s, p5/m, z31.s \n\t" \
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
    "mov z0.s, p5/m, z18.s \n\t" \
    "mov z1.s, p5/m, z19.s \n\t" \
    "mov z2.s, p5/m, z20.s \n\t" \
    "mov z3.s, p5/m, z21.s \n\t" \
    "mov z4.s, p5/m, z22.s \n\t" \
    "mov z5.s, p5/m, z23.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// XP_RECON_ACCUM
// Accumulating variant of XP_RECON: adds z18..z23 into the running
// result z0..z5 and subtracts i*(z18..z23) (FCADD rot 270) into
// z6..z11, instead of overwriting.
#define XP_RECON_ACCUM_A64FXf  \
asm ( \
    "fcadd z9.s, p5/m, z9.s, z18.s, 270 \n\t" \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fcadd z10.s, p5/m, z10.s, z19.s, 270 \n\t" \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fcadd z11.s, p5/m, z11.s, z20.s, 270 \n\t" \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fcadd z6.s, p5/m, z6.s, z21.s, 270 \n\t" \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fcadd z7.s, p5/m, z7.s, z22.s, 270 \n\t" \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fcadd z8.s, p5/m, z8.s, z23.s, 270 \n\t" \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
// YP_PROJ
// Spin projection for +y: real subtract/add of z18..z23 into the
// half-spinor z12..z17 (first three components subtract, last three
// add).
#define YP_PROJ_A64FXf  \
{ \
asm ( \
    "fsub z12.s, p5/m, z12.s, z21.s \n\t" \
    "fsub z13.s, p5/m, z13.s, z22.s \n\t" \
    "fsub z14.s, p5/m, z14.s, z23.s \n\t" \
    "fadd z15.s, p5/m, z15.s, z18.s \n\t"  \
    "fadd z16.s, p5/m, z16.s, z19.s \n\t"  \
    "fadd z17.s, p5/m, z17.s, z20.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZP_PROJ
// Spin projection for +z: adds i*op (FCADD rot 90) to the first three
// components and -i*op (rot 270) to the last three.
#define ZP_PROJ_A64FXf  \
{ \
asm ( \
    "fcadd z12.s, p5/m, z12.s, z18.s, 90 \n\t" \
    "fcadd z13.s, p5/m, z13.s, z19.s, 90 \n\t" \
    "fcadd z14.s, p5/m, z14.s, z20.s, 90 \n\t" \
    "fcadd z15.s, p5/m, z15.s, z21.s, 270 \n\t" \
    "fcadd z16.s, p5/m, z16.s, z22.s, 270 \n\t" \
    "fcadd z17.s, p5/m, z17.s, z23.s, 270 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TP_PROJ
// Spin projection for +t: straight add of z18..z23 into z12..z17.
#define TP_PROJ_A64FXf  \
{ \
asm ( \
    "fadd z12.s, p5/m, z12.s, z18.s \n\t"  \
    "fadd z13.s, p5/m, z13.s, z19.s \n\t"  \
    "fadd z14.s, p5/m, z14.s, z20.s \n\t"  \
    "fadd z15.s, p5/m, z15.s, z21.s \n\t"  \
    "fadd z16.s, p5/m, z16.s, z22.s \n\t"  \
    "fadd z17.s, p5/m, z17.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_PROJ
// Spin projection for -x: subtracts i*(z18..z23) (FCADD rot 270) from
// the half-spinor z12..z17 — the sign-flipped partner of XP_PROJ.
#define XM_PROJ_A64FXf  \
{ \
asm ( \
    "fcadd z12.s, p5/m, z12.s, z21.s, 270 \n\t" \
    "fcadd z13.s, p5/m, z13.s, z22.s, 270 \n\t" \
    "fcadd z14.s, p5/m, z14.s, z23.s, 270 \n\t" \
    "fcadd z15.s, p5/m, z15.s, z18.s, 270 \n\t" \
    "fcadd z16.s, p5/m, z16.s, z19.s, 270 \n\t" \
    "fcadd z17.s, p5/m, z17.s, z20.s, 270 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// XM_RECON
// Spin reconstruction for -x: upper components z18..z23 are copied into
// z0..z5; lower components z6..z11 are built as z31 + i*(z18..z23)
// (movprfx from z31 — presumably zero, set elsewhere — then FCADD
// rot 90).  Mirror of XP_RECON with the opposite rotation sign.
#define XM_RECON_A64FXf  \
asm ( \
    "movprfx z6.s, p5/m, z31.s \n\t" \
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
    "movprfx z7.s, p5/m, z31.s \n\t" \
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
    "movprfx z8.s, p5/m, z31.s \n\t" \
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
    "movprfx z9.s, p5/m, z31.s \n\t" \
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
    "movprfx z10.s, p5/m, z31.s \n\t" \
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
    "movprfx z11.s, p5/m, z31.s \n\t" \
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
    "mov z0.s, p5/m, z18.s \n\t" \
    "mov z1.s, p5/m, z19.s \n\t" \
    "mov z2.s, p5/m, z20.s \n\t" \
    "mov z3.s, p5/m, z21.s \n\t" \
    "mov z4.s, p5/m, z22.s \n\t" \
    "mov z5.s, p5/m, z23.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
// YM_PROJ
// Spin projection for -y: sign-flipped partner of YP_PROJ (first three
// components add, last three subtract).
#define YM_PROJ_A64FXf  \
{ \
asm ( \
    "fadd z12.s, p5/m, z12.s, z21.s \n\t"  \
    "fadd z13.s, p5/m, z13.s, z22.s \n\t"  \
    "fadd z14.s, p5/m, z14.s, z23.s \n\t"  \
    "fsub z15.s, p5/m, z15.s, z18.s \n\t" \
    "fsub z16.s, p5/m, z16.s, z19.s \n\t" \
    "fsub z17.s, p5/m, z17.s, z20.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ZM_PROJ
// Spin projection for -z: rotation signs swapped relative to ZP_PROJ
// (rot 270 on the first three components, rot 90 on the last three).
#define ZM_PROJ_A64FXf  \
{ \
asm ( \
    "fcadd z12.s, p5/m, z12.s, z18.s, 270 \n\t" \
    "fcadd z13.s, p5/m, z13.s, z19.s, 270 \n\t" \
    "fcadd z14.s, p5/m, z14.s, z20.s, 270 \n\t" \
    "fcadd z15.s, p5/m, z15.s, z21.s, 90 \n\t" \
    "fcadd z16.s, p5/m, z16.s, z22.s, 90 \n\t" \
    "fcadd z17.s, p5/m, z17.s, z23.s, 90 \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// TM_PROJ
// Spin projection for -t: straight subtract of z18..z23 from z12..z17.
// Also (re)establishes predicate p5, unlike the other PROJ macros.
#define TM_PROJ_A64FXf  \
{ \
asm ( \
    "ptrue p5.s \n\t" \
    "fsub z12.s, p5/m, z12.s, z18.s \n\t" \
    "fsub z13.s, p5/m, z13.s, z19.s \n\t" \
    "fsub z14.s, p5/m, z14.s, z20.s \n\t" \
    "fsub z15.s, p5/m, z15.s, z21.s \n\t" \
    "fsub z16.s, p5/m, z16.s, z22.s \n\t" \
    "fsub z17.s, p5/m, z17.s, z23.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
}
// ----------------------------------------------------------------------
// *_RECON_ACCUM family: accumulate the reconstructed half-spinor
// z18..z23 into the running result psi (upper components z0..z5, lower
// components z6..z11).  Per direction, the upper half is always a plain
// fadd; the lower half uses fadd / fsub / FCADD(rot 90 == +i*op) /
// FCADD(rot 270 == -i*op) with the sign pattern of that direction's
// projector.
// ----------------------------------------------------------------------

// XM_RECON_ACCUM — lower half accumulates +i*op (rot 90).
#define XM_RECON_ACCUM_A64FXf  \
asm ( \
    "fcadd z9.s, p5/m, z9.s, z18.s, 90 \n\t" \
    "fcadd z10.s, p5/m, z10.s, z19.s, 90 \n\t" \
    "fcadd z11.s, p5/m, z11.s, z20.s, 90 \n\t" \
    "fcadd z6.s, p5/m, z6.s, z21.s, 90 \n\t" \
    "fcadd z7.s, p5/m, z7.s, z22.s, 90 \n\t" \
    "fcadd z8.s, p5/m, z8.s, z23.s, 90 \n\t" \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// YP_RECON_ACCUM — lower half: subtract on z9..z11, add on z6..z8.
#define YP_RECON_ACCUM_A64FXf  \
asm ( \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fsub z9.s, p5/m, z9.s, z18.s \n\t" \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fsub z10.s, p5/m, z10.s, z19.s \n\t" \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fsub z11.s, p5/m, z11.s, z20.s \n\t" \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fadd z6.s, p5/m, z6.s, z21.s \n\t"  \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fadd z7.s, p5/m, z7.s, z22.s \n\t"  \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    "fadd z8.s, p5/m, z8.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// YM_RECON_ACCUM — sign-flipped partner of YP: add on z9..z11,
// subtract on z6..z8.
#define YM_RECON_ACCUM_A64FXf  \
asm ( \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fadd z9.s, p5/m, z9.s, z18.s \n\t"  \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fadd z10.s, p5/m, z10.s, z19.s \n\t"  \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fadd z11.s, p5/m, z11.s, z20.s \n\t"  \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fsub z6.s, p5/m, z6.s, z21.s \n\t" \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fsub z7.s, p5/m, z7.s, z22.s \n\t" \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    "fsub z8.s, p5/m, z8.s, z23.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// ZP_RECON_ACCUM — lower half: -i*op (rot 270) on z6..z8,
// +i*op (rot 90) on z9..z11.
#define ZP_RECON_ACCUM_A64FXf  \
asm ( \
    "fcadd z6.s, p5/m, z6.s, z18.s, 270 \n\t" \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fcadd z7.s, p5/m, z7.s, z19.s, 270 \n\t" \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fcadd z8.s, p5/m, z8.s, z20.s, 270 \n\t" \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fcadd z9.s, p5/m, z9.s, z21.s, 90 \n\t" \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fcadd z10.s, p5/m, z10.s, z22.s, 90 \n\t" \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fcadd z11.s, p5/m, z11.s, z23.s, 90 \n\t" \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// ZM_RECON_ACCUM — rotation signs swapped relative to ZP.
#define ZM_RECON_ACCUM_A64FXf  \
asm ( \
    "fcadd z6.s, p5/m, z6.s, z18.s, 90 \n\t" \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fcadd z7.s, p5/m, z7.s, z19.s, 90 \n\t" \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fcadd z8.s, p5/m, z8.s, z20.s, 90 \n\t" \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fcadd z9.s, p5/m, z9.s, z21.s, 270 \n\t" \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fcadd z10.s, p5/m, z10.s, z22.s, 270 \n\t" \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fcadd z11.s, p5/m, z11.s, z23.s, 270 \n\t" \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// TP_RECON_ACCUM — both halves accumulate with plain fadd.
#define TP_RECON_ACCUM_A64FXf  \
asm ( \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 

// TM_RECON_ACCUM — upper half adds, lower half subtracts.
#define TM_RECON_ACCUM_A64FXf  \
asm ( \
    "fadd z0.s, p5/m, z0.s, z18.s \n\t"  \
    "fsub z6.s, p5/m, z6.s, z18.s \n\t" \
    "fadd z1.s, p5/m, z1.s, z19.s \n\t"  \
    "fsub z7.s, p5/m, z7.s, z19.s \n\t" \
    "fadd z2.s, p5/m, z2.s, z20.s \n\t"  \
    "fsub z8.s, p5/m, z8.s, z20.s \n\t" \
    "fadd z3.s, p5/m, z3.s, z21.s \n\t"  \
    "fsub z9.s, p5/m, z9.s, z21.s \n\t" \
    "fadd z4.s, p5/m, z4.s, z22.s \n\t"  \
    "fsub z10.s, p5/m, z10.s, z22.s \n\t" \
    "fadd z5.s, p5/m, z5.s, z23.s \n\t"  \
    "fsub z11.s, p5/m, z11.s, z23.s \n\t" \
    :  \
    :  \
    : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); 
|  | // ZERO_PSI | ||||||
|  | #define ZERO_PSI_A64FXf  \ | ||||||
|  | asm ( \ | ||||||
|  |     "ptrue p5.s \n\t" \ | ||||||
|  |     "fmov z0.s , 0 \n\t" \ | ||||||
|  |     "fmov z1.s , 0 \n\t" \ | ||||||
|  |     "fmov z2.s , 0 \n\t" \ | ||||||
|  |     "fmov z3.s , 0 \n\t" \ | ||||||
|  |     "fmov z4.s , 0 \n\t" \ | ||||||
|  |     "fmov z5.s , 0 \n\t" \ | ||||||
|  |     "fmov z6.s , 0 \n\t" \ | ||||||
|  |     "fmov z7.s , 0 \n\t" \ | ||||||
|  |     "fmov z8.s , 0 \n\t" \ | ||||||
|  |     "fmov z9.s , 0 \n\t" \ | ||||||
|  |     "fmov z10.s , 0 \n\t" \ | ||||||
|  |     "fmov z11.s , 0 \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
|  | // PREFETCH_RESULT_L2_STORE (prefetch store to L2) | ||||||
|  | #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | ||||||
|  | #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \ | ||||||
|  | { \ | ||||||
|  | asm ( \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \ | ||||||
|  |     "prfd PSTL1STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \ | ||||||
|  |     :  \ | ||||||
|  |     : [fetchptr] "r" (base) \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \ | ||||||
|  | ); \ | ||||||
|  | } | ||||||
|  | // ADD_RESULT_INTERNAL | ||||||
|  | #define ADD_RESULT_INTERNAL_A64FXf  \ | ||||||
|  | asm ( \ | ||||||
|  |     "fadd z0.s, p5/m, z0.s, z12.s \n\t"  \ | ||||||
|  |     "fadd z1.s, p5/m, z1.s, z13.s \n\t"  \ | ||||||
|  |     "fadd z2.s, p5/m, z2.s, z14.s \n\t"  \ | ||||||
|  |     "fadd z3.s, p5/m, z3.s, z15.s \n\t"  \ | ||||||
|  |     "fadd z4.s, p5/m, z4.s, z16.s \n\t"  \ | ||||||
|  |     "fadd z5.s, p5/m, z5.s, z17.s \n\t"  \ | ||||||
|  |     "fadd z6.s, p5/m, z6.s, z18.s \n\t"  \ | ||||||
|  |     "fadd z7.s, p5/m, z7.s, z19.s \n\t"  \ | ||||||
|  |     "fadd z8.s, p5/m, z8.s, z20.s \n\t"  \ | ||||||
|  |     "fadd z9.s, p5/m, z9.s, z21.s \n\t"  \ | ||||||
|  |     "fadd z10.s, p5/m, z10.s, z22.s \n\t"  \ | ||||||
|  |     "fadd z11.s, p5/m, z11.s, z23.s \n\t"  \ | ||||||
|  |     :  \ | ||||||
|  |     :  \ | ||||||
|  |     : "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \ | ||||||
|  | );  | ||||||
|  |  | ||||||
| @@ -38,11 +38,10 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| #define LOCK_GAUGE(A)   | #define LOCK_GAUGE(A)   | ||||||
| #define UNLOCK_GAUGE(A)   | #define UNLOCK_GAUGE(A)   | ||||||
| #define MASK_REGS                      DECLARATIONS_A64FXd   | #define MASK_REGS                      DECLARATIONS_A64FXd   | ||||||
| #define SAVE_RESULT(A,B)               RESULT_A64FXd(A);   | #define SAVE_RESULT(A,B)               RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)   | ||||||
| #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)   | #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXd(Dir)   | ||||||
| #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd   | #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXd   | ||||||
| #define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)   | #define LOAD_CHI(base)                 LOAD_CHI_A64FXd(base)   | ||||||
| #define ZERO_PSI                       ZERO_PSI_A64FXd   |  | ||||||
| #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)   | #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)   | ||||||
| #define XP_PROJ                        XP_PROJ_A64FXd   | #define XP_PROJ                        XP_PROJ_A64FXd   | ||||||
| #define YP_PROJ                        YP_PROJ_A64FXd   | #define YP_PROJ                        YP_PROJ_A64FXd   | ||||||
| @@ -71,7 +70,6 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| #define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }   | #define MAYBEPERM(Dir,perm)            if (Dir != 3) { if (perm) { PERMUTE; } }   | ||||||
| // DECLARATIONS | // DECLARATIONS | ||||||
| #define DECLARATIONS_A64FXd  \ | #define DECLARATIONS_A64FXd  \ | ||||||
|     uint64_t baseU; \ |  | ||||||
|     const uint64_t lut[4][8] = { \ |     const uint64_t lut[4][8] = { \ | ||||||
|         {4, 5, 6, 7, 0, 1, 2, 3}, \ |         {4, 5, 6, 7, 0, 1, 2, 3}, \ | ||||||
|         {2, 3, 0, 1, 6, 7, 4, 5}, \ |         {2, 3, 0, 1, 6, 7, 4, 5}, \ | ||||||
| @@ -128,114 +126,114 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| // RESULT | // RESULT | ||||||
| #define RESULT_A64FXd(base)  \ | #define RESULT_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \ | ||||||
|     svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32);  \ |     svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \ | ||||||
| } | } | ||||||
| // PREFETCH_CHIMU_L2 (prefetch to L2) | // PREFETCH_CHIMU_L2 (prefetch to L2) | ||||||
| #define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \ | #define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_CHIMU_L1 (prefetch to L1) | // PREFETCH_CHIMU_L1 (prefetch to L1) | ||||||
| #define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \ | #define PREFETCH_CHIMU_L1_INTERNAL_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_GAUGE_L2 (prefetch to L2) | // PREFETCH_GAUGE_L2 (prefetch to L2) | ||||||
| #define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \ | #define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \ |     const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_GAUGE_L1 (prefetch to L1) | // PREFETCH_GAUGE_L1 (prefetch to L1) | ||||||
| #define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \ | #define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \ | ||||||
| } | } | ||||||
| // LOAD_CHI | // LOAD_CHI | ||||||
| #define LOAD_CHI_A64FXd(base)  \ | #define LOAD_CHI_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     Chi_00 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(0));  \ |     Chi_00 = svld1(pg1, (float64_t*)(base + 0 * 64));  \ | ||||||
|     Chi_01 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(1));  \ |     Chi_01 = svld1(pg1, (float64_t*)(base + 1 * 64));  \ | ||||||
|     Chi_02 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(2));  \ |     Chi_02 = svld1(pg1, (float64_t*)(base + 2 * 64));  \ | ||||||
|     Chi_10 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(3));  \ |     Chi_10 = svld1(pg1, (float64_t*)(base + 3 * 64));  \ | ||||||
|     Chi_11 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(4));  \ |     Chi_11 = svld1(pg1, (float64_t*)(base + 4 * 64));  \ | ||||||
|     Chi_12 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(5));  \ |     Chi_12 = svld1(pg1, (float64_t*)(base + 5 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU | // LOAD_CHIMU | ||||||
| #define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \ | #define LOAD_CHIMU_INTERLEAVED_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU_0213 | // LOAD_CHIMU_0213 | ||||||
| #define LOAD_CHIMU_0213_A64FXd  \ | #define LOAD_CHIMU_0213_A64FXd  \ | ||||||
| { \ | { \ | ||||||
|     const SiteSpinor & ref(in[offset]); \ |     const SiteSpinor & ref(in[offset]); \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU_0312 | // LOAD_CHIMU_0312 | ||||||
| #define LOAD_CHIMU_0312_A64FXd  \ | #define LOAD_CHIMU_0312_A64FXd  \ | ||||||
| { \ | { \ | ||||||
|     const SiteSpinor & ref(in[offset]); \ |     const SiteSpinor & ref(in[offset]); \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_TABLE0 | // LOAD_TABLE0 | ||||||
| #define LOAD_TABLE0  \ | #define LOAD_TABLE0  \ | ||||||
| @@ -263,26 +261,26 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     Chi_12 = svtbl(Chi_12, table0);     |     Chi_12 = svtbl(Chi_12, table0);     | ||||||
|  |  | ||||||
| // LOAD_GAUGE | // LOAD_GAUGE | ||||||
| #define LOAD_GAUGE(A)  \ | #define LOAD_GAUGE  \ | ||||||
|  |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \ |     U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \ |     U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \ |     U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \ |     U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \ |     U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \ |  | ||||||
| } | } | ||||||
| // MULT_2SPIN | // MULT_2SPIN | ||||||
| #define MULT_2SPIN_1_A64FXd(A)  \ | #define MULT_2SPIN_1_A64FXd(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|     U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \ |     U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \ |     U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \ |     U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \ |     U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \ |     U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \ |     U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \ |     UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \ | ||||||
|     UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \ |     UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \ | ||||||
|     UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \ |     UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \ | ||||||
| @@ -295,9 +293,9 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \ |     UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \ | ||||||
|     UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \ |     UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \ | ||||||
|     UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \ |     UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \ | ||||||
|     U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-4));  \ |     U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-1));  \ |     U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(2));  \ |     U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // MULT_2SPIN_BACKEND | // MULT_2SPIN_BACKEND | ||||||
| #define MULT_2SPIN_2_A64FXd  \ | #define MULT_2SPIN_2_A64FXd  \ | ||||||
| @@ -572,12 +570,12 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     result_31 = svdup_f64(0.); \ |     result_31 = svdup_f64(0.); \ | ||||||
|     result_32 = svdup_f64(0.);  |     result_32 = svdup_f64(0.);  | ||||||
|  |  | ||||||
| // PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing) | // PREFETCH_RESULT_L2_STORE (prefetch store to L2) | ||||||
| #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \ | #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base)  \ | ||||||
| { \ | { \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | ||||||
| #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \ | #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base)  \ | ||||||
|   | |||||||
| @@ -38,11 +38,10 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| #define LOCK_GAUGE(A)   | #define LOCK_GAUGE(A)   | ||||||
| #define UNLOCK_GAUGE(A)   | #define UNLOCK_GAUGE(A)   | ||||||
| #define MASK_REGS                      DECLARATIONS_A64FXf   | #define MASK_REGS                      DECLARATIONS_A64FXf   | ||||||
| #define SAVE_RESULT(A,B)               RESULT_A64FXf(A);   | #define SAVE_RESULT(A,B)               RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)   | ||||||
| #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)   | #define MULT_2SPIN_1(Dir)              MULT_2SPIN_1_A64FXf(Dir)   | ||||||
| #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf   | #define MULT_2SPIN_2                   MULT_2SPIN_2_A64FXf   | ||||||
| #define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)   | #define LOAD_CHI(base)                 LOAD_CHI_A64FXf(base)   | ||||||
| #define ZERO_PSI                       ZERO_PSI_A64FXf   |  | ||||||
| #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)   | #define ADD_RESULT(base,basep)         LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)   | ||||||
| #define XP_PROJ                        XP_PROJ_A64FXf   | #define XP_PROJ                        XP_PROJ_A64FXf   | ||||||
| #define YP_PROJ                        YP_PROJ_A64FXf   | #define YP_PROJ                        YP_PROJ_A64FXf   | ||||||
| @@ -71,7 +70,6 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| #define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }   | #define MAYBEPERM(A,perm)              if (perm) { PERMUTE; }   | ||||||
| // DECLARATIONS | // DECLARATIONS | ||||||
| #define DECLARATIONS_A64FXf  \ | #define DECLARATIONS_A64FXf  \ | ||||||
|     uint64_t baseU; \ |  | ||||||
|     const uint32_t lut[4][16] = { \ |     const uint32_t lut[4][16] = { \ | ||||||
|         {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \ |         {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \ | ||||||
|         {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \ |         {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \ | ||||||
| @@ -128,114 +126,114 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| // RESULT | // RESULT | ||||||
| #define RESULT_A64FXf(base)  \ | #define RESULT_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64), result_00);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64), result_01);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64), result_02);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64), result_10);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64), result_11);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64), result_12);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64), result_20);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64), result_21);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64), result_22);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64), result_30);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64), result_31);  \ | ||||||
|     svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32);  \ |     svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64), result_32);  \ | ||||||
| } | } | ||||||
| // PREFETCH_CHIMU_L2 (prefetch to L2) | // PREFETCH_CHIMU_L2 (prefetch to L2) | ||||||
| #define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \ | #define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PLDL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_CHIMU_L1 (prefetch to L1) | // PREFETCH_CHIMU_L1 (prefetch to L1) | ||||||
| #define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \ | #define PREFETCH_CHIMU_L1_INTERNAL_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(0), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(4), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(base), (int64_t)(8), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PLDL1STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_GAUGE_L2 (prefetch to L2) | // PREFETCH_GAUGE_L2 (prefetch to L2) | ||||||
| #define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \ | #define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \ |     const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(-4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(12), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 768), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(16), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1024), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(20), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1280), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(24), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1536), SV_PLDL2STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(28), SV_PLDL2STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 1792), SV_PLDL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_GAUGE_L1 (prefetch to L1) | // PREFETCH_GAUGE_L1 (prefetch to L1) | ||||||
| #define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \ | #define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(0), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(4), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \ | ||||||
|     svprfd_vnum(pg1, (void*)(baseU), (int64_t)(8), SV_PLDL1STRM); \ |     svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \ | ||||||
| } | } | ||||||
| // LOAD_CHI | // LOAD_CHI | ||||||
| #define LOAD_CHI_A64FXf(base)  \ | #define LOAD_CHI_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     Chi_00 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(0));  \ |     Chi_00 = svld1(pg1, (float32_t*)(base + 0 * 64));  \ | ||||||
|     Chi_01 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(1));  \ |     Chi_01 = svld1(pg1, (float32_t*)(base + 1 * 64));  \ | ||||||
|     Chi_02 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(2));  \ |     Chi_02 = svld1(pg1, (float32_t*)(base + 2 * 64));  \ | ||||||
|     Chi_10 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(3));  \ |     Chi_10 = svld1(pg1, (float32_t*)(base + 3 * 64));  \ | ||||||
|     Chi_11 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(4));  \ |     Chi_11 = svld1(pg1, (float32_t*)(base + 4 * 64));  \ | ||||||
|     Chi_12 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(5));  \ |     Chi_12 = svld1(pg1, (float32_t*)(base + 5 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU | // LOAD_CHIMU | ||||||
| #define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \ | #define LOAD_CHIMU_INTERLEAVED_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU_0213 | // LOAD_CHIMU_0213 | ||||||
| #define LOAD_CHIMU_0213_A64FXf  \ | #define LOAD_CHIMU_0213_A64FXf  \ | ||||||
| { \ | { \ | ||||||
|     const SiteSpinor & ref(in[offset]); \ |     const SiteSpinor & ref(in[offset]); \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_CHIMU_0312 | // LOAD_CHIMU_0312 | ||||||
| #define LOAD_CHIMU_0312_A64FXf  \ | #define LOAD_CHIMU_0312_A64FXf  \ | ||||||
| { \ | { \ | ||||||
|     const SiteSpinor & ref(in[offset]); \ |     const SiteSpinor & ref(in[offset]); \ | ||||||
|     Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6));  \ |     Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3));  \ |     Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64));  \ | ||||||
|     Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5));  \ |     Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4));  \ |     Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64));  \ | ||||||
|     Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4));  \ |     Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5));  \ |     Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64));  \ | ||||||
|     Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3));  \ |     Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0));  \ |     Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2));  \ |     Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1));  \ |     Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1));  \ |     Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2));  \ |     Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // LOAD_TABLE0 | // LOAD_TABLE0 | ||||||
| #define LOAD_TABLE0  \ | #define LOAD_TABLE0  \ | ||||||
| @@ -263,26 +261,26 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     Chi_12 = svtbl(Chi_12, table0);     |     Chi_12 = svtbl(Chi_12, table0);     | ||||||
|  |  | ||||||
| // LOAD_GAUGE | // LOAD_GAUGE | ||||||
| #define LOAD_GAUGE(A)  \ | #define LOAD_GAUGE  \ | ||||||
|  |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \ |     U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \ |     U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \ |     U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \ |     U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \ |     U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \ |  | ||||||
| } | } | ||||||
| // MULT_2SPIN | // MULT_2SPIN | ||||||
| #define MULT_2SPIN_1_A64FXf(A)  \ | #define MULT_2SPIN_1_A64FXf(A)  \ | ||||||
| { \ | { \ | ||||||
|     const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \ |     const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \ | ||||||
|     U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6));  \ |     U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3));  \ |     U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0));  \ |     U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64));  \ | ||||||
|     U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5));  \ |     U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64));  \ | ||||||
|     U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2));  \ |     U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64));  \ | ||||||
|     U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1));  \ |     U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64));  \ | ||||||
|     UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \ |     UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \ | ||||||
|     UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \ |     UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \ | ||||||
|     UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \ |     UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \ | ||||||
| @@ -295,9 +293,9 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \ |     UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \ | ||||||
|     UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \ |     UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \ | ||||||
|     UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \ |     UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \ | ||||||
|     U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-4));  \ |     U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -4 * 64));  \ | ||||||
|     U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-1));  \ |     U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -1 * 64));  \ | ||||||
|     U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(2));  \ |     U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 2 * 64));  \ | ||||||
| } | } | ||||||
| // MULT_2SPIN_BACKEND | // MULT_2SPIN_BACKEND | ||||||
| #define MULT_2SPIN_2_A64FXf  \ | #define MULT_2SPIN_2_A64FXf  \ | ||||||
| @@ -572,12 +570,12 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
|     result_31 = svdup_f32(0.); \ |     result_31 = svdup_f32(0.); \ | ||||||
|     result_32 = svdup_f32(0.);  |     result_32 = svdup_f32(0.);  | ||||||
|  |  | ||||||
| // PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing) | // PREFETCH_RESULT_L2_STORE (prefetch store to L2) | ||||||
| #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \ | #define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base)  \ | ||||||
| { \ | { \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \ | ||||||
|     asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \ |     svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \ | ||||||
| } | } | ||||||
| // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | // PREFETCH_RESULT_L1_STORE (prefetch store to L1) | ||||||
| #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \ | #define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base)  \ | ||||||
|   | |||||||
| @@ -46,7 +46,6 @@ Author: Nils Meyer <nils.meyer@ur.de> | |||||||
| #undef MULT_2SPIN_2 | #undef MULT_2SPIN_2 | ||||||
| #undef MAYBEPERM | #undef MAYBEPERM | ||||||
| #undef LOAD_CHI | #undef LOAD_CHI | ||||||
| #undef ZERO_PSI |  | ||||||
| #undef XP_PROJ | #undef XP_PROJ | ||||||
| #undef YP_PROJ | #undef YP_PROJ | ||||||
| #undef ZP_PROJ | #undef ZP_PROJ | ||||||
|   | |||||||
| @@ -38,20 +38,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk> | |||||||
| #ifdef GRID_HIP | #ifdef GRID_HIP | ||||||
| #include <hip/hip_fp16.h> | #include <hip/hip_fp16.h> | ||||||
| #endif | #endif | ||||||
| #ifdef GRID_SYCL |  | ||||||
| namespace Grid { | namespace Grid { | ||||||
|   typedef struct { uint16_t x;} half; |  | ||||||
|   typedef struct { half   x; half   y;} half2; | #if (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) | ||||||
|   typedef struct { float  x; float  y;} float2; | typedef struct { uint16_t x;} half; | ||||||
|   typedef struct { double x; double y;} double2; |  | ||||||
| } |  | ||||||
| #endif | #endif | ||||||
|  |  | ||||||
|  |  | ||||||
| namespace Grid { |  | ||||||
|  |  | ||||||
|  |  | ||||||
|  |  | ||||||
| typedef struct Half2_t { half x; half y; } Half2; | typedef struct Half2_t { half x; half y; } Half2; | ||||||
|  |  | ||||||
| #define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH ) | #define COALESCE_GRANULARITY ( GEN_SIMD_WIDTH ) | ||||||
| @@ -60,25 +52,11 @@ template<class pair> | |||||||
| class GpuComplex { | class GpuComplex { | ||||||
| public: | public: | ||||||
|   pair z; |   pair z; | ||||||
|   typedef decltype(z.x) Real; |   typedef decltype(z.x) real; | ||||||
| public:  | public:  | ||||||
|   accelerator_inline GpuComplex() = default; |   accelerator_inline GpuComplex() = default; | ||||||
|   accelerator_inline GpuComplex(Real re,Real im) { z.x=re; z.y=im; }; |   accelerator_inline GpuComplex(real re,real im) { z.x=re; z.y=im; }; | ||||||
|   accelerator_inline GpuComplex(const GpuComplex &zz) { z = zz.z;}; |   accelerator_inline GpuComplex(const GpuComplex &zz) { z = zz.z;}; | ||||||
|   accelerator_inline Real real(void) const { return z.x; }; |  | ||||||
|   accelerator_inline Real imag(void) const { return z.y; }; |  | ||||||
|   accelerator_inline GpuComplex &operator*=(const GpuComplex &r) { |  | ||||||
|     *this = (*this) * r; |  | ||||||
|     return *this; |  | ||||||
|   } |  | ||||||
|   accelerator_inline GpuComplex &operator+=(const GpuComplex &r) { |  | ||||||
|     *this = (*this) + r; |  | ||||||
|     return *this; |  | ||||||
|   } |  | ||||||
|   accelerator_inline GpuComplex &operator-=(const GpuComplex &r) { |  | ||||||
|     *this = (*this) - r; |  | ||||||
|     return *this; |  | ||||||
|   } |  | ||||||
|   friend accelerator_inline  GpuComplex operator+(const GpuComplex &lhs,const GpuComplex &rhs) {  |   friend accelerator_inline  GpuComplex operator+(const GpuComplex &lhs,const GpuComplex &rhs) {  | ||||||
|     GpuComplex r ;  |     GpuComplex r ;  | ||||||
|     r.z.x = lhs.z.x + rhs.z.x;  |     r.z.x = lhs.z.x + rhs.z.x;  | ||||||
| @@ -171,11 +149,6 @@ typedef GpuVector<NSIMD_RealD,    double      > GpuVectorRD; | |||||||
| typedef GpuVector<NSIMD_ComplexD, GpuComplexD > GpuVectorCD; | typedef GpuVector<NSIMD_ComplexD, GpuComplexD > GpuVectorCD; | ||||||
| typedef GpuVector<NSIMD_Integer,  Integer     > GpuVectorI; | typedef GpuVector<NSIMD_Integer,  Integer     > GpuVectorI; | ||||||
|  |  | ||||||
| accelerator_inline GpuComplexF timesI(const GpuComplexF &r)     { return(GpuComplexF(-r.imag(),r.real()));} |  | ||||||
| accelerator_inline GpuComplexD timesI(const GpuComplexD &r)     { return(GpuComplexD(-r.imag(),r.real()));} |  | ||||||
| accelerator_inline GpuComplexF timesMinusI(const GpuComplexF &r){ return(GpuComplexF(r.imag(),-r.real()));} |  | ||||||
| accelerator_inline GpuComplexD timesMinusI(const GpuComplexD &r){ return(GpuComplexD(r.imag(),-r.real()));} |  | ||||||
|  |  | ||||||
| accelerator_inline float half2float(half h) | accelerator_inline float half2float(half h) | ||||||
| { | { | ||||||
|   float f; |   float f; | ||||||
| @@ -183,7 +156,7 @@ accelerator_inline float half2float(half h) | |||||||
|   f = __half2float(h); |   f = __half2float(h); | ||||||
| #else  | #else  | ||||||
|   Grid_half hh;  |   Grid_half hh;  | ||||||
|   hh.x = h.x; |   hh.x = hr.x; | ||||||
|   f=  sfw_half_to_float(hh); |   f=  sfw_half_to_float(hh); | ||||||
| #endif | #endif | ||||||
|   return f; |   return f; | ||||||
|   | |||||||
| @@ -148,14 +148,10 @@ accelerator_inline void sub (ComplexF * __restrict__ y,const ComplexF * __restri | |||||||
| accelerator_inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); } | accelerator_inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); } | ||||||
|    |    | ||||||
| //conjugate already supported for complex | //conjugate already supported for complex | ||||||
| accelerator_inline ComplexF timesI(const ComplexF &r)     { return(ComplexF(-r.imag(),r.real()));} | accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));} | ||||||
| accelerator_inline ComplexD timesI(const ComplexD &r)     { return(ComplexD(-r.imag(),r.real()));} | accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));} | ||||||
| accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(ComplexF(r.imag(),-r.real()));} | accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));} | ||||||
| accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(ComplexD(r.imag(),-r.real()));} | accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));} | ||||||
| //accelerator_inline ComplexF timesI(const ComplexF &r)     { return(r*ComplexF(0.0,1.0));} |  | ||||||
| //accelerator_inline ComplexD timesI(const ComplexD &r)     { return(r*ComplexD(0.0,1.0));} |  | ||||||
| //accelerator_inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));} |  | ||||||
| //accelerator_inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));} |  | ||||||
|  |  | ||||||
| // define projections to real and imaginay parts | // define projections to real and imaginay parts | ||||||
| accelerator_inline ComplexF projReal(const ComplexF &r){return( ComplexF(r.real(), 0.0));} | accelerator_inline ComplexF projReal(const ComplexF &r){return( ComplexF(r.real(), 0.0));} | ||||||
|   | |||||||
							
								
								
									
										2377
									
								
								Grid/simd/gridverter.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										2377
									
								
								Grid/simd/gridverter.py
									
									
									
									
									
										Executable file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| @@ -7,20 +7,20 @@ template<class vobj> | |||||||
| class SimpleCompressor { | class SimpleCompressor { | ||||||
| public: | public: | ||||||
|   void Point(int) {}; |   void Point(int) {}; | ||||||
|   accelerator_inline int  CommDatumSize(void) const { return sizeof(vobj); } |   accelerator_inline int  CommDatumSize(void) { return sizeof(vobj); } | ||||||
|   accelerator_inline bool DecompressionStep(void) const { return false; } |   accelerator_inline bool DecompressionStep(void) { return false; } | ||||||
|   template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) const { buf[o]=in; } |   template<class cobj> accelerator_inline void Compress(cobj *buf,int o,const cobj &in) { buf[o]=in; } | ||||||
|   accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o) const { |   accelerator_inline void Exchange(vobj *mp,vobj *vp0,vobj *vp1,Integer type,Integer o){ | ||||||
|     exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type); |     exchange(mp[2*o],mp[2*o+1],vp0[o],vp1[o],type); | ||||||
|   } |   } | ||||||
|   accelerator_inline void Decompress(vobj *out,vobj *in, int o) const { assert(0); } |   accelerator_inline void Decompress(vobj *out,vobj *in, int o){ assert(0); } | ||||||
|   accelerator_inline void CompressExchange(vobj *out0,vobj *out1,const vobj *in, |   accelerator_inline void CompressExchange(vobj *out0,vobj *out1,const vobj *in, | ||||||
| 			       int j,int k, int m,int type) const { | 			       int j,int k, int m,int type){ | ||||||
|     exchange(out0[j],out1[j],in[k],in[m],type); |     exchange(out0[j],out1[j],in[k],in[m],type); | ||||||
|   } |   } | ||||||
|   // For cshift. Cshift should drop compressor coupling altogether  |   // For cshift. Cshift should drop compressor coupling altogether  | ||||||
|   // because I had to decouple the code from the Stencil anyway |   // because I had to decouple the code from the Stencil anyway | ||||||
|   accelerator_inline vobj operator() (const vobj &arg) const { |   accelerator_inline vobj operator() (const vobj &arg) { | ||||||
|     return arg; |     return arg; | ||||||
|   } |   } | ||||||
| }; | }; | ||||||
|   | |||||||
| @@ -147,16 +147,16 @@ class CartesianStencilAccelerator { | |||||||
|   cobj* u_recv_buf_p; |   cobj* u_recv_buf_p; | ||||||
|   cobj* u_send_buf_p; |   cobj* u_send_buf_p; | ||||||
|  |  | ||||||
|   accelerator_inline cobj *CommBuf(void) const { return u_recv_buf_p; } |   accelerator_inline cobj *CommBuf(void) { return u_recv_buf_p; } | ||||||
|  |  | ||||||
|   accelerator_inline int GetNodeLocal(int osite,int point) const { |   accelerator_inline int GetNodeLocal(int osite,int point) { | ||||||
|     return this->_entries_p[point+this->_npoints*osite]._is_local; |     return this->_entries_p[point+this->_npoints*osite]._is_local; | ||||||
|   } |   } | ||||||
|   accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) const { |   accelerator_inline StencilEntry * GetEntry(int &ptype,int point,int osite) { | ||||||
|     ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite]; |     ptype = this->_permute_type[point]; return & this->_entries_p[point+this->_npoints*osite]; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) const { |   accelerator_inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) { | ||||||
|     uint64_t cbase = (uint64_t)&u_recv_buf_p[0]; |     uint64_t cbase = (uint64_t)&u_recv_buf_p[0]; | ||||||
|     local = this->_entries_p[ent]._is_local; |     local = this->_entries_p[ent]._is_local; | ||||||
|     perm  = this->_entries_p[ent]._permute; |     perm  = this->_entries_p[ent]._permute; | ||||||
| @@ -168,14 +168,14 @@ class CartesianStencilAccelerator { | |||||||
|     } |     } | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) const { |   accelerator_inline uint64_t GetPFInfo(int ent,uint64_t base) { | ||||||
|     uint64_t cbase = (uint64_t)&u_recv_buf_p[0]; |     uint64_t cbase = (uint64_t)&u_recv_buf_p[0]; | ||||||
|     int local = this->_entries_p[ent]._is_local; |     int local = this->_entries_p[ent]._is_local; | ||||||
|     if (local) return  base + this->_entries_p[ent]._byte_offset; |     if (local) return  base + this->_entries_p[ent]._byte_offset; | ||||||
|     else       return cbase + this->_entries_p[ent]._byte_offset; |     else       return cbase + this->_entries_p[ent]._byte_offset; | ||||||
|   } |   } | ||||||
|  |  | ||||||
|   accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane) const |   accelerator_inline void iCoorFromIindex(Coordinate &coor,int lane) | ||||||
|   { |   { | ||||||
|     Lexicographic::CoorFromIndex(coor,lane,this->_simd_layout); |     Lexicographic::CoorFromIndex(coor,lane,this->_simd_layout); | ||||||
|   } |   } | ||||||
| @@ -269,7 +269,7 @@ public: | |||||||
|   std::vector<Vector<std::pair<int,int> > > face_table ; |   std::vector<Vector<std::pair<int,int> > > face_table ; | ||||||
|   Vector<int> surface_list; |   Vector<int> surface_list; | ||||||
|  |  | ||||||
|   stencilVector<StencilEntry>  _entries; // Resident in managed memory |   Vector<StencilEntry>  _entries; // Resident in managed memory | ||||||
|   std::vector<Packet> Packets; |   std::vector<Packet> Packets; | ||||||
|   std::vector<Merge> Mergers; |   std::vector<Merge> Mergers; | ||||||
|   std::vector<Merge> MergersSHM; |   std::vector<Merge> MergersSHM; | ||||||
|   | |||||||
| @@ -64,68 +64,6 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__ | |||||||
| } | } | ||||||
| #else | #else | ||||||
|  |  | ||||||
|  |  | ||||||
| #if 0 |  | ||||||
| // Use the scalar as our own complex on GPU |  | ||||||
| template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| typename vsimd::scalar_type |  | ||||||
| coalescedRead(const vsimd & __restrict__ vec,int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::scalar_type S; |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   return p[lane]; |  | ||||||
| } |  | ||||||
| template<int ptype,class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| typename vsimd::scalar_type |  | ||||||
| coalescedReadPermute(const vsimd & __restrict__ vec,int doperm,int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::scalar_type S; |  | ||||||
|  |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   int mask = vsimd::Nsimd() >> (ptype + 1); |  | ||||||
|   int plane= doperm ? lane ^ mask : lane; |  | ||||||
|   return p[plane]; |  | ||||||
| } |  | ||||||
| template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| void coalescedWrite(vsimd & __restrict__ vec, |  | ||||||
| 		    const typename vsimd::scalar_type & __restrict__ extracted, |  | ||||||
| 		    int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::scalar_type S; |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   p[lane]=extracted; |  | ||||||
| } |  | ||||||
| #else |  | ||||||
| template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| typename vsimd::vector_type::datum |  | ||||||
| coalescedRead(const vsimd & __restrict__ vec,int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::vector_type::datum S; |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   return p[lane]; |  | ||||||
| } |  | ||||||
| template<int ptype,class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| typename vsimd::vector_type::datum |  | ||||||
| coalescedReadPermute(const vsimd & __restrict__ vec,int doperm,int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::vector_type::datum S; |  | ||||||
|  |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   int mask = vsimd::Nsimd() >> (ptype + 1); |  | ||||||
|   int plane= doperm ? lane ^ mask : lane; |  | ||||||
|   return p[plane]; |  | ||||||
| } |  | ||||||
| template<class vsimd,IfSimd<vsimd> = 0> accelerator_inline |  | ||||||
| void coalescedWrite(vsimd & __restrict__ vec, |  | ||||||
| 		    const typename vsimd::vector_type::datum & __restrict__ extracted, |  | ||||||
| 		    int lane=acceleratorSIMTlane(vsimd::Nsimd())) |  | ||||||
| { |  | ||||||
|   typedef typename vsimd::vector_type::datum S; |  | ||||||
|   S * __restrict__ p=(S *)&vec; |  | ||||||
|   p[lane]=extracted; |  | ||||||
| } |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
| ////////////////////////////////////////// | ////////////////////////////////////////// | ||||||
| // Extract and insert slices on the GPU | // Extract and insert slices on the GPU | ||||||
| ////////////////////////////////////////// | ////////////////////////////////////////// | ||||||
|   | |||||||
| @@ -92,6 +92,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg) | |||||||
| { | { | ||||||
|   // need a check for the group type? |   // need a check for the group type? | ||||||
|   iMatrix<vtype,N> ret(arg); |   iMatrix<vtype,N> ret(arg); | ||||||
|  |   vtype rnrm; | ||||||
|   vtype nrm; |   vtype nrm; | ||||||
|   vtype inner; |   vtype inner; | ||||||
|   for(int c1=0;c1<N;c1++){ |   for(int c1=0;c1<N;c1++){ | ||||||
| @@ -117,19 +118,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg) | |||||||
| 	ret._internal[b][c] -= pr * ret._internal[c1][c]; | 	ret._internal[b][c] -= pr * ret._internal[c1][c]; | ||||||
|       } |       } | ||||||
|     } |     } | ||||||
|   } | 	   | ||||||
|  |  | ||||||
|   // Normalise last row |  | ||||||
|   { |  | ||||||
|     int c1 = N-1; |  | ||||||
|     zeroit(inner);	 |  | ||||||
|     for(int c2=0;c2<N;c2++) |  | ||||||
|       inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]); |  | ||||||
|  |  | ||||||
|     nrm = sqrt(inner); |  | ||||||
|     nrm = 1.0/nrm; |  | ||||||
|     for(int c2=0;c2<N;c2++) |  | ||||||
|       ret._internal[c1][c2]*= nrm; |  | ||||||
|   } |   } | ||||||
|   // assuming the determinant is ok |   // assuming the determinant is ok | ||||||
|   return ret; |   return ret; | ||||||
|   | |||||||
| @@ -1,7 +1,6 @@ | |||||||
| #include <Grid/GridCore.h> | #include <Grid/GridCore.h> | ||||||
|  |  | ||||||
| NAMESPACE_BEGIN(Grid); | NAMESPACE_BEGIN(Grid); | ||||||
| int      acceleratorAbortOnGpuError=1; |  | ||||||
| uint32_t accelerator_threads=2; | uint32_t accelerator_threads=2; | ||||||
| uint32_t acceleratorThreads(void)       {return accelerator_threads;}; | uint32_t acceleratorThreads(void)       {return accelerator_threads;}; | ||||||
| void     acceleratorThreads(uint32_t t) {accelerator_threads = t;}; | void     acceleratorThreads(uint32_t t) {accelerator_threads = t;}; | ||||||
| @@ -22,26 +21,22 @@ void acceleratorInit(void) | |||||||
| #define ENV_RANK_SLURM         "SLURM_PROCID" | #define ENV_RANK_SLURM         "SLURM_PROCID" | ||||||
| #define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK" | #define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK" | ||||||
| #define ENV_RANK_MVAPICH       "MV2_COMM_WORLD_RANK" | #define ENV_RANK_MVAPICH       "MV2_COMM_WORLD_RANK" | ||||||
|  |   // We extract the local rank initialization using an environment variable | ||||||
|  |   if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) { | ||||||
|  |     printf("OPENMPI detected\n"); | ||||||
|  |     rank = atoi(localRankStr);		 | ||||||
|  |   } else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) { | ||||||
|  |     printf("MVAPICH detected\n"); | ||||||
|  |     rank = atoi(localRankStr);		 | ||||||
|  |   } else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) { | ||||||
|  |     printf("SLURM detected\n"); | ||||||
|  |     rank = atoi(localRankStr);		 | ||||||
|  |   } else {  | ||||||
|  |     printf("MPI version is unknown - bad things may happen\n"); | ||||||
|  |   } | ||||||
|   if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);} |   if ((localRankStr = getenv(ENV_RANK_OMPI   )) != NULL) { world_rank = atoi(localRankStr);} | ||||||
|   if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);} |   if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);} | ||||||
|   if ((localRankStr = getenv(ENV_RANK_SLURM  )) != NULL) { world_rank = atoi(localRankStr);} |   if ((localRankStr = getenv(ENV_RANK_SLURM  )) != NULL) { world_rank = atoi(localRankStr);} | ||||||
|   // We extract the local rank initialization using an environment variable |  | ||||||
|   if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL) { |  | ||||||
|     if (!world_rank) |  | ||||||
|       printf("OPENMPI detected\n"); |  | ||||||
|     rank = atoi(localRankStr);		 |  | ||||||
|   } else if ((localRankStr = getenv(ENV_LOCAL_RANK_MVAPICH)) != NULL) { |  | ||||||
|     if (!world_rank) |  | ||||||
|       printf("MVAPICH detected\n"); |  | ||||||
|     rank = atoi(localRankStr);		 |  | ||||||
|   } else if ((localRankStr = getenv(ENV_LOCAL_RANK_SLURM)) != NULL) { |  | ||||||
|     if (!world_rank) |  | ||||||
|       printf("SLURM detected\n"); |  | ||||||
|     rank = atoi(localRankStr);		 |  | ||||||
|   } else {  |  | ||||||
|     if (!world_rank) |  | ||||||
|       printf("MPI version is unknown - bad things may happen\n"); |  | ||||||
|   } |  | ||||||
|  |  | ||||||
|   size_t totalDeviceMem=0; |   size_t totalDeviceMem=0; | ||||||
|   for (int i = 0; i < nDevices; i++) { |   for (int i = 0; i < nDevices; i++) { | ||||||
|   | |||||||
| @@ -100,8 +100,6 @@ void     acceleratorInit(void); | |||||||
| #define accelerator        __host__ __device__ | #define accelerator        __host__ __device__ | ||||||
| #define accelerator_inline __host__ __device__ inline | #define accelerator_inline __host__ __device__ inline | ||||||
|  |  | ||||||
| extern int acceleratorAbortOnGpuError; |  | ||||||
|  |  | ||||||
| accelerator_inline int acceleratorSIMTlane(int Nsimd) { | accelerator_inline int acceleratorSIMTlane(int Nsimd) { | ||||||
| #ifdef GRID_SIMT | #ifdef GRID_SIMT | ||||||
|   return threadIdx.z;  |   return threadIdx.z;  | ||||||
| @@ -142,7 +140,6 @@ void LambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda) | |||||||
|       printf("Cuda error %s \n", cudaGetErrorString( err ));		\ |       printf("Cuda error %s \n", cudaGetErrorString( err ));		\ | ||||||
|       puts(__FILE__);							\ |       puts(__FILE__);							\ | ||||||
|       printf("Line %d\n",__LINE__);					\ |       printf("Line %d\n",__LINE__);					\ | ||||||
|       if (acceleratorAbortOnGpuError) assert(err==cudaSuccess);		\ |  | ||||||
|     }									\ |     }									\ | ||||||
|   } |   } | ||||||
|  |  | ||||||
| @@ -221,7 +218,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) { | |||||||
|       cl::sycl::range<3> global{unum1,unum2,nsimd};			\ |       cl::sycl::range<3> global{unum1,unum2,nsimd};			\ | ||||||
|       cgh.parallel_for<class dslash>(					\ |       cgh.parallel_for<class dslash>(					\ | ||||||
|       cl::sycl::nd_range<3>(global,local), \ |       cl::sycl::nd_range<3>(global,local), \ | ||||||
|       [=] (cl::sycl::nd_item<3> item) /*mutable*/ {   \ |       [=] (cl::sycl::nd_item<3> item) mutable {       \ | ||||||
|       auto iter1    = item.get_global_id(0);	      \ |       auto iter1    = item.get_global_id(0);	      \ | ||||||
|       auto iter2    = item.get_global_id(1);	      \ |       auto iter2    = item.get_global_id(1);	      \ | ||||||
|       auto lane     = item.get_global_id(2);	      \ |       auto lane     = item.get_global_id(2);	      \ | ||||||
| @@ -364,7 +361,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas | |||||||
| ////////////////////////////////////////////// | ////////////////////////////////////////////// | ||||||
| // CPU Target - No accelerator just thread instead | // CPU Target - No accelerator just thread instead | ||||||
| ////////////////////////////////////////////// | ////////////////////////////////////////////// | ||||||
|  | #define GRID_ALLOC_ALIGN (2*1024*1024) // 2MB aligned  | ||||||
| #if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) ) | #if ( (!defined(GRID_SYCL)) && (!defined(GRID_CUDA)) && (!defined(GRID_HIP)) ) | ||||||
|  |  | ||||||
| #undef GRID_SIMT | #undef GRID_SIMT | ||||||
|   | |||||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user