up to +36% performance gain for dslash/dwf on QPACE 4 using GCC 10.1.1
parent 3362f8dfa0
commit 4dd9e39e0d
@@ -38,9 +38,6 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
// undefine everything related to kernels
#include <simd/Fujitsu_A64FX_undef.h>

// enable A64FX body
#define WILSONKERNELSASMBODYA64FX
//#pragma message("A64FX Dslash: WilsonKernelsAsmBodyA64FX.h")

///////////////////////////////////////////////////////////
// If we are A64FX specialise the single precision routine
@@ -63,119 +60,89 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

/////////////////////////////////////////////////////////////////
@@ -185,119 +152,89 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

// undefine
@@ -330,119 +267,89 @@ WilsonKernels<ZWilsonImplFH>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFie
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSite(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

/////////////////////////////////////////////////////////////////
// XYZT vectorised, dag Kernel, double
@@ -451,124 +358,93 @@ WilsonKernels<ZWilsonImplDF>::AsmDhopSiteExt(StencilView &st, DoubledGaugeFieldV
#define INTERIOR_AND_EXTERIOR
#undef INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDag(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#define INTERIOR
#undef EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagInt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#undef INTERIOR_AND_EXTERIOR
#undef INTERIOR
#define EXTERIOR

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplD>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<WilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
template<> void
WilsonKernels<ZWilsonImplDF>::AsmDhopSiteDagExt(StencilView &st, DoubledGaugeFieldView &U, SiteHalfSpinor *buf,
int ss,int ssU,int Ls,int Ns,const FermionFieldView &in, FermionFieldView &out)
#if defined (WILSONKERNELSASMBODYA64FX)
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBodyA64FX.h>
#else
#include <qcd/action/fermion/implementation/WilsonKernelsAsmBody.h>
#endif

// undefs
#undef WILSONKERNELSASMBODYA64FX
#include <simd/Fujitsu_A64FX_undef.h>

#endif //A64FXASM
@@ -25,6 +25,11 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

// GCC 10 messes up SVE instruction scheduling using -O3 only,
// using -O3 -fno-schedule-insns -fno-schedule-insns2 does wonders
// performance is better than armclang 20.2
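
The same flag set is what the `#pragma GCC optimize` lines in the kernel specialisations apply per function. A minimal sketch of the technique, assuming GCC (axpy and its arguments are illustrative names, not Grid code); the flags can equally be passed once on the command line, e.g. g++ -O3 -fno-schedule-insns -fno-schedule-insns2:

#include <cstdio>

// scheduling passes are disabled only for functions defined between
// push_options and pop_options; the rest of the file is unaffected
#pragma GCC push_options
#pragma GCC optimize ("-O3", "-fno-schedule-insns", "-fno-schedule-insns2")
static void axpy(int n, double a, const double *x, double *y) {
  for (int i = 0; i < n; ++i) y[i] += a * x[i];   // built without insn scheduling
}
#pragma GCC pop_options

int main() {
  double x[4] = {1, 2, 3, 4}, y[4] = {0, 0, 0, 0};
  axpy(4, 2.0, x, y);
  printf("%g %g\n", y[0], y[3]);                  // 2 8
  return 0;
}
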
#ifdef KERNEL_DAG
#define DIR0_PROJ XP_PROJ
#define DIR1_PROJ YP_PROJ
@@ -97,7 +102,7 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
} else { \
LOAD_CHI(base); \
LOAD_CHI(base); \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
MULT_2SPIN_1(Dir); \
@@ -110,6 +115,15 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
} \
RECON; \

/*
NB: picking PREFETCH_GAUGE_L2(Dir+4); here results in performance penalty
though I expected that it would improve on performance

if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
*/

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
@@ -126,73 +140,63 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
basep = st.GetPFInfo(nent,plocal); nent++; \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
}else if ( st.same_node[Dir] ) {LOAD_CHI(base);} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
if ( local || st.same_node[Dir] ) { \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
PREFETCH_CHIMU_L2(basep); \
} else { PREFETCH_CHIMU(base); } \
if ( local ) { \
LOAD_CHIMU(base); \
LOAD_TABLE(PERMUTE_DIR); \
PROJ; \
MAYBEPERM(PERMUTE_DIR,perm); \
}else if ( st.same_node[Dir] ) {LOAD_CHI(base);} \
if ( local || st.same_node[Dir] ) { \
MULT_2SPIN_1(Dir); \
MULT_2SPIN_2; \
RECON; \
} \
base = st.GetInfo(ptype,local,perm,NxtDir,ent,plocal); ent++; \
PREFETCH_CHIMU(base); \
PREFETCH_CHIMU_L2(basep); \

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
PREFETCH1_CHIMU(base); \
{ ZERO_PSI; } \
ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON)

#define RESULT(base,basep) SAVE_RESULT(base,basep);

#endif

////////////////////////////////////////////////////////////////////////////////
// Post comms kernel
////////////////////////////////////////////////////////////////////////////////
#ifdef EXTERIOR

#define ASM_LEG(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
nmu++; \
RECON; \
nmu++; \
}

#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
nmu=0; \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++;\
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
#define ASM_LEG_XP(Dir,NxtDir,PERMUTE_DIR,PROJ,RECON) \
nmu=0; \
{ ZERO_PSI;} \
base = st.GetInfo(ptype,local,perm,Dir,ent,plocal); ent++; \
if((!local)&&(!st.same_node[Dir]) ) { \
LOAD_CHI(base); \
MULT_2SPIN_1(Dir); \
PREFETCH_CHIMU(base); \
/* PREFETCH_GAUGE_L1(NxtDir); */ \
MULT_2SPIN_2; \
if (s == 0) { \
if ((Dir == 0) || (Dir == 4)) { PREFETCH_GAUGE_L2(Dir); } \
} \
RECON; \
nmu++; \
RECON; \
nmu++; \
}

#define RESULT(base,basep) if (nmu){ ADD_RESULT(base,base);}

#endif

{
int nmu;
int local,perm, ptype;
@@ -209,7 +213,6 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
// int sUn=lo.Reorder(ssn);
int sUn=ssn;
LOCK_GAUGE(0);
#else
int sU =ssU;
int ssn=ssU+1; if(ssn>=nmax) ssn=0;
@@ -295,6 +298,11 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
// { uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }

ASM_LEG(Ym,Zm,PERMUTE_DIR2,DIR5_PROJ,DIR5_RECON);

#ifdef SHOW
@@ -308,6 +316,11 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore); }

ASM_LEG(Zm,Tm,PERMUTE_DIR1,DIR6_PROJ,DIR6_RECON);

#ifdef SHOW
@@ -321,6 +334,11 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
std::cout << "----------------------------------------------------" << std::endl;
#endif

// DC ZVA test
//{ uint64_t basestore = (uint64_t)&out[ss];
// PREFETCH_RESULT_L2_STORE(basestore);
//}

ASM_LEG(Tm,Xp,PERMUTE_DIR0,DIR7_PROJ,DIR7_RECON);

#ifdef SHOW
@@ -341,6 +359,7 @@ Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
base = (uint64_t) &out[ss];
basep= st.GetPFInfo(nent,plocal); ent++;
basep = (uint64_t) &out[ssn];
//PREFETCH_RESULT_L1_STORE(base);
RESULT(base,basep);

#ifdef SHOW
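
For orientation, the Int/Ext variants and the "Post comms kernel" above split each site's stencil sum into contributions from on-node neighbours, computed while the halo exchange is in flight, and off-node contributions added once comms complete; the nmu counter guards ADD_RESULT so only sites that actually received halo data are written back. A rough sketch of that pattern with illustrative names only (Neighbour, same_node, halo and the loops below are not the Grid API):

#include <cstddef>
#include <vector>

// one neighbour record per (site, direction)
struct Neighbour { std::size_t index; bool same_node; };

void interior_pass(std::vector<double> &out,
                   const std::vector<std::vector<Neighbour>> &stencil,
                   const std::vector<double> &field) {
  for (std::size_t ss = 0; ss < out.size(); ++ss) {
    double psi = 0;                                    // ZERO_PSI
    for (const auto &n : stencil[ss])
      if (n.same_node) psi += field[n.index];          // on-node legs only
    out[ss] = psi;                                     // SAVE_RESULT
  }
}

void exterior_pass(std::vector<double> &out,
                   const std::vector<std::vector<Neighbour>> &stencil,
                   const std::vector<double> &halo) {  // filled by comms
  for (std::size_t ss = 0; ss < out.size(); ++ss) {
    int nmu = 0; double psi = 0;
    for (const auto &n : stencil[ss])
      if (!n.same_node) { psi += halo[n.index]; ++nmu; }
    if (nmu) out[ss] += psi;                           // ADD_RESULT only if something arrived
  }
}
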
@@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B) RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define SAVE_RESULT(A,B) RESULT_A64FXd(A);
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base) LOAD_CHI_A64FXd(base)
#define ZERO_PSI ZERO_PSI_A64FXd
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ XP_PROJ_A64FXd
#define YP_PROJ YP_PROJ_A64FXd
@@ -70,11 +71,18 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(Dir,perm) if (Dir != 3) { if (perm) { PERMUTE; } }
// DECLARATIONS
#define DECLARATIONS_A64FXd \
uint64_t baseU; \
const uint64_t lut[4][8] = { \
{4, 5, 6, 7, 0, 1, 2, 3}, \
{2, 3, 0, 1, 6, 7, 4, 5}, \
{1, 0, 3, 2, 5, 4, 7, 6}, \
{0, 1, 2, 4, 5, 6, 7, 8} };\
asm ( \
"ptrue p5.d \n\t" \
: \
: \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
); \
asm ( \
"fmov z31.d , 0 \n\t" \
: \
@@ -130,7 +138,7 @@ asm ( \
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
asm ( \
"prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
@@ -149,7 +157,7 @@ asm ( \
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
asm ( \
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
@@ -163,12 +171,12 @@ asm ( \
#define LOAD_CHI_A64FXd(base) \
{ \
asm ( \
"ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
"ld1d { z12.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z13.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z14.d }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
"ld1d { z15.d }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1d { z16.d }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1d { z17.d }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
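
The change running through these macros swaps the unpredicated LDR (vector) loads for governing-predicate LD1D/LD1W loads while keeping the base-plus-immediate-multiple-of-VL addressing identical. A small self-contained sketch of the two forms, assuming an SVE-capable CPU and a compiler invoked with -march=armv8-a+sve (the array names are illustrative, not Grid code):

#include <cstdio>

int main() {
  // 32 doubles = 256 bytes, enough for one full vector at any legal SVE length
  alignas(256) double src[32], a[32] = {0}, b[32] = {0};
  for (int i = 0; i < 32; ++i) src[i] = i + 1;

  asm volatile(
    "ptrue p5.d                               \n\t" // all double lanes active
    "ldr   z0, [%[in], 0, mul vl]             \n\t" // whole-register load (old form)
    "ld1d  { z1.d }, p5/z, [%[in], 0, mul vl] \n\t" // predicated contiguous load (new form)
    "str   z0, [%[outa], 0, mul vl]           \n\t"
    "st1d  { z1.d }, p5, [%[outb], 0, mul vl] \n\t"
    :
    : [in] "r"(src), [outa] "r"(a), [outb] "r"(b)
    : "p5", "z0", "z1", "memory");

  printf("%g %g\n", a[0], b[0]);   // both print 1; lane count depends on the vector length
  return 0;
}
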
@@ -178,19 +186,18 @@ asm ( \
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base) \
{ \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ld1d { z12.d }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1d { z21.d }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1d { z15.d }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1d { z18.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z13.d }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1d { z22.d }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1d { z16.d }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1d { z19.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z14.d }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1d { z23.d }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
"ld1d { z17.d }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1d { z20.d }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@@ -201,19 +208,18 @@ asm ( \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ld1d { z12.d }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1d { z18.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z13.d }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1d { z19.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z14.d }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1d { z20.d }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
"ld1d { z15.d }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1d { z21.d }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1d { z16.d }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1d { z22.d }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1d { z17.d }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1d { z23.d }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@@ -224,19 +230,18 @@ asm ( \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.d \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ld1d { z12.d }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1d { z21.d }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1d { z13.d }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1d { z22.d }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1d { z14.d }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1d { z23.d }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
"ld1d { z15.d }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1d { z18.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z16.d }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1d { z19.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z17.d }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1d { z20.d }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@@ -293,17 +298,16 @@ asm ( \
);

// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
#define LOAD_GAUGE(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
asm ( \
"ptrue p5.d \n\t" \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z24.d }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1d { z25.d }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1d { z26.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z27.d }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1d { z28.d }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1d { z29.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@@ -312,14 +316,14 @@ asm ( \
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
asm ( \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"ld1d { z24.d }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1d { z25.d }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1d { z26.d }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1d { z27.d }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1d { z28.d }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1d { z29.d }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"movprfx z18.d, p5/m, z31.d \n\t" \
"fcmla z18.d, p5/m, z24.d, z12.d, 0 \n\t" \
"movprfx z21.d, p5/m, z31.d \n\t" \
@@ -338,9 +342,9 @@ asm ( \
"fcmla z22.d, p5/m, z25.d, z15.d, 90 \n\t" \
"fcmla z20.d, p5/m, z26.d, z12.d, 90 \n\t" \
"fcmla z23.d, p5/m, z26.d, z15.d, 90 \n\t" \
"ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
"ld1d { z24.d }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1d { z25.d }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1d { z26.d }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@@ -560,7 +564,6 @@ asm ( \
#define TM_PROJ_A64FXd \
{ \
asm ( \
"ptrue p5.d \n\t" \
"fsub z12.d, p5/m, z12.d, z18.d \n\t" \
"fsub z13.d, p5/m, z13.d, z19.d \n\t" \
"fsub z14.d, p5/m, z14.d, z20.d \n\t" \
@@ -715,7 +718,6 @@ asm ( \
// ZERO_PSI
#define ZERO_PSI_A64FXd \
asm ( \
"ptrue p5.d \n\t" \
"fmov z0.d , 0 \n\t" \
"fmov z1.d , 0 \n\t" \
"fmov z2.d , 0 \n\t" \
@@ -733,13 +735,13 @@ asm ( \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base) \
{ \
asm ( \
"prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
"dc zva, %[fetchptr]\n\t" \
"dc zva, %[fetchptr]\n\t" \
"dc zva, %[fetchptr]\n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
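
The macro above pairs the PSTL2STRM store prefetches with DC ZVA, which zeroes the whole cache line containing the given address so that subsequent full-line stores of the result do not have to read the line first. A standalone sketch of the instruction, assuming a Linux/AArch64 environment where DC ZVA is permitted at EL0 (buf and the sizes are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  // DCZID_EL0: bit 4 set means DC ZVA is prohibited; bits 3:0 give log2(block size in words)
  uint64_t dczid;
  asm volatile("mrs %0, dczid_el0" : "=r"(dczid));
  if (dczid & (1u << 4)) { printf("DC ZVA not permitted here\n"); return 0; }
  std::size_t block = std::size_t(4) << (dczid & 0xf);   // block size in bytes

  alignas(256) unsigned char buf[512];
  for (auto &c : buf) c = 0xff;

  asm volatile("dc zva, %0" : : "r"(buf) : "memory");     // zero the block holding &buf[0]

  printf("block = %zu bytes, buf[0] = %u, buf[block] = %u\n",
         block, (unsigned)buf[0],
         block < sizeof(buf) ? (unsigned)buf[block] : 0u);
  return 0;
}
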
@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
|
||||
#define LOCK_GAUGE(A)
|
||||
#define UNLOCK_GAUGE(A)
|
||||
#define MASK_REGS DECLARATIONS_A64FXf
|
||||
#define SAVE_RESULT(A,B) RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)
|
||||
#define SAVE_RESULT(A,B) RESULT_A64FXf(A);
|
||||
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXf(Dir)
|
||||
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXf
|
||||
#define LOAD_CHI(base) LOAD_CHI_A64FXf(base)
|
||||
#define ZERO_PSI ZERO_PSI_A64FXf
|
||||
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
|
||||
#define XP_PROJ XP_PROJ_A64FXf
|
||||
#define YP_PROJ YP_PROJ_A64FXf
|
||||
@ -70,11 +71,18 @@ Author: Nils Meyer <nils.meyer@ur.de>
|
||||
#define MAYBEPERM(A,perm) if (perm) { PERMUTE; }
|
||||
// DECLARATIONS
|
||||
#define DECLARATIONS_A64FXf \
|
||||
uint64_t baseU; \
|
||||
const uint32_t lut[4][16] = { \
|
||||
{8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
|
||||
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
|
||||
{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, \
|
||||
{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14} }; \
|
||||
asm ( \
|
||||
"ptrue p5.s \n\t" \
|
||||
: \
|
||||
: \
|
||||
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
|
||||
); \
|
||||
asm ( \
|
||||
"fmov z31.s , 0 \n\t" \
|
||||
: \
|
||||
@ -130,7 +138,7 @@ asm ( \
|
||||
// PREFETCH_GAUGE_L2 (prefetch to L2)
|
||||
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A) \
|
||||
{ \
|
||||
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
|
||||
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
|
||||
asm ( \
|
||||
"prfd PLDL2STRM, p5, [%[fetchptr], -4, mul vl] \n\t" \
|
||||
"prfd PLDL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
@ -149,7 +157,7 @@ asm ( \
|
||||
// PREFETCH_GAUGE_L1 (prefetch to L1)
|
||||
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A) \
|
||||
{ \
|
||||
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
|
||||
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
|
||||
asm ( \
|
||||
"prfd PLDL1STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
"prfd PLDL1STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
|
||||
@ -163,12 +171,12 @@ asm ( \
|
||||
#define LOAD_CHI_A64FXf(base) \
|
||||
{ \
|
||||
asm ( \
|
||||
"ldr z12, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
"ldr z13, [%[fetchptr], 1, mul vl] \n\t" \
|
||||
"ldr z14, [%[fetchptr], 2, mul vl] \n\t" \
|
||||
"ldr z15, [%[fetchptr], 3, mul vl] \n\t" \
|
||||
"ldr z16, [%[fetchptr], 4, mul vl] \n\t" \
|
||||
"ldr z17, [%[fetchptr], 5, mul vl] \n\t" \
|
||||
"ld1w { z12.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
"ld1w { z13.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
|
||||
"ld1w { z14.s }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
|
||||
"ld1w { z15.s }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
|
||||
"ld1w { z16.s }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
|
||||
"ld1w { z17.s }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
|
||||
: \
|
||||
: [fetchptr] "r" (base) \
|
||||
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
|
||||
@ -178,19 +186,18 @@ asm ( \
|
||||
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base) \
|
||||
{ \
|
||||
asm ( \
|
||||
"ptrue p5.s \n\t" \
|
||||
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
|
||||
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
|
||||
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
|
||||
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
|
||||
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
|
||||
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
|
||||
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
|
||||
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
|
||||
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
|
||||
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
|
||||
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
|
||||
"ld1w { z12.s }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
|
||||
"ld1w { z21.s }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
|
||||
"ld1w { z15.s }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
|
||||
"ld1w { z18.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
|
||||
"ld1w { z13.s }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
|
||||
"ld1w { z22.s }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
|
||||
"ld1w { z16.s }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1w { z19.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1w { z14.s }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1w { z23.s }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
"ld1w { z17.s }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1w { z20.s }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (base + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@ -201,19 +208,18 @@ asm ( \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.s \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ld1w { z12.s }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1w { z18.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1w { z13.s }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1w { z19.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1w { z14.s }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1w { z20.s }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
"ld1w { z15.s }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1w { z21.s }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1w { z16.s }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1w { z22.s }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1w { z17.s }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1w { z23.s }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@ -224,19 +230,18 @@ asm ( \
{ \
const SiteSpinor & ref(in[offset]); \
asm ( \
"ptrue p5.s \n\t" \
"ldr z12, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z21, [%[fetchptr], 3, mul vl] \n\t" \
"ldr z13, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z22, [%[fetchptr], 4, mul vl] \n\t" \
"ldr z14, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z23, [%[fetchptr], 5, mul vl] \n\t" \
"ldr z15, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z18, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z16, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z19, [%[fetchptr], 1, mul vl] \n\t" \
"ldr z17, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z20, [%[fetchptr], 2, mul vl] \n\t" \
"ld1w { z12.s }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1w { z21.s }, p5/z, [%[fetchptr], 3, mul vl] \n\t" \
"ld1w { z13.s }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1w { z22.s }, p5/z, [%[fetchptr], 4, mul vl] \n\t" \
"ld1w { z14.s }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1w { z23.s }, p5/z, [%[fetchptr], 5, mul vl] \n\t" \
"ld1w { z15.s }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1w { z18.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1w { z16.s }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1w { z19.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"ld1w { z17.s }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1w { z20.s }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (&ref[2][0]) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@ -293,17 +298,16 @@ asm ( \
);
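Illustrative aside on the hunks above (not part of the commit): the unpredicated "ldr z.." register loads are replaced by governed "ld1w { z..s }, p5/z" contiguous loads, where the ", N, mul vl" immediate scales with the vector length. A minimal standalone sketch of the predicated form, assuming a GCC-style toolchain built with SVE enabled (e.g. -O3 -march=armv8.2-a+sve); the function name is hypothetical:

// sketch: one predicated 32-bit-element SVE load into z0
void load_one_vector(const float *src)
{
    asm volatile (
        "ptrue p5.s                                \n\t"
        "ld1w { z0.s }, p5/z, [%[ptr], 0, mul vl]  \n\t"
        :
        : [ptr] "r" (src)
        : "p5", "z0", "memory"
    );
}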

// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
#define LOAD_GAUGE(A) \
{ \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
asm ( \
"ptrue p5.s \n\t" \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"ld1w { z24.s }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1w { z25.s }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1w { z26.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1w { z27.s }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1w { z28.s }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1w { z29.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@ -312,14 +316,14 @@ asm ( \
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
asm ( \
"ldr z24, [%[fetchptr], -6, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -3, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 0, mul vl] \n\t" \
"ldr z27, [%[fetchptr], -5, mul vl] \n\t" \
"ldr z28, [%[fetchptr], -2, mul vl] \n\t" \
"ldr z29, [%[fetchptr], 1, mul vl] \n\t" \
"ld1w { z24.s }, p5/z, [%[fetchptr], -6, mul vl] \n\t" \
"ld1w { z25.s }, p5/z, [%[fetchptr], -3, mul vl] \n\t" \
"ld1w { z26.s }, p5/z, [%[fetchptr], 0, mul vl] \n\t" \
"ld1w { z27.s }, p5/z, [%[fetchptr], -5, mul vl] \n\t" \
"ld1w { z28.s }, p5/z, [%[fetchptr], -2, mul vl] \n\t" \
"ld1w { z29.s }, p5/z, [%[fetchptr], 1, mul vl] \n\t" \
"movprfx z18.s, p5/m, z31.s \n\t" \
"fcmla z18.s, p5/m, z24.s, z12.s, 0 \n\t" \
"movprfx z21.s, p5/m, z31.s \n\t" \
@ -338,9 +342,9 @@ asm ( \
"fcmla z22.s, p5/m, z25.s, z15.s, 90 \n\t" \
"fcmla z20.s, p5/m, z26.s, z12.s, 90 \n\t" \
"fcmla z23.s, p5/m, z26.s, z15.s, 90 \n\t" \
"ldr z24, [%[fetchptr], -4, mul vl] \n\t" \
"ldr z25, [%[fetchptr], -1, mul vl] \n\t" \
"ldr z26, [%[fetchptr], 2, mul vl] \n\t" \
"ld1w { z24.s }, p5/z, [%[fetchptr], -4, mul vl] \n\t" \
"ld1w { z25.s }, p5/z, [%[fetchptr], -1, mul vl] \n\t" \
"ld1w { z26.s }, p5/z, [%[fetchptr], 2, mul vl] \n\t" \
: \
: [fetchptr] "r" (baseU + 2 * 3 * 64) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \
@ -560,7 +564,6 @@ asm ( \
#define TM_PROJ_A64FXf \
{ \
asm ( \
"ptrue p5.s \n\t" \
"fsub z12.s, p5/m, z12.s, z18.s \n\t" \
"fsub z13.s, p5/m, z13.s, z19.s \n\t" \
"fsub z14.s, p5/m, z14.s, z20.s \n\t" \
@ -715,7 +718,6 @@ asm ( \
// ZERO_PSI
#define ZERO_PSI_A64FXf \
asm ( \
"ptrue p5.s \n\t" \
"fmov z0.s , 0 \n\t" \
"fmov z1.s , 0 \n\t" \
"fmov z2.s , 0 \n\t" \
@ -733,13 +735,13 @@ asm ( \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31" \
);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base) \
{ \
asm ( \
"prfd PSTL2STRM, p5, [%[fetchptr], 0, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 4, mul vl] \n\t" \
"prfd PSTL2STRM, p5, [%[fetchptr], 8, mul vl] \n\t" \
"dc zva, %[fetchptr]\n\t" \
"dc zva, %[fetchptr]\n\t" \
"dc zva, %[fetchptr]\n\t" \
: \
: [fetchptr] "r" (base) \
: "p5","cc","z0","z1","z2","z3","z4","z5","z6","z7","z8","z9","z10","z11","z12","z13","z14","z15","z16","z17","z18","z19","z20","z21","z22","z23","z24","z25","z26","z27","z28","z29","z30","z31","memory" \

@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXd
#define SAVE_RESULT(A,B) RESULT_A64FXd(A); PREFETCH_RESULT_L2_STORE(B)
#define SAVE_RESULT(A,B) RESULT_A64FXd(A);
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXd(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXd
#define LOAD_CHI(base) LOAD_CHI_A64FXd(base)
#define ZERO_PSI ZERO_PSI_A64FXd
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXd; RESULT_A64FXd(base)
#define XP_PROJ XP_PROJ_A64FXd
#define YP_PROJ YP_PROJ_A64FXd
@ -70,6 +71,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(Dir,perm) if (Dir != 3) { if (perm) { PERMUTE; } }
// DECLARATIONS
#define DECLARATIONS_A64FXd \
uint64_t baseU; \
const uint64_t lut[4][8] = { \
{4, 5, 6, 7, 0, 1, 2, 3}, \
{2, 3, 0, 1, 6, 7, 4, 5}, \
@ -126,18 +128,18 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
#define RESULT_A64FXd(base) \
{ \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64), result_00); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64), result_01); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64), result_02); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64), result_10); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64), result_11); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64), result_12); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64), result_20); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64), result_21); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64), result_22); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64), result_30); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64), result_31); \
svst1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64), result_32); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
svst1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
}
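Illustrative aside on the svst1 / svld1 rewrite above and in the loads below (not part of the commit): the _vnum forms address base + vnum * svcntb() bytes, i.e. whole vector registers past base, so on a 512-bit SVE machine such as the A64FX (svcntb() == 64) they hit exactly the addresses the old forms computed by hand with "* 64". A minimal sketch, with a hypothetical function name and assuming 512-bit vectors:

#include <arm_sve.h>
#include <cstdint>
// store one vector 'vnum' register-widths past 'base'
void store_block(double *base, int64_t vnum, svfloat64_t v)
{
    svbool_t pg1 = svptrue_b64();
    // same address as (float64_t*)((char*)base + vnum * 64) when svcntb() == 64
    svst1_vnum(pg1, base, vnum, v);
}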
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXd(base) \
@ -156,7 +158,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
@ -170,7 +172,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
@ -178,62 +180,62 @@ Author: Nils Meyer <nils.meyer@ur.de>
// LOAD_CHI
#define LOAD_CHI_A64FXd(base) \
{ \
Chi_00 = svld1(pg1, (float64_t*)(base + 0 * 64)); \
Chi_01 = svld1(pg1, (float64_t*)(base + 1 * 64)); \
Chi_02 = svld1(pg1, (float64_t*)(base + 2 * 64)); \
Chi_10 = svld1(pg1, (float64_t*)(base + 3 * 64)); \
Chi_11 = svld1(pg1, (float64_t*)(base + 4 * 64)); \
Chi_12 = svld1(pg1, (float64_t*)(base + 5 * 64)); \
Chi_00 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(0)); \
Chi_01 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(1)); \
Chi_02 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(2)); \
Chi_10 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(3)); \
Chi_11 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(4)); \
Chi_12 = svld1_vnum(pg1, (float64_t*)(base), (int64_t)(5)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXd(base) \
{ \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXd \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_01 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_02 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_10 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_11 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_12 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float64_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_01 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_02 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_10 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_11 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_12 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float64_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
@ -261,26 +263,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
Chi_12 = svtbl(Chi_12, table0);
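Illustrative aside (not part of the commit): the lut rows declared in DECLARATIONS_A64FXd feed the svtbl permutes used here; row 0, {4, 5, 6, 7, 0, 1, 2, 3}, swaps the two halves of an 8-element double vector. A minimal sketch, with a hypothetical function name and assuming 512-bit SVE as on the A64FX:

#include <arm_sve.h>
#include <cstdint>
// swap the two 256-bit halves of a 512-bit vector of doubles
svfloat64_t permute_halves(svfloat64_t v)
{
    static const uint64_t idx[8] = {4, 5, 6, 7, 0, 1, 2, 3};  // same as lut[0] above
    svbool_t pg1 = svptrue_b64();
    svuint64_t table0 = svld1(pg1, idx);
    return svtbl(v, table0);
}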

// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
#define LOAD_GAUGE(A) \
{ \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
}
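Illustrative aside on the change above (not part of the commit): LOAD_GAUGE is now parameterised on the direction and rebinds the single baseU declared in DECLARATIONS_A64FXd instead of declaring its own uint64_t, so each gauge-related macro reuses one variable rather than shadowing it. A tiny self-contained sketch of that pattern, with hypothetical names:

#include <cstdint>
#define DECLARE_BASE   uint64_t baseU;
#define BIND_BASE(ref) baseU = (uint64_t)&(ref);
static double gauge[9];
uint64_t demo()
{
    DECLARE_BASE            // declared once per kernel body
    BIND_BASE(gauge[0])     // each LOAD_GAUGE(A) / MULT_2SPIN_1(A) just rebinds it
    return baseU;
}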
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXd(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
@ -293,9 +295,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
U_00 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -4 * 64)); \
U_10 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + -1 * 64)); \
U_20 = svld1(pg1, (float64_t*)(baseU + 2 * 3 * 64 + 2 * 64)); \
U_00 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
U_10 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
U_20 = svld1_vnum(pg1, (float64_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXd \
@ -570,12 +572,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
result_31 = svdup_f64(0.); \
result_32 = svdup_f64(0.);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXd(base) \
{ \
svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
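Illustrative aside on the macro above (not part of the commit): instead of issuing store-stream prefetches, the result block is now claimed with "dc zva", which allocates a cache block and zero-fills it without first reading it from memory; the 256-byte stride in the calls above matches the A64FX cache-line size, and the lines are then overwritten by RESULT_A64FXd. A minimal helper sketch, with a hypothetical name (the actual ZVA block size is CPU-specific, reported by DCZID_EL0):

#include <cstdint>
// zero one DC ZVA block starting at p
static inline void dc_zva(void *p)
{
    asm volatile("dc zva, %0" : : "r" (p) : "memory");
}
// e.g.: for (int i = 0; i < 3; i++) dc_zva((char*)out + 256 * i);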
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXd(base) \

@ -38,10 +38,11 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define LOCK_GAUGE(A)
#define UNLOCK_GAUGE(A)
#define MASK_REGS DECLARATIONS_A64FXf
#define SAVE_RESULT(A,B) RESULT_A64FXf(A); PREFETCH_RESULT_L2_STORE(B)
#define SAVE_RESULT(A,B) RESULT_A64FXf(A);
#define MULT_2SPIN_1(Dir) MULT_2SPIN_1_A64FXf(Dir)
#define MULT_2SPIN_2 MULT_2SPIN_2_A64FXf
#define LOAD_CHI(base) LOAD_CHI_A64FXf(base)
#define ZERO_PSI ZERO_PSI_A64FXf
#define ADD_RESULT(base,basep) LOAD_CHIMU(base); ADD_RESULT_INTERNAL_A64FXf; RESULT_A64FXf(base)
#define XP_PROJ XP_PROJ_A64FXf
#define YP_PROJ YP_PROJ_A64FXf
@ -70,6 +71,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#define MAYBEPERM(A,perm) if (perm) { PERMUTE; }
// DECLARATIONS
#define DECLARATIONS_A64FXf \
uint64_t baseU; \
const uint32_t lut[4][16] = { \
{8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, \
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, \
@ -126,18 +128,18 @@ Author: Nils Meyer <nils.meyer@ur.de>
// RESULT
#define RESULT_A64FXf(base) \
{ \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64), result_00); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64), result_01); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64), result_02); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64), result_10); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64), result_11); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64), result_12); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64), result_20); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64), result_21); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64), result_22); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64), result_30); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64), result_31); \
svst1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64), result_32); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-6), result_00); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-5), result_01); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-4), result_02); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-3), result_10); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-2), result_11); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(-1), result_12); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(0), result_20); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(1), result_21); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(2), result_22); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(3), result_30); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(4), result_31); \
svst1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64),(int64_t)(5), result_32); \
}
// PREFETCH_CHIMU_L2 (prefetch to L2)
#define PREFETCH_CHIMU_L2_INTERNAL_A64FXf(base) \
@ -156,7 +158,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
// PREFETCH_GAUGE_L2 (prefetch to L2)
#define PREFETCH_GAUGE_L2_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sUn](A)); uint64_t baseU = (uint64_t)&ref + 3 * 3 * 64; \
const auto & ref(U[sUn](A)); baseU = (uint64_t)&ref + 3 * 3 * 64; \
svprfd(pg1, (int64_t*)(baseU + -256), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL2STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL2STRM); \
@ -170,7 +172,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
// PREFETCH_GAUGE_L1 (prefetch to L1)
#define PREFETCH_GAUGE_L1_INTERNAL_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
svprfd(pg1, (int64_t*)(baseU + 0), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 256), SV_PLDL1STRM); \
svprfd(pg1, (int64_t*)(baseU + 512), SV_PLDL1STRM); \
@ -178,62 +180,62 @@ Author: Nils Meyer <nils.meyer@ur.de>
// LOAD_CHI
#define LOAD_CHI_A64FXf(base) \
{ \
Chi_00 = svld1(pg1, (float32_t*)(base + 0 * 64)); \
Chi_01 = svld1(pg1, (float32_t*)(base + 1 * 64)); \
Chi_02 = svld1(pg1, (float32_t*)(base + 2 * 64)); \
Chi_10 = svld1(pg1, (float32_t*)(base + 3 * 64)); \
Chi_11 = svld1(pg1, (float32_t*)(base + 4 * 64)); \
Chi_12 = svld1(pg1, (float32_t*)(base + 5 * 64)); \
Chi_00 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(0)); \
Chi_01 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(1)); \
Chi_02 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(2)); \
Chi_10 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(3)); \
Chi_11 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(4)); \
Chi_12 = svld1_vnum(pg1, (float32_t*)(base), (int64_t)(5)); \
}
// LOAD_CHIMU
#define LOAD_CHIMU_INTERLEAVED_A64FXf(base) \
{ \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_CHIMU_0213
#define LOAD_CHIMU_0213_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
}
// LOAD_CHIMU_0312
#define LOAD_CHIMU_0312_A64FXf \
{ \
const SiteSpinor & ref(in[offset]); \
Chimu_00 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -6 * 64)); \
Chimu_30 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 3 * 64)); \
Chimu_01 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -5 * 64)); \
Chimu_31 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 4 * 64)); \
Chimu_02 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -4 * 64)); \
Chimu_32 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 5 * 64)); \
Chimu_10 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -3 * 64)); \
Chimu_20 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 0 * 64)); \
Chimu_11 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -2 * 64)); \
Chimu_21 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 1 * 64)); \
Chimu_12 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + -1 * 64)); \
Chimu_22 = svld1(pg1, (float32_t*)(base + 2 * 3 * 64 + 2 * 64)); \
Chimu_00 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-6)); \
Chimu_30 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(3)); \
Chimu_01 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-5)); \
Chimu_31 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(4)); \
Chimu_02 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-4)); \
Chimu_32 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(5)); \
Chimu_10 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-3)); \
Chimu_20 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(0)); \
Chimu_11 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-2)); \
Chimu_21 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(1)); \
Chimu_12 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(-1)); \
Chimu_22 = svld1_vnum(pg1, (float32_t*)(base + 2 * 3 * 64), (int64_t)(2)); \
}
// LOAD_TABLE0
#define LOAD_TABLE0 \
@ -261,26 +263,26 @@ Author: Nils Meyer <nils.meyer@ur.de>
Chi_12 = svtbl(Chi_12, table0);

// LOAD_GAUGE
#define LOAD_GAUGE \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
#define LOAD_GAUGE(A) \
{ \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
}
// MULT_2SPIN
#define MULT_2SPIN_1_A64FXf(A) \
{ \
const auto & ref(U[sU](A)); uint64_t baseU = (uint64_t)&ref; \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -6 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -3 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 0 * 64)); \
U_01 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -5 * 64)); \
U_11 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -2 * 64)); \
U_21 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 1 * 64)); \
const auto & ref(U[sU](A)); baseU = (uint64_t)&ref; \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-6)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-3)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(0)); \
U_01 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-5)); \
U_11 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-2)); \
U_21 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(1)); \
UChi_00 = svcmla_x(pg1, zero0, U_00, Chi_00, 0); \
UChi_10 = svcmla_x(pg1, zero0, U_00, Chi_10, 0); \
UChi_01 = svcmla_x(pg1, zero0, U_10, Chi_00, 0); \
@ -293,9 +295,9 @@ Author: Nils Meyer <nils.meyer@ur.de>
UChi_11 = svcmla_x(pg1, UChi_11, U_10, Chi_10, 90); \
UChi_02 = svcmla_x(pg1, UChi_02, U_20, Chi_00, 90); \
UChi_12 = svcmla_x(pg1, UChi_12, U_20, Chi_10, 90); \
U_00 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -4 * 64)); \
U_10 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + -1 * 64)); \
U_20 = svld1(pg1, (float32_t*)(baseU + 2 * 3 * 64 + 2 * 64)); \
U_00 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-4)); \
U_10 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(-1)); \
U_20 = svld1_vnum(pg1, (float32_t*)(baseU + 2 * 3 * 64), (int64_t)(2)); \
}
// MULT_2SPIN_BACKEND
#define MULT_2SPIN_2_A64FXf \
@ -570,12 +572,12 @@ Author: Nils Meyer <nils.meyer@ur.de>
result_31 = svdup_f32(0.); \
result_32 = svdup_f32(0.);

// PREFETCH_RESULT_L2_STORE (prefetch store to L2)
// PREFETCH_RESULT_L2_STORE (uses DC ZVA for cache line zeroing)
#define PREFETCH_RESULT_L2_STORE_INTERNAL_A64FXf(base) \
{ \
svprfd(pg1, (int64_t*)(base + 0), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 256), SV_PSTL2STRM); \
svprfd(pg1, (int64_t*)(base + 512), SV_PSTL2STRM); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 0) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 1) : "memory" ); \
asm( "dc zva, %[fetchptr] \n\t" : : [fetchptr] "r" (base + 256 * 2) : "memory" ); \
}
// PREFETCH_RESULT_L1_STORE (prefetch store to L1)
#define PREFETCH_RESULT_L1_STORE_INTERNAL_A64FXf(base) \

@ -46,6 +46,7 @@ Author: Nils Meyer <nils.meyer@ur.de>
#undef MULT_2SPIN_2
#undef MAYBEPERM
#undef LOAD_CHI
#undef ZERO_PSI
#undef XP_PROJ
#undef YP_PROJ
#undef ZP_PROJ
File diff suppressed because it is too large