Mirror of https://github.com/paboyle/Grid.git
Synced 2025-04-28 22:55:55 +01:00

NAMESPACE and format to my liking

parent 13bce2a6bf
commit 08682c5461
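The substance of the change is mechanical: the bare "namespace Grid {" / "namespace Optimization {" openings and their closing braces are replaced by Grid's NAMESPACE_BEGIN/NAMESPACE_END macros; apart from those lines, the hunks below appear to be indentation and formatting changes only. The macro definitions are not part of this diff, so the following is only a minimal sketch of what the substitutions imply:

// Hypothetical sketch only: the real definitions live elsewhere in Grid and are not shown in this diff.
// The substitutions below only make sense if the macros open and close an ordinary C++ namespace.
#define NAMESPACE_BEGIN(A) namespace A {
#define NAMESPACE_END(A)   }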
@@ -1,4 +1,4 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

@@ -27,8 +27,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <immintrin.h>
#ifdef AVXFMA4
#include <x86intrin.h>
@@ -38,26 +38,26 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define _mm256_set_m128i(hi,lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo),(hi),1)
#endif

-namespace Grid {
-namespace Optimization {
+NAMESPACE_BEGIN(Grid);
+NAMESPACE_BEGIN(Optimization);

template<class vtype>
union uconv {
__m256 f;
vtype v;
};

union u256f {
__m256 v;
float f[8];
};

union u256d {
__m256d v;
double f[4];
};

struct Vsplat{
// Complex float
inline __m256 operator()(float a, float b) {
return _mm256_set_ps(b,a,b,a,b,a,b,a);
@@ -78,9 +78,9 @@ namespace Optimization {
inline __m256i operator()(Integer a){
return _mm256_set1_epi32(a);
}
};

struct Vstore{
//Float
inline void operator()(__m256 a, float* F){
_mm256_store_ps(F,a);
@@ -94,9 +94,9 @@
_mm256_store_si256((__m256i*)I,a);
}

};

struct Vstream{
//Float
inline void operator()(float * a, __m256 b){
_mm256_stream_ps(a,b);
@@ -107,9 +107,9 @@
}


};

struct Vset{
// Complex float
inline __m256 operator()(Grid::ComplexF *a){
return _mm256_set_ps(a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
@@ -131,9 +131,9 @@
return _mm256_set_epi32(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
}

};

template <typename Out_type, typename In_type>
struct Reduce{
// Need templated class to overload output type
// General form must generate error if compiled
@@ -144,10 +144,10 @@
}
};

/////////////////////////////////////////////////////
// Arithmetic operations
/////////////////////////////////////////////////////
struct Sum{
//Complex/Real float
inline __m256 operator()(__m256 a, __m256 b){
return _mm256_add_ps(a,b);
@@ -173,9 +173,9 @@
return _mm256_add_epi32(a,b);
#endif
}
};

struct Sub{
//Complex/Real float
inline __m256 operator()(__m256 a, __m256 b){
return _mm256_sub_ps(a,b);
@@ -202,9 +202,9 @@
#endif

}
};

struct MultRealPart{
inline __m256 operator()(__m256 a, __m256 b){
__m256 ymm0;
ymm0 = _mm256_shuffle_ps(a,a,_MM_SELECT_FOUR_FOUR(2,2,0,0)); // ymm0 <- ar ar,
@@ -215,8 +215,8 @@
ymm0 = _mm256_shuffle_pd(a,a,0x0); // ymm0 <- ar ar, ar,ar b'00,00
return _mm256_mul_pd(ymm0,b); // ymm0 <- ar bi, ar br
}
};
struct MaddRealPart{
inline __m256 operator()(__m256 a, __m256 b, __m256 c){
__m256 ymm0 = _mm256_moveldup_ps(a); // ymm0 <- ar ar,
return _mm256_add_ps(_mm256_mul_ps( ymm0, b),c);
@@ -225,9 +225,9 @@
__m256d ymm0 = _mm256_shuffle_pd( a, a, 0x0 );
return _mm256_add_pd(_mm256_mul_pd( ymm0, b),c);
}
};

struct MultComplex{
// Complex float
inline __m256 operator()(__m256 a, __m256 b){
#if defined (AVX1)
@@ -302,10 +302,10 @@
}


};

#if 0
struct ComplexDot {

inline void Prep(__m256 ari,__m256 &air) {
cdotRIperm(ari,air);
@@ -322,10 +322,10 @@
// cdotRI
}

};
#endif

struct Mult{

inline void mac(__m256 &a, __m256 b, __m256 c){
#if defined (AVX1)
@@ -377,9 +377,9 @@
#endif

}
};

struct Div {
// Real float
inline __m256 operator()(__m256 a, __m256 b) {
return _mm256_div_ps(a, b);
@@ -388,10 +388,10 @@
inline __m256d operator()(__m256d a, __m256d b){
return _mm256_div_pd(a,b);
}
};


struct Conj{
// Complex single
inline __m256 operator()(__m256 in){
return _mm256_xor_ps(_mm256_addsub_ps(_mm256_setzero_ps(),in), _mm256_set1_ps(-0.f));
@@ -401,9 +401,9 @@
return _mm256_xor_pd(_mm256_addsub_pd(_mm256_setzero_pd(),in), _mm256_set1_pd(-0.f));
}
// do not define for integer input
};

struct TimesMinusI{
//Complex single
inline __m256 operator()(__m256 in, __m256 ret){
__m256 tmp =_mm256_addsub_ps(_mm256_setzero_ps(),in); // r,-i
@@ -414,9 +414,9 @@
__m256d tmp = _mm256_addsub_pd(_mm256_setzero_pd(),in); // r,-i
return _mm256_shuffle_pd(tmp,tmp,0x5);
}
};

struct TimesI{
//Complex single
inline __m256 operator()(__m256 in, __m256 ret){
__m256 tmp =_mm256_shuffle_ps(in,in,_MM_SELECT_FOUR_FOUR(2,3,0,1)); // i,r
@@ -427,13 +427,13 @@
__m256d tmp = _mm256_shuffle_pd(in,in,0x5);
return _mm256_addsub_pd(_mm256_setzero_pd(),tmp); // i,-r
}
};

//////////////////////////////////////////////
// Some Template specialization
//////////////////////////////////////////////

struct Permute{

static inline __m256 Permute0(__m256 in){
return _mm256_permute2f128_ps(in,in,0x01); //ABCD EFGH -> EFGH ABCD
@@ -460,9 +460,9 @@
static inline __m256d Permute3(__m256d in){
return in;
};
};
#define USE_FP16
struct PrecisionChange {
static inline __m256i StoH (__m256 a,__m256 b) {
__m256i h;
#ifdef USE_FP16
@@ -506,8 +506,8 @@
StoD(sa,a,b);
StoD(sb,c,d);
}
};
struct Exchange{
// 3210 ordering
static inline void Exchange0(__m256 &out1,__m256 &out2,__m256 in1,__m256 in2){
//Invertible
@@ -558,7 +558,7 @@
assert(0);
return;
};
};


#if defined (AVX2)
@@ -597,7 +597,7 @@

#endif

struct Rotate{

static inline __m256 rotate(__m256 in,int n){
switch(n){
@@ -647,11 +647,11 @@
return ret;
};

};

//Complex float Reduce
template<>
inline Grid::ComplexF Reduce<Grid::ComplexF, __m256>::operator()(__m256 in){
__m256 v1,v2;
v1=Optimization::Permute::Permute0(in); // avx 256; quad complex single
v1= _mm256_add_ps(v1,in);
@@ -659,11 +659,11 @@
v1 = _mm256_add_ps(v1,v2);
u256f conv; conv.v = v1;
return Grid::ComplexF(conv.f[0],conv.f[1]);
}

//Real float Reduce
template<>
inline Grid::RealF Reduce<Grid::RealF, __m256>::operator()(__m256 in){
__m256 v1,v2;
v1 = Optimization::Permute::Permute0(in); // avx 256; octo-double
v1 = _mm256_add_ps(v1,in);
@@ -673,22 +673,22 @@
v1 = _mm256_add_ps(v1,v2);
u256f conv; conv.v=v1;
return conv.f[0];
}


//Complex double Reduce
template<>
inline Grid::ComplexD Reduce<Grid::ComplexD, __m256d>::operator()(__m256d in){
__m256d v1;
v1 = Optimization::Permute::Permute0(in); // sse 128; paired complex single
v1 = _mm256_add_pd(v1,in);
u256d conv; conv.v = v1;
return Grid::ComplexD(conv.f[0],conv.f[1]);
}

//Real double Reduce
template<>
inline Grid::RealD Reduce<Grid::RealD, __m256d>::operator()(__m256d in){
__m256d v1,v2;
v1 = Optimization::Permute::Permute0(in); // avx 256; quad double
v1 = _mm256_add_pd(v1,in);
@@ -696,11 +696,11 @@
v1 = _mm256_add_pd(v1,v2);
u256d conv; conv.v = v1;
return conv.f[0];
}

//Integer Reduce
template<>
inline Integer Reduce<Integer, __m256i>::operator()(__m256i in){
__m128i ret;
#if defined (AVX2)
// AVX2 horizontal adds within upper and lower halves of register; use
@@ -723,47 +723,47 @@
ret = _mm_hadd_epi32(u1, u1);
#endif
return _mm_cvtsi128_si32(ret);
}

-}
+NAMESPACE_END(Optimization);

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types

typedef __m256i SIMD_Htype; // Single precision type
typedef __m256 SIMD_Ftype; // Single precision type
typedef __m256d SIMD_Dtype; // Double precision type
typedef __m256i SIMD_Itype; // Integer type

// prefecthing
inline void v_prefetch0(int size, const char *ptr){
for(int i=0;i<size;i+=64){ // Define L1 linesize above
_mm_prefetch(ptr+i+4096,_MM_HINT_T1);
_mm_prefetch(ptr+i+512,_MM_HINT_T0);
}
}
inline void prefetch_HINT_T0(const char *ptr){
_mm_prefetch(ptr, _MM_HINT_T0);
}

// Function name aliases
typedef Optimization::Vsplat VsplatSIMD;
typedef Optimization::Vstore VstoreSIMD;
typedef Optimization::Vset VsetSIMD;
typedef Optimization::Vstream VstreamSIMD;

template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S, T>;

// Arithmetic operations
typedef Optimization::Sum SumSIMD;
typedef Optimization::Sub SubSIMD;
typedef Optimization::Div DivSIMD;
typedef Optimization::Mult MultSIMD;
typedef Optimization::MultComplex MultComplexSIMD;
typedef Optimization::MultRealPart MultRealPartSIMD;
typedef Optimization::MaddRealPart MaddRealPartSIMD;
typedef Optimization::Conj ConjSIMD;
typedef Optimization::TimesMinusI TimesMinusISIMD;
typedef Optimization::TimesI TimesISIMD;

-} // namespace Grid
+NAMESPACE_END(Grid)
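For reference, the horizontal-reduction idiom that the Reduce specialisations in the hunks above rely on (swap the 128-bit halves, add, then keep folding within the halves) can be reproduced with raw AVX intrinsics. The sketch below is a self-contained illustration of that pattern only; it does not use Grid's Permute struct or Reduce template, and the function and variable names are invented for this example (compile with AVX enabled, e.g. -mavx):

#include <immintrin.h>
#include <cstdio>

// Standalone sketch of the fold-and-add pattern used by the float Reduce above:
// first swap the upper/lower 128-bit halves (as Permute0 does), then keep
// folding within the halves until every lane holds the full sum.
static inline float horizontal_sum_ps(__m256 in) {
  __m256 v1, v2;
  v1 = _mm256_permute2f128_ps(in, in, 0x01);               // swap 128-bit halves
  v1 = _mm256_add_ps(v1, in);                               // lane i holds in[i] + in[i^4]
  v2 = _mm256_shuffle_ps(v1, v1, _MM_SHUFFLE(1, 0, 3, 2));  // swap 64-bit pairs within each half
  v1 = _mm256_add_ps(v1, v2);
  v2 = _mm256_shuffle_ps(v1, v1, _MM_SHUFFLE(2, 3, 0, 1));  // swap adjacent floats
  v1 = _mm256_add_ps(v1, v2);                               // every lane now holds the total
  return _mm_cvtss_f32(_mm256_castps256_ps128(v1));         // extract lane 0
}

int main() {
  __m256 x = _mm256_set_ps(8.f, 7.f, 6.f, 5.f, 4.f, 3.f, 2.f, 1.f); // lanes 1..8
  std::printf("sum = %f\n", horizontal_sum_ps(x));                  // expect 36
  return 0;
}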