#ifndef GRID_SIMD_H
#define GRID_SIMD_H

////////////////////////////////////////////////////////////////////////
// Define scalar and vector floating point types
//
// Scalar: RealF, RealD, ComplexF, ComplexD
//
// Vector: vRealF, vRealD, vComplexF, vComplexD
//
// Vector types are arch dependent
////////////////////////////////////////////////////////////////////////
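//
// Illustrative sketch only (not part of the interface): each vector type packs
// Nsimd() scalars of the matching scalar type, with the lane count fixed by the
// SIMD target selected below, e.g.
//
//   vRealF    ~ vRealF::Nsimd()    x RealF    per register
//   vComplexD ~ vComplexD::Nsimd() x ComplexD per register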

// Standard headers for the scalar types and helpers used below
// (std::complex, std::vector, assert).
#include <cassert>
#include <complex>
#include <vector>

#ifdef SSE4
#include <pmmintrin.h>
#endif

#if defined(AVX1) || defined (AVX2)
#include <immintrin.h>
#endif

#ifdef AVX512
#include <immintrin.h>
#endif

namespace Grid {

typedef float  RealF;
typedef double RealD;

typedef std::complex<RealF> ComplexF;
typedef std::complex<RealD> ComplexD;

inline RealF adj(const RealF & r){ return r; }
inline RealF conj(const RealF & r){ return r; }
inline RealF real(const RealF & r){ return r; }

inline RealD adj(const RealD & r){ return r; }
inline RealD conj(const RealD & r){ return r; }
inline RealD real(const RealD & r){ return r; }

inline ComplexD innerProduct(const ComplexD & l, const ComplexD & r) { return conj(l)*r; }
inline ComplexF innerProduct(const ComplexF & l, const ComplexF & r) { return conj(l)*r; }
inline RealD innerProduct(const RealD & l, const RealD & r) { return l*r; }
inline RealF innerProduct(const RealF & l, const RealF & r) { return l*r; }

////////////////////////////////////////////////////////////////////////////////
// Provide support functions for basic real and complex data types required by Grid
// Single and double precision versions. Should be able to template this once only.
////////////////////////////////////////////////////////////////////////////////
inline void mac (ComplexD * __restrict__ y,const ComplexD * __restrict__ a,const ComplexD *__restrict__ x){ *y = (*a) * (*x)+(*y); };
inline void mult(ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) * (*r);}
inline void sub (ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) - (*r);}
inline void add (ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) + (*r);}
inline ComplexD adj(const ComplexD& r){ return(conj(r)); }
// conj already supported for complex

inline void mac (ComplexF * __restrict__ y,const ComplexF * __restrict__ a,const ComplexF *__restrict__ x){ *y = (*a) * (*x)+(*y); }
inline void mult(ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }
inline ComplexF adj(const ComplexF& r){ return(conj(r)); }
// conj already supported for complex

inline void mac (RealD * __restrict__ y,const RealD * __restrict__ a,const RealD *__restrict__ x){ *y = (*a) * (*x)+(*y);}
inline void mult(RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) * (*r);}
inline void sub (RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) - (*r);}
inline void add (RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) + (*r);}

inline void mac (RealF * __restrict__ y,const RealF * __restrict__ a,const RealF *__restrict__ x){ *y = (*a) * (*x)+(*y); }
inline void mult(RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) + (*r); }
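
// Illustrative usage sketch of the scalar helpers above (comments only,
// not part of the interface):
//
//   ComplexD a(1,2), x(3,4), y(0,0);
//   mult(&y,&a,&x);                  // y  = a*x
//   mac (&y,&a,&x);                  // y += a*x
//   ComplexD ip = innerProduct(a,x); // conj(a)*x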

class Zero{};
static Zero zero;
template<class itype> inline void zeroit(itype &arg){ arg=zero;};
template<> inline void zeroit(ComplexF &arg){ arg=0; };
template<> inline void zeroit(ComplexD &arg){ arg=0; };
template<> inline void zeroit(RealF &arg){ arg=0; };
template<> inline void zeroit(RealD &arg){ arg=0; };
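
// Illustrative usage sketch: the specialisations above reset fundamental types
// directly, while the generic template forwards to assignment from "zero" for
// composite types that define it.
//
//   ComplexD z(1,2);
//   zeroit(z);        // z is now (0,0)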

#if defined (SSE4)
typedef __m128 fvec;
typedef __m128d dvec;
typedef __m128 cvec;
typedef __m128d zvec;
typedef __m128i ivec;
#endif

#if defined (AVX1) || defined (AVX2)
typedef __m256 fvec;
typedef __m256d dvec;
typedef __m256 cvec;
typedef __m256d zvec;
typedef __m256i ivec;
#endif

#if defined (AVX512)
typedef __m512 fvec;
typedef __m512d dvec;
typedef __m512 cvec;
typedef __m512d zvec;
typedef __m512i ivec;
#endif

#if defined (QPX)
typedef float fvec __attribute__ ((vector_size (16))); // QPX has same SIMD width irrespective of precision
typedef float cvec __attribute__ ((vector_size (16)));
typedef vector4double dvec;
typedef vector4double zvec;
#endif

#if defined (AVX1) || defined (AVX2) || defined (AVX512)
inline void v_prefetch0(int size, const char *ptr){
  for(int i=0;i<size;i+=64){ // Define L1 linesize above. What about SSE?
    _mm_prefetch(ptr+i+4096,_MM_HINT_T1);
    _mm_prefetch(ptr+i+512,_MM_HINT_T0);
  }
}
#else
inline void v_prefetch0(int size, const char *ptr){};
#endif

/////////////////////////////////////////////////////////////////
// Generic extract/merge/permute
/////////////////////////////////////////////////////////////////
template<class vsimd,class scalar>
inline void Gextract(const vsimd &y,std::vector<scalar *> &extracted){
  // FIXME: bounce off memory is painful
  int Nextr=extracted.size();
  int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  std::vector<scalar,alignedAllocator<scalar> > buf(Nsimd);

  vstore(y,&buf[0]);
  for(int i=0;i<Nextr;i++){
    *extracted[i] = buf[i*s];
    extracted[i]++;
  }
};

template<class vsimd,class scalar>
inline void Gextract(const vsimd &y,std::vector<scalar> &extracted){
  int Nextr=extracted.size();
  int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  std::vector<scalar,alignedAllocator<scalar> > buf(Nsimd);

  vstore(y,&buf[0]);
  for(int i=0;i<Nextr;i++){
    extracted[i] = buf[i*s];
  }
};

template<class vsimd,class scalar>
inline void Gmerge(vsimd &y,std::vector<scalar *> &extracted){
  int Nextr=extracted.size();
  int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  std::vector<scalar> buf(Nsimd);
  for(int i=0;i<Nextr;i++){
    for(int ii=0;ii<s;ii++){
      buf[i*s+ii]=*extracted[i];
    }
    extracted[i]++;
  }
  vset(y,&buf[0]);
};

template<class vsimd,class scalar>
inline void Gmerge(vsimd &y,std::vector<scalar> &extracted){
  int Nextr=extracted.size();
  int Nsimd=vsimd::Nsimd();
  int s=Nsimd/Nextr;

  std::vector<scalar> buf(Nsimd);
  for(int i=0;i<Nextr;i++){
    for(int ii=0;ii<s;ii++){
      buf[i*s+ii]=extracted[i];
    }
  }
  vset(y,&buf[0]);
};
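
// Illustrative usage sketch (assumes the vComplexF interface supplied by the
// simd/ headers included below; Nextr==Nsimd gives an exact round trip):
//
//   vComplexF v;
//   std::vector<ComplexF> lanes(vComplexF::Nsimd());
//   Gextract(v,lanes);   // spill each SIMD lane of v into lanes[i]
//   Gmerge  (v,lanes);   // repack the scalar lanes back into v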

//////////////////////////////////////////////////////////
// Permute
// Permute 0 every ABCDEFGH -> BA DC FE HG
// Permute 1 every ABCDEFGH -> CD AB GH EF
// Permute 2 every ABCDEFGH -> EFGH ABCD
// Permute 3 possible on longer iVector lengths (512bit = 8 double = 16 single)
// Permute 4 possible on half precision @512bit vectors.
//////////////////////////////////////////////////////////
template<class vsimd>
inline void Gpermute(vsimd &y,const vsimd &b,int perm){
  switch (perm){
#if defined(AVX1)||defined(AVX2)
    // 8x32 bits=>3 permutes
  case 2: y.v = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); break;
  case 1: y.v = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2)); break;
  case 0: y.v = _mm256_permute2f128_ps(b.v,b.v,0x01); break;
#endif
#ifdef SSE4
  case 1: y.v = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); break;
  case 0: y.v = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2)); break;
#endif
#ifdef AVX512
    // 16 floats=> permutes
    // Permute 0 every abcd efgh ijkl mnop -> badc fehg jilk nmpo
    // Permute 1 every abcd efgh ijkl mnop -> cdab ghef klij opmn
    // Permute 2 every abcd efgh ijkl mnop -> efgh abcd mnop ijkl
    // Permute 3 every abcd efgh ijkl mnop -> ijkl mnop abcd efgh
  case 3: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_CDAB); break;
  case 2: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_BADC); break;
  case 1: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(2,3,0,1)); break;
  case 0: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(1,0,3,2)); break;
#endif
#ifdef QPX
#error not implemented
#endif
  default: assert(0); break;
  }
};
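
// Illustrative usage sketch: select one of the lane reorderings documented
// above; the exact shuffle applied depends on the active SIMD target.
//
//   vRealF in,out;
//   Gpermute(out,in,0);   // apply permute type 0 to the lanes of "in"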

};

#include <simd/Grid_vInteger.h>
#include <simd/Grid_vRealF.h>
#include <simd/Grid_vRealD.h>
#include <simd/Grid_vComplexF.h>
#include <simd/Grid_vComplexD.h>

namespace Grid {

// NB: Template the following on "type Complex" and then implement *,+,- for
// ComplexF, ComplexD, RealF, RealD above to
// get full generality of binops with scalars.
inline void mac (vComplexF *__restrict__ y,const ComplexF *__restrict__ a,const vComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) + (*r); }
inline void mac (vComplexF *__restrict__ y,const vComplexF *__restrict__ a,const ComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }

inline void mac (vComplexD *__restrict__ y,const ComplexD *__restrict__ a,const vComplexD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) + (*r); }
inline void mac (vComplexD *__restrict__ y,const vComplexD *__restrict__ a,const ComplexD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) + (*r); }

inline void mac (vRealF *__restrict__ y,const RealF *__restrict__ a,const vRealF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) + (*r); }
inline void mac (vRealF *__restrict__ y,const vRealF *__restrict__ a,const RealF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) + (*r); }

inline void mac (vRealD *__restrict__ y,const RealD *__restrict__ a,const vRealD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) + (*r); }
inline void mac (vRealD *__restrict__ y,const vRealD *__restrict__ a,const RealD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
inline void mult(vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) * (*r); }
inline void sub (vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) - (*r); }
inline void add (vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) + (*r); }
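
// Illustrative usage sketch for the mixed scalar/vector overloads above
// (broadcast semantics assumed from the operators defined in the simd/ headers):
//
//   vComplexF vx,vy;
//   ComplexF  alpha(2.0,0.0);
//   mac(&vy,&alpha,&vx);   // vy += alpha*vx, with alpha applied to every lane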

// Default precision
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
typedef RealD Real;
typedef vRealD vReal;
typedef vComplexD vComplex;
typedef std::complex<Real> Complex;
#else
typedef RealF Real;
typedef vRealF vReal;
typedef vComplexF vComplex;
typedef std::complex<Real> Complex;
#endif
}
#endif