Mirror of https://github.com/paboyle/Grid.git, synced 2025-06-12 20:27:06 +01:00

Merge branch 'master' of https://github.com/paboyle/Grid

Conflicts:
	lib/Grid_simd.h
@@ -16,6 +16,9 @@
/* GRID_COMMS_NONE */
#define GRID_COMMS_NONE 1

/* define if the compiler supports basic C++11 syntax */
/* #undef HAVE_CXX11 */

/* Define to 1 if you have the declaration of `be64toh', and to 0 if you
   don't. */
#define HAVE_DECL_BE64TOH 1

@@ -15,6 +15,9 @@
/* GRID_COMMS_NONE */
#undef GRID_COMMS_NONE

/* define if the compiler supports basic C++11 syntax */
#undef HAVE_CXX11

/* Define to 1 if you have the declaration of `be64toh', and to 0 if you
   don't. */
#undef HAVE_DECL_BE64TOH

153	lib/Grid_simd.h
@@ -13,28 +13,6 @@

typedef uint32_t Integer;

#ifdef SSE4
#include <pmmintrin.h>
#endif
#if defined(AVX1) || defined (AVX2)
#include <immintrin.h>

// _mm256_set_m128i(hi,lo); // not defined in all versions of immintrin.h
#ifndef _mm256_set_m128i
#define _mm256_set_m128i(hi,lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo),(hi),1)
#endif

#endif

#ifdef AVX512
#include <immintrin.h>
#ifndef KNC_ONLY_STORES
#define _mm512_storenrngo_ps _mm512_store_ps // not present in AVX512
#define _mm512_storenrngo_pd _mm512_store_pd // not present in AVX512
#endif

#endif

namespace Grid {

typedef float RealF;
@@ -66,45 +44,49 @@ namespace Grid {
|
||||
inline ComplexF innerProduct(const ComplexF & l, const ComplexF & r) { return conjugate(l)*r; }
|
||||
inline RealD innerProduct(const RealD & l, const RealD & r) { return l*r; }
|
||||
inline RealF innerProduct(const RealF & l, const RealF & r) { return l*r; }
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//Provide support functions for basic real and complex data types required by Grid
|
||||
//Single and double precision versions. Should be able to template this once only.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
inline void mac (ComplexD * __restrict__ y,const ComplexD * __restrict__ a,const ComplexD *__restrict__ x){ *y = (*a) * (*x)+(*y); };
|
||||
inline void mult(ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) * (*r);}
|
||||
inline void sub (ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) - (*r);}
|
||||
inline void add (ComplexD * __restrict__ y,const ComplexD * __restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) + (*r);}
|
||||
// conjugate already supported for complex
|
||||
|
||||
inline void mac (ComplexF * __restrict__ y,const ComplexF * __restrict__ a,const ComplexF *__restrict__ x){ *y = (*a) * (*x)+(*y); }
|
||||
inline void mult(ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (ComplexF * __restrict__ y,const ComplexF * __restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
//conjugate already supported for complex
|
||||
|
||||
inline ComplexF timesI(const ComplexF &r) { return(r*ComplexF(0.0,1.0));}
|
||||
inline ComplexD timesI(const ComplexD &r) { return(r*ComplexD(0.0,1.0));}
|
||||
inline ComplexF timesMinusI(const ComplexF &r){ return(r*ComplexF(0.0,-1.0));}
|
||||
inline ComplexD timesMinusI(const ComplexD &r){ return(r*ComplexD(0.0,-1.0));}
|
||||
inline void timesI(ComplexF &ret,const ComplexF &r) { ret = timesI(r);}
|
||||
inline void timesI(ComplexD &ret,const ComplexD &r) { ret = timesI(r);}
|
||||
inline void timesMinusI(ComplexF &ret,const ComplexF &r){ ret = timesMinusI(r);}
|
||||
inline void timesMinusI(ComplexD &ret,const ComplexD &r){ ret = timesMinusI(r);}
|
||||
|
||||
inline void mac (RealD * __restrict__ y,const RealD * __restrict__ a,const RealD *__restrict__ x){ *y = (*a) * (*x)+(*y);}
|
||||
inline void mult(RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) * (*r);}
|
||||
inline void sub (RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) - (*r);}
|
||||
inline void add (RealD * __restrict__ y,const RealD * __restrict__ l,const RealD *__restrict__ r){ *y = (*l) + (*r);}
|
||||
|
||||
inline void mac (RealF * __restrict__ y,const RealF * __restrict__ a,const RealF *__restrict__ x){ *y = (*a) * (*x)+(*y); }
|
||||
inline void mult(RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (RealF * __restrict__ y,const RealF * __restrict__ l,const RealF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
inline void vstream(ComplexF &l, const ComplexF &r){ l=r;}
|
||||
inline void vstream(ComplexD &l, const ComplexD &r){ l=r;}
|
||||
inline void vstream(RealF &l, const RealF &r){ l=r;}
|
||||
inline void vstream(RealD &l, const RealD &r){ l=r;}
|
||||
|
||||
|
||||
class Zero{};
|
||||
static Zero zero;
|
||||
template<class itype> inline void zeroit(itype &arg){ arg=zero;};
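For reference, a minimal stand-alone check of what the scalar helpers above compute; this is an illustrative sketch only, with std::complex<double> standing in for Grid's ComplexD typedef.

// Sketch: scalar mac and multiplication by +/- i, matching the helpers above.
#include <complex>
#include <iostream>

int main(void){
  typedef std::complex<double> ComplexD;

  ComplexD a(1.0,2.0), x(3.0,-1.0), y(0.5,0.5);
  y = a*x + y;                          // mac: y <- a*x + y

  ComplexD r(2.0,3.0);
  ComplexD ri  = r*ComplexD(0.0, 1.0);  // timesI(r)      : (2+3i)*i    = -3+2i
  ComplexD rmi = r*ComplexD(0.0,-1.0);  // timesMinusI(r) : (2+3i)*(-i) =  3-2i

  std::cout << y << " " << ri << " " << rmi << "\n";
  return 0;
}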
|
||||
@@ -113,7 +95,6 @@ namespace Grid {
|
||||
template<> inline void zeroit(RealF &arg){ arg=0; };
|
||||
template<> inline void zeroit(RealD &arg){ arg=0; };
|
||||
|
||||
|
||||
#if defined (SSE4)
|
||||
typedef __m128 fvec;
|
||||
typedef __m128d dvec;
|
||||
@@ -245,56 +226,12 @@ inline void Gpermute(vsimd &y,const vsimd &b,int perm){
|
||||
default: assert(0); break;
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#include <simd/Grid_vInteger.h>
|
||||
#include <simd/Grid_vRealF.h>
|
||||
#include <simd/Grid_vRealD.h>
|
||||
#include <simd/Grid_vComplexF.h>
|
||||
#include <simd/Grid_vComplexD.h>
|
||||
|
||||
#include <simd/Grid_vector_types.h>
|
||||
|
||||
namespace Grid {
|
||||
|
||||
// NB: Template the following on "type Complex" and then implement *,+,- for
|
||||
// ComplexF, ComplexD, RealF, RealD above to
|
||||
// get full generality of binops with scalars.
|
||||
inline void mac (vComplexF *__restrict__ y,const ComplexF *__restrict__ a,const vComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vComplexF *__restrict__ y,const ComplexF *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
inline void mac (vComplexF *__restrict__ y,const vComplexF *__restrict__ a,const ComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const ComplexF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
inline void mac (vComplexD *__restrict__ y,const ComplexD *__restrict__ a,const vComplexD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vComplexD *__restrict__ y,const ComplexD *__restrict__ l,const vComplexD *__restrict__ r){ *y = (*l) + (*r); }
|
||||
inline void mac (vComplexD *__restrict__ y,const vComplexD *__restrict__ a,const ComplexD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vComplexD *__restrict__ y,const vComplexD *__restrict__ l,const ComplexD *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
inline void mac (vRealF *__restrict__ y,const RealF *__restrict__ a,const vRealF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vRealF *__restrict__ y,const RealF *__restrict__ l,const vRealF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
inline void mac (vRealF *__restrict__ y,const vRealF *__restrict__ a,const RealF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vRealF *__restrict__ y,const vRealF *__restrict__ l,const RealF *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
inline void mac (vRealD *__restrict__ y,const RealD *__restrict__ a,const vRealD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vRealD *__restrict__ y,const RealD *__restrict__ l,const vRealD *__restrict__ r){ *y = (*l) + (*r); }
|
||||
inline void mac (vRealD *__restrict__ y,const vRealD *__restrict__ a,const RealD *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
inline void mult(vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) * (*r); }
|
||||
inline void sub (vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) - (*r); }
|
||||
inline void add (vRealD *__restrict__ y,const vRealD *__restrict__ l,const RealD *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
// Default precision
|
||||
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
|
||||
typedef vRealD vReal;
|
||||
|
@@ -93,12 +93,10 @@ nobase_include_HEADERS = algorithms/approx/bigfloat.h \
qcd/Grid_qcd_2spinor.h \
qcd/Grid_qcd_dirac.h \
qcd/Grid_qcd_wilson_dop.h \
simd/Grid_vComplexD.h \
simd/Grid_vComplexF.h \
simd/Grid_vInteger.h \
simd/Grid_vRealD.h \
simd/Grid_vRealF.h \
simd/Grid_vector_types.h \
simd/Grid_sse4.h
simd/Grid_sse4.h \
simd/Grid_avx.h \
simd/Grid_knc.h

@@ -154,26 +154,35 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,Lattice<vobj> &rhs, int
|
||||
cbmask=0x3;
|
||||
}
|
||||
|
||||
|
||||
int ro = rplane*rhs._grid->_ostride[dimension]; // base offset for start of plane
|
||||
int lo = lplane*lhs._grid->_ostride[dimension]; // base offset for start of plane
|
||||
|
||||
PARALLEL_NESTED_LOOP2
|
||||
for(int n=0;n<rhs._grid->_slice_nblock[dimension];n++){
|
||||
for(int b=0;b<rhs._grid->_slice_block[dimension];b++){
|
||||
|
||||
/*
|
||||
int o =n*rhs._grid->_slice_stride[dimension];
|
||||
int ocb=1<<lhs._grid->CheckerBoardFromOindex(o+b);
|
||||
if ( ocb&cbmask ) {
|
||||
lhs._odata[lo+o+b]=rhs._odata[ro+o+b];
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
int o =n*rhs._grid->_slice_stride[dimension]+b;
|
||||
int ocb=1<<lhs._grid->CheckerBoardFromOindex(o);
|
||||
if ( ocb&cbmask ) {
|
||||
//lhs._odata[lo+o]=rhs._odata[ro+o];
|
||||
vstream(lhs._odata[lo+o],rhs._odata[ro+o]);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
}
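As a rough illustration of the index and mask logic in the loop above; the slice geometry and the checkerboard rule below are invented stand-ins, not Grid's real API.

// Stand-alone model of the plane-copy masking; sizes and checkerboard are stand-ins.
#include <vector>
#include <cstdio>

int main(void){
  const int nblock = 4, block = 8, stride = 8;       // hypothetical _slice_* values
  std::vector<double> lhs(nblock*stride + block, 0.0);
  std::vector<double> rhs(lhs.size(), 1.0);
  int cbmask = 0x1;                                  // copy one checkerboard only
  int copied = 0;

  for(int n=0;n<nblock;n++){
    for(int b=0;b<block;b++){
      int o   = n*stride + b;                        // flattened offset in the plane
      int ocb = 1 << (o & 0x1);                      // toy checkerboard from parity
      if ( ocb & cbmask ) { lhs[o] = rhs[o]; copied++; }  // vstream(...) in the real loop
    }
  }
  std::printf("copied %d of %d sites\n", copied, nblock*block);
  return 0;
}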
|
||||
|
||||
template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,Lattice<vobj> &rhs, int dimension,int lplane,int rplane,int cbmask,int permute_type)
|
||||
{
|
||||
|
||||
int rd = rhs._grid->_rdimensions[dimension];
|
||||
|
||||
if ( !rhs._grid->CheckerBoarded(dimension) ) {
|
||||
|
@@ -212,6 +212,16 @@ public:
|
||||
|
||||
iMatrix(const Zero &z){ *this = zero; };
|
||||
iMatrix() =default;
|
||||
|
||||
iMatrix& operator=(const iMatrix& rhs){
|
||||
for(int i=0;i<N;i++)
|
||||
for(int j=0;j<N;j++)
|
||||
vstream(_internal[i][j],rhs._internal[i][j]);
|
||||
return *this;
|
||||
};
|
||||
|
||||
|
||||
|
||||
iMatrix(scalar_type s) { (*this) = s ;};// recurse down and hit the constructor for vector_type
|
||||
|
||||
/*
|
||||
@@ -220,6 +230,9 @@ public:
|
||||
iMatrix<vtype,N> & operator= (const iMatrix<vtype,N> ©me) = default;
|
||||
iMatrix<vtype,N> & operator= (iMatrix<vtype,N> &©me) = default;
|
||||
*/
|
||||
|
||||
|
||||
|
||||
iMatrix<vtype,N> & operator= (const Zero &hero){
|
||||
zeroit(*this);
|
||||
return *this;
|
||||
|
399	lib/simd/Grid_avx.h	Normal file
@@ -0,0 +1,399 @@
|
||||
//----------------------------------------------------------------------
|
||||
/*! @file Grid_avx.h
|
||||
@brief Optimization libraries for AVX1/2 instructions set
|
||||
|
||||
Using intrinsics
|
||||
*/
|
||||
// Time-stamp: <2015-05-22 18:58:27 neo>
|
||||
//----------------------------------------------------------------------
|
||||
|
||||
#include <immintrin.h>
|
||||
// _mm256_set_m128i(hi,lo); // not defined in all versions of immintrin.h
|
||||
#ifndef _mm256_set_m128i
|
||||
#define _mm256_set_m128i(hi,lo) _mm256_insertf128_si256(_mm256_castsi128_si256(lo),(hi),1)
|
||||
#endif
|
||||
|
||||
namespace Optimization {
|
||||
|
||||
struct Vsplat{
|
||||
//Complex float
|
||||
inline __m256 operator()(float a, float b){
|
||||
return _mm256_set_ps(b,a,b,a,b,a,b,a);
|
||||
}
|
||||
// Real float
|
||||
inline __m256 operator()(float a){
|
||||
return _mm256_set_ps(a,a,a,a,a,a,a,a);
|
||||
}
|
||||
//Complex double
|
||||
inline __m256d operator()(double a, double b){
|
||||
return _mm256_set_pd(b,a,b,a);
|
||||
}
|
||||
//Real double
|
||||
inline __m256d operator()(double a){
|
||||
return _mm256_set_pd(a,a,a,a);
|
||||
}
|
||||
//Integer
|
||||
inline __m256i operator()(Integer a){
|
||||
return _mm256_set1_epi32(a);
|
||||
}
|
||||
};
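A small stand-alone check of the lane order these splat calls produce; illustrative only, needs an AVX-enabled build (e.g. -mavx), and the store is done by hand rather than through the functors below.

// Verifies the (b,a,b,a,...) complex-splat layout used by Vsplat above.
#include <immintrin.h>
#include <cstdio>

int main(void){
  float a = 1.0f, b = 2.0f;
  __m256 v = _mm256_set_ps(b,a,b,a,b,a,b,a);       // same call as Vsplat(a,b)

  float out[8] __attribute__((aligned(32)));
  _mm256_store_ps(out, v);

  // _mm256_set_ps lists lanes high-to-low, so memory order is a,b,a,b,...
  for(int i=0;i<8;i+=2) std::printf("(%g,%g) ", out[i], out[i+1]);
  std::printf("\n");
  return 0;
}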
|
||||
|
||||
struct Vstore{
|
||||
//Float
|
||||
inline void operator()(__m256 a, float* F){
|
||||
_mm256_store_ps(F,a);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(__m256d a, double* D){
|
||||
_mm256_store_pd(D,a);
|
||||
}
|
||||
//Integer
|
||||
inline void operator()(__m256i a, Integer* I){
|
||||
_mm256_store_si256((__m256i*)I,a);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct Vstream{
|
||||
//Float
|
||||
inline void operator()(float * a, __m256 b){
|
||||
_mm256_stream_ps(a,b);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(double * a, __m256d b){
|
||||
_mm256_stream_pd(a,b);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
struct Vset{
|
||||
// Complex float
|
||||
inline __m256 operator()(Grid::ComplexF *a){
|
||||
return _mm256_set_ps(a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
|
||||
}
|
||||
// Complex double
|
||||
inline __m256d operator()(Grid::ComplexD *a){
|
||||
return _mm256_set_pd(a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
|
||||
}
|
||||
// Real float
|
||||
inline __m256 operator()(float *a){
|
||||
return _mm256_set_ps(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
// Real double
|
||||
inline __m256d operator()(double *a){
|
||||
return _mm256_set_pd(a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
// Integer
|
||||
inline __m256i operator()(Integer *a){
|
||||
return _mm256_set_epi32(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
template <typename Out_type, typename In_type>
|
||||
struct Reduce{
|
||||
//Need templated class to overload output type
|
||||
//General form must generate error if compiled
|
||||
inline Out_type operator()(In_type in){
|
||||
printf("Error, using wrong Reduce function\n");
|
||||
exit(1);
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
// Arithmetic operations
|
||||
/////////////////////////////////////////////////////
|
||||
struct Sum{
|
||||
//Complex/Real float
|
||||
inline __m256 operator()(__m256 a, __m256 b){
|
||||
return _mm256_add_ps(a,b);
|
||||
}
|
||||
//Complex/Real double
|
||||
inline __m256d operator()(__m256d a, __m256d b){
|
||||
return _mm256_add_pd(a,b);
|
||||
}
|
||||
//Integer
|
||||
inline __m256i operator()(__m256i a, __m256i b){
|
||||
#if defined (AVX1)
|
||||
__m128i a0,a1;
|
||||
__m128i b0,b1;
|
||||
a0 = _mm256_extractf128_si256(a,0);
|
||||
b0 = _mm256_extractf128_si256(b,0);
|
||||
a1 = _mm256_extractf128_si256(a,1);
|
||||
b1 = _mm256_extractf128_si256(b,1);
|
||||
a0 = _mm_add_epi32(a0,b0);
|
||||
a1 = _mm_add_epi32(a1,b1);
|
||||
return _mm256_set_m128i(a1,a0);
|
||||
#endif
|
||||
#if defined (AVX2)
|
||||
return _mm256_add_epi32(a,b);
|
||||
#endif
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
struct Sub{
|
||||
//Complex/Real float
|
||||
inline __m256 operator()(__m256 a, __m256 b){
|
||||
return _mm256_sub_ps(a,b);
|
||||
}
|
||||
//Complex/Real double
|
||||
inline __m256d operator()(__m256d a, __m256d b){
|
||||
return _mm256_sub_pd(a,b);
|
||||
}
|
||||
//Integer
|
||||
inline __m256i operator()(__m256i a, __m256i b){
|
||||
#if defined (AVX1)
|
||||
__m128i a0,a1;
|
||||
__m128i b0,b1;
|
||||
a0 = _mm256_extractf128_si256(a,0);
|
||||
b0 = _mm256_extractf128_si256(b,0);
|
||||
a1 = _mm256_extractf128_si256(a,1);
|
||||
b1 = _mm256_extractf128_si256(b,1);
|
||||
a0 = _mm_sub_epi32(a0,b0);
|
||||
a1 = _mm_sub_epi32(a1,b1);
|
||||
return _mm256_set_m128i(a1,a0);
|
||||
#endif
|
||||
#if defined (AVX2)
|
||||
return _mm256_sub_epi32(a,b);
|
||||
#endif
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct MultComplex{
|
||||
// Complex float
|
||||
inline __m256 operator()(__m256 a, __m256 b){
|
||||
__m256 ymm0,ymm1,ymm2;
|
||||
ymm0 = _mm256_shuffle_ps(a,a,_MM_SHUFFLE(2,2,0,0)); // ymm0 <- ar ar,
|
||||
ymm0 = _mm256_mul_ps(ymm0,b); // ymm0 <- ar bi, ar br
|
||||
// FIXME AVX2 could MAC
|
||||
ymm1 = _mm256_shuffle_ps(b,b,_MM_SHUFFLE(2,3,0,1)); // ymm1 <- br,bi
|
||||
ymm2 = _mm256_shuffle_ps(a,a,_MM_SHUFFLE(3,3,1,1)); // ymm2 <- ai,ai
|
||||
ymm1 = _mm256_mul_ps(ymm1,ymm2); // ymm1 <- br ai, ai bi
|
||||
return _mm256_addsub_ps(ymm0,ymm1);
|
||||
}
|
||||
// Complex double
|
||||
inline __m256d operator()(__m256d a, __m256d b){
|
||||
//Multiplication of (ak+ibk)*(ck+idk)
|
||||
// a + i b can be stored as a data structure
|
||||
//From intel optimisation reference guide
|
||||
/*
|
||||
movsldup xmm0, Src1; load real parts into the destination,
|
||||
; a1, a1, a0, a0
|
||||
movaps xmm1, src2; load the 2nd pair of complex values, ; i.e. d1, c1, d0, c0
|
||||
mulps xmm0, xmm1; temporary results, a1d1, a1c1, a0d0, ; a0c0
|
||||
shufps xmm1, xmm1, b1; reorder the real and imaginary ; parts, c1, d1, c0, d0
|
||||
movshdup xmm2, Src1; load the imaginary parts into the ; destination, b1, b1, b0, b0
|
||||
mulps xmm2, xmm1; temporary results, b1c1, b1d1, b0c0, ; b0d0
|
||||
addsubps xmm0, xmm2; b1c1+a1d1, a1c1 -b1d1, b0c0+a0d
|
||||
VSHUFPD (VEX.256 encoded version)
|
||||
IF IMM0[0] = 0
|
||||
THEN DEST[63:0]=SRC1[63:0] ELSE DEST[63:0]=SRC1[127:64] FI;
|
||||
IF IMM0[1] = 0
|
||||
THEN DEST[127:64]=SRC2[63:0] ELSE DEST[127:64]=SRC2[127:64] FI;
|
||||
IF IMM0[2] = 0
|
||||
THEN DEST[191:128]=SRC1[191:128] ELSE DEST[191:128]=SRC1[255:192] FI;
|
||||
IF IMM0[3] = 0
|
||||
THEN DEST[255:192]=SRC2[191:128] ELSE DEST[255:192]=SRC2[255:192] FI; // Ox5 r<->i ; 0xC unchanged
|
||||
*/
|
||||
|
||||
__m256d ymm0,ymm1,ymm2;
|
||||
ymm0 = _mm256_shuffle_pd(a,a,0x0); // ymm0 <- ar ar, ar,ar b'00,00
|
||||
ymm0 = _mm256_mul_pd(ymm0,b); // ymm0 <- ar bi, ar br
|
||||
ymm1 = _mm256_shuffle_pd(b,b,0x5); // ymm1 <- br,bi b'01,01
|
||||
ymm2 = _mm256_shuffle_pd(a,a,0xF); // ymm2 <- ai,ai b'11,11
|
||||
ymm1 = _mm256_mul_pd(ymm1,ymm2); // ymm1 <- br ai, ai bi
|
||||
return _mm256_addsub_pd(ymm0,ymm1);
|
||||
}
|
||||
};
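A plain-scalar cross-check of the shuffle/multiply/addsub decomposition used above; illustrative sketch only, modelling one complex number rather than a full 256-bit register.

// Scalar model of MultComplex: duplicate a's real part, multiply by b,
// then add/subtract the ai*(swapped b) term lane by lane.
#include <complex>
#include <iostream>

static std::complex<float> mult_like_avx(std::complex<float> a, std::complex<float> b){
  float ar = a.real(), ai = a.imag();
  float br = b.real(), bi = b.imag();

  float t0_re = ar*br, t0_im = ar*bi;   // ymm0 = (ar,ar) * (br,bi)
  float t1_re = ai*bi, t1_im = ai*br;   // ymm1 = (bi,br) * (ai,ai)

  // addsub: subtract in the real lane, add in the imaginary lane
  return std::complex<float>(t0_re - t1_re, t0_im + t1_im);
}

int main(void){
  std::complex<float> a(1.f,2.f), b(3.f,-4.f);
  std::cout << mult_like_avx(a,b) << " vs " << a*b << "\n";   // both (11,2)
  return 0;
}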
|
||||
|
||||
struct Mult{
|
||||
// Real float
|
||||
inline __m256 operator()(__m256 a, __m256 b){
|
||||
return _mm256_mul_ps(a,b);
|
||||
}
|
||||
// Real double
|
||||
inline __m256d operator()(__m256d a, __m256d b){
|
||||
return _mm256_mul_pd(a,b);
|
||||
}
|
||||
// Integer
|
||||
inline __m256i operator()(__m256i a, __m256i b){
|
||||
#if defined (AVX1)
|
||||
__m128i a0,a1;
|
||||
__m128i b0,b1;
|
||||
a0 = _mm256_extractf128_si256(a,0);
|
||||
b0 = _mm256_extractf128_si256(b,0);
|
||||
a1 = _mm256_extractf128_si256(a,1);
|
||||
b1 = _mm256_extractf128_si256(b,1);
|
||||
a0 = _mm_mul_epi32(a0,b0);
|
||||
a1 = _mm_mul_epi32(a1,b1);
|
||||
return _mm256_set_m128i(a1,a0);
|
||||
#endif
|
||||
#if defined (AVX2)
|
||||
return _mm256_mul_epi32(a,b);
|
||||
#endif
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct Conj{
|
||||
// Complex single
|
||||
inline __m256 operator()(__m256 in){
|
||||
return _mm256_xor_ps(_mm256_addsub_ps(_mm256_setzero_ps(),in), _mm256_set1_ps(-0.f));
|
||||
}
|
||||
// Complex double
|
||||
inline __m256d operator()(__m256d in){
|
||||
return _mm256_xor_pd(_mm256_addsub_pd(_mm256_setzero_pd(),in), _mm256_set1_pd(-0.f));//untested
|
||||
/*
|
||||
// original
|
||||
// addsubps 0, inv=>0+in.v[3] 0-in.v[2], 0+in.v[1], 0-in.v[0], ...
|
||||
__m256d tmp = _mm256_addsub_pd(_mm256_setzero_pd(),_mm256_shuffle_pd(in,in,0x5));
|
||||
return _mm256_shuffle_pd(tmp,tmp,0x5);
|
||||
*/
|
||||
}
|
||||
// do not define for integer input
|
||||
};
|
||||
|
||||
struct TimesMinusI{
|
||||
//Complex single
|
||||
inline __m256 operator()(__m256 in, __m256 ret){
|
||||
__m256 tmp =_mm256_addsub_ps(_mm256_setzero_ps(),in); // r,-i
|
||||
return _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(2,3,0,1)); //-i,r
|
||||
}
|
||||
//Complex double
|
||||
inline __m256d operator()(__m256d in, __m256d ret){
|
||||
__m256d tmp = _mm256_addsub_pd(_mm256_setzero_pd(),in); // r,-i
|
||||
return _mm256_shuffle_pd(tmp,tmp,0x5);
|
||||
}
|
||||
};
|
||||
|
||||
struct TimesI{
|
||||
//Complex single
|
||||
inline __m256 operator()(__m256 in, __m256 ret){
|
||||
__m256 tmp =_mm256_shuffle_ps(in,in,_MM_SHUFFLE(2,3,0,1)); // i,r
|
||||
return _mm256_addsub_ps(_mm256_setzero_ps(),tmp); // i,-r
|
||||
}
|
||||
//Complex double
|
||||
inline __m256d operator()(__m256d in, __m256d ret){
|
||||
__m256d tmp = _mm256_shuffle_pd(in,in,0x5);
|
||||
return _mm256_addsub_pd(_mm256_setzero_pd(),tmp); // i,-r
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
//////////////////////////////////////////////
|
||||
// Some Template specialization
|
||||
template < typename vtype >
|
||||
void permute(vtype &a, vtype b, int perm) {  // result returned through a; by-value would discard it
|
||||
union {
|
||||
__m256 f;
|
||||
vtype v;
|
||||
} conv;
|
||||
conv.v = b;
|
||||
switch (perm){
|
||||
// 8x32 bits=>3 permutes
|
||||
case 2: conv.f = _mm256_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(2,3,0,1)); break;
|
||||
case 1: conv.f = _mm256_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(1,0,3,2)); break;
|
||||
case 0: conv.f = _mm256_permute2f128_ps(conv.f,conv.f,0x01); break;
|
||||
default: assert(0); break;
|
||||
}
|
||||
a = conv.v;
|
||||
|
||||
}
|
||||
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, __m256>::operator()(__m256 in){
|
||||
__m256 v1,v2;
|
||||
Optimization::permute(v1,in,0); // sse 128; paired complex single
|
||||
v1 = _mm256_add_ps(v1,in);
|
||||
Optimization::permute(v2,v1,1); // avx 256; quad complex single
|
||||
v1 = _mm256_add_ps(v1,v2);
|
||||
return Grid::ComplexF(v1[0],v1[1]);
|
||||
}
|
||||
//Real float Reduce
|
||||
template<>
|
||||
inline Grid::RealF Reduce<Grid::RealF, __m256>::operator()(__m256 in){
|
||||
__m256 v1,v2;
|
||||
Optimization::permute(v1,in,0); // avx 256; octo-single
|
||||
v1 = _mm256_add_ps(v1,in);
|
||||
Optimization::permute(v2,v1,1);
|
||||
v1 = _mm256_add_ps(v1,v2);
|
||||
Optimization::permute(v2,v1,2);
|
||||
v1 = _mm256_add_ps(v1,v2);
|
||||
return v1[0];
|
||||
}
|
||||
|
||||
|
||||
//Complex double Reduce
|
||||
template<>
|
||||
inline Grid::ComplexD Reduce<Grid::ComplexD, __m256d>::operator()(__m256d in){
|
||||
__m256d v1;
|
||||
Optimization::permute(v1,in,0); // sse 128; paired complex single
|
||||
v1 = _mm256_add_pd(v1,in);
|
||||
return Grid::ComplexD(v1[0],v1[1]);
|
||||
}
|
||||
|
||||
//Real double Reduce
|
||||
template<>
|
||||
inline Grid::RealD Reduce<Grid::RealD, __m256d>::operator()(__m256d in){
|
||||
__m256d v1,v2;
|
||||
Optimization::permute(v1,in,0); // avx 256; quad double
|
||||
v1 = _mm256_add_pd(v1,in);
|
||||
Optimization::permute(v2,v1,1);
|
||||
v1 = _mm256_add_pd(v1,v2);
|
||||
return v1[0];
|
||||
}
|
||||
|
||||
//Integer Reduce
|
||||
template<>
|
||||
inline Integer Reduce<Integer, __m256i>::operator()(__m256i in){
|
||||
// FIXME unimplemented
|
||||
printf("Reduce : Missing integer implementation -> FIX\n");
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// Here assign types
|
||||
namespace Grid {
|
||||
typedef __m256 SIMD_Ftype; // Single precision type
|
||||
typedef __m256d SIMD_Dtype; // Double precision type
|
||||
typedef __m256i SIMD_Itype; // Integer type
|
||||
|
||||
|
||||
// Function name aliases
|
||||
typedef Optimization::Vsplat VsplatSIMD;
|
||||
typedef Optimization::Vstore VstoreSIMD;
|
||||
typedef Optimization::Vset VsetSIMD;
|
||||
typedef Optimization::Vstream VstreamSIMD;
|
||||
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
|
||||
|
||||
|
||||
// Arithmetic operations
|
||||
typedef Optimization::Sum SumSIMD;
|
||||
typedef Optimization::Sub SubSIMD;
|
||||
typedef Optimization::Mult MultSIMD;
|
||||
typedef Optimization::MultComplex MultComplexSIMD;
|
||||
typedef Optimization::Conj ConjSIMD;
|
||||
typedef Optimization::TimesMinusI TimesMinusISIMD;
|
||||
typedef Optimization::TimesI TimesISIMD;
|
||||
|
||||
}
|
323	lib/simd/Grid_knc.h	Normal file
@@ -0,0 +1,323 @@
|
||||
//----------------------------------------------------------------------
|
||||
/*! @file Grid_knc.h
|
||||
@brief Optimization libraries for AVX512 instructions set for KNC
|
||||
|
||||
Using intrinsics
|
||||
*/
|
||||
// Time-stamp: <2015-05-22 17:12:44 neo>
|
||||
//----------------------------------------------------------------------
|
||||
|
||||
#include <immintrin.h>
|
||||
#ifndef KNC_ONLY_STORES
|
||||
#define _mm512_storenrngo_ps _mm512_store_ps // not present in AVX512
|
||||
#define _mm512_storenrngo_pd _mm512_store_pd // not present in AVX512
|
||||
#endif
|
||||
|
||||
|
||||
namespace Optimization {
|
||||
|
||||
struct Vsplat{
|
||||
//Complex float
|
||||
inline __m512 operator()(float a, float b){
|
||||
return _mm512_set_ps(b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a);
|
||||
}
|
||||
// Real float
|
||||
inline __m512 operator()(float a){
|
||||
return _mm512_set1_ps(a);
|
||||
}
|
||||
//Complex double
|
||||
inline __m512d operator()(double a, double b){
|
||||
return _mm512_set_pd(b,a,b,a,b,a,b,a);
|
||||
}
|
||||
//Real double
|
||||
inline __m512d operator()(double a){
|
||||
return _mm512_set1_pd(a);
|
||||
}
|
||||
//Integer
|
||||
inline __m512i operator()(Integer a){
|
||||
return _mm512_set1_epi32(a);
|
||||
}
|
||||
};
|
||||
|
||||
struct Vstore{
|
||||
//Float
|
||||
inline void operator()(__m512 a, float* F){
|
||||
_mm512_store_ps(F,a);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(__m512d a, double* D){
|
||||
_mm512_store_pd(D,a);
|
||||
}
|
||||
//Integer
|
||||
inline void operator()(__m512i a, Integer* I){
|
||||
_mm512_store_si512((__m512i *)I,a);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct Vstream{
|
||||
//Float
|
||||
inline void operator()(float * a, __m512 b){
|
||||
_mm512_storenrngo_ps(a,b);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(double * a, __m512d b){
|
||||
_mm512_storenrngo_pd(a,b);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
struct Vset{
|
||||
// Complex float
|
||||
inline __m512 operator()(Grid::ComplexF *a){
|
||||
return _mm512_set_ps(a[7].imag(),a[7].real(),a[6].imag(),a[6].real(),
|
||||
a[5].imag(),a[5].real(),a[4].imag(),a[4].real(),
|
||||
a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),
|
||||
a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
|
||||
}
|
||||
// Complex double
|
||||
inline __m512d operator()(Grid::ComplexD *a){
|
||||
return _mm512_set_pd(a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),
|
||||
a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
|
||||
}
|
||||
// Real float
|
||||
inline __m512 operator()(float *a){
|
||||
return _mm512_set_ps( a[15],a[14],a[13],a[12],a[11],a[10],a[9],a[8],
|
||||
a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
// Real double
|
||||
inline __m512d operator()(double *a){
|
||||
return _mm512_set_pd(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
// Integer
|
||||
inline __m512i operator()(Integer *a){
|
||||
return _mm512_set_epi32( a[15],a[14],a[13],a[12],a[11],a[10],a[9],a[8],
|
||||
a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
template <typename Out_type, typename In_type>
|
||||
struct Reduce{
|
||||
//Need templated class to overload output type
|
||||
//General form must generate error if compiled
|
||||
inline Out_type operator()(In_type in){
|
||||
printf("Error, using wrong Reduce function\n");
|
||||
exit(1);
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
// Arithmetic operations
|
||||
/////////////////////////////////////////////////////
|
||||
struct Sum{
|
||||
//Complex/Real float
|
||||
inline __m512 operator()(__m512 a, __m512 b){
|
||||
return _mm512_add_ps(a,b);
|
||||
}
|
||||
//Complex/Real double
|
||||
inline __m512d operator()(__m512d a, __m512d b){
|
||||
return _mm512_add_pd(a,b);
|
||||
}
|
||||
//Integer
|
||||
inline __m512i operator()(__m512i a, __m512i b){
|
||||
return _mm512_add_epi32(a,b);
|
||||
}
|
||||
};
|
||||
|
||||
struct Sub{
|
||||
//Complex/Real float
|
||||
inline __m512 operator()(__m512 a, __m512 b){
|
||||
return _mm512_sub_ps(a,b);
|
||||
}
|
||||
//Complex/Real double
|
||||
inline __m512d operator()(__m512d a, __m512d b){
|
||||
return _mm512_sub_pd(a,b);
|
||||
}
|
||||
//Integer
|
||||
inline __m512i operator()(__m512i a, __m512i b){
|
||||
return _mm512_sub_epi32(a,b);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct MultComplex{
|
||||
// Complex float
|
||||
inline __m512 operator()(__m512 a, __m512 b){
|
||||
__m512 vzero,ymm0,ymm1,real, imag;
|
||||
vzero = _mm512_setzero_ps();
|
||||
ymm0 = _mm512_swizzle_ps(a, _MM_SWIZ_REG_CDAB); //
|
||||
real = (__m512)_mm512_mask_or_epi32((__m512i)a, 0xAAAA,(__m512i)vzero,(__m512i)ymm0);
|
||||
imag = _mm512_mask_sub_ps(a, 0x5555,vzero, ymm0);
|
||||
ymm1 = _mm512_mul_ps(real, b);
|
||||
ymm0 = _mm512_swizzle_ps(b, _MM_SWIZ_REG_CDAB); // OK
|
||||
return _mm512_fmadd_ps(ymm0,imag,ymm1);
|
||||
}
|
||||
// Complex double
|
||||
inline __m512d operator()(__m512d a, __m512d b){
|
||||
/* This is from
|
||||
* Automatic SIMD Vectorization of Fast Fourier Transforms for the Larrabee and AVX Instruction Sets
|
||||
* @inproceedings{McFarlin:2011:ASV:1995896.1995938,
|
||||
* author = {McFarlin, Daniel S. and Arbatov, Volodymyr and Franchetti, Franz and P\"{u}schel, Markus},
|
||||
* title = {Automatic SIMD Vectorization of Fast Fourier Transforms for the Larrabee and AVX Instruction Sets},
|
||||
* booktitle = {Proceedings of the International Conference on Supercomputing},
|
||||
* series = {ICS '11},
|
||||
* year = {2011},
|
||||
* isbn = {978-1-4503-0102-2},
|
||||
* location = {Tucson, Arizona, USA},
|
||||
* pages = {265--274},
|
||||
* numpages = {10},
|
||||
* url = {http://doi.acm.org/10.1145/1995896.1995938},
|
||||
* doi = {10.1145/1995896.1995938},
|
||||
* acmid = {1995938},
|
||||
* publisher = {ACM},
|
||||
* address = {New York, NY, USA},
|
||||
* keywords = {autovectorization, fourier transform, program generation, simd, super-optimization},
|
||||
* }
|
||||
*/
|
||||
__m512d vzero,ymm0,ymm1,real,imag;
|
||||
vzero =_mm512_setzero_pd();
|
||||
ymm0 = _mm512_swizzle_pd(a, _MM_SWIZ_REG_CDAB); //
|
||||
real =(__m512d)_mm512_mask_or_epi64((__m512i)a, 0xAA,(__m512i)vzero,(__m512i) ymm0);
|
||||
imag = _mm512_mask_sub_pd(a, 0x55,vzero, ymm0);
|
||||
ymm1 = _mm512_mul_pd(real, b);
|
||||
ymm0 = _mm512_swizzle_pd(b, _MM_SWIZ_REG_CDAB); // OK
|
||||
return _mm512_fmadd_pd(ymm0,imag,ymm1);
|
||||
}
|
||||
};
|
||||
|
||||
struct Mult{
|
||||
// Real float
|
||||
inline __m512 operator()(__m512 a, __m512 b){
|
||||
return _mm512_mul_ps(a,b);
|
||||
}
|
||||
// Real double
|
||||
inline __m512d operator()(__m512d a, __m512d b){
|
||||
return _mm512_mul_pd(a,b);
|
||||
}
|
||||
// Integer
|
||||
inline __m512i operator()(__m512i a, __m512i b){
|
||||
return _mm512_mullo_epi32(a,b);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct Conj{
|
||||
// Complex single
|
||||
inline __m512 operator()(__m512 in){
|
||||
return _mm512_mask_sub_ps(in,0xaaaa,_mm512_setzero_ps(),in); // Zero out 0+real 0-imag
|
||||
}
|
||||
// Complex double
|
||||
inline __m512d operator()(__m512d in){
|
||||
return _mm512_mask_sub_pd(in, 0xaa,_mm512_setzero_pd(), in);
|
||||
}
|
||||
// do not define for integer input
|
||||
};
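The masked subtract above flips the sign of the odd (imaginary) lanes only; a scalar model of that masking, illustrative and not the KNC intrinsic itself:

// Scalar model of _mm512_mask_sub_ps(in, 0xaaaa, zero, in): masked lanes become
// 0 - in[i] (negated imaginary parts), unmasked lanes pass through unchanged.
#include <cstdio>

int main(void){
  float in[8] = {1,2, 3,4, 5,6, 7,8};     // (re,im) pairs
  float out[8];
  unsigned mask = 0xaa;                   // bits 1,3,5,7 -> imaginary lanes

  for(int i=0;i<8;i++)
    out[i] = ((mask >> i) & 1) ? 0.0f - in[i] : in[i];

  for(int i=0;i<8;i+=2) std::printf("(%g,%g) ", out[i], out[i+1]);
  std::printf("\n");                      // (1,-2) (3,-4) (5,-6) (7,-8)
  return 0;
}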
|
||||
|
||||
struct TimesMinusI{
|
||||
//Complex single
|
||||
inline __m512 operator()(__m512 in, __m512 ret){
|
||||
__m512 tmp = _mm512_mask_sub_ps(in,0xaaaa,_mm512_setzero_ps(),in); // real -imag
|
||||
return _mm512_swizzle_ps(tmp, _MM_SWIZ_REG_CDAB);// OK
|
||||
}
|
||||
//Complex double
|
||||
inline __m512d operator()(__m512d in, __m512d ret){
|
||||
__m512d tmp = _mm512_mask_sub_pd(in,0xaa,_mm512_setzero_pd(),in); // real -imag
|
||||
return _mm512_swizzle_pd(tmp, _MM_SWIZ_REG_CDAB);// OK
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
struct TimesI{
|
||||
//Complex single
|
||||
inline __m512 operator()(__m512 in, __m512 ret){
|
||||
__m512 tmp = _mm512_swizzle_ps(in, _MM_SWIZ_REG_CDAB);// OK
|
||||
return _mm512_mask_sub_ps(tmp,0xaaaa,_mm512_setzero_ps(),tmp); // real -imag
|
||||
}
|
||||
//Complex double
|
||||
inline __m512d operator()(__m512d in, __m512d ret){
|
||||
__m512d tmp = _mm512_swizzle_pd(in, _MM_SWIZ_REG_CDAB);// OK
|
||||
return _mm512_mask_sub_pd(tmp,0xaa,_mm512_setzero_pd(),tmp); // real -imag
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
//////////////////////////////////////////////
|
||||
// Some Template specialization
|
||||
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, __m512>::operator()(__m512 in){
|
||||
return Grid::ComplexF(_mm512_mask_reduce_add_ps(0x5555, in),_mm512_mask_reduce_add_ps(0xAAAA, in));
|
||||
}
|
||||
//Real float Reduce
|
||||
template<>
|
||||
inline Grid::RealF Reduce<Grid::RealF, __m512>::operator()(__m512 in){
|
||||
return _mm512_reduce_add_ps(in);
|
||||
}
|
||||
|
||||
|
||||
//Complex double Reduce
|
||||
template<>
|
||||
inline Grid::ComplexD Reduce<Grid::ComplexD, __m512d>::operator()(__m512d in){
|
||||
return Grid::ComplexD(_mm512_mask_reduce_add_pd(0x55, in),_mm512_mask_reduce_add_pd(0xAA, in));
|
||||
}
|
||||
|
||||
//Real double Reduce
|
||||
template<>
|
||||
inline Grid::RealD Reduce<Grid::RealD, __m512d>::operator()(__m512d in){
|
||||
return _mm512_reduce_add_pd(in);
|
||||
}
|
||||
|
||||
//Integer Reduce
|
||||
template<>
|
||||
inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
|
||||
// FIXME unimplemented
|
||||
printf("Reduce : Missing integer implementation -> FIX\n");
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// Here assign types
|
||||
namespace Grid {
|
||||
typedef __m512 SIMD_Ftype; // Single precision type
|
||||
typedef __m512d SIMD_Dtype; // Double precision type
|
||||
typedef __m512i SIMD_Itype; // Integer type
|
||||
|
||||
|
||||
// Function name aliases
|
||||
typedef Optimization::Vsplat VsplatSIMD;
|
||||
typedef Optimization::Vstore VstoreSIMD;
|
||||
typedef Optimization::Vset VsetSIMD;
|
||||
typedef Optimization::Vstream VstreamSIMD;
|
||||
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
|
||||
|
||||
|
||||
// Arithmetic operations
|
||||
typedef Optimization::Sum SumSIMD;
|
||||
typedef Optimization::Sub SubSIMD;
|
||||
typedef Optimization::Mult MultSIMD;
|
||||
typedef Optimization::MultComplex MultComplexSIMD;
|
||||
typedef Optimization::Conj ConjSIMD;
|
||||
typedef Optimization::TimesMinusI TimesMinusISIMD;
|
||||
typedef Optimization::TimesI TimesISIMD;
|
||||
|
||||
}
|
272	lib/simd/Grid_qpx.h	Normal file
@@ -0,0 +1,272 @@
|
||||
//----------------------------------------------------------------------
|
||||
/*! @file Grid_qpx.h
|
||||
@brief Optimization libraries for QPX instructions set for BG/Q
|
||||
|
||||
Using intrinsics
|
||||
*/
|
||||
// Time-stamp: <2015-05-22 17:29:26 neo>
|
||||
//----------------------------------------------------------------------
|
||||
|
||||
// lot of undefined functions
|
||||
|
||||
namespace Optimization {
|
||||
|
||||
struct Vsplat{
|
||||
//Complex float
|
||||
inline float operator()(float a, float b){
|
||||
return {a,b,a,b};
|
||||
}
|
||||
// Real float
|
||||
inline float operator()(float a){
|
||||
return {a,a,a,a};
|
||||
}
|
||||
//Complex double
|
||||
inline vector4double operator()(double a, double b){
|
||||
return {a,b,a,b};
|
||||
}
|
||||
//Real double
|
||||
inline vector4double operator()(double a){
|
||||
return {a,a,a,a};
|
||||
}
|
||||
//Integer
|
||||
inline int operator()(Integer a){
|
||||
#error
|
||||
}
|
||||
};
|
||||
|
||||
struct Vstore{
|
||||
//Float
|
||||
inline void operator()(float a, float* F){
|
||||
assert(0);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(vector4double a, double* D){
|
||||
assert(0);
|
||||
}
|
||||
//Integer
|
||||
inline void operator()(int a, Integer* I){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct Vstream{
|
||||
//Float
|
||||
inline void operator()(float * a, float b){
|
||||
assert(0);
|
||||
}
|
||||
//Double
|
||||
inline void operator()(double * a, vector4double b){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
struct Vset{
|
||||
// Complex float
|
||||
inline float operator()(Grid::ComplexF *a){
|
||||
return {a[0].real(),a[0].imag(),a[1].real(),a[1].imag(),a[2].real(),a[2].imag(),a[3].real(),a[3].imag()};
|
||||
}
|
||||
// Complex double
|
||||
inline vector4double operator()(Grid::ComplexD *a){
|
||||
return {a[0].real(),a[0].imag(),a[1].real(),a[1].imag(),a[2].real(),a[2].imag(),a[3].real(),a[3].imag()};
|
||||
}
|
||||
// Real float
|
||||
inline float operator()(float *a){
|
||||
return {a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7]};
|
||||
}
|
||||
// Real double
|
||||
inline vector4double operator()(double *a){
|
||||
return {a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7]};
|
||||
}
|
||||
// Integer
|
||||
inline int operator()(Integer *a){
|
||||
#error
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
template <typename Out_type, typename In_type>
|
||||
struct Reduce{
|
||||
//Need templated class to overload output type
|
||||
//General form must generate error if compiled
|
||||
inline Out_type operator()(In_type in){
|
||||
printf("Error, using wrong Reduce function\n");
|
||||
exit(1);
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
// Arithmetic operations
|
||||
/////////////////////////////////////////////////////
|
||||
struct Sum{
|
||||
//Complex/Real float
|
||||
inline float operator()(float a, float b){
|
||||
#error
|
||||
}
|
||||
//Complex/Real double
|
||||
inline vector4double operator()(vector4double a, vector4double b){
|
||||
return vec_add(a,b);
|
||||
}
|
||||
//Integer
|
||||
inline int operator()(int a, int b){
|
||||
#error
|
||||
}
|
||||
};
|
||||
|
||||
struct Sub{
|
||||
//Complex/Real float
|
||||
inline float operator()(float a, float b){
|
||||
#error
|
||||
}
|
||||
//Complex/Real double
|
||||
inline vector4double operator()(vector4double a, vector4double b){
|
||||
#error
|
||||
}
|
||||
//Integer
|
||||
inline int operator()(int a, int b){
|
||||
#error
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct MultComplex{
|
||||
// Complex float
|
||||
inline float operator()(float a, float b){
|
||||
#error
|
||||
}
|
||||
// Complex double
|
||||
inline vector4double operator()(vector4double a, vector4double b){
|
||||
#error
|
||||
}
|
||||
};
|
||||
|
||||
struct Mult{
|
||||
// Real float
|
||||
inline float operator()(float a, float b){
|
||||
#error
|
||||
}
|
||||
// Real double
|
||||
inline vector4double operator()(vector4double a, vector4double b){
|
||||
#error
|
||||
}
|
||||
// Integer
|
||||
inline int operator()(int a, int b){
|
||||
#error
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
struct Conj{
|
||||
// Complex single
|
||||
inline float operator()(float in){
|
||||
assert(0);
|
||||
}
|
||||
// Complex double
|
||||
inline vector4double operator()(vector4double in){
|
||||
assert(0);
|
||||
}
|
||||
// do not define for integer input
|
||||
};
|
||||
|
||||
struct TimesMinusI{
|
||||
//Complex single
|
||||
inline float operator()(float in, float ret){
|
||||
assert(0);
|
||||
}
|
||||
//Complex double
|
||||
inline vector4double operator()(vector4double in, vector4double ret){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
struct TimesI{
|
||||
//Complex single
|
||||
inline float operator()(float in, float ret){
|
||||
|
||||
}
|
||||
//Complex double
|
||||
inline vector4double operator()(vector4double in, vector4double ret){
|
||||
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
//////////////////////////////////////////////
|
||||
// Some Template specialization
|
||||
|
||||
//Complex float Reduce
|
||||
template<>
|
||||
inline Grid::ComplexF Reduce<Grid::ComplexF, float>::operator()(float in){
|
||||
assert(0);
|
||||
}
|
||||
//Real float Reduce
|
||||
template<>
|
||||
inline Grid::RealF Reduce<Grid::RealF, float>::operator()(float in){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
//Complex double Reduce
|
||||
template<>
|
||||
inline Grid::ComplexD Reduce<Grid::ComplexD, vector4double>::operator()(vector4double in){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
//Real double Reduce
|
||||
template<>
|
||||
inline Grid::RealD Reduce<Grid::RealD, vector4double>::operator()(vector4double in){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
//Integer Reduce
|
||||
template<>
|
||||
inline Integer Reduce<Integer, int>::operator()(int in){
|
||||
assert(0);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////
|
||||
// Here assign types
|
||||
namespace Grid {
|
||||
typedef float SIMD_Ftype __attribute__ ((vector_size (16))); // Single precision type
|
||||
typedef vector4double SIMD_Dtype; // Double precision type
|
||||
typedef int SIMD_Itype; // Integer type
|
||||
|
||||
|
||||
// Function name aliases
|
||||
typedef Optimization::Vsplat VsplatSIMD;
|
||||
typedef Optimization::Vstore VstoreSIMD;
|
||||
typedef Optimization::Vset VsetSIMD;
|
||||
typedef Optimization::Vstream VstreamSIMD;
|
||||
template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;
|
||||
|
||||
|
||||
// Arithmetic operations
|
||||
typedef Optimization::Sum SumSIMD;
|
||||
typedef Optimization::Sub SubSIMD;
|
||||
typedef Optimization::Mult MultSIMD;
|
||||
typedef Optimization::MultComplex MultComplexSIMD;
|
||||
typedef Optimization::Conj ConjSIMD;
|
||||
typedef Optimization::TimesMinusI TimesMinusISIMD;
|
||||
typedef Optimization::TimesI TimesISIMD;
|
||||
|
||||
}
|
@@ -4,7 +4,7 @@

Using intrinsics
*/
// Time-stamp: <2015-05-20 16:45:39 neo>
// Time-stamp: <2015-05-21 18:06:30 neo>
//----------------------------------------------------------------------

#include <pmmintrin.h>
@@ -53,12 +53,12 @@ namespace Optimization {

struct Vstream{
//Float
inline void operator()(__m128 a, __m128 b){
_mm_stream_ps((float *)&a,b);
inline void operator()(float * a, __m128 b){
_mm_stream_ps(a,b);
}
//Double
inline void operator()(__m128d a, __m128d b){
_mm_stream_pd((double *)&a,b);
inline void operator()(double * a, __m128d b){
_mm_stream_pd(a,b);
}

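The signature change above matters because _mm_stream_ps/_mm_stream_pd are non-temporal stores to memory: streaming into the address of a by-value copy throws the result away. A minimal stand-alone usage sketch, illustrative only and assuming an SSE-enabled build:

// The destination is a real memory address, so the streaming store is visible
// to the caller; this mirrors the corrected Vstream::operator() signature.
#include <pmmintrin.h>
#include <cstdio>

int main(void){
  float dst[4] __attribute__((aligned(16))) = {0,0,0,0};
  __m128 v = _mm_set_ps(4.f,3.f,2.f,1.f);   // memory order after store: 1,2,3,4

  _mm_stream_ps(dst, v);                    // non-temporal store, bypasses the cache
  _mm_sfence();                             // make the streaming store globally visible

  std::printf("%g %g %g %g\n", dst[0], dst[1], dst[2], dst[3]);
  return 0;
}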
@@ -2,13 +2,23 @@
|
||||
/*! @file Grid_vector_types.h
|
||||
@brief Defines templated class Grid_simd to deal with inner vector types
|
||||
*/
|
||||
// Time-stamp: <2015-05-20 17:31:55 neo>
|
||||
// Time-stamp: <2015-05-26 13:44:54 neo>
|
||||
//---------------------------------------------------------------------------
|
||||
#ifndef GRID_VECTOR_TYPES
|
||||
#define GRID_VECTOR_TYPES
|
||||
|
||||
#ifdef SSE4
|
||||
#include "Grid_sse4.h"
|
||||
|
||||
#endif
|
||||
#if defined (AVX1)|| defined (AVX2)
|
||||
#include "Grid_avx.h"
|
||||
#endif
|
||||
#if defined AVX512
|
||||
#include "Grid_knc.h"
|
||||
#endif
|
||||
#if defined QPX
|
||||
#include "Grid_qpx.h"
|
||||
#endif
|
||||
|
||||
namespace Grid {
|
||||
|
||||
@@ -25,8 +35,6 @@ namespace Grid {
|
||||
template <typename Condition, typename ReturnType> using EnableIf = Invoke<std::enable_if<Condition::value, ReturnType>>;
|
||||
template <typename Condition, typename ReturnType> using NotEnableIf= Invoke<std::enable_if<!Condition::value, ReturnType>>;
|
||||
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// Check for complexity with type traits
|
||||
template <typename T> struct is_complex : std::false_type {};
|
||||
@@ -36,18 +44,71 @@ namespace Grid {
|
||||
// general forms to allow for vsplat syntax
|
||||
// need explicit declaration of types when used since
|
||||
// clang cannot automatically determine the output type sometimes
|
||||
// use decltype?
|
||||
template < class Out, class Input1, class Input2, class Operation >
|
||||
Out binary(Input1 src_1, Input2 src_2, Operation op){
|
||||
return op(src_1, src_2);
|
||||
}
|
||||
|
||||
template < class SIMDout, class Input, class Operation >
|
||||
SIMDout unary(Input src, Operation op){
|
||||
template < class Out, class Input, class Operation >
|
||||
Out unary(Input src, Operation op){
|
||||
return op(src);
|
||||
}
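A stripped-down illustration of how these wrappers dispatch to the functor structs defined in the per-architecture headers; self-contained, and the SumLike functor is a stand-in rather than Grid's Optimization::Sum.

// Sketch of the binary<ret_type>(a, b, Op()) pattern used below,
// e.g. binary<void>(out.v, in.v, VstreamSIMD()).
#include <iostream>

template < class Out, class Input1, class Input2, class Operation >
Out binary(Input1 src_1, Input2 src_2, Operation op){
  return op(src_1, src_2);            // same one-liner as above
}

struct SumLike {                      // plays the role of an Optimization functor
  inline double operator()(double a, double b){ return a + b; }
};

int main(void){
  double r = binary<double>(1.5, 2.25, SumLike());   // explicit output type
  std::cout << r << "\n";                            // 3.75
  return 0;
}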
|
||||
|
||||
///////////////////////////////////////////////
|
||||
|
||||
//////////////////////////////////////////////////////////
|
||||
// Permute
|
||||
// Permute 0 every ABCDEFGH -> BA DC FE HG
|
||||
// Permute 1 every ABCDEFGH -> CD AB GH EF
|
||||
// Permute 2 every ABCDEFGH -> EFGH ABCD
|
||||
// Permute 3 possible on longer iVector lengths (512bit = 8 double = 16 single)
|
||||
// Permute 4 possible on half precision @512bit vectors.
|
||||
//////////////////////////////////////////////////////////
|
||||
template<class vsimd>
|
||||
inline void Gpermute(vsimd &y,const vsimd &b,int perm){
|
||||
union {
|
||||
SIMD_Ftype f;
|
||||
decltype(vsimd::v) v;
|
||||
} conv;
|
||||
conv.v = b.v;
|
||||
switch (perm){
|
||||
#if defined(AVX1)||defined(AVX2)
|
||||
// 8x32 bits=>3 permutes
|
||||
case 2:
|
||||
conv.f = _mm256_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(2,3,0,1));
|
||||
break;
|
||||
case 1: conv.f = _mm256_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(1,0,3,2)); break;
|
||||
case 0: conv.f = _mm256_permute2f128_ps(conv.f,conv.f,0x01); break;
|
||||
#endif
|
||||
#ifdef SSE4
|
||||
case 1: conv.f = _mm_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(2,3,0,1)); break;
|
||||
case 0: conv.f = _mm_shuffle_ps(conv.f,conv.f,_MM_SHUFFLE(1,0,3,2));break;
|
||||
#endif
|
||||
#ifdef AVX512
|
||||
// 16 floats=> permutes
|
||||
// Permute 0 every abcd efgh ijkl mnop -> badc fehg jilk nmpo
|
||||
// Permute 1 every abcd efgh ijkl mnop -> cdab ghef jkij opmn
|
||||
// Permute 2 every abcd efgh ijkl mnop -> efgh abcd mnop ijkl
|
||||
// Permute 3 every abcd efgh ijkl mnop -> ijkl mnop abcd efgh
|
||||
case 3: conv.f = _mm512_swizzle_ps(conv.f,_MM_SWIZ_REG_CDAB); break;
|
||||
case 2: conv.f = _mm512_swizzle_ps(conv.f,_MM_SWIZ_REG_BADC); break;
|
||||
case 1: conv.f = _mm512_permute4f128_ps(conv.f,(_MM_PERM_ENUM)_MM_SHUFFLE(2,3,0,1)); break;
|
||||
case 0: conv.f = _mm512_permute4f128_ps(conv.f,(_MM_PERM_ENUM)_MM_SHUFFLE(1,0,3,2)); break;
|
||||
#endif
|
||||
#ifdef QPX
|
||||
#error not implemented
|
||||
#endif
|
||||
default: assert(0); break;
|
||||
}
|
||||
y.v=conv.v;
|
||||
|
||||
};
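A scalar illustration of the three permutation patterns for an 8-element single-precision vector; stand-alone sketch whose perm numbering follows the AVX switch cases above, not a Grid API.

// Scalar model of Gpermute on 8 lanes:
//   perm==2 : swap neighbours                ABCDEFGH -> BADCFEHG
//   perm==1 : swap pairs within each half    ABCDEFGH -> CDABGHEF
//   perm==0 : swap the two 128-bit halves    ABCDEFGH -> EFGHABCD
#include <cstdio>

static void permute_model(char y[8], const char b[8], int perm){
  int stride = 1 << (2 - perm);          // perm 2 -> 1, perm 1 -> 2, perm 0 -> 4
  for(int i=0;i<8;i++) y[i] = b[i ^ stride];
}

int main(void){
  const char in[8] = {'A','B','C','D','E','F','G','H'};
  char out[9] = {0};
  for(int perm=2; perm>=0; perm--){
    permute_model(out, in, perm);
    std::printf("perm %d : %s\n", perm, out);
  }
  return 0;
}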
|
||||
|
||||
///////////////////////////////////////
|
||||
|
||||
|
||||
|
||||
/*
|
||||
@brief Grid_simd class for the SIMD vector type operations
|
||||
*/
|
||||
@@ -56,27 +117,34 @@ namespace Grid {
|
||||
|
||||
public:
|
||||
typedef typename RealPart < Scalar_type >::type Real;
|
||||
typedef Vector_type vector_type;
|
||||
typedef Scalar_type scalar_type;
|
||||
|
||||
Vector_type v;
|
||||
|
||||
|
||||
|
||||
static inline int Nsimd(void) { return sizeof(Vector_type)/sizeof(Scalar_type);}
|
||||
|
||||
|
||||
// Constructors
|
||||
Grid_simd & operator = ( Zero & z){
|
||||
vzero(*this);
|
||||
return (*this);
|
||||
}
|
||||
Grid_simd(){};
|
||||
|
||||
|
||||
Grid_simd& operator=(const Grid_simd&& rhs){v=rhs.v;return *this;};
|
||||
Grid_simd& operator=(const Grid_simd& rhs){v=rhs.v;return *this;}; //faster than not declaring it and leaving to the compiler
|
||||
Grid_simd()=default;
|
||||
Grid_simd(const Grid_simd& rhs):v(rhs.v){}; //compiles in movaps
|
||||
Grid_simd(const Grid_simd&& rhs):v(rhs.v){};
|
||||
|
||||
//Enable if complex type
|
||||
template < class S = Scalar_type >
|
||||
Grid_simd(typename std::enable_if< is_complex < S >::value, S>::type a){
|
||||
Grid_simd(const typename std::enable_if< is_complex < S >::value, S>::type a){
|
||||
vsplat(*this,a);
|
||||
};
|
||||
|
||||
|
||||
Grid_simd(Real a){
|
||||
Grid_simd(const Real a){
|
||||
vsplat(*this,Scalar_type(a));
|
||||
};
|
||||
|
||||
@@ -88,18 +156,25 @@ namespace Grid {
|
||||
friend inline void sub (Grid_simd * __restrict__ y,const Grid_simd * __restrict__ l,const Grid_simd *__restrict__ r){ *y = (*l) - (*r); }
|
||||
friend inline void add (Grid_simd * __restrict__ y,const Grid_simd * __restrict__ l,const Grid_simd *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
//not for integer types... FIXME
|
||||
|
||||
friend inline void mac (Grid_simd *__restrict__ y,const Scalar_type *__restrict__ a,const Grid_simd *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
friend inline void mult(Grid_simd *__restrict__ y,const Scalar_type *__restrict__ l,const Grid_simd *__restrict__ r){ *y = (*l) * (*r); }
|
||||
friend inline void sub (Grid_simd *__restrict__ y,const Scalar_type *__restrict__ l,const Grid_simd *__restrict__ r){ *y = (*l) - (*r); }
|
||||
friend inline void add (Grid_simd *__restrict__ y,const Scalar_type *__restrict__ l,const Grid_simd *__restrict__ r){ *y = (*l) + (*r); }
|
||||
friend inline void mac (Grid_simd *__restrict__ y,const Grid_simd *__restrict__ a,const Scalar_type *__restrict__ x){ *y = (*a)*(*x)+(*y); };
|
||||
friend inline void mult(Grid_simd *__restrict__ y,const Grid_simd *__restrict__ l,const Scalar_type *__restrict__ r){ *y = (*l) * (*r); }
|
||||
friend inline void sub (Grid_simd *__restrict__ y,const Grid_simd *__restrict__ l,const Scalar_type *__restrict__ r){ *y = (*l) - (*r); }
|
||||
friend inline void add (Grid_simd *__restrict__ y,const Grid_simd *__restrict__ l,const Scalar_type *__restrict__ r){ *y = (*l) + (*r); }
|
||||
|
||||
|
||||
|
||||
//not for integer types...
|
||||
template < class S = Scalar_type, NotEnableIf<std::is_integral < S >, int> = 0 >
|
||||
friend inline Grid_simd adj(const Grid_simd &in){ return conjugate(in); }
|
||||
|
||||
///////////////////////////////////////////////
|
||||
// Initialise to 1,0,i for the correct types
|
||||
///////////////////////////////////////////////
|
||||
// if not complex overload here
|
||||
template < class S = Scalar_type, NotEnableIf<is_complex < S >,int> = 0 >
|
||||
friend inline void vone(Grid_simd &ret) { vsplat(ret,1.0); }
|
||||
template < class S = Scalar_type, NotEnableIf<is_complex < S >,int> = 0 >
|
||||
friend inline void vzero(Grid_simd &ret) { vsplat(ret,0.0); }
|
||||
|
||||
// For complex types
|
||||
template < class S = Scalar_type, EnableIf<is_complex < S >, int> = 0 >
|
||||
friend inline void vone(Grid_simd &ret) { vsplat(ret,1.0,0.0); }
|
||||
@@ -107,6 +182,14 @@ namespace Grid {
|
||||
friend inline void vzero(Grid_simd &ret) { vsplat(ret,0.0,0.0); }// use xor?
|
||||
template < class S = Scalar_type, EnableIf<is_complex < S >, int> = 0 >
|
||||
friend inline void vcomplex_i(Grid_simd &ret){ vsplat(ret,0.0,1.0);}
|
||||
|
||||
// if not complex overload here
|
||||
template < class S = Scalar_type, EnableIf<std::is_floating_point < S >,int> = 0 >
|
||||
friend inline void vone(Grid_simd &ret) { vsplat(ret,1.0); }
|
||||
template < class S = Scalar_type, EnableIf<std::is_floating_point < S >,int> = 0 >
|
||||
friend inline void vzero(Grid_simd &ret) { vsplat(ret,0.0); }
|
||||
|
||||
|
||||
|
||||
// For integral types
|
||||
template < class S = Scalar_type, EnableIf<std::is_integral < S >, int> = 0 >
|
||||
@@ -116,7 +199,7 @@ namespace Grid {
|
||||
template < class S = Scalar_type, EnableIf<std::is_integral < S >, int> = 0 >
|
||||
friend inline void vtrue (Grid_simd &ret){vsplat(ret,0xFFFFFFFF);}
|
||||
template < class S = Scalar_type, EnableIf<std::is_integral < S >, int> = 0 >
|
||||
friend inline void vfalse(vInteger &ret){vsplat(ret,0);}
|
||||
friend inline void vfalse(Grid_simd &ret){vsplat(ret,0);}
|
||||
|
||||
////////////////////////////////////
|
||||
// Arithmetic operator overloads +,-,*
|
||||
@@ -192,8 +275,9 @@ namespace Grid {
|
||||
///////////////////////
|
||||
// Vstream
|
||||
///////////////////////
|
||||
template < class S = Scalar_type, NotEnableIf<std::is_integral < S >, int> = 0 >
|
||||
friend inline void vstream(Grid_simd &out,const Grid_simd &in){
|
||||
binary<void>(out.v, in.v, VstreamSIMD());
|
||||
binary<void>((Real*)&out.v, in.v, VstreamSIMD());
|
||||
}
|
||||
|
||||
template < class S = Scalar_type, EnableIf<std::is_integral < S >, int> = 0 >
|
||||
@@ -291,7 +375,7 @@ namespace Grid {
|
||||
// Unary negation
|
||||
///////////////////////
|
||||
friend inline Grid_simd operator -(const Grid_simd &r) {
|
||||
vComplexF ret;
|
||||
Grid_simd ret;
|
||||
vzero(ret);
|
||||
ret = ret - r;
|
||||
return ret;
|
||||
@@ -336,7 +420,7 @@ namespace Grid {
|
||||
}
|
||||
|
||||
template<class scalar_type, class vector_type >
|
||||
inline void zeroit(Grid_simd< scalar_type, vector_type> &z){ vzero(z);}
|
||||
inline void zeroit(Grid_simd< scalar_type, vector_type> &z){ vzero(z);}
|
||||
|
||||
|
||||
template<class scalar_type, class vector_type >
|
||||
@@ -354,33 +438,15 @@ namespace Grid {
|
||||
|
||||
// Define available types (now change names to avoid clashing with the rest of the code)
|
||||
|
||||
typedef Grid_simd< float , SIMD_Ftype > MyRealF;
|
||||
typedef Grid_simd< double , SIMD_Dtype > MyRealD;
|
||||
typedef Grid_simd< std::complex< float > , SIMD_Ftype > MyComplexF;
|
||||
typedef Grid_simd< std::complex< double >, SIMD_Dtype > MyComplexD;
|
||||
typedef Grid_simd< float , SIMD_Ftype > vRealF;
|
||||
typedef Grid_simd< double , SIMD_Dtype > vRealD;
|
||||
typedef Grid_simd< std::complex< float > , SIMD_Ftype > vComplexF;
|
||||
typedef Grid_simd< std::complex< double >, SIMD_Dtype > vComplexD;
|
||||
typedef Grid_simd< Integer , SIMD_Itype > vInteger;
|
||||
|
||||
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////
|
||||
// Temporary hack to keep independent from the rest of the code
|
||||
template<> struct isGridTensor<MyRealD > {
|
||||
static const bool value = false;
|
||||
static const bool notvalue = true;
|
||||
};
|
||||
template<> struct isGridTensor<MyRealF > {
|
||||
static const bool value = false;
|
||||
static const bool notvalue = true;
|
||||
};
|
||||
template<> struct isGridTensor<MyComplexD > {
|
||||
static const bool value = false;
|
||||
static const bool notvalue = true;
|
||||
};
|
||||
template<> struct isGridTensor<MyComplexF > {
|
||||
static const bool value = false;
|
||||
static const bool notvalue = true;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
@@ -54,7 +54,7 @@ namespace Grid {
//////////////////////////////////
friend inline void vone(vComplexF &ret) { vsplat(ret,1.0,0.0); }
friend inline void vzero(vComplexF &ret) { vsplat(ret,0.0,0.0); }
friend inline void vcomplex_i(vComplexF &ret){ vsplat(ret,0.0,1.0);}
friend inline void vcomplex_i(vComplexF &ret){ vsplat(ret,0.0,1.0); }

////////////////////////////////////
// Arithmetic operator overloads +,-,*