mirror of https://github.com/paboyle/Grid.git synced 2025-06-12 20:27:06 +01:00

Simplify the compressor interface again.

Author: Peter Boyle
Date: 2016-02-17 18:16:45 -06:00
parent c650bb3f3d
commit c9fadf97a5
8 changed files with 228 additions and 46 deletions
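The recurring edit in the diff below hoists the SIMD lane count into a static const that is evaluated once per template instantiation rather than on every call, and fences the lane-consistency check in the scalar extract behind #ifdef PARANOID so it costs nothing in production builds. A minimal sketch of that pattern, assuming a hypothetical Simd4 type standing in for Grid's vsimd template parameter (extract_sketch is an illustrative name, not Grid's API):

#include <cassert>
#include <vector>

// Hypothetical stand-in for a Grid vsimd type: four float lanes.
struct Simd4 {
  static int Nsimd() { return 4; }  // SIMD lane count
  float v[4];
};

// Sketch of the post-commit shape of extract(): the idiom, not Grid's code.
template<class vsimd, class scalar>
inline void extract_sketch(const vsimd &y, std::vector<scalar> &extracted) {
  static const int Nsimd = vsimd::Nsimd();  // evaluated once per instantiation
  int Nextr = extracted.size();
  int s = Nsimd / Nextr;                    // splat stride: lanes per extracted value
  const scalar *buf = (const scalar *)&y;
  for (int i = 0; i < Nextr; i++) {
    extracted[i] = buf[i * s];
#ifdef PARANOID
    // Debug builds only: every lane within a splat must hold the same value.
    for (int ii = 1; ii < s; ii++)
      assert(buf[i * s] == buf[i * s + ii]);
#endif
  }
}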


@@ -44,8 +44,8 @@ template<class vsimd,class scalar>
 inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const vsimd >::type * y,
                     std::vector<scalar *> &extracted,int offset){
   // FIXME: bounce off memory is painful
+  static const int Nsimd=vsimd::Nsimd();
   int Nextr=extracted.size();
-  int Nsimd=vsimd::Nsimd();
   int s=Nsimd/Nextr;
   scalar*buf = (scalar *)y;
@@ -59,8 +59,8 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v
 template<class vsimd,class scalar>
 inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd >::type * y,
                   std::vector<scalar *> &extracted,int offset){
+  static const int Nsimd=vsimd::Nsimd();
   int Nextr=extracted.size();
-  int Nsimd=vsimd::Nsimd();
   int s=Nsimd/Nextr; // can have sparse occupation of simd vector if simd_layout does not fill it
   // replicate n-fold. Use to allow Integer masks to
   // predicate floating point of various width assignments and maintain conformable.
@@ -85,6 +85,7 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v
   scalar *buf = (scalar *)&y;
   for(int i=0;i<Nextr;i++){
     extracted[i]=buf[i*s];
+#ifdef PARANOID
     for(int ii=1;ii<s;ii++){
       if ( buf[i*s]!=buf[i*s+ii] ){
         std::cout<<GridLogMessage << " SIMD extract failure splat = "<<s<<" ii "<<ii<<" " <<Nextr<<" "<< Nsimd<<" "<<std::endl;
@@ -96,6 +97,7 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v
       }
       assert(buf[i*s]==buf[i*s+ii]);
     }
+#endif
   }
 };
@@ -106,7 +108,7 @@ inline void extract(typename std::enable_if<!isGridTensor<vsimd>::value, const v
 template<class vsimd,class scalar>
 inline void merge(typename std::enable_if<!isGridTensor<vsimd>::value, vsimd >::type &y,std::vector<scalar> &extracted){
   int Nextr=extracted.size();
-  int Nsimd=vsimd::Nsimd();
+  static const int Nsimd=vsimd::Nsimd();
   int s=Nsimd/Nextr;
   scalar *buf = (scalar *)&y;
@@ -125,9 +127,9 @@ template<class vobj> inline void extract(const vobj &vec,std::vector<typename vo
   typedef typename vobj::scalar_type scalar_type ;
   typedef typename vobj::vector_type vector_type ;
-  const int Nsimd=vobj::vector_type::Nsimd();
+  static const int Nsimd=vobj::vector_type::Nsimd();
+  static const int words=sizeof(vobj)/sizeof(vector_type);
   int Nextr=extracted.size();
-  const int words=sizeof(vobj)/sizeof(vector_type);
   int s=Nsimd/Nextr;
   std::vector<scalar_type *> pointers(Nextr);
@@ -148,8 +150,8 @@ void extract(const vobj &vec,std::vector<typename vobj::scalar_object *> &extrac
   typedef typename vobj::scalar_type scalar_type ;
   typedef typename vobj::vector_type vector_type ;
-  const int words=sizeof(vobj)/sizeof(vector_type);
-  const int Nsimd=vobj::vector_type::Nsimd();
+  static const int words=sizeof(vobj)/sizeof(vector_type);
+  static const int Nsimd=vobj::vector_type::Nsimd();
   int Nextr=extracted.size();
   int s = Nsimd/Nextr;
@@ -172,8 +174,8 @@ void merge(vobj &vec,std::vector<typename vobj::scalar_object> &extracted)
   typedef typename vobj::scalar_type scalar_type ;
   typedef typename vobj::vector_type vector_type ;
-  const int Nsimd=vobj::vector_type::Nsimd();
-  const int words=sizeof(vobj)/sizeof(vector_type);
+  static const int Nsimd=vobj::vector_type::Nsimd();
+  static const int words=sizeof(vobj)/sizeof(vector_type);
   int Nextr = extracted.size();
   int splat=Nsimd/Nextr;
@@ -224,8 +226,8 @@ void merge1(vobj &vec,std::vector<typename vobj::scalar_object *> &extracted,int
   typedef typename vobj::scalar_type scalar_type ;
   typedef typename vobj::vector_type vector_type ;
-  const int Nsimd=vobj::vector_type::Nsimd();
-  const int words=sizeof(vobj)/sizeof(vector_type);
+  static const int Nsimd=vobj::vector_type::Nsimd();
+  static const int words=sizeof(vobj)/sizeof(vector_type);
   scalar_type *vp = (scalar_type *)&vec;
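For the opposite direction, a hedged sketch of the merge idiom the overloads above share, again using the hypothetical Simd4 stand-in (merge_sketch is illustrative, not Grid's API): each of the Nextr scalar values is replicated s times to fill its splat of lanes, which is the n-fold replication the comments in the diff refer to.

#include <vector>

// Sketch of the merge direction: scatter Nextr scalars back into the
// SIMD vector, replicating each value across its s-lane splat.
template<class vsimd, class scalar>
inline void merge_sketch(vsimd &y, const std::vector<scalar> &extracted) {
  static const int Nsimd = vsimd::Nsimd();  // same once-per-instantiation constant
  int Nextr = extracted.size();
  int s = Nsimd / Nextr;                    // lanes per merged value
  scalar *buf = (scalar *)&y;
  for (int i = 0; i < Nextr; i++)
    for (int ii = 0; ii < s; ii++)
      buf[i * s + ii] = extracted[i];       // n-fold replication fills the vector
}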