mirror of https://github.com/paboyle/Grid.git synced 2024-11-09 23:45:36 +00:00

Initial commit of Grid to GitHub

This commit is contained in:
Peter Boyle 2015-03-04 03:12:19 +00:00
parent 8144438023
commit 8b17cbf9d7
11 changed files with 3616 additions and 0 deletions

42
.gitignore vendored Normal file

@@ -0,0 +1,42 @@
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# http://www.gnu.org/software/automake
Makefile.in
# http://www.gnu.org/software/autoconf
/autom4te.cache
/aclocal.m4
/compile
/configure
/depcomp
/install-sh
/missing
/stamp-h1

1171
Grid.h Normal file

File diff suppressed because it is too large

178
Grid_Cartesian.h Normal file

@@ -0,0 +1,178 @@
#include "Grid.h"
#include "Grid_vComplexD.h"
namespace dpo{
class GridCartesian: public Grid {
public:
virtual int CheckerBoard(std::vector<int> site){
return 0;
}
virtual int CheckerBoardDestination(int cb,int shift){
return 0;
}
virtual int CheckerBoardShift(int source_cb,int dim,int shift){
return shift;
}
GridCartesian(std::vector<int> &dimensions,std::vector<int> layout)
{
///////////////////////
// Grid information
///////////////////////
_ndimension = dimensions.size();
_dimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_layout.resize(_ndimension);
_ostride.resize(_ndimension);
_istride.resize(_ndimension);
_osites = 1;
_isites = 1;
for(int d=0;d<_ndimension;d++){
_dimensions[d] = dimensions[d];
_layout[d] = layout[d];
// Use a reduced simd grid
_rdimensions[d]= _dimensions[d]/_layout[d];
_osites *= _rdimensions[d];
_isites *= _layout[d];
// Addressing support
if ( d==0 ) {
_ostride[d] = 1;
_istride[d] = 1;
} else {
_ostride[d] = _ostride[d-1]*_rdimensions[d-1];
_istride[d] = _istride[d-1]*_layout[d-1];
}
}
///////////////////////
// subplane information
///////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);
int block =1;
int nblock=1;
for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d];
for(int d=0;d<_ndimension;d++){
nblock/=_rdimensions[d];
_slice_block[d] =block;
_slice_stride[d]=_ostride[d]*_rdimensions[d];
_slice_nblock[d]=nblock;
block = block*_rdimensions[d];
}
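// Worked example (assuming a 4d grid with _rdimensions = {4,4,4,4},
// so _ostride = {1,4,16,64}):
//   d=0: block=1,  stride=4,   nblock=64
//   d=1: block=4,  stride=16,  nblock=16
//   d=2: block=16, stride=64,  nblock=4
//   d=3: block=64, stride=256, nblock=1
// A slice perpendicular to d is nblock contiguous runs of block sites, separated by stride.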
if ( _isites != vComplex::Nsimd()) {
printf("bad layout for grid isites %d Nsimd %d\n",_isites,vComplex::Nsimd());
exit(0);
}
};
};
// Specialise this for red black grids storing half the data like a chess board.
class GridRedBlackCartesian : public Grid
{
public:
virtual int CheckerBoard(std::vector<int> site){
return site[0]&0x1;
}
virtual int CheckerBoardShift(int source_cb,int dim,int shift){
if ( dim == 0 ){
int fulldim =2*_dimensions[0];
shift = (shift+fulldim)%fulldim;
if ( source_cb ) {
// Shift 0,1 -> 0
return (shift+1)/2;
} else {
// Shift 0->0, 1 -> 1, 2->1
return (shift)/2;
}
} else {
return shift;
}
}
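// Worked example (assuming full extent 2*_dimensions[0] = 8, checkerboard extent 4):
//   source_cb=1: shift 0->0, 1->1, 2->1, 3->2   i.e. (shift+1)/2
//   source_cb=0: shift 0->0, 1->0, 2->1, 3->1   i.e. shift/2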
virtual int CheckerBoardDestination(int source_cb,int shift){
if ((shift+2*_dimensions[0])&0x1) {
return 1-source_cb;
} else {
return source_cb;
}
};
GridRedBlackCartesian(std::vector<int> &dimensions,std::vector<int> layout)
{
///////////////////////
// Grid information
///////////////////////
_ndimension = dimensions.size();
_dimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_layout.resize(_ndimension);
_ostride.resize(_ndimension);
_istride.resize(_ndimension);
_osites = 1;
_isites = 1;
for(int d=0;d<_ndimension;d++){
_dimensions[d] = dimensions[d];
_dimensions[0] = dimensions[0]/2; // red-black storage halves dimension 0 (re-applied each pass; idempotent)
_layout[d] = layout[d];
// Use a reduced simd grid
_rdimensions[d]= _dimensions[d]/_layout[d];
_osites *= _rdimensions[d];
_isites *= _layout[d];
// Addressing support
if ( d==0 ) {
_ostride[d] = 1;
_istride[d] = 1;
} else {
_ostride[d] = _ostride[d-1]*_rdimensions[d-1];
_istride[d] = _istride[d-1]*_layout[d-1];
}
}
////////////////////////////////////////////////////////////////////////////////////////////
// subplane information
// It may be worth the investment of generating a more general subplane "iterator",
// and providing support for threads grabbing a unit of allocation.
////////////////////////////////////////////////////////////////////////////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);
int block =1;
int nblock=1;
for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d];
for(int d=0;d<_ndimension;d++){
nblock/=_rdimensions[d];
_slice_block[d] =block;
_slice_stride[d]=_ostride[d]*_rdimensions[d];
_slice_nblock[d]=nblock;
block = block*_rdimensions[d];
}
if ( _isites != vComplex::Nsimd()) {
printf("bad layout for grid isites %d Nsimd %d\n",_isites,vComplex::Nsimd());
exit(0);
}
};
protected:
virtual int oIndex(std::vector<int> &coor)
{
int idx=_ostride[0]*((coor[0]/2)%_rdimensions[0]);
for(int d=1;d<_ndimension;d++) idx+=_ostride[d]*(coor[d]%_rdimensions[d]);
return idx;
};
};
}
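A minimal stand-alone sketch of the indexing these strides imply; it is not part of the header, and osIndex/isIndex are hypothetical helper names. The outer index selects the vector within storage via _ostride over the reduced grid, while the inner index selects the SIMD lane via _istride over the layout.
#include <vector>
// Outer (memory) index: coordinate modulo the reduced grid, weighted by the outer stride.
int osIndex(const std::vector<int> &coor,const std::vector<int> &rdim,const std::vector<int> &ostride){
  int idx=0;
  for(int d=0;d<(int)coor.size();d++) idx += ostride[d]*(coor[d]%rdim[d]);
  return idx;
}
// Inner (SIMD lane) index: coordinate divided by the reduced grid, weighted by the inner stride.
int isIndex(const std::vector<int> &coor,const std::vector<int> &rdim,const std::vector<int> &istride){
  int idx=0;
  for(int d=0;d<(int)coor.size();d++) idx += istride[d]*(coor[d]/rdim[d]);
  return idx;
}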

648
Grid_Lattice.h Normal file

@@ -0,0 +1,648 @@
#include "Grid.h"
#include "Grid_vComplexD.h"
namespace dpo {
template<class vobj>
class Lattice
{
public:
Grid *_grid;
int checkerboard;
std::vector<vobj,myallocator<vobj> > _odata;
public:
Lattice(Grid *grid) : _grid(grid) {
_odata.reserve(_grid->oSites());
if ( ((uint64_t)&_odata[0])&0xF) { // insist the allocation is 16-byte aligned
exit(-1);
}
checkerboard=0;
}
// overloading dpo::conformable but no conformable in dpo ...?
template<class obj1,class obj2>
friend void conformable(const Lattice<obj1> &lhs,const Lattice<obj2> &rhs);
// Performance difference between operator * and mult is troubling.
// The auto-generated move constructor seems to lose surprisingly much.
// Site-wise binary operations:
// We eliminate a temporary object assignment if we use the mult,add,sub routines.
// For the operator versions we rely on the move constructor to eliminate the
// vector copy back.
template<class obj1,class obj2,class obj3>
friend void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs);
template<class obj1,class obj2,class obj3>
friend void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs);
template<class obj1,class obj2,class obj3>
friend void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs);
friend void axpy(Lattice<vobj> &ret,double a,const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
axpy(&ret._odata[ss],a,&lhs._odata[ss],&rhs._odata[ss]);
}
}
friend void axpy(Lattice<vobj> &ret,std::complex<double> a,const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
axpy(&ret._odata[ss],a,&lhs._odata[ss],&rhs._odata[ss]);
}
}
inline friend Lattice<vobj> operator / (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = lhs._odata[ss]/rhs._odata[ss];
}
return ret;
};
#if 0
friend Lattice<vobj> Cshift(Lattice<vobj> &rhs,int dimension,int shift)
{
Lattice<vobj> ret(rhs._grid);
ret.checkerboard = rhs._grid->CheckerBoardDestination(rhs.checkerboard,shift);
shift = rhs._grid->CheckerBoardShift(rhs.checkerboard,dimension,shift);
int sx,so,o;
int rd = rhs._grid->_rdimensions[dimension];
int ld = rhs._grid->_dimensions[dimension];
// Map to always positive shift.
shift = (shift+ld)%ld;
// Work out whether to permute and the permute type
// ABCDEFGH -> AE BF CG DH permute
// Shift 0 AE BF CG DH 0 0 0 0 ABCDEFGH
// Shift 1 DH AE BF CG 1 0 0 0 HABCDEFG
// Shift 2 CG DH AE BF 1 1 0 0 GHABCDEF
// Shift 3 BF CG DH AE 1 1 1 0 FGHACBDE
// Shift 4 AE BF CG DH 1 1 1 1 EFGHABCD
// Shift 5 DH AE BF CG 0 1 1 1 DEFGHABC
// Shift 6 CG DH AE BF 0 0 1 1 CDEFGHAB
// Shift 7 BF CG DH AE 0 0 0 1 BCDEFGHA
int permute_dim =rhs._grid->_layout[dimension]>1 ;
int permute_type=0;
for(int d=0;d<dimension;d++)
if (rhs._grid->_layout[d]>1 ) permute_type++;
// loop over perp slices.
// Threading considerations:
// Need to map thread_num to
//
// x_min,x_max for Loop-A
// n_min,n_max for Loop-B
// b_min,b_max for Loop-C
// In a way that maximally load balances.
//
// Optimal:
// There are rd*n_block*block items of work.
// These serialise as item "w"
// b=w%block; w=w/block
// n=w%nblock; x=w/nblock. Perhaps 20 cycles?
//
// Logic:
// x_chunk = (rd+thread)/nthreads simply divide work across nodes.
//
// rd=5 , threads = 8;
// 0 1 2 3 4 5 6 7
// 0 0 0 1 1 1 1 1
for(int x=0;x<rd;x++){ // Loop A
sx = (x-shift+ld)%rd;
o = x*rhs._grid->_ostride[dimension];
so =sx*rhs._grid->_ostride[dimension];
int permute_slice=0;
if ( permute_dim ) {
permute_slice = shift/rd;
if ( x<shift%rd ) permute_slice = 1-permute_slice;
}
if ( permute_slice ) {
int internal=sizeof(vobj)/sizeof(vComplex);
int num =rhs._grid->_slice_block[dimension]*internal;
for(int n=0;n<rhs._grid->_slice_nblock[dimension];n++){
vComplex *optr = (vComplex *)&ret._odata[o];
vComplex *iptr = (vComplex *)&rhs._odata[so];
for(int b=0;b<num;b++){
permute(optr[b],iptr[b],permute_type);
}
o+=rhs._grid->_slice_stride[dimension];
so+=rhs._grid->_slice_stride[dimension];
}
} else {
for(int n=0;n<rhs._grid->_slice_nblock[dimension];n++){
for(int i=0;i<rhs._grid->_slice_block[dimension];i++){
ret._odata[o+i]=rhs._odata[so+i];
}
o+=rhs._grid->_slice_stride[dimension];
so+=rhs._grid->_slice_stride[dimension];
}
}
}
return ret;
}
#else
friend Lattice<vobj> Cshift(Lattice<vobj> &rhs,int dimension,int shift)
{
Lattice<vobj> ret(rhs._grid);
int sx,so,o;
ret.checkerboard = rhs._grid->CheckerBoardDestination(rhs.checkerboard,shift);
shift = rhs._grid->CheckerBoardShift(rhs.checkerboard,dimension,shift);
int rd = rhs._grid->_rdimensions[dimension];
int ld = rhs._grid->_dimensions[dimension];
// Map to always positive shift.
shift = (shift+ld)%ld;
// Work out whether to permute and the permute type
// ABCDEFGH -> AE BF CG DH permute
// Shift 0 AE BF CG DH 0 0 0 0 ABCDEFGH
// Shift 1 DH AE BF CG 1 0 0 0 HABCDEFG
// Shift 2 CG DH AE BF 1 1 0 0 GHABCDEF
// Shift 3 BF CG DH AE 1 1 1 0 FGHACBDE
// Shift 4 AE BF CG DH 1 1 1 1 EFGHABCD
// Shift 5 DH AE BF CG 0 1 1 1 DEFGHABC
// Shift 6 CG DH AE BF 0 0 1 1 CDEFGHAB
// Shift 7 BF CG DH AE 0 0 0 1 BCDEFGHA
int permute_dim =rhs._grid->_layout[dimension]>1 ;
int permute_type=0;
for(int d=0;d<dimension;d++)
if (rhs._grid->_layout[d]>1 ) permute_type++;
// loop over all work
int work =rd*rhs._grid->_slice_nblock[dimension]*rhs._grid->_slice_block[dimension];
//#pragma omp parallel for
for(int ww=0;ww<work;ww++){
// can optimise this if we know w moves contiguously for a given thread.
// b=(b+1);
// if (b==_slice_block) {b=0; n=n+1;}
// if (n==_slice_nblock) { n=0; x=x+1}
//
// Perhaps a five cycle iterator, or so.
int w=ww;
int b = w%rhs._grid->_slice_block[dimension] ; w=w/rhs._grid->_slice_block[dimension];
int n = w%rhs._grid->_slice_nblock[dimension]; w=w/rhs._grid->_slice_nblock[dimension];
int x = w;
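// e.g. with _slice_block=2 and _slice_nblock=4: ww=11 -> b=1, n=1, x=1.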
sx = (x-shift+ld)%rd;
o = x*rhs._grid->_ostride[dimension]+n*rhs._grid->_slice_stride[dimension]; // common sub expression alert.
so =sx*rhs._grid->_ostride[dimension]+n*rhs._grid->_slice_stride[dimension];
int permute_slice=0;
if ( permute_dim ) {
permute_slice = shift/rd;
if ( x<shift%rd ) permute_slice = 1-permute_slice;
}
if ( permute_slice ) {
int internal=sizeof(vobj)/sizeof(vComplex);
vComplex *optr = (vComplex *)&ret._odata[o+b];
vComplex *iptr = (vComplex *)&rhs._odata[so+b];
for(int i=0;i<internal;i++){
permute(optr[i],iptr[i],permute_type);
}
} else {
ret._odata[o+b]=rhs._odata[so+b];
}
}
return ret;
}
#endif
template<class sobj>
inline Lattice<vobj> & operator = (const sobj & r){
#pragma omp parallel for
for(int ss=0;ss<_grid->oSites();ss++){
this->_odata[ss]=r;
}
return *this;
}
// Poke a scalar object into the SIMD array
template<class sobj>
friend void pokeSite(const sobj &s,Lattice<vobj> &l,std::vector<int> &site){
if ( l.checkerboard != l._grid->CheckerBoard(site)){
printf("Poking wrong checkerboard\n");
exit(EXIT_FAILURE);
}
int o_index = l._grid->oIndex(site);
int i_index = l._grid->iIndex(site);
Real *v_ptr = (Real *)&l._odata[o_index];
Real *s_ptr = (Real *)&s;
v_ptr = v_ptr + 2*i_index;
for(int i=0;i<sizeof(sobj);i+=2*sizeof(Real)){
v_ptr[0] = s_ptr[0];
v_ptr[1] = s_ptr[1];
v_ptr+=2*vComplex::Nsimd();
s_ptr+=2;
}
return;
};
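// Layout note: a vComplex packs Nsimd() complex numbers as interleaved
// (re,im) pairs, so lane i_index occupies real offsets 2*i_index and
// 2*i_index+1, and successive complex components of the scalar object are
// one whole vector, i.e. 2*Nsimd() reals, apart -- the stride walked above.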
// Peek a scalar object from the SIMD array
template<class sobj>
friend void peekSite(sobj &s,const Lattice<vobj> &l,std::vector<int> &site){
// FIXME : define exceptions set and throw up.
if ( l.checkerboard != l._grid->CheckerBoard(site)){
printf("Peeking wrong checkerboard\n");
exit(EXIT_FAILURE);
}
int o_index = l._grid->oIndex(site);
int i_index = l._grid->iIndex(site);
Real *v_ptr = (Real *)&l._odata[o_index];
Real *s_ptr = (Real *)&s;
v_ptr = v_ptr + 2*i_index;
for(int i=0;i<sizeof(sobj);i+=2*sizeof(Real)){
s_ptr[0] = v_ptr[0];
s_ptr[1] = v_ptr[1];
v_ptr+=2*vComplex::Nsimd();
s_ptr+=2;
}
return;
};
// Randomise
friend void random(Lattice<vobj> &l){
Real *v_ptr = (Real *)&l._odata[0];
size_t v_len = l._grid->oSites()*sizeof(vobj);
size_t d_len = v_len/sizeof(Real);
for(int i=0;i<d_len;i++){
v_ptr[i]=drand48();
}
};
friend void gaussian(Lattice<vobj> &l){
// Zero mean, unit variance.
static std::mt19937 generator; // fixed seed; see the parallel RNG note below
std::normal_distribution<double> distribution(0.0,1.0);
Real *v_ptr = (Real *)&l._odata[0];
size_t v_len = l._grid->oSites()*sizeof(vobj);
size_t d_len = v_len/sizeof(Real);
// Not a parallel RNG. Could make up some seed per 4d site, seed
// per hypercube type scheme.
for(int i=0;i<d_len;i++){
v_ptr[i]= distribution(generator);
}
};
// Unary functions and Unops
// Unary negation
friend inline Lattice<vobj> operator -(const Lattice<vobj> &r) {
Lattice<vobj> ret(r._grid);
#pragma omp parallel for
for(int ss=0;ss<r._grid->oSites();ss++){
ret._odata[ss]= -r._odata[ss];
}
return ret;
}
// *=,+=,-= operators
template<class T>
inline Lattice<vobj> &operator *=(const T &r) {
*this = (*this)*r;
return *this;
}
template<class T>
inline Lattice<vobj> &operator -=(const T &r) {
*this = (*this)-r;
return *this;
}
template<class T>
inline Lattice<vobj> &operator +=(const T &r) {
*this = (*this)+r;
return *this;
}
inline friend Lattice<iScalar<vComplex> > _trace(const Lattice<vobj> &lhs){
Lattice<iScalar<vComplex> > ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = trace(lhs._odata[ss]);
}
return ret;
};
inline friend Lattice<iScalar<iScalar< vComplex > > > trace(const Lattice<vobj> &lhs){
Lattice<iScalar< iScalar<vComplex> > > ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = trace(lhs._odata[ss]);
}
return ret;
};
inline friend Lattice<vobj> adj(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = adj(lhs._odata[ss]);
}
return ret;
};
inline friend Lattice<vobj> conj(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = conj(lhs._odata[ss]);
}
return ret;
};
// remove and insert a half checkerboard
friend void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
#pragma omp parallel for
for(int ss=0;ss<half._grid->oSites();ss++){
half._odata[ss] = full._odata[ss*2+cb];
}
half.checkerboard = cb;
}
friend void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
int cb = half.checkerboard;
#pragma omp parallel for
for(int ss=0;ss<half._grid->oSites();ss++){
full._odata[ss*2+cb]=half._odata[ss];
}
}
}; // class Lattice
/* Need to implement the multiplication return type matching S S -> S, S M -> M, M S -> M through
all nested possibilities.
template<template<class> class lhs,template<class> class rhs>
class MultTypeSelector {
template<typename vtype> using ltype = lhs
typedef lhs type;
};
*/
template<class obj1,class obj2>
void conformable(const Lattice<obj1> &lhs,const Lattice<obj2> &rhs)
{
assert(lhs._grid == rhs._grid);
assert(lhs.checkerboard == rhs.checkerboard);
}
template<class obj1,class obj2,class obj3>
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
uint32_t vec_len = lhs._grid->oSites();
#pragma omp parallel for
for(int ss=0;ss<vec_len;ss++){
const char * ptr =(const char*)&lhs._odata[ss];
#ifdef PREFETCH
v_prefetch0(sizeof(obj2), ptr);
#endif
for(int i=0;i<sizeof(obj2);i+=64){
_mm_prefetch(ptr+i+4096,_MM_HINT_T1);
_mm_prefetch(ptr+i+256,_MM_HINT_T0);
}
ptr =(const char*)&rhs._odata[ss];
#ifdef PREFETCH
v_prefetch0(sizeof(obj3), ptr);
#endif
for(int i=0;i<sizeof(obj3);i+=64){
_mm_prefetch(ptr+i+4096,_MM_HINT_T1);
_mm_prefetch(ptr+i+256,_MM_HINT_T0);
}
mult(&ret._odata[ss],&lhs._odata[ss],&rhs._odata[ss]);
}
}
template<class obj1,class obj2,class obj3>
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
sub(&ret._odata[ss],&lhs._odata[ss],&rhs._odata[ss]);
}
}
template<class obj1,class obj2,class obj3>
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites();ss++){
add(&ret._odata[ss],&lhs._odata[ss],&rhs._odata[ss]);
}
}
// Lattice BinOp Lattice,
template<class left,class right>
inline auto operator * (const Lattice<left> &lhs,const Lattice<right> &rhs)-> Lattice<decltype(lhs._odata[0]*rhs._odata[0])>
{
//NB mult performs conformable check. Do not reapply here for performance.
Lattice<decltype(lhs._odata[0]*rhs._odata[0])> ret(rhs._grid);
mult(ret,lhs,rhs);
return ret;
}
template<class left,class right>
inline auto operator + (const Lattice<left> &lhs,const Lattice<right> &rhs)-> Lattice<decltype(lhs._odata[0]*rhs._odata[0])>
{
//NB mult performs conformable check. Do not reapply here for performance.
Lattice<decltype(lhs._odata[0]*rhs._odata[0])> ret(rhs._grid);
add(ret,lhs,rhs);
return ret;
}
template<class left,class right>
inline auto operator - (const Lattice<left> &lhs,const Lattice<right> &rhs)-> Lattice<decltype(lhs._odata[0]*rhs._odata[0])>
{
//NB mult performs conformable check. Do not reapply here for performance.
Lattice<decltype(lhs._odata[0]*rhs._odata[0])> ret(rhs._grid);
sub(ret,lhs,rhs);
return ret;
}
// Scalar BinOp Lattice ;generate return type
template<class left,class right>
inline auto operator * (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
{
std::cerr <<"Oscalar * Lattice calling mult"<<std::endl;
Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=lhs*rhs._odata[ss];
}
return ret;
}
template<class left,class right>
inline auto operator + (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
{
Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=lhs+rhs._odata[ss];
}
return ret;
}
template<class left,class right>
inline auto operator - (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
{
Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=lhs-rhs._odata[ss];
}
return ret;
}
template<class left,class right>
inline auto operator * (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
{
std::cerr <<"Lattice * Oscalar calling mult"<<std::endl;
Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites(); ss++){
ret._odata[ss]=lhs._odata[ss]*rhs;
}
return ret;
}
template<class left,class right>
inline auto operator + (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
{
Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites(); ss++){
ret._odata[ss]=lhs._odata[ss]+rhs;
}
return ret;
}
template<class left,class right>
inline auto operator - (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
{
Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
#pragma omp parallel for
for(int ss=0;ss<lhs._grid->oSites(); ss++){
ret._odata[ss]=lhs._odata[ss]-rhs;
}
return ret;
}
namespace QCD {
static const int Nc=3;
static const int Ns=4;
static const int CbRed =0;
static const int CbBlack=1;
// QCD iMatrix types
template<typename vtype> using iSinglet = iScalar<iScalar<vtype> > ;
template<typename vtype> using iSpinMatrix = iMatrix<iScalar<vtype>, Ns>;
template<typename vtype> using iSpinColourMatrix = iMatrix<iMatrix<vtype, Nc>, Ns>;
template<typename vtype> using iColourMatrix = iScalar<iMatrix<vtype, Nc>> ;
template<typename vtype> using iSpinVector = iVector<iScalar<vtype>, Ns>;
template<typename vtype> using iColourVector = iScalar<iVector<vtype, Nc> >;
template<typename vtype> using iSpinColourVector = iVector<iVector<vtype, Nc>, Ns>;
typedef iSinglet<Complex > TComplex; // This is painful. Tensor singlet complex type.
typedef iSinglet<vComplex > vTComplex;
typedef iSinglet<Real > TReal; // This is painful. Tensor singlet complex type.
typedef iSpinMatrix<Complex > SpinMatrix;
typedef iColourMatrix<Complex > ColourMatrix;
typedef iSpinColourMatrix<Complex > SpinColourMatrix;
typedef iSpinVector<Complex > SpinVector;
typedef iColourVector<Complex > ColourVector;
typedef iSpinColourVector<Complex > SpinColourVector;
typedef iSpinMatrix<vComplex > vSpinMatrix;
typedef iColourMatrix<vComplex > vColourMatrix;
typedef iSpinColourMatrix<vComplex > vSpinColourMatrix;
typedef iSpinVector<vComplex > vSpinVector;
typedef iColourVector<vComplex > vColourVector;
typedef iSpinColourVector<vComplex > vSpinColourVector;
typedef Lattice<vTComplex> LatticeComplex;
typedef Lattice<vColourMatrix> LatticeColourMatrix;
typedef Lattice<vSpinMatrix> LatticeSpinMatrix;
typedef Lattice<vSpinColourMatrix> LatticePropagator;
typedef LatticePropagator LatticeSpinColourMatrix;
typedef Lattice<vSpinColourVector> LatticeFermion;
typedef Lattice<vSpinColourVector> LatticeSpinColourVector;
typedef Lattice<vSpinVector> LatticeSpinVector;
typedef Lattice<vColourVector> LatticeColourVector;
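// Example of the nesting convention: SpinColourMatrix is spin-outside-colour,
// i.e. an Ns x Ns matrix whose elements are Nc x Nc colour matrices, so
// (assuming Complex = std::complex<double>) sizeof(SpinColourMatrix)
// = Ns*Ns*Nc*Nc*sizeof(Complex) = 4*4*3*3*16 = 2304 bytes.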
// localNorm2,
template<class tt>
inline LatticeComplex localNorm2 (const Lattice<tt> &rhs)
{
LatticeComplex ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=trace(adj(rhs._odata[ss])*rhs._odata[ss]);
}
return ret;
}
// localInnerProduct
template<class tt>
inline LatticeComplex localInnerProduct (const Lattice<tt> &lhs,const Lattice<tt> &rhs)
{
LatticeComplex ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=localInnerProduct(lhs._odata[ss],rhs._odata[ss]);
}
return ret;
}
// outerProduct Scalar x Scalar -> Scalar
// Vector x Vector -> Matrix
template<class ll,class rr>
inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))>
{
Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))> ret(rhs._grid);
#pragma omp parallel for
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]);
}
return ret;
}
} //namespace QCD
}
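A brief usage sketch of the decltype-deduced binary operators above, in the spirit of Grid_main.cc; the lattice and SIMD extents are assumptions, not values mandated by the header.
#include <vector>
void binop_sketch(void){
  std::vector<int> latt_size   = {8,8,8,8};
  std::vector<int> simd_layout = {1,2,2,2};
  dpo::GridCartesian Fine(latt_size,simd_layout);
  dpo::QCD::LatticeColourMatrix     U(&Fine);
  dpo::QCD::LatticeSpinColourVector psi(&Fine);
  random(U); random(psi);
  // The per-site return type is decltype(lhs._odata[0]*rhs._odata[0]), so a
  // colour matrix times a spin-colour vector yields a spin-colour vector.
  auto chi = U * psi;  // Lattice<vSpinColourVector>
  chi = chi + psi;     // same-type site-wise add
}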

278
Grid_main.cc Normal file

@@ -0,0 +1,278 @@
#include "Grid.h"
#include "Grid_vRealD.h"
#include "Grid_vRealF.h"
#include "Grid_vComplexD.h"
#include "Grid_vComplexF.h"
#include "Grid_Cartesian.h"
#include "Grid_Lattice.h"
using namespace std;
using namespace dpo;
using namespace dpo::QCD;
int main (int argc, char ** argv)
{
#ifdef KNL
struct sigaction sa,osa;
sigemptyset (&sa.sa_mask);
sa.sa_sigaction= Grid_sa_signal_handler; // handler defined in Grid_signal.cc
sa.sa_flags = SA_ONSTACK|SA_SIGINFO;
sigaction(SIGSEGV,&sa,NULL);
sigaction(SIGTRAP,&sa,NULL);
#endif
std::vector<int> latt_size(4);
std::vector<int> simd_layout(4);
for(int omp=32;omp<237;omp*=2){
#ifdef OMP
omp_set_num_threads(omp);
#endif
for(int lat=16;lat<=32;lat+=8){
latt_size[0] = lat;
latt_size[1] = lat;
latt_size[2] = lat;
latt_size[3] = lat;
double volume = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];
simd_layout[0] = 1;
simd_layout[1] = 2;
simd_layout[2] = 2;
simd_layout[3] = 2;
GridCartesian Fine(latt_size,simd_layout);
GridRedBlackCartesian rbFine(latt_size,simd_layout);
LatticeColourMatrix Foo(&Fine);
LatticeColourMatrix Bar(&Fine);
LatticeSpinColourMatrix scFoo(&Fine);
LatticeSpinColourMatrix scBar(&Fine);
LatticeColourMatrix Shifted(&Fine);
LatticeColourMatrix ShiftedCheck(&Fine);
LatticeColourMatrix rShifted(&rbFine);
LatticeColourMatrix bShifted(&rbFine);
LatticeColourMatrix rFoo(&rbFine);
LatticeColourMatrix bFoo(&rbFine);
LatticeColourMatrix FooBar(&Fine);
LatticeSpinColourMatrix scFooBar(&Fine);
LatticeColourVector cVec(&Fine);
LatticeSpinVector sVec(&Fine);
LatticeSpinColourVector scVec(&Fine);
LatticeColourMatrix cMat(&Fine);
LatticeSpinMatrix sMat(&Fine);
LatticeSpinColourMatrix scMat(&Fine);
LatticeComplex scalar(&Fine);
SpinMatrix GammaFive;
iSpinMatrix<vComplex> iGammaFive;
ColourMatrix cmat;
random(Foo);
gaussian(Bar);
random(scFoo);
random(scBar);
random(cMat);
random(sMat);
random(scMat);
random(cVec);
random(sVec);
random(scVec);
fflush(stdout);
cVec = cMat * cVec; // LatticeColourVector = LatticeColourMatrix * LatticeColourVector
sVec = sMat * sVec; // LatticeSpinVector = LatticeSpinMatrix * LatticeSpinVector
scVec= scMat * scVec;// LatticeSpinColourVector = LatticeSpinColourMatrix * LatticeSpinColourVector
scVec= cMat * scVec; // LatticeSpinColourVector = LatticeColourMatrix * LatticeSpinColourVector
scVec= sMat * scVec; // LatticeSpinColourVector = LatticeSpinMatrix * LatticeSpinColourVector
cMat = outerProduct(cVec,cVec);
scalar = localInnerProduct(cVec,cVec);
scMat = sMat*scMat; // LatticeSpinColourMatrix = LatticeSpinMatrix * LatticeSpinColourMatrix
// Non-lattice (const objects) * Lattice
ColourMatrix cm;
SpinColourMatrix scm;
scMat = cMat*scMat;
scm = cm * scm; // SpinColourMatrix = ColourMatrix * SpinColourMatrix
scm = scm *cm; // SpinColourMatrix = SpinColourMatrix * ColourMatrix
scm = GammaFive * scm ; // SpinColourMatrix = SpinMatrix * SpinColourMatrix
scm = scm* GammaFive ; // SpinColourMatrix = SpinColourMatrix * SpinMatrix
sMat = adj(sMat); // LatticeSpinMatrix adjoint
sMat = iGammaFive*sMat; // SpinMatrix * LatticeSpinMatrix
sMat = GammaFive*sMat; // SpinMatrix * LatticeSpinMatrix
scMat= adj(scMat);
cMat= adj(cMat);
cm=adj(cm);
scm=adj(scm);
// Foo = Foo+scalar; // LatticeColourMatrix+Scalar
// Foo = Foo*scalar; // LatticeColourMatrix*Scalar
// Foo = Foo-scalar; // LatticeColourMatrix-Scalar
// Foo = scalar*Foo; // Scalar*LatticeColourMatrix
// Foo = scalar+Foo; // Scalar+LatticeColourMatrix
// Foo = scalar-Foo; // Scalar-LatticeColourMatrix
LatticeComplex trscMat(&Fine);
trscMat = trace(scMat); // Trace
FooBar = Bar;
int shift=1;
int dir=0;
random(Foo);
pickCheckerboard(CbRed,rFoo,Foo); // Pick out red or black checkerboards
pickCheckerboard(CbBlack,bFoo,Foo);
Shifted = Cshift(Foo,dir,shift); // Shift everything
bShifted = Cshift(rFoo,dir,shift); // Shift red
rShifted = Cshift(bFoo,dir,shift); // Shift black
setCheckerboard(ShiftedCheck,bShifted); // Put them all together
setCheckerboard(ShiftedCheck,rShifted); // and check the results (later)
// Lattice SU(3) x SU(3)
FooBar = Foo * Bar;
// Lattice 12x12 GEMM
scFooBar = scFoo * scBar;
// Benchmark some simple operations LatticeSU3 * Lattice SU3.
double t0,t1,flops;
double bytes;
int ncall=100;
int Nc = dpo::QCD::Nc;
flops = ncall*1.0*volume*(8*Nc*Nc*Nc);
bytes = ncall*1.0*volume*Nc*Nc *2*3*sizeof(dpo::Real);
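// Arithmetic: an Nc x Nc complex GEMM per site is Nc^3 complex multiply-adds
// = 8*Nc^3 real flops; the 2*3 in bytes counts (re,im) for the three
// matrices touched per site (two read, one written).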
printf("%f flop and %f bytes\n",flops,bytes/ncall);
FooBar = Foo * Bar;
t0=usecond();
for(int i=0;i<ncall;i++){
mult(FooBar,Foo,Bar); // this is better
}
t1=usecond();
#ifdef OMP
printf("mult NumThread %d , Lattice size %d , %f us per call\n",omp_get_max_threads(),lat,(t1-t0)/ncall);
#endif
printf("mult NumThread %d , Lattice size %d , %f Mflop/s\n",omp,lat,flops/(t1-t0));
printf("mult NumThread %d , Lattice size %d , %f MB/s\n",omp,lat,bytes/(t1-t0));
/*
mult(FooBar,Foo,Bar);
// FooBar = Foo * Bar;
t0=usecond();
for(int i=0;i<ncall;i++){
// mult(FooBar,Foo,Cshift(Bar,1,-2));
mult(FooBar,Foo,Bar);
// FooBar = Foo * Bar; // this is bad
}
t1=usecond();
printf("A: NumThread %d , Lattice size %d , %f us per call\n",omp,lat,(t1-t0)/ncall);
printf("A: NumThread %d , Lattice size %d , %f Mflop/s\n",omp,lat,flops/(t1-t0));
printf("A: NumThread %d , Lattice size %d , %f MB/s\n",omp,lat,bytes/(t1-t0));
*/
pickCheckerboard(0,rFoo,FooBar);
pickCheckerboard(1,bFoo,FooBar);
setCheckerboard(FooBar,rFoo);
setCheckerboard(FooBar,bFoo);
double nrm=0;
std::vector<int> coor(4);
for(coor[0]=0;coor[0]<latt_size[0];coor[0]++){
for(coor[1]=0;coor[1]<latt_size[1];coor[1]++){
for(coor[2]=0;coor[2]<latt_size[2];coor[2]++){
for(coor[3]=0;coor[3]<latt_size[3];coor[3]++){
std::complex<dpo::Real> diff;
std::vector<int> shiftcoor = coor;
shiftcoor[dir]=(shiftcoor[dir]-shift+latt_size[dir])%latt_size[dir];
ColourMatrix foo;
ColourMatrix bar;
ColourMatrix shifted1;
ColourMatrix shifted2;
ColourMatrix shifted3;
ColourMatrix foobar1;
ColourMatrix foobar2;
ColourMatrix mdiff,amdiff;
peekSite(shifted1,Shifted,coor);
peekSite(shifted2,Foo,shiftcoor);
peekSite(shifted3,ShiftedCheck,coor);
peekSite(foo,Foo,coor);
mdiff = shifted1-shifted2;
amdiff=adj(mdiff);
ColourMatrix prod = amdiff*mdiff;
TReal Ttr=real(trace(prod));
double nn=Ttr._internal._internal;
if ( nn > 0 )
cout<<"Shift real trace fail "<<coor[0]<<coor[1]<<coor[2]<<coor[3] <<endl;
for(int r=0;r<3;r++){
for(int c=0;c<3;c++){
diff =shifted1._internal._internal[r][c]-shifted2._internal._internal[r][c];
nn=real(conj(diff)*diff);
if ( nn > 0 )
cout<<"Shift fail "<<coor[0]<<coor[1]<<coor[2]<<coor[3] <<" "
<<shifted1._internal._internal[r][c]<<" "<<shifted2._internal._internal[r][c]
<< " "<< foo._internal._internal[r][c]<<endl;
else if(0)
cout<<"Shift pass "<<coor[0]<<coor[1]<<coor[2]<<coor[3] <<" "
<<shifted1._internal._internal[r][c]<<" "<<shifted2._internal._internal[r][c]
<< " "<< foo._internal._internal[r][c]<<endl;
}}
for(int r=0;r<3;r++){
for(int c=0;c<3;c++){
diff =shifted3._internal._internal[r][c]-shifted2._internal._internal[r][c];
nn=real(conj(diff)*diff);
if ( nn > 0 )
cout<<"Shift rb fail "<<coor[0]<<coor[1]<<coor[2]<<coor[3] <<" "
<<shifted3._internal._internal[r][c]<<" "<<shifted2._internal._internal[r][c]
<< " "<< foo._internal._internal[r][c]<<endl;
else if(0)
cout<<"Shift rb pass "<<coor[0]<<coor[1]<<coor[2]<<coor[3] <<" "
<<shifted3._internal._internal[r][c]<<" "<<shifted2._internal._internal[r][c]
<< " "<< foo._internal._internal[r][c]<<endl;
}}
peekSite(bar,Bar,coor);
peekSite(foobar1,FooBar,coor);
foobar2 = foo*bar;
for(int r=0;r<Nc;r++){
for(int c=0;c<Nc;c++){
diff =foobar2._internal._internal[r][c]-foobar1._internal._internal[r][c];
nrm = nrm + real(conj(diff)*diff);
}}
}}}}
std::cout << "LatticeColorMatrix * LatticeColorMatrix nrm diff = "<<nrm<<std::endl;
} // loop for lat
} // loop for omp
}

80
Grid_signal.cc Executable file

@@ -0,0 +1,80 @@
/****************************************************************************/
/* PAB: Signal magic. Processor state dump is x86-64 specific */
/****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <signal.h>
#include "Grid.h"
#define __X86_64
namespace dpo {
void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr);
void Grid_debug_handler_init(void);
void Grid_init(void)
{
Grid_debug_handler_init();
}
void Grid_sa_signal_handler(int sig,siginfo_t *si,void * ptr)
{
ucontext_t * uc= (ucontext_t *)ptr;
struct sigcontext *sc = (struct sigcontext *)&uc->uc_mcontext;
printf("Caught signal %d\n",si->si_signo);
printf(" mem address %lx\n",si->si_addr);
printf(" code %d\n",si->si_code);
printf(" instruction %lx\n",sc->rip);
#ifdef __X86_64
#define REG(A) printf(" %s %lx\n",#A, sc-> A);
REG(rdi);
REG(rsi);
REG(rbp);
REG(rbx);
REG(rdx);
REG(rax);
REG(rcx);
REG(rsp);
REG(rip);
REG(r8);
REG(r9);
REG(r10);
REG(r11);
REG(r12);
REG(r13);
REG(r14);
REG(r15);
#endif
fflush(stdout);
if ( si->si_signo == SIGSEGV ) {
printf("Grid_sa_signal_handler: Oops... this was a sigsegv you naughty naughty programmer. Goodbye\n");
fflush(stdout);
exit(-1);
}
return;
};
void Grid_debug_handler_init(void)
{
struct sigaction sa,osa;
sigemptyset (&sa.sa_mask);
sa.sa_sigaction= Grid_sa_signal_handler;
sa.sa_flags = SA_SIGINFO;
sigaction(SIGSEGV,&sa,NULL);
sigaction(SIGTRAP,&sa,NULL);
}
}

340
Grid_vComplexD.h Normal file

@@ -0,0 +1,340 @@
#ifndef VCOMPLEXD_H
#define VCOMPLEXD_H
#include "Grid.h"
#include "Grid_vComplexF.h"
namespace dpo {
class vComplexD {
protected:
zvec v;
public:
vComplexD & operator = ( Zero & z){
vzero(*this);
return (*this);
}
vComplexD(){};
///////////////////////////////////////////////
// mac, mult, sub, add, adj
// Should do an AVX2 version with mac.
///////////////////////////////////////////////
friend inline void mac (vComplexD * __restrict__ y,const vComplexD * __restrict__ a,const vComplexD *__restrict__ x) {*y = (*a)*(*x)+(*y);};
friend inline void mult(vComplexD * __restrict__ y,const vComplexD * __restrict__ l,const vComplexD *__restrict__ r) {*y = (*l) * (*r);}
friend inline void sub (vComplexD * __restrict__ y,const vComplexD * __restrict__ l,const vComplexD *__restrict__ r) {*y = (*l) - (*r);}
friend inline void add (vComplexD * __restrict__ y,const vComplexD * __restrict__ l,const vComplexD *__restrict__ r) {*y = (*l) + (*r);}
friend inline vComplexD adj(const vComplexD &in){ return conj(in); }
//////////////////////////////////
// Initialise to 1,0,i
//////////////////////////////////
friend inline void vone (vComplexD &ret){ vsplat(ret,1.0,0.0);}
friend inline void vzero (vComplexD &ret){ vsplat(ret,0.0,0.0);}
friend inline void vcomplex_i(vComplexD &ret){ vsplat(ret,0.0,1.0);}
////////////////////////////////////
// Arithmetic operator overloads +,-,*
////////////////////////////////////
friend inline vComplexD operator + (vComplexD a, vComplexD b)
{
vComplexD ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_add_pd(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_add_pd(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_add_pd(a.v,b.v);
//printf("%s %f\n",__func__,_mm512_reduce_mul_pd(ret.v));
#endif
#ifdef QPX
ret.v = vec_add(a.v,b.v);
#endif
return ret;
};
friend inline vComplexD operator - (vComplexD a, vComplexD b)
{
vComplexD ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_sub_pd(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_sub_pd(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_sub_pd(a.v,b.v);
#endif
#ifdef QPX
ret.v = vec_sub(a.v,b.v);
#endif
return ret;
};
friend inline vComplexD operator * (vComplexD a, vComplexD b)
{
vComplexD ret;
//Multiplication of (ak+ibk)*(ck+idk)
// a + i b can be stored as a data structure
//From intel optimisation reference guide
/*
movsldup xmm0, Src1; load real parts into the destination,
; a1, a1, a0, a0
movaps xmm1, src2; load the 2nd pair of complex values, ; i.e. d1, c1, d0, c0
mulps xmm0, xmm1; temporary results, a1d1, a1c1, a0d0, ; a0c0
shufps xmm1, xmm1, b1; reorder the real and imaginary ; parts, c1, d1, c0, d0
movshdup xmm2, Src1; load the imaginary parts into the ; destination, b1, b1, b0, b0
mulps xmm2, xmm1; temporary results, b1c1, b1d1, b0c0, ; b0d0
addsubps xmm0, xmm2; b1c1+a1d1, a1c1 -b1d1, b0c0+a0d
VSHUFPD (VEX.256 encoded version)
IF IMM0[0] = 0
THEN DEST[63:0]=SRC1[63:0] ELSE DEST[63:0]=SRC1[127:64] FI;
IF IMM0[1] = 0
THEN DEST[127:64]=SRC2[63:0] ELSE DEST[127:64]=SRC2[127:64] FI;
IF IMM0[2] = 0
THEN DEST[191:128]=SRC1[191:128] ELSE DEST[191:128]=SRC1[255:192] FI;
IF IMM0[3] = 0
THEN DEST[255:192]=SRC2[191:128] ELSE DEST[255:192]=SRC2[255:192] FI;
*/
#if defined (AVX1)|| defined (AVX2)
zvec ymm0,ymm1,ymm2;
ymm0 = _mm256_shuffle_pd(a.v,a.v,0x0); // ymm0 <- ar ar, ar,ar b'00,00
ymm0 = _mm256_mul_pd(ymm0,b.v); // ymm0 <- ar bi, ar br
ymm1 = _mm256_shuffle_pd(b.v,b.v,0x5); // ymm1 <- br,bi b'01,01
ymm2 = _mm256_shuffle_pd(a.v,a.v,0xF); // ymm2 <- ai,ai b'11,11
ymm1 = _mm256_mul_pd(ymm1,ymm2); // ymm1 <- br ai, ai bi
ret.v= _mm256_addsub_pd(ymm0,ymm1);
#endif
#ifdef SSE2
zvec ymm0,ymm1,ymm2;
ymm0 = _mm_shuffle_pd(a.v,a.v,0x0); // ymm0 <- ar ar,
ymm0 = _mm_mul_pd(ymm0,b.v); // ymm0 <- ar bi, ar br
ymm1 = _mm_shuffle_pd(b.v,b.v,0x1); // ymm1 <- br,bi b01
ymm2 = _mm_shuffle_pd(a.v,a.v,0x3); // ymm2 <- ai,ai b11
ymm1 = _mm_mul_pd(ymm1,ymm2); // ymm1 <- br ai, ai bi
ret.v= _mm_addsub_pd(ymm0,ymm1);
#endif
#ifdef AVX512
/* This is from
* Automatic SIMD Vectorization of Fast Fourier Transforms for the Larrabee and AVX Instruction Sets
* @inproceedings{McFarlin:2011:ASV:1995896.1995938,
* author = {McFarlin, Daniel S. and Arbatov, Volodymyr and Franchetti, Franz and P\"{u}schel, Markus},
* title = {Automatic SIMD Vectorization of Fast Fourier Transforms for the Larrabee and AVX Instruction Sets},
* booktitle = {Proceedings of the International Conference on Supercomputing},
* series = {ICS '11},
* year = {2011},
* isbn = {978-1-4503-0102-2},
* location = {Tucson, Arizona, USA},
* pages = {265--274},
* numpages = {10},
* url = {http://doi.acm.org/10.1145/1995896.1995938},
* doi = {10.1145/1995896.1995938},
* acmid = {1995938},
* publisher = {ACM},
* address = {New York, NY, USA},
* keywords = {autovectorization, fourier transform, program generation, simd, super-optimization},
* }
*/
zvec vzero,ymm0,ymm1,real,imag;
vzero = _mm512_setzero();
ymm0 = _mm512_swizzle_pd(a.v, _MM_SWIZ_REG_CDAB); //
real = _mm512_mask_or_epi64(a.v, 0xAAAA,vzero, ymm0);
imag = _mm512_mask_sub_pd(a.v, 0x5555,vzero, ymm0);
ymm1 = _mm512_mul_pd(real, b.v);
ymm0 = _mm512_swizzle_pd(b.v, _MM_SWIZ_REG_CDAB); // OK
ret.v= _mm512_fmadd_pd(ymm0,imag,ymm1);
/* Imag OK */
#endif
#ifdef QPX
ret.v = vec_mul(a.v,b.v);
#endif
return ret;
};
/////////////////////////////////////////////////////////////////
// Permute
/////////////////////////////////////////////////////////////////
friend inline void permute(vComplexD &y,vComplexD b,int perm){
switch (perm){
// 2 complex=>1 permute
#if defined(AVX1)||defined(AVX2)
case 0: y.v = _mm256_permute2f128_pd(b.v,b.v,0x01); break;
// AB => BA i.e. ab cd =>cd ab
#endif
#ifdef SSE2
//No cases here
#endif
#ifdef AVX512
// 4 complex=>2 permute
// ABCD => BADC i.e. abcd efgh => cdab ghef
// ABCD => CDAB i.e. abcd efgh => efgh abcd
case 0: y.v = _mm512_swizzle_pd(b.v,_MM_SWIZ_REG_BADC); break;
case 1: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(1,0,3,2)); break; // permute for double is not implemented; reinterpret as ps
#endif
#ifdef QPX
#error // Not implemented yet
#endif
default: exit(EXIT_FAILURE); break;
}
};
void vload(zvec& a){
this->v = a;
}
zvec vget(){
return this->v ;
}
///////////////////////
// Splat
///////////////////////
friend inline void vsplat(vComplexD &ret,double rl,double ig){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_pd(ig,rl,ig,rl);
#endif
#ifdef SSE2
ret.v = _mm_set_pd(ig,rl);
#endif
#ifdef AVX512
ret.v = _mm512_set_pd(ig,rl,ig,rl,ig,rl,ig,rl);
#endif
#ifdef QPX
ret.v = {rl,ig,rl,ig};
#endif
}
friend inline void vset(vComplexD &ret, std::complex<double> *a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_pd(a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
#endif
#ifdef SSE2
ret.v = _mm_set_pd(a[0].imag(),a[0].real());
#endif
#ifdef AVX512
ret.v = _mm512_set_pd(a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
// Note v has a0 a1 a2 a3
#endif
#ifdef QPX
ret.v = {a[0].real(),a[0].imag(),a[1].real(),a[1].imag()};
#endif
}
friend inline void vstore(vComplexD &ret, std::complex<double> *a){
#if defined (AVX1)|| defined (AVX2)
_mm256_store_pd((double *)a,ret.v);
#endif
#ifdef SSE2
_mm_store_pd((double *)a,ret.v);
#endif
#ifdef AVX512
_mm512_store_pd((double *)a,ret.v);
//Note v has a3 a2 a1 a0
#endif
#ifdef QPX
printf("%s Not implemented\n",__func__);
exit(-1);
#endif
}
friend inline void vprefetch(const vComplexD &v)
{
_mm_prefetch((const char*)&v.v,_MM_HINT_T0);
}
////////////////////////
// Conjugate
////////////////////////
friend inline vComplexD conj(const vComplexD &in){
vComplexD ret ; vzero(ret);
#if defined (AVX1)|| defined (AVX2)
// addsubps 0, inv=>0+in.v[3] 0-in.v[2], 0+in.v[1], 0-in.v[0], ...
__m256d tmp = _mm256_addsub_pd(ret.v,_mm256_shuffle_pd(in.v,in.v,0x5));
ret.v=_mm256_shuffle_pd(tmp,tmp,0x5);
#endif
#ifdef SSE2
ret.v = _mm_addsub_pd(ret.v,in.v);
#endif
#ifdef AVX512
// Xeon does not have fmaddsub or addsub
// with mask 0xa (1010), v[0] -v[1] v[2] -v[3] ....
ret.v = _mm512_mask_sub_pd(in.v, 0xaaaa,ret.v, in.v);
#endif
#ifdef QPX
exit(0); // not implemented
#endif
return ret;
}
// REDUCE
friend inline ComplexD Reduce(const vComplexD & in)
{
#if defined (AVX1) || defined(AVX2)
// return std::complex<double>(_mm256_mask_reduce_add_pd(0x55, in.v),_mm256_mask_reduce_add_pd(0xAA, in.v));
__attribute__ ((aligned(32))) double c_[4];
_mm256_store_pd(c_,in.v);
return std::complex<double>(c_[0]+c_[2],c_[1]+c_[3]);
#endif
#ifdef AVX512
return std::complex<double>(_mm512_mask_reduce_add_pd(0x5555, in.v),_mm512_mask_reduce_add_pd(0xAAAA, in.v));
#endif
#ifdef QPX
#endif
}
// Unary negation
friend inline vComplexD operator -(const vComplexD &r) {
vComplexD ret;
vzero(ret);
ret = ret - r;
return ret;
}
// *=,+=,-= operators
inline vComplexD &operator *=(const vComplexD &r) {
*this = (*this)*r;
return *this;
}
inline vComplexD &operator +=(const vComplexD &r) {
*this = *this+r;
return *this;
}
inline vComplexD &operator -=(const vComplexD &r) {
*this = *this-r;
return *this;
}
public:
static int Nsimd(void) { return sizeof(zvec)/sizeof(double)/2;}
};
inline vComplexD localInnerProduct(const vComplexD & l, const vComplexD & r) { return conj(l)*r; }
typedef vComplexD vDComplex;
inline void zeroit(vComplexD &z){ vzero(z);}
inline vComplexD outerProduct(const vComplexD &l, const vComplexD& r)
{
return l*r;
}
inline vComplex trace(const vComplex &arg){
return arg;
}
/////////////////////////////////////////////////////////////////////////
//// Generic routine to promote object<complex> -> object<vcomplex>
//// Supports the array reordering transformation that gives me SIMD utilisation
///////////////////////////////////////////////////////////////////////////
template<template<class> class object>
inline object<vComplex> splat(object<Complex >s){
object<vComplex> ret;
vComplex * v_ptr = (vComplex *)& ret;
Complex * s_ptr = (Complex *) &s;
for(int i=0;i<sizeof(ret);i+=sizeof(vComplex)){
vsplat(*(v_ptr++),*(s_ptr++));
}
return ret;
}
}
#endif
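A scalar cross-check sketch for the SIMD complex multiply above -- test code, not part of the header; it assumes an AVX build where vComplexD::Nsimd()==2 and that Grid's headers are already included.
#include <complex>
#include <cassert>
void check_vComplexD_mul(void){
  std::complex<double> a[2] = { {1,2}, {3,4} };
  std::complex<double> b[2] = { {5,6}, {7,8} };
  alignas(32) std::complex<double> c[2]; // vstore uses an aligned store
  dpo::vComplexD va,vb,vc;
  vset(va,a);
  vset(vb,b);
  vc = va*vb;
  vstore(vc,c);
  // each lane must agree with the scalar product
  for(int i=0;i<2;i++) assert( std::abs(c[i]-a[i]*b[i]) < 1.0e-12 );
}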

343
Grid_vComplexF.h Normal file

@@ -0,0 +1,343 @@
#ifndef VCOMPLEXF
#define VCOMPLEXF
#include "Grid.h"
namespace dpo {
class vComplexF {
protected:
cvec v;
public:
vComplexF & operator = ( Zero & z){
vzero(*this);
return (*this);
}
vComplexF(){};
///////////////////////////////////////////////
// mac, mult, sub, add, adj
// Should do an AVX2 version with mac.
///////////////////////////////////////////////
friend inline void mac (vComplexF * __restrict__ y,const vComplexF * __restrict__ a,const vComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
friend inline void mult(vComplexF * __restrict__ y,const vComplexF * __restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) * (*r); }
friend inline void sub (vComplexF * __restrict__ y,const vComplexF * __restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) - (*r); }
friend inline void add (vComplexF * __restrict__ y,const vComplexF * __restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) + (*r); }
friend inline vComplexF adj(const vComplexF &in){ return conj(in); }
//////////////////////////////////
// Initialise to 1,0,i
//////////////////////////////////
friend inline void vone(vComplexF &ret) { vsplat(ret,1.0,0.0); }
friend inline void vzero(vComplexF &ret) { vsplat(ret,0.0,0.0); }
friend inline void vcomplex_i(vComplexF &ret){ vsplat(ret,0.0,1.0);}
////////////////////////////////////
// Arithmetic operator overloads +,-,*
////////////////////////////////////
friend inline vComplexF operator + (vComplexF a, vComplexF b)
{
vComplexF ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_add_ps(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_add_ps(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_add_ps(a.v,b.v);
#endif
#ifdef QPX
#error
#endif
return ret;
};
friend inline vComplexF operator - (vComplexF a, vComplexF b)
{
vComplexF ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_sub_ps(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_sub_ps(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_sub_ps(a.v,b.v);
#endif
#ifdef QPX
#error
#endif
return ret;
};
friend inline vComplexF operator * (vComplexF a, vComplexF b)
{
vComplexF ret;
//Multiplication of (ak+ibk)*(ck+idk)
// a + i b can be stored as a data structure
//From intel optimisation reference
/*
movsldup xmm0, Src1; load real parts into the destination,
; a1, a1, a0, a0
movaps xmm1, src2; load the 2nd pair of complex values, ; i.e. d1, c1, d0, c0
mulps xmm0, xmm1; temporary results, a1d1, a1c1, a0d0, ; a0c0
shufps xmm1, xmm1, b1; reorder the real and imaginary ; parts, c1, d1, c0, d0
movshdup xmm2, Src1; load the imaginary parts into the ; destination, b1, b1, b0, b0
mulps xmm2, xmm1; temporary results, b1c1, b1d1, b0c0, ; b0d0
addsubps xmm0, xmm2; b1c1+a1d1, a1c1 -b1d1, b0c0+a0d
*/
#if defined (AVX1)|| defined (AVX2)
cvec ymm0,ymm1,ymm2;
ymm0 = _mm256_shuffle_ps(a.v,a.v,_MM_SHUFFLE(2,2,0,0)); // ymm0 <- ar ar,
ymm0 = _mm256_mul_ps(ymm0,b.v); // ymm0 <- ar bi, ar br
// FIXME AVX2 could MAC
ymm1 = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); // ymm1 <- br,bi
ymm2 = _mm256_shuffle_ps(a.v,a.v,_MM_SHUFFLE(3,3,1,1)); // ymm2 <- ai,ai
ymm1 = _mm256_mul_ps(ymm1,ymm2); // ymm1 <- br ai, ai bi
ret.v= _mm256_addsub_ps(ymm0,ymm1); // FIXME -- AVX2 could MAC
#endif
#ifdef SSE2
cvec ymm0,ymm1,ymm2;
ymm0 = _mm_shuffle_ps(a.v,a.v,_MM_SHUFFLE(2,2,0,0)); // ymm0 <- ar ar,
ymm0 = _mm_mul_ps(ymm0,b.v); // ymm0 <- ar bi, ar br
ymm1 = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); // ymm1 <- br,bi
ymm2 = _mm_shuffle_ps(a.v,a.v,_MM_SHUFFLE(3,3,1,1)); // ymm2 <- ai,ai
ymm1 = _mm_mul_ps(ymm1,ymm2); // ymm1 <- br ai, ai bi
ret.v= _mm_addsub_ps(ymm0,ymm1);
#endif
#ifdef AVX512
//
cvec vzero,ymm0,ymm1,real, imag;
vzero = _mm512_setzero();
ymm0 = _mm512_swizzle_ps(a.v, _MM_SWIZ_REG_CDAB); //
real = _mm512_mask_or_epi32(a.v, 0xAAAA,vzero, ymm0);
imag = _mm512_mask_sub_ps(a.v, 0x5555,vzero, ymm0);
ymm1 = _mm512_mul_ps(real, b.v);
ymm0 = _mm512_swizzle_ps(b.v, _MM_SWIZ_REG_CDAB); // OK
ret.v = _mm512_fmadd_ps(ymm0,imag,ymm1);
#endif
#ifdef QPX
ret.v = vec_mul(a.v,b.v);
#endif
return ret;
};
/////////////////////////////////////////////////////////////////
// Permute
/////////////////////////////////////////////////////////////////
friend inline void permute(vComplexF &y,vComplexF b,int perm){
switch (perm){
#if defined(AVX1)||defined(AVX2)
// 4 complex=>2 permutes
// case 0 ABCD->BADC
// case 1 ABCD->CDAB
case 0: y.v = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2)); break;
case 1: y.v = _mm256_permute2f128_ps(b.v,b.v,0x01); break;
#endif
#ifdef SSE2
case 0: y.v = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2));break;
#endif
#ifdef AVX512
//#error should permute for 512
// 8 complex=>3 permutes
// case 0 ABCD EFGH -> BADC FEHG
// case 1 ABCD EFGH -> CDAB GHEF
// case 2 ABCD EFGH -> EFGH ABCD
case 0: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_CDAB); break; // OK
case 1: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_BADC); break; // OK
case 2: y.v = _mm512_permute4f128_ps(b.v, (_MM_PERM_ENUM)_MM_SHUFFLE(2,3,0,1)); break; // OK
#endif
#ifdef QPX
#error
#endif
default: exit(EXIT_FAILURE); break;
}
};
friend inline void vset(vComplexF &ret, Complex *a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_ps(a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
#endif
#ifdef SSE2
ret.v = _mm_set_ps(a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
#endif
#ifdef AVX512
ret.v = _mm512_set_ps(a[7].imag(),a[7].real(),a[6].imag(),a[6].real(),a[5].imag(),a[5].real(),a[4].imag(),a[4].real(),a[3].imag(),a[3].real(),a[2].imag(),a[2].real(),a[1].imag(),a[1].real(),a[0].imag(),a[0].real());
// Note v has a0 a1 a2 a3 a4 a5 a6 a7
#endif
#ifdef QPX
ret.v = {a[0].real(),a[0].imag(),a[1].real(),a[1].imag(),a[2].real(),a[2].imag(),a[3].real(),a[3].imag()};
#endif
}
///////////////////////
// Splat
///////////////////////
friend inline void vsplat(vComplexF &ret,ComplexF c){
float a= real(c);
float b= imag(c);
vsplat(ret,a,b);
}
friend inline void vstore(vComplexF &ret, std::complex<float> *a){
#if defined (AVX1)|| defined (AVX2)
_mm256_store_ps((float *)a,ret.v);
#endif
#ifdef SSE2
_mm_store_ps((float *)a,ret.v);
#endif
#ifdef AVX512
_mm512_store_ps((float *)a,ret.v);
//Note v has a3 a2 a1 a0
#endif
#ifdef QPX
printf("%s Not implemented\n",__func__);
exit(-1);
#endif
}
friend inline void vprefetch(const vComplexF &v)
{
_mm_prefetch((const char*)&v.v,_MM_HINT_T0);
}
friend inline void vsplat(vComplexF &ret,float a,float b){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_ps(b,a,b,a,b,a,b,a);
#endif
#ifdef SSE2
ret.v = _mm_set_ps(b,a,b,a);
#endif
#ifdef AVX512
ret.v = _mm512_set_ps(b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a);
#endif
#ifdef QPX
ret.v = {a,b,a,b};
#endif
}
friend inline ComplexF Reduce(const vComplexF & in)
{
#if defined (AVX1) || defined(AVX2)
__attribute__ ((aligned(32))) float c_[8];
_mm256_store_ps(c_,in.v);
return std::complex<float>(c_[0]+c_[2]+c_[4]+c_[6],c_[1]+c_[3]+c_[5]+c_[7]);
#endif
#ifdef AVX512
return std::complex<float>(_mm512_mask_reduce_add_ps(0x5555, in.v),_mm512_mask_reduce_add_ps(0xAAAA, in.v));
#endif
#ifdef QPX
#endif
}
friend inline vComplexF operator * (const Complex &a, vComplexF b){
vComplexF va;
vsplat(va,a);
return va*b;
}
friend inline vComplexF operator * (vComplexF b,const Complex &a){
vComplexF va;
vsplat(va,a);
return va*b;
}
friend inline vComplexF operator + (const Complex &a, vComplexF b){
vComplexF va;
vsplat(va,a);
return va+b;
}
friend inline vComplexF operator + (vComplexF b,const Complex &a){
vComplexF va;
vsplat(va,a);
return b+va;
}
friend inline vComplexF operator - (const Complex &a, vComplexF b){
vComplexF va;
vsplat(va,a);
return va-b;
}
friend inline vComplexF operator - (vComplexF b,const Complex &a){
vComplexF va;
vsplat(va,a);
return b-va;
}
// NB: Template the following on "type Complex" and then implement *,+,- for ComplexF, ComplexD, RealF, RealD above to
// get full generality of binops with scalars.
friend inline void mac (vComplexF *__restrict__ y,const Complex *__restrict__ a,const vComplexF *__restrict__ x){ *y = (*a)*(*x)+(*y); };
friend inline void mult(vComplexF *__restrict__ y,const Complex *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) * (*r); }
friend inline void sub (vComplexF *__restrict__ y,const Complex *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) - (*r); }
friend inline void add (vComplexF *__restrict__ y,const Complex *__restrict__ l,const vComplexF *__restrict__ r){ *y = (*l) + (*r); }
friend inline void mac (vComplexF *__restrict__ y,const vComplexF *__restrict__ a,const Complex *__restrict__ x){ *y = (*a)*(*x)+(*y); };
friend inline void mult(vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const Complex *__restrict__ r){ *y = (*l) * (*r); }
friend inline void sub (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const Complex *__restrict__ r){ *y = (*l) - (*r); }
friend inline void add (vComplexF *__restrict__ y,const vComplexF *__restrict__ l,const Complex *__restrict__ r){ *y = (*l) + (*r); }
///////////////////////
// Conjugate
///////////////////////
friend inline vComplexF conj(const vComplexF &in){
vComplexF ret ; vzero(ret);
#if defined (AVX1)|| defined (AVX2)
cvec tmp;
tmp = _mm256_addsub_ps(ret.v,_mm256_shuffle_ps(in.v,in.v,_MM_SHUFFLE(2,3,0,1))); // ymm1 <- br,bi
ret.v=_mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(2,3,0,1));
#endif
#ifdef SSE2
ret.v = _mm_addsub_ps(ret.v,in.v);
#endif
#ifdef AVX512
ret.v = _mm512_mask_sub_ps(in.v,0xaaaa,ret.v,in.v); // Zero out 0+real 0-imag
#endif
#ifdef QPX
exit(0); // not implemented
#endif
return ret;
}
// Unary negation
friend inline vComplexF operator -(const vComplexF &r) {
vComplexF ret;
vzero(ret);
ret = ret - r;
return ret;
}
// *=,+=,-= operators
inline vComplexF &operator *=(const vComplexF &r) {
*this = (*this)*r;
return *this;
}
inline vComplexF &operator +=(const vComplexF &r) {
*this = *this+r;
return *this;
}
inline vComplexF &operator -=(const vComplexF &r) {
*this = *this-r;
return *this;
}
public:
static int Nsimd(void) { return sizeof(cvec)/sizeof(float)/2;}
};
inline vComplexF localInnerProduct(const vComplexF & l, const vComplexF & r) { return conj(l)*r; }
typedef vComplexF vFComplex;
typedef vComplexF vComplex;
inline void zeroit(vComplexF &z){ vzero(z);}
inline vComplexF outerProduct(const vComplexF &l, const vComplexF& r)
{
return l*r;
}
}
#endif
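A small usage sketch of Reduce above -- again test code, not part of the header; it assumes an AVX build where vComplexF::Nsimd()==4 and that dpo::ComplexF is std::complex<float>.
#include <complex>
#include <cassert>
void check_vComplexF_reduce(void){
  dpo::vComplexF v;
  vsplat(v,dpo::ComplexF(1.0,2.0)); // every lane holds (1,2)
  dpo::ComplexF s = Reduce(v);      // sums the Nsimd()==4 lanes
  assert( std::abs(s - dpo::ComplexF(4.0,8.0)) < 1.0e-5 );
}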

262
Grid_vRealD.h Normal file

@@ -0,0 +1,262 @@
#ifndef VREALD_H
#define VREALD_H
#include "Grid.h"
namespace dpo{
class vRealD {
protected:
dvec v; // dvec is double precision vector
public:
vRealD(){};
friend inline void mult(vRealD * __restrict__ y,const vRealD * __restrict__ l,const vRealD *__restrict__ r) {*y = (*l) * (*r);}
friend inline void sub (vRealD * __restrict__ y,const vRealD * __restrict__ l,const vRealD *__restrict__ r) {*y = (*l) - (*r);}
friend inline void add (vRealD * __restrict__ y,const vRealD * __restrict__ l,const vRealD *__restrict__ r) {*y = (*l) + (*r);}
friend inline vRealD adj(const vRealD &in) { return in; }
friend inline vRealD conj(const vRealD &in){ return in; }
friend inline void mac (vRealD &y,const vRealD a,const vRealD x){
#if defined (AVX1) || defined (SSE2)
y = a*x+y;
#endif
#ifdef AVX2 // AVX2 introduced FMA3, which accelerates multiply-accumulate
// but not a general multiply-add (FMA4 would also eliminate a copy)
y.v = _mm256_fmadd_pd(a.v,x.v,y.v);
#endif
#ifdef AVX512
// double precision FMA
y.v = _mm512_fmadd_pd(a.v,x.v,y.v);
#endif
#ifdef QPX
y.v = vec_madd(a.v,x.v,y.v);
#endif
}
//////////////////////////////////
// Initialise to 1,0
//////////////////////////////////
friend inline void vone (vRealD &ret){ vsplat(ret,1.0);}
friend inline void vzero(vRealD &ret){ vsplat(ret,0.0);}
////////////////////////////////////
// Arithmetic operator overloads +,-,*
////////////////////////////////////
friend inline vRealD operator + (vRealD a, vRealD b)
{
vRealD ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_add_pd(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_add_pd(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_add_pd(a.v,b.v);
#endif
#ifdef QPX
ret.v = vec_add(a.v,b.v);
#endif
return ret;
};
friend inline vRealD operator - (vRealD a, vRealD b)
{
vRealD ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_sub_pd(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_sub_pd(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_sub_pd(a.v,b.v);
#endif
#ifdef QPX
ret.v = vec_sub(a.v,b.v);
#endif
return ret;
};
friend inline vRealD operator * (vRealD a, vRealD b)
{
vRealD ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_mul_pd(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_mul_pd(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_mul_pd(a.v,b.v);
#endif
#ifdef QPX
ret.v = vec_mul(a.v,b.v);
#endif
return ret;
};
// Permute plans
// Permute 0 every ABCDEFGH -> BA DC FE HG
// Permute 1 every ABCDEFGH -> CD AB GH EF
// Permute 2 every ABCDEFGH -> EFGH ABCD
// Permute 3 possible on longer iVector lengths (512bit = 8 double = 16 single)
// Permute 4 possible on half precision @512bit vectors.
friend inline void permute(vRealD &y,vRealD b,int perm){
switch (perm){
// 4 doubles=>2 permutes
#if defined(AVX1)||defined(AVX2)
case 0: y.v = _mm256_shuffle_pd(b.v,b.v,0x5); break;
case 1: y.v = _mm256_permute2f128_pd(b.v,b.v,0x01); break;
#endif
#ifdef SSE2
case 0: y.v = _mm_shuffle_pd(b.v,b.v,0x1); break;
#endif
#ifdef AVX512
// 8 double => 3 permutes
// Permute 0 every abcd efgh -> badc fehg
// Permute 1 every abcd efgh -> cdab ghef
// Permute 2 every abcd efgh -> efgh abcd
// NOTE: mm_512_permutex_pd not implemented
// NOTE: ignore warning
case 0: y.v = _mm512_swizzle_pd(b.v,_MM_SWIZ_REG_CDAB); break;
case 1: y.v = _mm512_swizzle_pd(b.v,_MM_SWIZ_REG_BADC); break;
case 2: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(1,0,3,2)); break;
#endif
#ifdef QPX
#error
#endif
default: exit(EXIT_FAILURE); break;
}
};
// gonna be bye-bye
void vload(dvec& a){
this->v = a;
}
dvec vget(){
return this->v ;
}
friend inline void vsplat(vRealD &ret,double a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_pd(a,a,a,a);
#endif
#ifdef SSE2
ret.v = _mm_set_pd(a,a);
#endif
#ifdef AVX512
ret.v = _mm512_set1_pd(a);
#endif
#ifdef QPX
ret.v = {a,a,a,a};
#endif
}
friend inline void vset(vRealD &ret, double *a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_pd(a[3],a[2],a[1],a[0]);
#endif
#ifdef SSE2
ret.v = _mm_set_pd(a[1],a[0]); // operands are taken high lane first
#endif
#ifdef AVX512
ret.v = _mm512_set_pd(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
// Note v has a0 a1 a2 a3 a4 a5 a6 a7
#endif
#ifdef QPX
ret.v = {a[0],a[1],a[2],a[3]};
#endif
}
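// Round-trip sketch (illustrative): the set intrinsics take operands from
// the highest lane downwards, hence the reversed argument lists above;
// vset therefore packs memory order, and vstore recovers it:
//   double a[4]={0,1,2,3}, b[4];
//   vRealD x; vset(x,a); vstore(x,b);  // b[i] == a[i] for all i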
friend inline void vstore(vRealD &ret, double *a){
#if defined (AVX1)|| defined (AVX2)
_mm256_store_pd(a,ret.v);
#endif
#ifdef SSE2
_mm_store_pd(a,ret.v);
#endif
#ifdef AVX512
_mm512_store_pd(a,ret.v);
// Note v has a7 a6 a5 a4 a3 a2 a1 a0
#endif
#ifdef QPX
printf("%s Not implemented\n",__func__);
exit(-1);
#endif
}
friend inline void vprefetch(const vRealD &v)
{
_mm_prefetch((const char*)&v.v,_MM_HINT_T0);
}
// Unary negation
friend inline vRealD operator -(const vRealD &r) {
vRealD ret;
vzero(ret);
ret = ret - r;
return ret;
}
friend inline RealD Reduce(const vRealD & in)
{
#if defined (AVX1) || defined(AVX2)
typedef union {
uint64_t l;
double d;
} my_conv_t;
my_conv_t converter;
// Alternative reduce_add via store, kept for reference:
/*
__attribute__ ((aligned(32))) double c_[4];
__m256d tmp  = _mm256_permute2f128_pd(in.v,in.v,0x01); // tmp 1032; in= 3210
__m256d hadd = _mm256_hadd_pd(in.v,tmp);               // hadd = 1+0,3+2,3+2,1+0
tmp  = _mm256_permute2f128_pd(hadd,hadd,0x01);         // tmp  = 3+2,1+0,1+0,3+2
hadd = _mm256_hadd_pd(tmp,tmp);                        // hadd = 3+2+1+0 in every lane
_mm256_store_pd(c_,hadd);
return c_[0];
*/
__m256d tmp = _mm256_permute2f128_pd(in.v,in.v,0x01); // tmp 1032; in= 3210
__m256d hadd = _mm256_hadd_pd(in.v,tmp); // hadd = 1+0,3+2,3+2,1+0
hadd = _mm256_hadd_pd(hadd,hadd); // hadd = 1+0+3+2...
converter.l = _mm256_extract_epi64(_mm256_castpd_si256(hadd),0); // reinterpret the double bits for integer extraction
return converter.d;
#endif
#ifdef AVX512
return _mm512_reduce_add_pd(in.v);
/*
__attribute__ ((aligned(32))) double c_[8];
_mm512_store_pd(c_,in.v);
return c_[0]+c_[1]+c_[2]+c_[3]+c_[4]+c_[5]+c_[6]+c_[7];
*/
#endif
#ifdef QPX
    printf("%s Not implemented\n",__func__);
    exit(-1);
#endif
}
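// Sketch (illustrative): Reduce is the horizontal add over all lanes,
//   vRealD x; vsplat(x,2.5);
//   RealD s = Reduce(x);   // s == 2.5 * vRealD::Nsimd()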
// *=,+=,-= operators
inline vRealD &operator *=(const vRealD &r) {
*this = (*this)*r;
return *this;
}
inline vRealD &operator +=(const vRealD &r) {
*this = *this+r;
return *this;
}
inline vRealD &operator -=(const vRealD &r) {
*this = *this-r;
return *this;
}
public:
static int Nsimd(void) { return sizeof(dvec)/sizeof(double);}
};
inline vRealD localInnerProduct(const vRealD & l, const vRealD & r) { return conj(l)*r; }
inline void zeroit(vRealD &z){ vzero(z);}
inline vRealD outerProduct(const vRealD &l, const vRealD& r)
{
return l*r;
}
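// Hedged usage sketch (exampleInnerProduct is illustrative, not part of the
// original interface): a scalar inner product over the SIMD lanes, combining
// the elementwise localInnerProduct with the horizontal Reduce.
inline RealD exampleInnerProduct(const vRealD &l,const vRealD &r)
{
  return Reduce(localInnerProduct(l,r)); // sum of l[i]*r[i] over all lanes
}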
}
#endif

272
Grid_vRealF.h Normal file

@@ -0,0 +1,272 @@
#ifndef VREALF_H
#define VREALF_H
#include "Grid.h"
namespace dpo {
class vRealF {
protected:
fvec v;
public:
vRealF(){};
////////////////////////////////////
// Arithmetic operator overloads +,-,*
////////////////////////////////////
friend inline vRealF operator + ( vRealF a, vRealF b)
{
vRealF ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_add_ps(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_add_ps(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_add_ps(a.v,b.v);
#endif
#ifdef QPX
vector4double aa,bb,cc;
aa = vec_lda(0,(float *)&a.v);
bb = vec_lda(0,(float *)&b.v);
cc = vec_add(aa,bb);
vec_sta(cc,0,(float *)&ret.v);
#endif
return ret;
};
friend inline vRealF operator - ( vRealF a, vRealF b)
{
vRealF ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_sub_ps(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_sub_ps(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_sub_ps(a.v,b.v);
#endif
#ifdef QPX
vector4double aa,bb,cc;
aa = vec_lda(0,(float *)&a.v);
bb = vec_lda(0,(float *)&b.v);
cc = vec_sub(aa,bb);
vec_sta(cc,0,(float *)&ret.v);
#endif
return ret;
};
friend inline vRealF operator * ( vRealF a, vRealF b)
{
vRealF ret;
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_mul_ps(a.v,b.v);
#endif
#ifdef SSE2
ret.v = _mm_mul_ps(a.v,b.v);
#endif
#ifdef AVX512
ret.v = _mm512_mul_ps(a.v,b.v);
#endif
#ifdef QPX
vector4double aa,bb,cc; // QPX has no single precision registers: loads promote float memory to double, so go through explicit load/compute/store.
aa = vec_lda(0,(float *)&a.v);
bb = vec_lda(0,(float *)&b.v);
cc = vec_mul(aa,bb);
vec_sta(cc,0,(float *)&ret.v);
#endif
return ret;
};
///////////////////////////////////////////////
// mult, sub, add, adj,conj, mac functions
///////////////////////////////////////////////
friend inline void mult(vRealF * __restrict__ y,const vRealF * __restrict__ l,const vRealF *__restrict__ r) {*y = (*l) * (*r);}
friend inline void sub (vRealF * __restrict__ y,const vRealF * __restrict__ l,const vRealF *__restrict__ r) {*y = (*l) - (*r);}
friend inline void add (vRealF * __restrict__ y,const vRealF * __restrict__ l,const vRealF *__restrict__ r) {*y = (*l) + (*r);}
friend inline vRealF adj(const vRealF &in) { return in; }
friend inline vRealF conj(const vRealF &in){ return in; }
friend inline void mac (vRealF &y,const vRealF a,const vRealF x){
#if defined (AVX1) || defined (SSE2)
y = a*x+y;
#endif
#ifdef AVX2 // AVX2 introduced FMA3 support; FMA4 (4-operand) would eliminate a register copy, but AVX2 provides only FMA3.
            // Fused ops accelerate multiply-accumulate, not a general multiply-add.
y.v = _mm256_fmadd_ps(a.v,x.v,y.v);
#endif
#ifdef AVX512
y.v = _mm512_fmadd_ps(a.v,x.v,y.v);
#endif
#ifdef QPX
vector4double aa,xx,yy; // QPX has no single precision registers: loads promote float memory to double, so go through explicit load/compute/store.
aa = vec_lda(0,(float *)&a.v);
xx = vec_lda(0,(float *)&x.v);
yy = vec_lda(0,(float *)&y.v);
yy = vec_madd(aa,xx,yy);
vec_sta(yy,0,(float *)&y.v);
#endif
}
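// The QPX branch above follows the load-promote / compute / demote-store
// pattern (illustrative recap):
//   vector4double aa = vec_lda(0,(float *)&a.v); // 4 floats -> 4 doubles
//   /* arithmetic in double precision */
//   vec_sta(yy,0,(float *)&y.v);                 // rounded back to float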
//////////////////////////////////
// Initialise to 1,0
//////////////////////////////////
friend inline void vone (vRealF &ret){vsplat(ret,1.0);}
friend inline void vzero(vRealF &ret){vsplat(ret,0.0);}
//////////////////////////////////////////////////////////
// Permute
// Permute 0 every ABCDEFGH -> BA DC FE HG
// Permute 1 every ABCDEFGH -> CD AB GH EF
// Permute 2 every ABCDEFGH -> EFGH ABCD
// Permute 3 possible on longer iVector lengths (512bit = 8 double = 16 single)
// Permute 4 possible on half precision 512-bit vectors.
//////////////////////////////////////////////////////////
friend inline void permute(vRealF &y,vRealF b,int perm){
switch (perm){
// 8 floats=>3 permutes
#if defined(AVX1)||defined(AVX2)
case 0: y.v = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); break;
case 1: y.v = _mm256_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2)); break;
case 2: y.v = _mm256_permute2f128_ps(b.v,b.v,0x01); break;
#endif
#ifdef SSE2
case 0: y.v = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(2,3,0,1)); break;
case 1: y.v = _mm_shuffle_ps(b.v,b.v,_MM_SHUFFLE(1,0,3,2));break;
#endif
#ifdef AVX512
// 16 floats => 4 permutes
// Permute 0 every abcd efgh ijkl mnop -> badc fehg jilk nmpo
// Permute 1 every abcd efgh ijkl mnop -> cdab ghef klij opmn
// Permute 2 every abcd efgh ijkl mnop -> efgh abcd mnop ijkl
// Permute 3 every abcd efgh ijkl mnop -> ijkl mnop abcd efgh
case 0: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_CDAB); break;
case 1: y.v = _mm512_swizzle_ps(b.v,_MM_SWIZ_REG_BADC); break;
case 2: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(2,3,0,1)); break;
case 3: y.v = _mm512_permute4f128_ps(b.v,(_MM_PERM_ENUM)_MM_SHUFFLE(1,0,3,2)); break;
#endif
#ifdef QPX
#error "QPX permute not implemented"
#endif
default: exit(EXIT_FAILURE); break;
}
};
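// Illustrative sketch: the permute plan supports a log2(Nsimd) butterfly
// reduction across lanes (assumes AVX, 8 floats, perms 0..2):
//   vRealF s = x, t;
//   for(int p=2;p>=0;p--){ permute(t,s,p); s = s + t; }
//   // every lane of s now holds the sum of all 8 lanes of x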
/////////////////////////////////////////////////////
// Broadcast a value across Nsimd copies.
/////////////////////////////////////////////////////
friend inline void vsplat(vRealF &ret,float a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_ps(a,a,a,a,a,a,a,a);
#endif
#ifdef SSE2
ret.v = _mm_set_ps(a,a,a,a);
#endif
#ifdef AVX512
//ret.v = _mm512_set_ps(a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a);
ret.v = _mm512_set1_ps(a);
#endif
#ifdef QPX
ret.v = {a,a,a,a};
#endif
}
friend inline void vset(vRealF &ret, float *a){
#if defined (AVX1)|| defined (AVX2)
ret.v = _mm256_set_ps(a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
#endif
#ifdef SSE2
ret.v = _mm_set_ps(a[3],a[2],a[1],a[0]); // operands are taken high lane first
#endif
#ifdef AVX512
ret.v = _mm512_set_ps( a[15],a[14],a[13],a[12],a[11],a[10],a[9],a[8],
a[7],a[6],a[5],a[4],a[3],a[2],a[1],a[0]);
// Note v has a0 through a15 in memory order
#endif
#ifdef QPX
ret.v = {a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7]};
#endif
}
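// Note (illustrative): as for vRealD, the set intrinsics take operands
// high lane first, so a[0] is the last argument; e.g. under SSE2
//   _mm_set_ps(a[3],a[2],a[1],a[0]);  // lanes = {a[0],a[1],a[2],a[3]}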
friend inline void vstore(vRealF &ret, float *a){
#if defined (AVX1)|| defined (AVX2)
_mm256_store_ps(a,ret.v);
#endif
#ifdef SSE2
_mm_store_ps(a,ret.v);
#endif
#ifdef AVX512
_mm512_store_ps(a,ret.v);
// Note v has a15 a14 ... a1 a0
#endif
#ifdef QPX
printf("%s Not implemented\n",__func__);
exit(-1);
#endif
}
friend inline void vprefetch(const vRealF &v)
{
_mm_prefetch((const char*)&v.v,_MM_HINT_T0);
}
// Unary negation
friend inline vRealF operator -(const vRealF &r) {
vRealF ret;
vzero(ret);
ret = ret - r;
return ret;
}
friend inline RealF Reduce(const vRealF & in)
{
#if defined (AVX1) || defined(AVX2)
__attribute__ ((aligned(32))) float c_[8];
__m256 tmp = _mm256_permute2f128_ps(in.v,in.v,0x01);
__m256 hadd = _mm256_hadd_ps(in.v,tmp);
tmp = _mm256_permute2f128_ps(hadd,hadd,0x01);
hadd = _mm256_hadd_ps(tmp,tmp);
_mm256_store_ps(c_,hadd);
return (float)c_[0];
#endif
#ifdef AVX512
return _mm512_reduce_add_ps(in.v);
/*
__attribute__ ((aligned(64))) float c_[16];
_mm512_store_ps(c_,in.v);
return c_[0]+c_[1]+c_[2]+c_[3]+c_[4]+c_[5]+c_[6]+c_[7]
+c_[8]+c_[9]+c_[10]+c_[11]+c_[12]+c_[13]+c_[14]+c_[15];
*/
#endif
#ifdef QPX
    printf("%s Not implemented\n",__func__);
    exit(-1);
#endif
}
// *=,+=,-= operators
inline vRealF &operator *=(const vRealF &r) {
*this = (*this)*r;
return *this;
}
inline vRealF &operator +=(const vRealF &r) {
*this = *this+r;
return *this;
}
inline vRealF &operator -=(const vRealF &r) {
*this = *this-r;
return *this;
}
public:
static int Nsimd(void) { return sizeof(fvec)/sizeof(float);}
};
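// Hedged usage sketch (exampleScale is illustrative, not part of the
// original interface): stream a float array through vRealF, Nsimd elements
// per iteration; assumes n is a multiple of Nsimd and both pointers are
// suitably aligned for the store intrinsics.
inline void exampleScale(float *out,float *in,float s,int n)
{
  vRealF vs,vx;
  vsplat(vs,s);                    // broadcast the scalar to every lane
  for(int i=0;i<n;i+=vRealF::Nsimd()){
    vset(vx,in+i);                 // pack Nsimd contiguous floats
    vx = vx*vs;                    // one SIMD multiply covers Nsimd elements
    vstore(vx,out+i);              // write back in memory order
  }
}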
inline vRealF localInnerProduct(const vRealF & l, const vRealF & r) { return conj(l)*r; }
inline void zeroit(vRealF &z){ vzero(z);}
inline vRealF outerProduct(const vRealF &l, const vRealF& r)
{
return l*r;
}
}
#endif

2
configure.ac Normal file

@@ -0,0 +1,2 @@
AC_INIT([Grid], [1.0])
AC_OUTPUT