mirror of https://github.com/paboyle/Grid.git
synced 2025-08-23 22:47:10 +01:00

merged sycl to feature-gpt
@@ -26,6 +26,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#pragma once
+#include <Grid/lattice/Lattice_view.h>
#include <Grid/lattice/Lattice_base.h>
#include <Grid/lattice/Lattice_conformable.h>
#include <Grid/lattice/Lattice_ET.h>

@@ -92,12 +92,18 @@ const lobj & eval(const uint64_t ss, const LatticeView<lobj> &arg)
{
  return arg[ss];
}

// What needs this?
// Cannot be legal on accelerator
// Comparison must convert
#if 1
template <class lobj> accelerator_inline
const lobj & eval(const uint64_t ss, const Lattice<lobj> &arg)
{
-  auto view = arg.AcceleratorView(ViewRead);
+  auto view = arg.View(AcceleratorRead);
  return view[ss];
}
#endif

///////////////////////////////////////////////////
// handle nodes in syntax tree - eval one operand
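
For orientation, the node-level eval that this banner introduces follows the usual expression-template recursion. A minimal sketch of the binary case, paraphrasing the pattern in this file rather than quoting a line of this diff:

    // Sketch only: evaluate a binary AST node at outer site ss by evaluating
    // both operands and applying the functor stored in the node.
    template <typename Op, typename T1, typename T2> accelerator_inline
    auto eval(const uint64_t ss, const LatticeBinaryExpression<Op, T1, T2> &expr)
      -> decltype(expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2)))
    {
      return expr.op.func(eval(ss, expr.arg1), eval(ss, expr.arg2)); // recurse AST
    }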
@@ -180,16 +186,12 @@ inline void CBFromExpression(int &cb, const T1 &lat) // Lattice leaf
  cb = lat.Checkerboard();
}
template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
-inline void CBFromExpression(int &cb, const T1 &notlat) // non-lattice leaf
-{
-}
-
+inline void CBFromExpression(int &cb, const T1 &notlat) {} // non-lattice leaf
template <typename Op, typename T1> inline
void CBFromExpression(int &cb,const LatticeUnaryExpression<Op, T1> &expr)
{
  CBFromExpression(cb, expr.arg1); // recurse AST
}

template <typename Op, typename T1, typename T2> inline
void CBFromExpression(int &cb,const LatticeBinaryExpression<Op, T1, T2> &expr)
{

@@ -204,6 +206,68 @@ inline void CBFromExpression(int &cb, const LatticeTrinaryExpression<Op, T1, T2,
  CBFromExpression(cb, expr.arg3); // recurse AST
}

+//////////////////////////////////////////////////////////////////////////
+// ViewOpen
+//////////////////////////////////////////////////////////////////////////
+template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
+inline void ExpressionViewOpen(T1 &lat) // Lattice leaf
+{
+  lat.ViewOpen(AcceleratorRead);
+}
+template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
+inline void ExpressionViewOpen(T1 &notlat) {}
+
+template <typename Op, typename T1> inline
+void ExpressionViewOpen(LatticeUnaryExpression<Op, T1> &expr)
+{
+  ExpressionViewOpen(expr.arg1); // recurse AST
+}
+
+template <typename Op, typename T1, typename T2> inline
+void ExpressionViewOpen(LatticeBinaryExpression<Op, T1, T2> &expr)
+{
+  ExpressionViewOpen(expr.arg1); // recurse AST
+  ExpressionViewOpen(expr.arg2); // recurse AST
+}
+template <typename Op, typename T1, typename T2, typename T3>
+inline void ExpressionViewOpen(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
+{
+  ExpressionViewOpen(expr.arg1); // recurse AST
+  ExpressionViewOpen(expr.arg2); // recurse AST
+  ExpressionViewOpen(expr.arg3); // recurse AST
+}
+
+//////////////////////////////////////////////////////////////////////////
+// ViewClose
+//////////////////////////////////////////////////////////////////////////
+template <class T1,typename std::enable_if<is_lattice<T1>::value, T1>::type * = nullptr>
+inline void ExpressionViewClose( T1 &lat) // Lattice leaf
+{
+  lat.ViewClose();
+}
+template <class T1,typename std::enable_if<!is_lattice<T1>::value, T1>::type * = nullptr>
+inline void ExpressionViewClose(T1 &notlat) {}
+
+template <typename Op, typename T1> inline
+void ExpressionViewClose(LatticeUnaryExpression<Op, T1> &expr)
+{
+  ExpressionViewClose(expr.arg1); // recurse AST
+}
+template <typename Op, typename T1, typename T2> inline
+void ExpressionViewClose(LatticeBinaryExpression<Op, T1, T2> &expr)
+{
+  ExpressionViewClose(expr.arg1); // recurse AST
+  ExpressionViewClose(expr.arg2); // recurse AST
+}
+template <typename Op, typename T1, typename T2, typename T3>
+inline void ExpressionViewClose(LatticeTrinaryExpression<Op, T1, T2, T3> &expr)
+{
+  ExpressionViewClose(expr.arg1); // recurse AST
+  ExpressionViewClose(expr.arg2); // recurse AST
+  ExpressionViewClose(expr.arg3); // recurse AST
+}
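
Taken together, ExpressionViewOpen/ExpressionViewClose bracket kernel execution. The assignment operators later in this commit use them exactly like this condensed sketch (with `me` and `expr` as in those operators):

    auto exprCopy = expr;              // leaf views are opened on a mutable copy
    ExpressionViewOpen(exprCopy);      // recursively ViewOpen every Lattice leaf
    auto me = View(AcceleratorWriteDiscard);
    accelerator_for(ss, me.size(), 1, {
      auto tmp = eval(ss, exprCopy);   // leaves are now device-safe LatticeView objects
      vstream(me[ss], tmp);
    });
    me.ViewClose();
    ExpressionViewClose(exprCopy);     // release every leaf view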

////////////////////////////////////////////
// Unary operators and funcs
////////////////////////////////////////////

@@ -37,9 +37,9 @@ NAMESPACE_BEGIN(Grid);
template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
-  auto rhs_v = rhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
  conformable(ret,rhs);
  conformable(lhs,rhs);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
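
Since the hunks in this file truncate each function, the complete new-style body of this first `mult` reads approximately as follows — reassembled from the fragments above and below; the kernel interior is the part the hunk does not show:

    template<class obj1,class obj2,class obj3> inline
    void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
      ret.Checkerboard() = lhs.Checkerboard();
      autoView( ret_v , ret, AcceleratorWrite);  // scoped views replace AcceleratorView
      autoView( lhs_v , lhs, AcceleratorRead);
      autoView( rhs_v , rhs, AcceleratorRead);
      conformable(ret,rhs);
      conformable(lhs,rhs);
      accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
        decltype(coalescedRead(obj1())) tmp;
        auto lhs_t = lhs_v(ss);                  // per-lane read
        auto rhs_t = rhs_v(ss);
        mult(&tmp,&lhs_t,&rhs_t);                // site-local multiply
        coalescedWrite(ret_v[ss],tmp);           // per-lane write back
      });
    }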
@@ -56,9 +56,9 @@ void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(ret,rhs);
  conformable(lhs,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
-  auto rhs_v = rhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -73,9 +73,9 @@ void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(ret,rhs);
  conformable(lhs,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
-  auto rhs_v = rhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -89,9 +89,9 @@ void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(ret,rhs);
  conformable(lhs,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
-  auto rhs_v = rhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -108,8 +108,8 @@ template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(lhs,ret);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    mult(&tmp,&lhs_v(ss),&rhs);

@@ -121,8 +121,8 @@ template<class obj1,class obj2,class obj3> inline
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(ret,lhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -135,8 +135,8 @@ template<class obj1,class obj2,class obj3> inline
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(ret,lhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -148,8 +148,8 @@ template<class obj1,class obj2,class obj3> inline
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
  ret.Checkerboard() = lhs.Checkerboard();
  conformable(lhs,ret);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto lhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( lhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,lhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto lhs_t=lhs_v(ss);

@@ -165,8 +165,8 @@ template<class obj1,class obj2,class obj3> inline
void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = rhs.Checkerboard();
  conformable(ret,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto rhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( rhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto rhs_t=rhs_v(ss);

@@ -179,8 +179,8 @@ template<class obj1,class obj2,class obj3> inline
void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = rhs.Checkerboard();
  conformable(ret,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto rhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( rhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto rhs_t=rhs_v(ss);

@@ -193,8 +193,8 @@ template<class obj1,class obj2,class obj3> inline
void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = rhs.Checkerboard();
  conformable(ret,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto rhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( rhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto rhs_t=rhs_v(ss);

@@ -206,8 +206,8 @@ template<class obj1,class obj2,class obj3> inline
void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
  ret.Checkerboard() = rhs.Checkerboard();
  conformable(ret,rhs);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto rhs_v = lhs.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( rhs_v , lhs, AcceleratorRead);
  accelerator_for(ss,rhs_v.size(),obj1::Nsimd(),{
    decltype(coalescedRead(obj1())) tmp;
    auto rhs_t=rhs_v(ss);

@@ -221,9 +221,9 @@ void axpy(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &x,const Lattice<vobj> &
  ret.Checkerboard() = x.Checkerboard();
  conformable(ret,x);
  conformable(x,y);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto x_v = x.AcceleratorView(ViewRead);
-  auto y_v = y.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( x_v , x, AcceleratorRead);
+  autoView( y_v , y, AcceleratorRead);
  accelerator_for(ss,x_v.size(),vobj::Nsimd(),{
    auto tmp = a*x_v(ss)+y_v(ss);
    coalescedWrite(ret_v[ss],tmp);

@@ -234,9 +234,9 @@ void axpby(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const Lattice
  ret.Checkerboard() = x.Checkerboard();
  conformable(ret,x);
  conformable(x,y);
-  auto ret_v = ret.AcceleratorView(ViewWrite);
-  auto x_v = x.AcceleratorView(ViewRead);
-  auto y_v = y.AcceleratorView(ViewRead);
+  autoView( ret_v , ret, AcceleratorWrite);
+  autoView( x_v , x, AcceleratorRead);
+  autoView( y_v , y, AcceleratorRead);
  accelerator_for(ss,x_v.size(),vobj::Nsimd(),{
    auto tmp = a*x_v(ss)+b*y_v(ss);
    coalescedWrite(ret_v[ss],tmp);

@@ -29,6 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */

#pragma once

#define STREAMING_STORES

@@ -37,161 +38,6 @@ NAMESPACE_BEGIN(Grid);

extern int GridCshiftPermuteMap[4][16];

-///////////////////////////////////////////////////////////////////
-// Base class which can be used by traits to pick up behaviour
-///////////////////////////////////////////////////////////////////
-class LatticeBase {};
-
-/////////////////////////////////////////////////////////////////////////////////////////
-// Conformable checks; same instance of Grid required
-/////////////////////////////////////////////////////////////////////////////////////////
-void accelerator_inline conformable(GridBase *lhs,GridBase *rhs)
-{
-  assert(lhs == rhs);
-}
-
-////////////////////////////////////////////////////////////////////////////
-// Advise the LatticeAccelerator class
-////////////////////////////////////////////////////////////////////////////
-enum LatticeAcceleratorAdvise {
-  AdviseInfrequentUse = 0x1, // Advise that the data is used infrequently. This can
-                             // significantly influence performance of bulk storage.
-  AdviseReadMostly    = 0x2, // Data will mostly be read. On some architectures
-                             // enables read-only copies of memory to be kept on
-                             // host and device.
-};
-
-////////////////////////////////////////////////////////////////////////////
-// View Access Mode
-////////////////////////////////////////////////////////////////////////////
-enum ViewMode {
-  ViewRead      = 0x1,
-  ViewWrite     = 0x2,
-  ViewReadWrite = 0x3
-};
-
-////////////////////////////////////////////////////////////////////////////
-// Minimal base class containing only data valid to access from accelerator
-// _odata will be a managed pointer in CUDA
-////////////////////////////////////////////////////////////////////////////
-// Force access to lattice through a view object.
-// prevents writing of code that will not offload to GPU, but perhaps annoyingly
-// strict since host code could in principle access the lattice object directly
-// Need to decide programming model.
-#define LATTICE_VIEW_STRICT
-template<class vobj> class LatticeAccelerator : public LatticeBase
-{
-protected:
-  GridBase *_grid;
-  int checkerboard;
-  vobj *_odata;    // A managed pointer
-  uint64_t _odata_size;
-public:
-  accelerator_inline LatticeAccelerator() : checkerboard(0), _odata(nullptr), _odata_size(0), _grid(nullptr) { };
-  accelerator_inline uint64_t oSites(void) const { return _odata_size; };
-  accelerator_inline int  Checkerboard(void) const { return checkerboard; };
-  accelerator_inline int &Checkerboard(void) { return this->checkerboard; }; // can assign checkerboard on a container, not a view
-  accelerator_inline void Conformable(GridBase * &grid) const
-  {
-    if (grid) conformable(grid, _grid);
-    else      grid = _grid;
-  };
-
-  accelerator_inline void AcceleratorPrefetch(int accessMode = ViewReadWrite) { // will use accessMode in future
-    gridAcceleratorPrefetch(_odata,_odata_size*sizeof(vobj));
-  };
-
-  accelerator_inline void HostPrefetch(int accessMode = ViewReadWrite) { // will use accessMode in future
-#ifdef GRID_NVCC
-#ifndef __CUDA_ARCH__ // only on host
-    //cudaMemPrefetchAsync(_odata,_odata_size*sizeof(vobj),cudaCpuDeviceId);
-#endif
-#endif
-  };
-};
-
-/////////////////////////////////////////////////////////////////////////////////////////
-// A View class which provides accessor to the data.
-// This will be safe to call from accelerator_for and is trivially copy constructible
-// The copy constructor for this will need to be used by device lambda functions
-/////////////////////////////////////////////////////////////////////////////////////////
-template<class vobj>
-class LatticeView : public LatticeAccelerator<vobj>
-{
-public:
-
-  // Rvalue
-#ifdef __CUDA_ARCH__
-  accelerator_inline const typename vobj::scalar_object operator()(size_t i) const { return coalescedRead(this->_odata[i]); }
-#else
-  accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; }
-#endif
-
-  accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
-  accelerator_inline vobj & operator[](size_t i) { return this->_odata[i]; };
-
-  accelerator_inline uint64_t begin(void) const { return 0;};
-  accelerator_inline uint64_t end(void)   const { return this->_odata_size; };
-  accelerator_inline uint64_t size(void)  const { return this->_odata_size; };
-
-  LatticeView(const LatticeAccelerator<vobj> &refer_to_me) : LatticeAccelerator<vobj> (refer_to_me)
-  {
-  }
-};
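
The operator()/operator[] split in the (relocated, see the new Lattice_view.h include earlier in this commit) view class is what lets the same kernel source run on both targets. A sketch of the intended use, assuming Grid's coalescedRead/coalescedWrite semantics and hypothetical fields x, r:

    autoView( x_v , x, AcceleratorRead);
    autoView( r_v , r, AcceleratorWrite);
    accelerator_for(ss, x_v.size(), vobj::Nsimd(), {
      auto s = x_v(ss);            // GPU: one SIMD lane per thread via coalescedRead;
                                   // CPU: a const reference to the whole vobj
      coalescedWrite(r_v[ss], s);  // operator[] names the full vobj slot; the write
                                   // is decomposed per lane on the accelerator
    });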
-
-/////////////////////////////////////////////////////////////////////////////////////////
-// Lattice expression types used by ET to assemble the AST
-//
-// Need to be able to detect code paths according to whether a lattice object or not
-// so introduce some trait type things
-/////////////////////////////////////////////////////////////////////////////////////////
-
-class LatticeExpressionBase {};
-
-template <typename T> using is_lattice      = std::is_base_of<LatticeBase, T>;
-template <typename T> using is_lattice_expr = std::is_base_of<LatticeExpressionBase,T >;
-
-template<class T, bool isLattice> struct ViewMapBase { typedef T Type; };
-template<class T>                 struct ViewMapBase<T,true> { typedef LatticeView<typename T::vector_object> Type; };
-template<class T> using ViewMap = ViewMapBase<T,std::is_base_of<LatticeBase, T>::value >;
-
-template <typename Op, typename _T1>
-class LatticeUnaryExpression : public LatticeExpressionBase
-{
-public:
-  typedef typename ViewMap<_T1>::Type T1;
-  Op op;
-  T1 arg1;
-  LatticeUnaryExpression(Op _op,const _T1 &_arg1) : op(_op), arg1(_arg1) {};
-};
-
-template <typename Op, typename _T1, typename _T2>
-class LatticeBinaryExpression : public LatticeExpressionBase
-{
-public:
-  typedef typename ViewMap<_T1>::Type T1;
-  typedef typename ViewMap<_T2>::Type T2;
-  Op op;
-  T1 arg1;
-  T2 arg2;
-  LatticeBinaryExpression(Op _op,const _T1 &_arg1,const _T2 &_arg2) : op(_op), arg1(_arg1), arg2(_arg2) {};
-};
-
-template <typename Op, typename _T1, typename _T2, typename _T3>
-class LatticeTrinaryExpression : public LatticeExpressionBase
-{
-public:
-  typedef typename ViewMap<_T1>::Type T1;
-  typedef typename ViewMap<_T2>::Type T2;
-  typedef typename ViewMap<_T3>::Type T3;
-  Op op;
-  T1 arg1;
-  T2 arg2;
-  T3 arg3;
-  LatticeTrinaryExpression(Op _op,const _T1 &_arg1,const _T2 &_arg2,const _T3 &_arg3) : op(_op), arg1(_arg1), arg2(_arg2), arg3(_arg3) {};
-};
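
The net effect of ViewMap is that expression nodes never store Lattice members, only trivially copyable views, so the whole AST can be captured by a device lambda. Illustrative compile-time checks (not part of the diff) that capture the intent:

    static_assert(std::is_same<ViewMap<Lattice<vComplex>>::Type,
                               LatticeView<vComplex>>::value,
                  "Lattice leaves decay to LatticeView inside expression nodes");
    static_assert(std::is_same<ViewMap<double>::Type, double>::value,
                  "non-lattice leaves are stored as-is");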
-
/////////////////////////////////////////////////////////////////////////////////////////
// The real lattice class, with normal copy and assignment semantics.
// This contains extra (host resident) grid pointer data that may be accessed by host code

@@ -248,31 +94,25 @@ public:
#endif
};

+/////////////////////////////////////////////////////////////////////////////////
+// Can be used to mark the accelerator copy dirty without a copy from host;
+// useful for temporaries whose previous contents are "don't care"
+/////////////////////////////////////////////////////////////////////////////////
+void SetViewMode(ViewMode mode) {
+  LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
+  accessor.ViewClose();
+}
/////////////////////////////////////////////////////////////////////////////////
// Return a view object that may be dereferenced in site loops.
// The view is trivially copy constructible and may be copied to an accelerator device
// in device lambdas
/////////////////////////////////////////////////////////////////////////////////
-LatticeView<vobj> View (void) const // deprecated, should pick AcceleratorView for accelerator_for
-{                                   // and HostView for thread_for
-  LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this));
-
+LatticeView<vobj> View (ViewMode mode) const
+{
+  LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
  return accessor;
}

-LatticeView<vobj> AcceleratorView(int mode = ViewReadWrite) const
-{
-  LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this));
-  //accessor.AcceleratorPrefetch(mode);
-  return accessor;
-}
-
-LatticeView<vobj> HostView(int mode = ViewReadWrite) const
-{
-  LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this));
-  //accessor.HostPrefetch(mode);
-  return accessor;
-}

~Lattice() {
  if ( this->_odata_size ) {
    dealloc();

@@ -292,12 +132,16 @@ public:
  CBFromExpression(cb,expr);
  assert( (cb==Odd) || (cb==Even));
  this->checkerboard=cb;

-  auto me = AcceleratorView(ViewWrite);

+  auto exprCopy = expr;
+  ExpressionViewOpen(exprCopy);
+  auto me = View(AcceleratorWriteDiscard);
  accelerator_for(ss,me.size(),1,{
-    auto tmp = eval(ss,expr);
+    auto tmp = eval(ss,exprCopy);
    vstream(me[ss],tmp);
  });
+  me.ViewClose();
+  ExpressionViewClose(exprCopy);
  return *this;
}
template <typename Op, typename T1,typename T2> inline Lattice<vobj> & operator=(const LatticeBinaryExpression<Op,T1,T2> &expr)

@@ -312,11 +156,15 @@ public:
  assert( (cb==Odd) || (cb==Even));
  this->checkerboard=cb;

-  auto me = AcceleratorView(ViewWrite);
+  auto exprCopy = expr;
+  ExpressionViewOpen(exprCopy);
+  auto me = View(AcceleratorWriteDiscard);
  accelerator_for(ss,me.size(),1,{
-    auto tmp = eval(ss,expr);
+    auto tmp = eval(ss,exprCopy);
    vstream(me[ss],tmp);
  });
+  me.ViewClose();
+  ExpressionViewClose(exprCopy);
  return *this;
}
template <typename Op, typename T1,typename T2,typename T3> inline Lattice<vobj> & operator=(const LatticeTrinaryExpression<Op,T1,T2,T3> &expr)

@@ -330,11 +178,15 @@ public:
  CBFromExpression(cb,expr);
  assert( (cb==Odd) || (cb==Even));
  this->checkerboard=cb;
-  auto me = AcceleratorView(ViewWrite);
+  auto exprCopy = expr;
+  ExpressionViewOpen(exprCopy);
+  auto me = View(AcceleratorWriteDiscard);
  accelerator_for(ss,me.size(),1,{
-    auto tmp = eval(ss,expr);
+    auto tmp = eval(ss,exprCopy);
    vstream(me[ss],tmp);
  });
+  me.ViewClose();
+  ExpressionViewClose(exprCopy);
  return *this;
}
//GridFromExpression is tricky to do
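
A usage sketch of what these three operators service — standard Grid expression-template assignment, with the grid built via Grid's usual helpers (assumed available):

    GridCartesian *grid = SpaceTimeGrid::makeFourDimGrid(
        GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
    LatticeComplex a(grid), b(grid), c(grid);
    c = a*b + a;   // builds a LatticeBinaryExpression tree; operator= opens the
                   // leaf views, evaluates per site on the accelerator, closes them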
@@ -385,10 +237,11 @@ public:
}

template<class sobj> inline Lattice<vobj> & operator = (const sobj & r){
-  auto me = View();
+  auto me = View(CpuWrite);
  thread_for(ss,me.size(),{
-    me[ss] = r;
+    me[ss]= r;
  });
+  me.ViewClose();
  return *this;
}

@@ -398,11 +251,12 @@ public:
///////////////////////////////////////////
// user defined constructor
///////////////////////////////////////////
-Lattice(GridBase *grid) {
+Lattice(GridBase *grid,ViewMode mode=AcceleratorWriteDiscard) {
  this->_grid = grid;
  resize(this->_grid->oSites());
  assert((((uint64_t)&this->_odata[0])&0xF) ==0);
  this->checkerboard=0;
+  SetViewMode(mode);
}

// virtual ~Lattice(void) = default;
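
The new mode=AcceleratorWriteDiscard default matters for temporaries: the freshly allocated field is marked valid on the device without any host-to-device transfer. A sketch of the case this optimises, assuming the cache semantics the mode name suggests:

    LatticeComplex tmp(grid);  // constructor runs SetViewMode(AcceleratorWriteDiscard):
                               // no copy of the (uninitialised) host data to the device
    tmp = a*b;                 // first touch happens directly in device memory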
@@ -440,11 +294,12 @@ public:
  typename std::enable_if<!std::is_same<robj,vobj>::value,int>::type i=0;
  conformable(*this,r);
  this->checkerboard = r.Checkerboard();
-  auto me = AcceleratorView(ViewWrite);
-  auto him= r.AcceleratorView(ViewRead);
+  auto me = View(AcceleratorWriteDiscard);
+  auto him= r.View(AcceleratorRead);
  accelerator_for(ss,me.size(),vobj::Nsimd(),{
    coalescedWrite(me[ss],him(ss));
  });
+  me.ViewClose(); him.ViewClose();
  return *this;
}

@@ -454,11 +309,12 @@ public:
inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
  this->checkerboard = r.Checkerboard();
  conformable(*this,r);
-  auto me = AcceleratorView(ViewWrite);
-  auto him= r.AcceleratorView(ViewRead);
+  auto me = View(AcceleratorWriteDiscard);
+  auto him= r.View(AcceleratorRead);
  accelerator_for(ss,me.size(),vobj::Nsimd(),{
    coalescedWrite(me[ss],him(ss));
  });
+  me.ViewClose(); him.ViewClose();
  return *this;
}
///////////////////////////////////////////

@@ -51,34 +51,18 @@ template<class VField, class Matrix>
void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
{
  typedef decltype(basis[0]) Field;
-  typedef decltype(basis[0].View()) View;
-  auto tmp_v = basis[0].AcceleratorView(ViewReadWrite);
-  Vector<View> basis_v(basis.size(),tmp_v);
-  typedef typename std::remove_reference<decltype(tmp_v[0])>::type vobj;
+  typedef decltype(basis[0].View(AcceleratorRead)) View;
+
+  Vector<View> basis_v; basis_v.reserve(basis.size());
  GridBase* grid = basis[0].Grid();

  for(int k=0;k<basis.size();k++){
-    basis_v[k] = basis[k].AcceleratorView(ViewReadWrite);
+    basis_v.push_back(basis[k].View(AcceleratorWrite));
  }

#ifndef GRID_NVCC
  thread_region
  {
    std::vector < vobj > B(Nm); // Thread private
    thread_for_in_region(ss, grid->oSites(),{
      for(int j=j0; j<j1; ++j) B[j]=0.;

      for(int j=j0; j<j1; ++j){
        for(int k=k0; k<k1; ++k){
          B[j] +=Qt(j,k) * basis_v[k][ss];
        }
      }
      for(int j=j0; j<j1; ++j){
        basis_v[j][ss] = B[j];
      }
    });
  }
#else

  View *basis_vp = &basis_v[0];

  int nrot = j1-j0;
  if (!nrot) // edge case not handled gracefully by Cuda
    return;

@@ -86,6 +70,8 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
  uint64_t oSites   = grid->oSites();
  uint64_t siteBlock= (grid->oSites()+nrot-1)/nrot; // Maximum 1 additional vector overhead

+  typedef typename std::remove_reference<decltype(basis_v[0][0])>::type vobj;
+
  Vector<vobj> Bt(siteBlock * nrot);
  auto Bp=&Bt[0];

@@ -96,7 +82,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
    int j = i/Nm;
    int k = i%Nm;
    Qt_p[i]=Qt(j,k);
  });
});

// Block the loop to keep storage footprint down
for(uint64_t s=0;s<oSites;s+=siteBlock){

@@ -132,27 +118,30 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
      coalescedWrite(basis_v[jj][sss],coalescedRead(Bp[ss*nrot+j]));
    });
  }
#endif

+  for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
}
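
Usage sketch for the routine above (hypothetical Nm; identity rotation for illustration):

    // basis[j] <- sum_k Qt(j,k) * basis[k], for j in [j0,j1), k in [k0,k1)
    Eigen::MatrixXd Qt = Eigen::MatrixXd::Identity(Nm, Nm);
    basisRotate(basis, Qt, 0, Nm, 0, Nm, Nm);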

// Extract a single rotated vector
template<class Field>
void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,int j, int k0,int k1,int Nm)
{
-  typedef decltype(basis[0].AcceleratorView()) View;
+  typedef decltype(basis[0].View(AcceleratorRead)) View;
  typedef typename Field::vector_object vobj;
  GridBase* grid = basis[0].Grid();

  result.Checkerboard() = basis[0].Checkerboard();
-  auto result_v=result.AcceleratorView(ViewWrite);
-  Vector<View> basis_v(basis.size(),result_v);
-
+  Vector<View> basis_v; basis_v.reserve(basis.size());
  for(int k=0;k<basis.size();k++){
-    basis_v[k] = basis[k].AcceleratorView(ViewRead);
+    basis_v.push_back(basis[k].View(AcceleratorRead));
  }
  vobj zz=Zero();
  Vector<double> Qt_jv(Nm);
  double * Qt_j = & Qt_jv[0];
  for(int k=0;k<Nm;++k) Qt_j[k]=Qt(j,k);

+  autoView(result_v,result,AcceleratorWrite);
  accelerator_for(ss, grid->oSites(),vobj::Nsimd(),{
    auto B=coalescedRead(zz);
    for(int k=k0; k<k1; ++k){

@@ -160,6 +149,7 @@ void basisRotateJ(Field &result,std::vector<Field> &basis,Eigen::MatrixXd& Qt,in
    }
    coalescedWrite(result_v[ss], B);
  });
+  for(int k=0;k<basis.size();k++) basis_v[k].ViewClose();
}

template<class Field>

@@ -78,9 +78,9 @@ template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
{
  Lattice<vPredicate> ret(rhs.Grid());
-  auto lhs_v = lhs.View();
-  auto rhs_v = rhs.View();
-  auto ret_v = ret.View();
+  autoView( lhs_v, lhs, CpuRead);
+  autoView( rhs_v, rhs, CpuRead);
+  autoView( ret_v, ret, CpuWrite);
  thread_for( ss, rhs_v.size(), {
    ret_v[ss]=op(lhs_v[ss],rhs_v[ss]);
  });

@@ -93,8 +93,8 @@ template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
{
  Lattice<vPredicate> ret(lhs.Grid());
-  auto lhs_v = lhs.View();
-  auto ret_v = ret.View();
+  autoView( lhs_v, lhs, CpuRead);
+  autoView( ret_v, ret, CpuWrite);
  thread_for( ss, lhs_v.size(), {
    ret_v[ss]=op(lhs_v[ss],rhs);
  });

@@ -107,8 +107,8 @@ template<class vfunctor,class lobj,class robj>
inline Lattice<vPredicate> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
{
  Lattice<vPredicate> ret(rhs.Grid());
-  auto rhs_v = rhs.View();
-  auto ret_v = ret.View();
+  autoView( rhs_v, rhs, CpuRead);
+  autoView( ret_v, ret, CpuWrite);
  thread_for( ss, rhs_v.size(), {
    ret_v[ss]=op(lhs,rhs_v[ss]);
  });
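
These thread_for comparisons back the lattice relational operators; a usage sketch, assuming Grid's relational overloads and predicated `where`:

    LatticeReal a(grid), b(grid);
    Lattice<vPredicate> mask = a > b;   // routed through LLComparison
    LatticeReal c = where(mask, a, b);  // per-site select driven by the predicate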

@@ -37,7 +37,7 @@ template<class iobj> inline void LatticeCoordinate(Lattice<iobj> &l,int mu)
  GridBase *grid = l.Grid();
  int Nsimd = grid->iSites();

-  auto l_v = l.View();
+  autoView(l_v, l, CpuWrite);
  thread_for( o, grid->oSites(), {
    vector_type vI;
    Coordinate gcoor;

@@ -51,23 +51,5 @@ template<class iobj> inline void LatticeCoordinate(Lattice<iobj> &l,int mu)
  });
};

-// LatticeCoordinate();
-// FIXME for debug; deprecate this; made obsolete by
-template<class vobj> void lex_sites(Lattice<vobj> &l){
-  auto l_v = l.View();
-  Real *v_ptr = (Real *)&l_v[0];
-  size_t o_len = l.Grid()->oSites();
-  size_t v_len = sizeof(vobj)/sizeof(vRealF);
-  size_t vec_len = vRealF::Nsimd();
-
-  for(int i=0;i<o_len;i++){
-    for(int j=0;j<v_len;j++){
-      for(int vv=0;vv<vec_len;vv+=2){
-        v_ptr[i*v_len*vec_len+j*vec_len+vv  ]= i+vv*500;
-        v_ptr[i*v_len*vec_len+j*vec_len+vv+1]= i+vv*500;
-      }
-    }}
-}

NAMESPACE_END(Grid);
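
Usage sketch for LatticeCoordinate, a common building block for momentum phases:

    LatticeComplex coor(grid);
    LatticeCoordinate(coor, 1);   // every site holds its own global y-coordinate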
@@ -43,8 +43,8 @@ template<class vobj>
inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
{
  Lattice<typename vobj::tensor_reduced> ret(rhs.Grid());
-  auto rhs_v = rhs.View();
-  auto ret_v = ret.View();
+  autoView( rhs_v , rhs, AcceleratorRead);
+  autoView( ret_v , ret, AcceleratorWrite);
  accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
    coalescedWrite(ret_v[ss],innerProduct(rhs_v(ss),rhs_v(ss)));
  });

@@ -56,9 +56,9 @@ template<class vobj>
inline auto localInnerProduct (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs) -> Lattice<typename vobj::tensor_reduced>
{
  Lattice<typename vobj::tensor_reduced> ret(rhs.Grid());
-  auto lhs_v = lhs.View();
-  auto rhs_v = rhs.View();
-  auto ret_v = ret.View();
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
+  autoView( ret_v , ret, AcceleratorWrite);
  accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
    coalescedWrite(ret_v[ss],innerProduct(lhs_v(ss),rhs_v(ss)));
  });

@@ -73,9 +73,9 @@ inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Latt
  typedef decltype(coalescedRead(ll())) sll;
  typedef decltype(coalescedRead(rr())) srr;
  Lattice<decltype(outerProduct(ll(),rr()))> ret(rhs.Grid());
-  auto lhs_v = lhs.View();
-  auto rhs_v = rhs.View();
-  auto ret_v = ret.View();
+  autoView( lhs_v , lhs, AcceleratorRead);
+  autoView( rhs_v , rhs, AcceleratorRead);
+  autoView( ret_v , ret, AcceleratorWrite);
  accelerator_for(ss,rhs_v.size(),1,{
    // FIXME had issues with scalar version of outer
    // Use vector [] operator and don't read coalesce this loop

@@ -51,9 +51,9 @@ static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice
  int block  = FullGrid->_slice_block [Orthog];
  int nblock = FullGrid->_slice_nblock[Orthog];
  int ostride= FullGrid->_ostride[Orthog];
-  auto X_v = X.View();
-  auto Y_v = Y.View();
-  auto R_v = R.View();
+  autoView( X_v , X, CpuRead);
+  autoView( Y_v , Y, CpuRead);
+  autoView( R_v , R, CpuWrite);
  thread_region
  {
    std::vector<vobj> s_x(Nblock);

@@ -97,8 +97,8 @@ static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<
  int nblock = FullGrid->_slice_nblock[Orthog];
  int ostride= FullGrid->_ostride[Orthog];

-  auto X_v = X.View();
-  auto R_v = R.View();
+  autoView( X_v , X, CpuRead);
+  autoView( R_v , R, CpuWrite);

  thread_region
  {

@@ -156,8 +156,8 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>
  int ostride=FullGrid->_ostride[Orthog];

  typedef typename vobj::vector_typeD vector_typeD;
-  auto lhs_v = lhs.View();
-  auto rhs_v = rhs.View();
+  autoView( lhs_v , lhs, CpuRead);
+  autoView( rhs_v , rhs, CpuRead);
  thread_region {
    std::vector<vobj> Left(Nblock);
    std::vector<vobj> Right(Nblock);

@@ -46,9 +46,9 @@ auto PeekIndex(const Lattice<vobj> &lhs,int i) -> Lattice<decltype(peekIndex<Ind
{
  Lattice<decltype(peekIndex<Index>(vobj(),i))> ret(lhs.Grid());
  ret.Checkerboard()=lhs.Checkerboard();
-  auto ret_v = ret.View();
-  auto lhs_v = lhs.View();
-  thread_for( ss, lhs_v.size(), {
+  autoView( ret_v, ret, AcceleratorWrite);
+  autoView( lhs_v, lhs, AcceleratorRead);
+  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] = peekIndex<Index>(lhs_v[ss],i);
  });
  return ret;

@@ -58,9 +58,9 @@ auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekInd
{
  Lattice<decltype(peekIndex<Index>(vobj(),i,j))> ret(lhs.Grid());
  ret.Checkerboard()=lhs.Checkerboard();
-  auto ret_v = ret.View();
-  auto lhs_v = lhs.View();
-  thread_for( ss, lhs_v.size(), {
+  autoView( ret_v, ret, AcceleratorWrite);
+  autoView( lhs_v, lhs, AcceleratorRead);
+  accelerator_for( ss, lhs_v.size(), 1, {
    ret_v[ss] = peekIndex<Index>(lhs_v[ss],i,j);
  });
  return ret;

@@ -72,18 +72,18 @@ auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekInd
template<int Index,class vobj>
void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(vobj(),0))> & rhs,int i)
{
-  auto rhs_v = rhs.View();
-  auto lhs_v = lhs.View();
-  thread_for( ss, lhs_v.size(), {
+  autoView( rhs_v, rhs, AcceleratorRead);
+  autoView( lhs_v, lhs, AcceleratorWrite);
+  accelerator_for( ss, lhs_v.size(), 1, {
    pokeIndex<Index>(lhs_v[ss],rhs_v[ss],i);
  });
}
template<int Index,class vobj>
void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(vobj(),0,0))> & rhs,int i,int j)
{
-  auto rhs_v = rhs.View();
-  auto lhs_v = lhs.View();
-  thread_for( ss, lhs_v.size(), {
+  autoView( rhs_v, rhs, AcceleratorRead);
+  autoView( lhs_v, lhs, AcceleratorWrite);
+  accelerator_for( ss, lhs_v.size(), 1, {
    pokeIndex<Index>(lhs_v[ss],rhs_v[ss],i,j);
  });
}

@@ -111,7 +111,7 @@ void pokeSite(const sobj &s,Lattice<vobj> &l,const Coordinate &site){

  // extract-modify-merge cycle is easiest way and this is not perf critical
  ExtractBuffer<sobj> buf(Nsimd);
-  auto l_v = l.View();
+  autoView( l_v , l, CpuWrite);
  if ( rank == grid->ThisRank() ) {
    extract(l_v[odx],buf);
    buf[idx] = s;

@@ -141,7 +141,7 @@ void peekSite(sobj &s,const Lattice<vobj> &l,const Coordinate &site){
  grid->GlobalCoorToRankIndex(rank,odx,idx,site);

  ExtractBuffer<sobj> buf(Nsimd);
-  auto l_v = l.View();
+  autoView( l_v , l, CpuWrite);
  extract(l_v[odx],buf);

  s = buf[idx];

@@ -151,13 +151,12 @@ void peekSite(sobj &s,const Lattice<vobj> &l,const Coordinate &site){
  return;
};

//////////////////////////////////////////////////////////
// Peek a scalar object from the SIMD array
//////////////////////////////////////////////////////////
template<class vobj,class sobj>
-inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site){
-
+inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site)
+{
  GridBase *grid = l.Grid();

  typedef typename vobj::scalar_type scalar_type;

@@ -173,7 +172,7 @@ inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site){
  idx= grid->iIndex(site);
  odx= grid->oIndex(site);

-  auto l_v = l.View();
+  autoView( l_v , l, CpuRead);
  scalar_type * vp = (scalar_type *)&l_v[odx];
  scalar_type * pt = (scalar_type *)&s;

@@ -185,8 +184,8 @@ inline void peekLocalSite(sobj &s,const Lattice<vobj> &l,Coordinate &site){
};

template<class vobj,class sobj>
-inline void pokeLocalSite(const sobj &s,Lattice<vobj> &l,Coordinate &site){
-
+inline void pokeLocalSite(const sobj &s,Lattice<vobj> &l,Coordinate &site)
+{
  GridBase *grid=l.Grid();

  typedef typename vobj::scalar_type scalar_type;

@@ -202,13 +201,12 @@ inline void pokeLocalSite(const sobj &s,Lattice<vobj> &l,Coordinate &site){
  idx= grid->iIndex(site);
  odx= grid->oIndex(site);

-  auto l_v = l.View();
+  autoView( l_v , l, CpuWrite);
  scalar_type * vp = (scalar_type *)&l_v[odx];
  scalar_type * pt = (scalar_type *)&s;
  for(int w=0;w<words;w++){
    vp[idx+w*Nsimd] = pt[w];
  }

  return;
};
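
Usage sketch for the site-local pair above (coordinates are local to this rank; `l` a hypothetical LatticeComplex):

    Coordinate lcoor({0,0,0,0});
    typedef LatticeComplex::vector_object::scalar_object sobj_t;
    sobj_t s;
    peekLocalSite(s, l, lcoor);  // gather the scalar out of the SIMD lanes
    s = 2.0 * s;
    pokeLocalSite(s, l, lcoor);  // scatter it back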
@@ -40,9 +40,11 @@ NAMESPACE_BEGIN(Grid);

template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

+  autoView( lhs_v, lhs, AcceleratorRead);
+  autoView( ret_v, ret, AcceleratorWrite);
+
  ret.Checkerboard()=lhs.Checkerboard();
-  auto lhs_v = lhs.View();
-  auto ret_v = ret.View();
  accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
    coalescedWrite(ret_v[ss], adj(lhs_v(ss)));
  });

@@ -51,9 +53,11 @@ template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){

template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
  Lattice<vobj> ret(lhs.Grid());

+  autoView( lhs_v, lhs, AcceleratorRead);
+  autoView( ret_v, ret, AcceleratorWrite);
+
  ret.Checkerboard() = lhs.Checkerboard();
-  auto lhs_v = lhs.View();
-  auto ret_v = ret.View();
  accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
    coalescedWrite( ret_v[ss] , conjugate(lhs_v(ss)));
  });

@@ -25,7 +25,7 @@ Author: Christoph Lehner <christoph@lhnr.de>
#include <Grid/Grid_Eigen_Dense.h>

-#ifdef GRID_NVCC
+#if defined(GRID_CUDA)||defined(GRID_HIP)
#include <Grid/lattice/Lattice_reduction_gpu.h>
#endif

@@ -39,7 +39,7 @@ inline typename vobj::scalar_object sum_cpu(const vobj *arg, Integer osites)
{
  typedef typename vobj::scalar_object sobj;

-  const int Nsimd   = vobj::Nsimd();
+  // const int Nsimd = vobj::Nsimd();
  const int nthread = GridThread::GetThreads();

  Vector<sobj> sumarray(nthread);

@@ -65,21 +65,69 @@ inline typename vobj::scalar_object sum_cpu(const vobj *arg, Integer osites)

  return ssum;
}
+template<class vobj>
+inline typename vobj::scalar_objectD sumD_cpu(const vobj *arg, Integer osites)
+{
+  typedef typename vobj::scalar_objectD sobj;
+
+  const int nthread = GridThread::GetThreads();
+
+  Vector<sobj> sumarray(nthread);
+  for(int i=0;i<nthread;i++){
+    sumarray[i]=Zero();
+  }
+
+  thread_for(thr,nthread, {
+    int nwork, mywork, myoff;
+    nwork = osites;
+    GridThread::GetWork(nwork,thr,mywork,myoff);
+    vobj vvsum=Zero();
+    for(int ss=myoff;ss<mywork+myoff; ss++){
+      vvsum = vvsum + arg[ss];
+    }
+    sumarray[thr]=Reduce(vvsum);
+  });
+
+  sobj ssum=Zero();  // sum across threads
+  for(int i=0;i<nthread;i++){
+    ssum = ssum+sumarray[i];
+  }
+
+  return ssum;
+}

template<class vobj>
inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
{
-#ifdef GRID_NVCC
+#if defined(GRID_CUDA)||defined(GRID_HIP)
  return sum_gpu(arg,osites);
#else
  return sum_cpu(arg,osites);
#endif
}
+template<class vobj>
+inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)
+{
+#if defined(GRID_CUDA)||defined(GRID_HIP)
+  return sumD_gpu(arg,osites);
+#else
+  return sumD_cpu(arg,osites);
+#endif
+}

template<class vobj>
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
{
-  auto arg_v = arg.View();
+#if defined(GRID_CUDA)||defined(GRID_HIP)
+  autoView( arg_v, arg, AcceleratorRead);
  Integer osites = arg.Grid()->oSites();
-  auto ssum= sum(&arg_v[0],osites);
+  auto ssum= sum_gpu(&arg_v[0],osites);
+#else
+  autoView(arg_v, arg, CpuRead);
+  Integer osites = arg.Grid()->oSites();
+  auto ssum= sum_cpu(&arg_v[0],osites);
+#endif
  arg.Grid()->GlobalSum(ssum);
  return ssum;
}
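
The comment removed further below ("in single precision and fails some tests ... need a sumD that sums in double") is the motivation for the new sumD/sumD_cpu pair: accumulation happens in scalar_objectD even for single-precision fields. A sketch of the contract, with hypothetical `f` and `osites`:

    // For a single-precision field, sum() reduces in working precision while
    // sumD() accumulates and returns in double:
    autoView(f_v, f, CpuRead);               // f: some Lattice<vComplexF>
    auto sF = sum_cpu (&f_v[0], osites);     // scalar_object  (single)
    auto sD = sumD_cpu(&f_v[0], osites);     // scalar_objectD (double)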
@@ -101,43 +149,30 @@ inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &
  ComplexD nrm;

  GridBase *grid = left.Grid();

-  // Might make all code paths go this way.
-  auto left_v = left.AcceleratorView(ViewRead);
-  auto right_v=right.AcceleratorView(ViewRead);

  const uint64_t nsimd = grid->Nsimd();
  const uint64_t sites = grid->oSites();

-#ifdef GRID_NVCC
-  // GPU - SIMT lane compliance...
-  typedef decltype(innerProduct(left_v[0],right_v[0])) inner_t;
+  // Might make all code paths go this way.
+  typedef decltype(innerProduct(vobj(),vobj())) inner_t;
  Vector<inner_t> inner_tmp(sites);
  auto inner_tmp_v = &inner_tmp[0];

+  {
+    autoView( left_v , left, AcceleratorRead);
+    autoView( right_v,right, AcceleratorRead);

-  accelerator_for( ss, sites, nsimd,{
-    auto x_l = left_v(ss);
-    auto y_l = right_v(ss);
-    coalescedWrite(inner_tmp_v[ss],innerProduct(x_l,y_l));
-  })
+    // GPU - SIMT lane compliance...
+    accelerator_for( ss, sites, nsimd,{
+      auto x_l = left_v(ss);
+      auto y_l = right_v(ss);
+      coalescedWrite(inner_tmp_v[ss],innerProduct(x_l,y_l));
+    })
+  }

-  // This is in single precision and fails some tests
-  // Need a sumD that sums in double
-  nrm = TensorRemove(sumD_gpu(inner_tmp_v,sites));
-#else
-  // CPU
-  typedef decltype(innerProductD(left_v[0],right_v[0])) inner_t;
-  Vector<inner_t> inner_tmp(sites);
-  auto inner_tmp_v = &inner_tmp[0];
-
-  accelerator_for( ss, sites, nsimd,{
-    auto x_l = left_v[ss];
-    auto y_l = right_v[ss];
-    inner_tmp_v[ss]=innerProductD(x_l,y_l);
-  })
-  nrm = TensorRemove(sum(inner_tmp_v,sites));
-#endif
+  nrm = TensorRemove(sumD(inner_tmp_v,sites));
  return nrm;
}

@@ -175,15 +210,14 @@ axpby_norm_fast(Lattice<vobj> &z,sobj a,sobj b,const Lattice<vobj> &x,const Latt

  GridBase *grid = x.Grid();

-  auto x_v=x.AcceleratorView(ViewRead);
-  auto y_v=y.AcceleratorView(ViewRead);
-  auto z_v=z.AcceleratorView(ViewWrite);

  const uint64_t nsimd = grid->Nsimd();
  const uint64_t sites = grid->oSites();

-#ifdef GRID_NVCC
-  // GPU
+  autoView( x_v, x, AcceleratorRead);
+  autoView( y_v, y, AcceleratorRead);
+  autoView( z_v, z, AcceleratorWrite);

  typedef decltype(innerProduct(x_v[0],y_v[0])) inner_t;
  Vector<inner_t> inner_tmp(sites);
  auto inner_tmp_v = &inner_tmp[0];

@@ -193,22 +227,7 @@ axpby_norm_fast(Lattice<vobj> &z,sobj a,sobj b,const Lattice<vobj> &x,const Latt
    coalescedWrite(inner_tmp_v[ss],innerProduct(tmp,tmp));
    coalescedWrite(z_v[ss],tmp);
  });

-  nrm = real(TensorRemove(sumD_gpu(inner_tmp_v,sites)));
-#else
-  // CPU
-  typedef decltype(innerProductD(x_v[0],y_v[0])) inner_t;
-  Vector<inner_t> inner_tmp(sites);
-  auto inner_tmp_v = &inner_tmp[0];
-
-  accelerator_for( ss, sites, nsimd,{
-    auto tmp = a*x_v(ss)+b*y_v(ss);
-    inner_tmp_v[ss]=innerProductD(tmp,tmp);
-    z_v[ss]=tmp;
-  });
-  // Already promoted to double
-  nrm = real(TensorRemove(sum(inner_tmp_v,sites)));
-#endif
+  nrm = real(TensorRemove(sumD(inner_tmp_v,sites)));
  grid->GlobalSum(nrm);
  return nrm;
}
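
Usage sketch: the fused update-plus-norm above serves inner loops like conjugate gradient, replacing an axpby followed by a separate norm2 traversal:

    RealD r2 = axpby_norm_fast(z, a, b, x, y);  // z = a*x + b*y and |z|^2 in one
                                                // pass, reduced in double via sumD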
@@ -224,47 +243,30 @@ innerProductNorm(ComplexD& ip, RealD &nrm, const Lattice<vobj> &left,const Latti

  GridBase *grid = left.Grid();

-  auto left_v =left.AcceleratorView(ViewRead);
-  auto right_v=right.AcceleratorView(ViewRead);

  const uint64_t nsimd = grid->Nsimd();
  const uint64_t sites = grid->oSites();

-#ifdef GRID_NVCC
-  // GPU
-  typedef decltype(innerProduct(left_v[0],right_v[0])) inner_t;
-  typedef decltype(innerProduct(left_v[0],left_v[0]))  norm_t;
+  typedef decltype(innerProduct(vobj(),vobj())) inner_t;
+  typedef decltype(innerProduct(vobj(),vobj())) norm_t;
  Vector<inner_t> inner_tmp(sites);
  Vector<norm_t>  norm_tmp(sites);
  auto inner_tmp_v = &inner_tmp[0];
  auto norm_tmp_v  = &norm_tmp[0];
+  {
+    autoView(left_v,left,  AcceleratorRead);
+    autoView(right_v,right,AcceleratorRead);
+    accelerator_for( ss, sites, nsimd,{
+      auto left_tmp = left_v(ss);
+      coalescedWrite(inner_tmp_v[ss],innerProduct(left_tmp,right_v(ss)));
+      coalescedWrite(norm_tmp_v[ss],innerProduct(left_tmp,left_tmp));
+    });
+  }

-  accelerator_for( ss, sites, nsimd,{
-    auto left_tmp = left_v(ss);
-    coalescedWrite(inner_tmp_v[ss],innerProduct(left_tmp,right_v(ss)));
-    coalescedWrite(norm_tmp_v[ss],innerProduct(left_tmp,left_tmp));
-  });
+  tmp[0] = TensorRemove(sumD(inner_tmp_v,sites));
+  tmp[1] = TensorRemove(sumD(norm_tmp_v,sites));

-  tmp[0] = TensorRemove(sumD_gpu(inner_tmp_v,sites));
-  tmp[1] = TensorRemove(sumD_gpu(norm_tmp_v,sites));
-#else
-  // CPU
-  typedef decltype(innerProductD(left_v[0],right_v[0])) inner_t;
-  typedef decltype(innerProductD(left_v[0],left_v[0]))  norm_t;
-  Vector<inner_t> inner_tmp(sites);
-  Vector<norm_t>  norm_tmp(sites);
-  auto inner_tmp_v = &inner_tmp[0];
-  auto norm_tmp_v  = &norm_tmp[0];
-
-  accelerator_for( ss, sites, nsimd,{
-    auto left_tmp = left_v(ss);
-    inner_tmp_v[ss] = innerProductD(left_tmp,right_v(ss));
-    norm_tmp_v[ss]  = innerProductD(left_tmp,left_tmp);
-  });
-  // Already promoted to double
-  tmp[0] = TensorRemove(sum(inner_tmp_v,sites));
-  tmp[1] = TensorRemove(sum(norm_tmp_v,sites));
-#endif
  grid->GlobalSumVector(&tmp[0],2); // keep norm Complex -> can use GlobalSumVector
  ip  = tmp[0];
  nrm = real(tmp[1]);
@@ -335,7 +337,7 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<

  // sum over reduced dimension planes, breaking out orthog dir
  // Parallel over orthog direction
-  auto Data_v=Data.View();
+  autoView( Data_v, Data, CpuRead);
  thread_for( r,rd, {
    int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
    for(int n=0;n<e1;n++){

@@ -413,8 +415,8 @@ static void sliceInnerProductVector( std::vector<ComplexD> & result, const Latti
  int e2= grid->_slice_block [orthogdim];
  int stride=grid->_slice_stride[orthogdim];

-  auto lhv=lhs.View();
-  auto rhv=rhs.View();
+  autoView( lhv, lhs, CpuRead);
+  autoView( rhv, rhs, CpuRead);
  thread_for( r,rd,{

    int so=r*grid->_ostride[orthogdim]; // base offset for start of plane

@@ -521,14 +523,12 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice

  tensor_reduced at; at=av;

-  auto Rv=R.View();
-  auto Xv=X.View();
-  auto Yv=Y.View();
-  thread_for_collapse(2, n, e1, {
-    for(int b=0;b<e2;b++){
+  autoView( Rv, R, CpuWrite);
+  autoView( Xv, X, CpuRead);
+  autoView( Yv, Y, CpuRead);
+  thread_for2d( n, e1, b,e2, {
      int ss= so+n*stride+b;
      Rv[ss] = at*Xv[ss]+Yv[ss];
-    }
  });
}
};

@@ -581,9 +581,9 @@ static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice
  int nblock=FullGrid->_slice_nblock[Orthog];
  int ostride=FullGrid->_ostride[Orthog];

-  auto X_v=X.View();
-  auto Y_v=Y.View();
-  auto R_v=R.View();
+  autoView( X_v, X, CpuRead);
+  autoView( Y_v, Y, CpuRead);
+  autoView( R_v, R, CpuWrite);
  thread_region
  {
    Vector<vobj> s_x(Nblock);

@@ -628,13 +628,14 @@ static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<
  // int nl=1;

  //FIXME package in a convenient iterator
-  // thread_for2d_in_region
+  // Should loop over a plane orthogonal to direction "Orthog"
  int stride =FullGrid->_slice_stride[Orthog];
  int block  =FullGrid->_slice_block [Orthog];
  int nblock =FullGrid->_slice_nblock[Orthog];
  int ostride=FullGrid->_ostride[Orthog];
-  auto R_v = R.View();
-  auto X_v = X.View();
+  autoView( R_v, R, CpuWrite);
+  autoView( X_v, X, CpuRead);
  thread_region
  {
    std::vector<vobj> s_x(Nblock);

@@ -692,8 +693,8 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>

  typedef typename vobj::vector_typeD vector_typeD;

-  auto lhs_v=lhs.View();
-  auto rhs_v=rhs.View();
+  autoView( lhs_v, lhs, CpuRead);
+  autoView( rhs_v, rhs, CpuRead);
  thread_region
  {
    std::vector<vobj> Left(Nblock);

@@ -1,7 +1,13 @@
NAMESPACE_BEGIN(Grid);

-#define WARP_SIZE 32
+#ifdef GRID_HIP
+extern hipDeviceProp_t *gpu_props;
+#endif
+#ifdef GRID_CUDA
+extern cudaDeviceProp *gpu_props;
+#endif

+#define WARP_SIZE 32
__device__ unsigned int retirementCount = 0;

template <class Iterator>

@@ -19,7 +25,12 @@ template <class Iterator>
void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &threads, Iterator &blocks) {

  int device;
+#ifdef GRID_CUDA
  cudaGetDevice(&device);
+#endif
+#ifdef GRID_HIP
+  hipGetDevice(&device);
+#endif

  Iterator warpSize          = gpu_props[device].warpSize;
  Iterator sharedMemPerBlock = gpu_props[device].sharedMemPerBlock;

@@ -147,7 +158,7 @@ __global__ void reduceKernel(const vobj *lat, sobj *buffer, Iterator n) {
  sobj *smem = (sobj *)shmem_pointer;

  // wait until all outstanding memory instructions in this thread are finished
-  __threadfence();
+  acceleratorFence();

  if (tid==0) {
    unsigned int ticket = atomicInc(&retirementCount, gridDim.x);

@@ -156,8 +167,8 @@ __global__ void reduceKernel(const vobj *lat, sobj *buffer, Iterator n) {
  }

  // each thread must read the correct value of amLast
-  __syncthreads();
+  acceleratorSynchroniseAll();

  if (amLast) {
    // reduce buffer[0], ..., buffer[gridDim.x-1]
    Iterator i = tid;

@@ -199,13 +210,7 @@ inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
  sobj *buffer_v = &buffer[0];

  reduceKernel<<< numBlocks, numThreads, smemSize >>>(lat, buffer_v, size);
-  cudaDeviceSynchronize();
-
-  cudaError err = cudaGetLastError();
-  if ( cudaSuccess != err ) {
-    printf("Cuda error %s\n",cudaGetErrorString( err ));
-    exit(0);
-  }
+  accelerator_barrier();
  auto result = buffer_v[0];
  return result;
}

@@ -375,7 +375,7 @@ public:
int osites = _grid->oSites(); // guaranteed to be <= l.Grid()->oSites() by a factor multiplicity
int words = sizeof(scalar_object) / sizeof(scalar_type);

auto l_v = l.View();
autoView(l_v, l, CpuWrite);
thread_for( ss, osites, {
ExtractBuffer<scalar_object> buf(Nsimd);
for (int m = 0; m < multiplicity; m++) { // Draw from same generator multiplicity times
@@ -461,8 +461,8 @@ public:
}

{
// Obtain one reseeded generator per thread
int Nthread = GridThread::GetThreads();
// Obtain one reseeded generator per thread
int Nthread = 32; // Hardwire a good level of parallelism
std::vector<RngEngine> seeders(Nthread);
for(int t=0;t<Nthread;t++){
seeders[t] = Reseed(master_engine);
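// Editor's note, hedged sketch: hardwiring Nthread = 32 (instead of
// GridThread::GetThreads()) makes the tree of reseeded generators independent
// of the run-time thread count, so one master seed reproduces the same RNG
// state on any machine. A self-contained analogue with std::mt19937
// (standing in for the RngEngine/Reseed members used above):
#include <random>
#include <vector>
std::vector<std::mt19937> reseedPool(std::mt19937 &master, int nSeeders = 32) {
  std::vector<std::mt19937> seeders(nSeeders);
  for (int t = 0; t < nSeeders; t++) {
    // derive an independent engine from the master stream
    std::seed_seq seq{master(), master(), master(), master()};
    seeders[t] = std::mt19937(seq);
  }
  return seeders;
}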
@@ -42,8 +42,8 @@ template<class vobj>
inline auto trace(const Lattice<vobj> &lhs) -> Lattice<decltype(trace(vobj()))>
{
Lattice<decltype(trace(vobj()))> ret(lhs.Grid());
auto ret_v = ret.View();
auto lhs_v = lhs.View();
autoView(ret_v , ret, AcceleratorWrite);
autoView(lhs_v , lhs, AcceleratorRead);
accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
coalescedWrite(ret_v[ss], trace(lhs_v(ss)));
});
@@ -58,8 +58,8 @@ template<int Index,class vobj>
inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<Index>(vobj()))>
{
Lattice<decltype(traceIndex<Index>(vobj()))> ret(lhs.Grid());
auto ret_v = ret.View();
auto lhs_v = lhs.View();
autoView( ret_v , ret, AcceleratorWrite);
autoView( lhs_v , lhs, AcceleratorRead);
accelerator_for( ss, lhs_v.size(), vobj::Nsimd(), {
coalescedWrite(ret_v[ss], traceIndex<Index>(lhs_v(ss)));
});
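// Editor's note, hedged sketch of the generic kernel shape that trace and
// TraceIndex above both follow: write-view on the result, read-view on the
// operand, one SIMD-coalesced visit per outer site. `f` is any site-local
// callable; the rest is the same Grid machinery used in the hunks themselves.
template<class vobj, class Op>
inline Lattice<vobj> siteMap(const Lattice<vobj> &in, Op f) {
  Lattice<vobj> out(in.Grid());
  autoView( out_v, out, AcceleratorWrite);
  autoView( in_v , in , AcceleratorRead);
  accelerator_for(ss, in_v.size(), vobj::Nsimd(), {
    coalescedWrite(out_v[ss], f(in_v(ss)));   // coalesced read, apply, write
  });
  return out;
}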
@@ -47,11 +47,12 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
////////////////////////////////////////////////////////////////////////////////////////////
// remove and insert a half checkerboard
////////////////////////////////////////////////////////////////////////////////////////////
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full)
{
half.Checkerboard() = cb;

auto half_v = half.View();
auto full_v = full.View();
autoView( half_v, half, CpuWrite);
autoView( full_v, full, CpuRead);
thread_for(ss, full.Grid()->oSites(),{
int cbos;
Coordinate coor;
@@ -64,11 +65,11 @@ template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,con
}
});
}

template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half)
{
int cb = half.Checkerboard();
auto half_v = half.View();
auto full_v = full.View();
autoView( half_v , half, CpuRead);
autoView( full_v , full, CpuWrite);
thread_for(ss,full.Grid()->oSites(),{

Coordinate coor;
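// Editor's note, hedged usage sketch for the checkerboard helpers above,
// assuming a full GridCartesian `&Grid` and its red-black counterpart
// `&RBGrid` exist (as in typical Grid test setups):
//
//   LatticeFermion full(&Grid);
//   LatticeFermion even(&RBGrid);
//   LatticeFermion odd (&RBGrid);
//   pickCheckerboard(Even, even, full);  // copy the even sites out
//   pickCheckerboard(Odd , odd , full);  // copy the odd sites out
//   setCheckerboard(full, even);         // scatter both halves back
//   setCheckerboard(full, odd );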
@@ -96,15 +97,15 @@ accelerator_inline void convertType(ComplexF & out, const std::complex<float> &
out = in;
}

#ifdef GRID_NVCC
#ifdef GRID_SIMT
accelerator_inline void convertType(vComplexF & out, const ComplexF & in) {
((ComplexF*)&out)[SIMTlane(vComplexF::Nsimd())] = in;
((ComplexF*)&out)[acceleratorSIMTlane(vComplexF::Nsimd())] = in;
}
accelerator_inline void convertType(vComplexD & out, const ComplexD & in) {
((ComplexD*)&out)[SIMTlane(vComplexD::Nsimd())] = in;
((ComplexD*)&out)[acceleratorSIMTlane(vComplexD::Nsimd())] = in;
}
accelerator_inline void convertType(vComplexD2 & out, const ComplexD & in) {
((ComplexD*)&out)[SIMTlane(vComplexD::Nsimd()*2)] = in;
((ComplexD*)&out)[acceleratorSIMTlane(vComplexD::Nsimd()*2)] = in;
}
#endif

@@ -151,12 +152,11 @@ accelerator_inline void convertType(T & out, const T & in) {

template<typename T1,typename T2>
accelerator_inline void convertType(Lattice<T1> & out, const Lattice<T2> & in) {
auto out_v = out.AcceleratorView(ViewWrite);
auto in_v = in.AcceleratorView(ViewRead);

autoView( out_v , out,AcceleratorWrite);
autoView( in_v , in ,AcceleratorRead);
accelerator_for(ss,out_v.size(),T1::Nsimd(),{
convertType(out_v[ss],in_v(ss));
});
});
}

////////////////////////////////////////////////////////////////////////////////////////////
@@ -166,17 +166,18 @@ template<class vobj>
inline auto localInnerProductD(const Lattice<vobj> &lhs,const Lattice<vobj> &rhs)
-> Lattice<iScalar<decltype(TensorRemove(innerProductD2(lhs.View()[0],rhs.View()[0])))>>
{
auto lhs_v = lhs.AcceleratorView(ViewRead);
auto rhs_v = rhs.AcceleratorView(ViewRead);
autoView( lhs_v , lhs, AcceleratorRead);
autoView( rhs_v , rhs, AcceleratorRead);

typedef decltype(TensorRemove(innerProductD2(lhs_v[0],rhs_v[0]))) t_inner;
Lattice<iScalar<t_inner>> ret(lhs.Grid());
auto ret_v = ret.AcceleratorView(ViewWrite);

accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
{
autoView(ret_v, ret,AcceleratorWrite);
accelerator_for(ss,rhs_v.size(),vobj::Nsimd(),{
convertType(ret_v[ss],innerProductD2(lhs_v(ss),rhs_v(ss)));
});

}
return ret;
}
@@ -194,14 +195,13 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,
Lattice<iScalar<CComplex>> ip(coarse);
Lattice<vobj> fineDataRed = fineData;

// auto fineData_ = fineData.View();
auto coarseData_ = coarseData.AcceleratorView(ViewWrite);
auto ip_ = ip.AcceleratorView(ViewReadWrite);
autoView( coarseData_ , coarseData, AcceleratorWrite);
autoView( ip_ , ip, AcceleratorWrite);
for(int v=0;v<nbasis;v++) {
blockInnerProductD(ip,Basis[v],fineDataRed); // ip = <basis|fine>
accelerator_for( sc, coarse->oSites(), vobj::Nsimd(), {
convertType(coarseData_[sc](v),ip_[sc]);
});
});

// improve numerical stability of projection
// |fine> = |fine> - <basis|fine> |basis>
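// Editor's note, hedged usage sketch for blockProject: project a fine field
// onto an nbasis-dimensional coarse space (the coarse grid must subdivide the
// fine one). FineGrid/CoarseGrid are assumed GridCartesian pointers set up
// elsewhere:
//
//   const int nbasis = 8;
//   LatticeFermion fine(FineGrid);
//   std::vector<LatticeFermion> basis(nbasis, FineGrid);
//   Lattice<iVector<vComplex, nbasis>> coarse(CoarseGrid);
//   blockProject(coarse, fine, basis);   // coarse_i = <basis_i|fine> per block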
@@ -210,68 +210,6 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,
}
}

template<class vobj,class CComplex,int nbasis>
inline void blockProject1(Lattice<iVector<CComplex,nbasis > > &coarseData,
const Lattice<vobj> &fineData,
const std::vector<Lattice<vobj> > &Basis)
{
typedef iVector<CComplex,nbasis > coarseSiteData;
coarseSiteData elide;
typedef decltype(coalescedRead(elide)) ScalarComplex;
GridBase * fine = fineData.Grid();
GridBase * coarse= coarseData.Grid();
int _ndimension = coarse->_ndimension;

// checks
assert( nbasis == Basis.size() );
subdivides(coarse,fine);
for(int i=0;i<nbasis;i++){
conformable(Basis[i],fineData);
}

Coordinate block_r (_ndimension);

for(int d=0 ; d<_ndimension;d++){
block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
assert(block_r[d]*coarse->_rdimensions[d] == fine->_rdimensions[d]);
}
int blockVol = fine->oSites()/coarse->oSites();

coarseData=Zero();

auto fineData_ = fineData.View();
auto coarseData_ = coarseData.View();
////////////////////////////////////////////////////////////////////////////////////////////////////////
// To make this lock free, loop over coarse in parallel, and then loop over the fine sites associated with each coarse site.
// Otherwise do a fine inner product per site, and make the update atomic
////////////////////////////////////////////////////////////////////////////////////////////////////////
accelerator_for( sci, nbasis*coarse->oSites(), vobj::Nsimd(), {

auto sc=sci/nbasis;
auto i=sci%nbasis;
auto Basis_ = Basis[i].View();

Coordinate coor_c(_ndimension);
Lexicographic::CoorFromIndex(coor_c,sc,coarse->_rdimensions); // Block coordinate

int sf;
decltype(innerProduct(Basis_(sf),fineData_(sf))) reduce=Zero();

for(int sb=0;sb<blockVol;sb++){

Coordinate coor_b(_ndimension);
Coordinate coor_f(_ndimension);

Lexicographic::CoorFromIndex(coor_b,sb,block_r);
for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d]+coor_b[d];
Lexicographic::IndexFromCoor(coor_f,sf,fine->_rdimensions);

reduce=reduce+innerProduct(Basis_(sf),fineData_(sf));
}
coalescedWrite(coarseData_[sc](i),reduce);
});
return;
}

template<class vobj,class vobj2,class CComplex>
inline void blockZAXPY(Lattice<vobj> &fineZ,
@@ -298,10 +236,10 @@ template<class vobj,class vobj2,class CComplex>
assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]);
}

auto fineZ_ = fineZ.AcceleratorView(ViewWrite);
auto fineX_ = fineX.AcceleratorView(ViewRead);
auto fineY_ = fineY.AcceleratorView(ViewRead);
auto coarseA_= coarseA.AcceleratorView(ViewRead);
autoView( fineZ_ , fineZ, AcceleratorWrite);
autoView( fineX_ , fineX, AcceleratorRead);
autoView( fineY_ , fineY, AcceleratorRead);
autoView( coarseA_, coarseA, AcceleratorRead);

accelerator_for(sf, fine->oSites(), CComplex::Nsimd(), {

@@ -314,7 +252,7 @@ template<class vobj,class vobj2,class CComplex>
Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

// z = A x + y
#ifdef __CUDA_ARCH__
#ifdef GRID_SIMT
typename vobj2::tensor_reduced::scalar_object cA;
typename vobj::scalar_object cAx;
#else
@@ -344,15 +282,16 @@ template<class vobj,class CComplex>
Lattice<dotp> fine_inner(fine); fine_inner.Checkerboard() = fineX.Checkerboard();
Lattice<dotp> coarse_inner(coarse);

auto CoarseInner_ = CoarseInner.AcceleratorView(ViewWrite);
auto coarse_inner_ = coarse_inner.AcceleratorView(ViewReadWrite);

// Precision promotion
fine_inner = localInnerProductD(fineX,fineY);
blockSum(coarse_inner,fine_inner);
accelerator_for(ss, coarse->oSites(), 1, {
{
autoView( CoarseInner_ , CoarseInner,AcceleratorWrite);
autoView( coarse_inner_ , coarse_inner,AcceleratorRead);
accelerator_for(ss, coarse->oSites(), 1, {
convertType(CoarseInner_[ss], TensorRemove(coarse_inner_[ss]));
});
}

}

@@ -370,14 +309,15 @@ inline void blockInnerProduct(Lattice<CComplex> &CoarseInner,
Lattice<dotp> coarse_inner(coarse);

// Precision promotion?
auto CoarseInner_ = CoarseInner.AcceleratorView(ViewWrite);
auto coarse_inner_ = coarse_inner.AcceleratorView(ViewReadWrite);

fine_inner = localInnerProduct(fineX,fineY);
blockSum(coarse_inner,fine_inner);
accelerator_for(ss, coarse->oSites(), 1, {
CoarseInner_[ss] = coarse_inner_[ss];
});
{
autoView( CoarseInner_ , CoarseInner, AcceleratorWrite);
autoView( coarse_inner_ , coarse_inner, AcceleratorRead);
accelerator_for(ss, coarse->oSites(), 1, {
CoarseInner_[ss] = coarse_inner_[ss];
});
}
}

template<class vobj,class CComplex>
@@ -408,8 +348,10 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
}
int blockVol = fine->oSites()/coarse->oSites();

auto coarseData_ = coarseData.AcceleratorView(ViewReadWrite);
auto fineData_ = fineData.AcceleratorView(ViewRead);
// Turning this around, threading over sc with an interior loop
// over sf, would thread better
autoView( coarseData_ , coarseData, AcceleratorWrite);
autoView( fineData_ , fineData, AcceleratorRead);

accelerator_for(sc,coarse->oSites(),1,{

@@ -510,8 +452,8 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
for(int d=0 ; d<_ndimension;d++){
block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d];
}
auto fineData_ = fineData.View();
auto coarseData_ = coarseData.View();
autoView( fineData_ , fineData, AcceleratorWrite);
autoView( coarseData_ , coarseData, AcceleratorRead);

// Loop with a cache friendly loop ordering
accelerator_for(sf,fine->oSites(),1,{
@@ -524,7 +466,7 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);

for(int i=0;i<nbasis;i++) {
auto basis_ = Basis[i].View();
/* auto basis_ = Basis[i], );*/
if(i==0) fineData_[sf]=coarseData_[sc](i)*basis_[sf];
else fineData_[sf]=fineData_[sf]+coarseData_[sc](i)*basis_[sf];
}
@@ -543,8 +485,14 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
fineData=Zero();
for(int i=0;i<nbasis;i++) {
Lattice<iScalar<CComplex> > ip = PeekIndex<0>(coarseData,i);
auto ip_ = ip.AcceleratorView(ViewRead);
blockZAXPY(fineData,ip,Basis[i],fineData);

Lattice<CComplex> cip(coarse);
autoView( cip_ , cip, AcceleratorWrite);
autoView( ip_ , ip, AcceleratorRead);
accelerator_forNB(sc,coarse->oSites(),CComplex::Nsimd(),{
coalescedWrite(cip_[sc], ip_(sc)());
});
blockZAXPY<vobj,CComplex >(fineData,cip,Basis[i],fineData);
}
}
#endif
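// Editor's note, hedged sketch: blockPromote is the adjoint of blockProject,
// so a project/promote round trip keeps exactly the component of the fine
// field lying in the span of a block-orthonormal basis:
//
//   blockProject(coarse, fine, basis);      // coarse_i = <basis_i|fine>
//   blockPromote(coarse, fineReco, basis);  // fineReco = sum_i coarse_i |basis_i>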
@@ -614,8 +562,9 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
Coordinate rdt = Tg->_rdimensions;
Coordinate ist = Tg->_istride;
Coordinate ost = Tg->_ostride;
auto t_v = To.AcceleratorView(ViewWrite);
auto f_v = From.AcceleratorView(ViewRead);

autoView( t_v , To, AcceleratorWrite);
autoView( f_v , From, AcceleratorRead);
accelerator_for(idx,Fg->lSites(),1,{
sobj s;
Coordinate Fcoor(nd);
@@ -862,7 +811,7 @@ unvectorizeToLexOrdArray(std::vector<sobj> &out, const Lattice<vobj> &in)
}

//loop over outer index
auto in_v = in.View();
autoView( in_v , in, CpuRead);
thread_for(in_oidx,in_grid->oSites(),{
//Assemble vector of pointers to output elements
ExtractPointerArray<sobj> out_ptrs(in_nsimd);
@@ -955,7 +904,7 @@ vectorizeFromLexOrdArray( std::vector<sobj> &in, Lattice<vobj> &out)
icoor[lane].resize(ndim);
grid->iCoorFromIindex(icoor[lane],lane);
}
auto out_v = out.View();
autoView( out_v , out, CpuWrite);
thread_for(oidx, grid->oSites(),{
//Assemble vector of pointers to output elements
ExtractPointerArray<sobj> ptrs(nsimd);
@@ -1058,7 +1007,7 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in)
std::vector<SobjOut> in_slex_conv(in_grid->lSites());
unvectorizeToLexOrdArray(in_slex_conv, in);

auto out_v = out.View();
autoView( out_v , out, CpuWrite);
thread_for(out_oidx,out_grid->oSites(),{
Coordinate out_ocoor(ndim);
out_grid->oCoorFromOindex(out_ocoor, out_oidx);
@@ -42,8 +42,8 @@ NAMESPACE_BEGIN(Grid);
template<class vobj>
inline Lattice<vobj> transpose(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs.Grid());
auto ret_v = ret.View();
auto lhs_v = lhs.View();
autoView( ret_v, ret, AcceleratorWrite);
autoView( lhs_v, lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),vobj::Nsimd(),{
coalescedWrite(ret_v[ss], transpose(lhs_v(ss)));
});
@@ -58,8 +58,8 @@ template<int Index,class vobj>
inline auto TransposeIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(transposeIndex<Index>(vobj()))>
{
Lattice<decltype(transposeIndex<Index>(vobj()))> ret(lhs.Grid());
auto ret_v = ret.View();
auto lhs_v = lhs.View();
autoView( ret_v, ret, AcceleratorWrite);
autoView( lhs_v, lhs, AcceleratorRead);
accelerator_for(ss,lhs_v.size(),vobj::Nsimd(),{
coalescedWrite(ret_v[ss] , transposeIndex<Index>(lhs_v(ss)));
});
@@ -35,8 +35,8 @@ NAMESPACE_BEGIN(Grid);

template<class obj> Lattice<obj> pow(const Lattice<obj> &rhs_i,RealD y){
Lattice<obj> ret_i(rhs_i.Grid());
auto rhs = rhs_i.View();
auto ret = ret_i.View();
autoView( rhs, rhs_i, AcceleratorRead);
autoView( ret, ret_i, AcceleratorWrite);
ret.Checkerboard() = rhs.Checkerboard();
accelerator_for(ss,rhs.size(),1,{
ret[ss]=pow(rhs[ss],y);
@@ -45,8 +45,8 @@ template<class obj> Lattice<obj> pow(const Lattice<obj> &rhs_i,RealD y){
}
template<class obj> Lattice<obj> mod(const Lattice<obj> &rhs_i,Integer y){
Lattice<obj> ret_i(rhs_i.Grid());
auto rhs = rhs_i.View();
auto ret = ret_i.View();
autoView( rhs , rhs_i, AcceleratorRead);
autoView( ret , ret_i, AcceleratorWrite);
ret.Checkerboard() = rhs.Checkerboard();
accelerator_for(ss,rhs.size(),obj::Nsimd(),{
coalescedWrite(ret[ss],mod(rhs(ss),y));
@@ -56,8 +56,8 @@ template<class obj> Lattice<obj> mod(const Lattice<obj> &rhs_i,Integer y){

template<class obj> Lattice<obj> div(const Lattice<obj> &rhs_i,Integer y){
Lattice<obj> ret_i(rhs_i.Grid());
auto ret = ret_i.View();
auto rhs = rhs_i.View();
autoView( ret , ret_i, AcceleratorWrite);
autoView( rhs , rhs_i, AcceleratorRead);
ret.Checkerboard() = rhs_i.Checkerboard();
accelerator_for(ss,rhs.size(),obj::Nsimd(),{
coalescedWrite(ret[ss],div(rhs(ss),y));
@@ -67,8 +67,8 @@ template<class obj> Lattice<obj> div(const Lattice<obj> &rhs_i,Integer y){

template<class obj> Lattice<obj> expMat(const Lattice<obj> &rhs_i, RealD alpha, Integer Nexp = DEFAULT_MAT_EXP){
Lattice<obj> ret_i(rhs_i.Grid());
auto rhs = rhs_i.View();
auto ret = ret_i.View();
autoView( rhs , rhs_i, AcceleratorRead);
autoView( ret , ret_i, AcceleratorWrite);
ret.Checkerboard() = rhs.Checkerboard();
accelerator_for(ss,rhs.size(),obj::Nsimd(),{
coalescedWrite(ret[ss],Exponentiate(rhs(ss),alpha, Nexp));
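// Editor's note, hedged usage sketch for the unary helpers above, e.g. an
// HMC-style exponentiation of a Lie-algebra field into the group:
//
//   LatticeColourMatrix mom(&Grid);            // assumed filled elsewhere
//   RealD eps = 0.01;
//   LatticeColourMatrix U = expMat(mom, eps);  // U = exp(eps*mom), series
//                                              // truncated at Nexp terms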
Grid/lattice/Lattice_view.h (new file, 163 lines)
@@ -0,0 +1,163 @@
#pragma once
NAMESPACE_BEGIN(Grid);
///////////////////////////////////////////////////////////////////
// Base class which can be used by traits to pick up behaviour
///////////////////////////////////////////////////////////////////
class LatticeBase {};

/////////////////////////////////////////////////////////////////////////////////////////
// Conformable checks; same instance of Grid required
/////////////////////////////////////////////////////////////////////////////////////////
void accelerator_inline conformable(GridBase *lhs,GridBase *rhs)
{
assert(lhs == rhs);
}

////////////////////////////////////////////////////////////////////////////
// Minimal base class containing only data valid to access from accelerator
// _odata will be a managed pointer in CUDA
////////////////////////////////////////////////////////////////////////////
// Force access to lattice through a view object.
// prevents writing of code that will not offload to GPU, but perhaps annoyingly
// strict since the host could in principle access the data directly through the lattice object.
// Need to decide programming model.
#define LATTICE_VIEW_STRICT
template<class vobj> class LatticeAccelerator : public LatticeBase
{
protected:
//public:
GridBase *_grid;
int checkerboard;
vobj *_odata; // A managed pointer
uint64_t _odata_size;
public:
accelerator_inline LatticeAccelerator() : checkerboard(0), _odata(nullptr), _odata_size(0), _grid(nullptr) { };
accelerator_inline uint64_t oSites(void) const { return _odata_size; };
accelerator_inline int Checkerboard(void) const { return checkerboard; };
accelerator_inline int &Checkerboard(void) { return this->checkerboard; }; // can assign checkerboard on a container, not a view
accelerator_inline void Conformable(GridBase * &grid) const
{
if (grid) conformable(grid, _grid);
else grid = _grid;
};
};

/////////////////////////////////////////////////////////////////////////////////////////
// A View class which provides an accessor to the data.
// This will be safe to call from accelerator_for and is trivially copy constructible
// The copy constructor for this will need to be used by device lambda functions
/////////////////////////////////////////////////////////////////////////////////////////
template<class vobj>
class LatticeView : public LatticeAccelerator<vobj>
{
public:
// Rvalue
ViewMode mode;
void * cpu_ptr;
#ifdef GRID_SIMT
accelerator_inline const typename vobj::scalar_object operator()(size_t i) const {
return coalescedRead(this->_odata[i]);
}
#else
accelerator_inline const vobj & operator()(size_t i) const { return this->_odata[i]; }
#endif

accelerator_inline const vobj & operator[](size_t i) const { return this->_odata[i]; };
accelerator_inline vobj & operator[](size_t i) { return this->_odata[i]; };

accelerator_inline uint64_t begin(void) const { return 0;};
accelerator_inline uint64_t end(void) const { return this->_odata_size; };
accelerator_inline uint64_t size(void) const { return this->_odata_size; };

LatticeView(const LatticeAccelerator<vobj> &refer_to_me) : LatticeAccelerator<vobj> (refer_to_me){}
LatticeView(const LatticeView<vobj> &refer_to_me) = default; // Trivially copyable
LatticeView(const LatticeAccelerator<vobj> &refer_to_me,ViewMode mode) : LatticeAccelerator<vobj> (refer_to_me)
{
this->ViewOpen(mode);
}

// Host functions
void ViewOpen(ViewMode mode)
{ // Translate the pointer; could save a copy. Could use a "Handle" and not save _odata originally in base
// std::cout << "View Open"<<std::hex<<this->_odata<<std::dec <<std::endl;
this->cpu_ptr = (void *)this->_odata;
this->mode = mode;
this->_odata =(vobj *)
MemoryManager::ViewOpen(this->cpu_ptr,
this->_odata_size*sizeof(vobj),
mode,
AdviseDefault);
}
void ViewClose(void)
{ // Inform the manager
// std::cout << "View Close"<<std::hex<<this->cpu_ptr<<std::dec <<std::endl;
MemoryManager::ViewClose(this->cpu_ptr,this->mode);
}

};
// Little autoscope assister
template<class View>
class ViewCloser
{
View v; // Take a copy of the view and call ViewClose when I go out of scope automatically
public:
ViewCloser(View &_v) : v(_v) {};
~ViewCloser() { v.ViewClose(); }
};

#define autoView(l_v,l,mode) \
auto l_v = l.View(mode); \
ViewCloser<decltype(l_v)> _autoView##l_v(l_v);
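// Editor's note, hedged expansion sketch: for a field `f`,
//   autoView( f_v, f, AcceleratorRead);
// expands to
//   auto f_v = f.View(AcceleratorRead);
//   ViewCloser<decltype(f_v)> _autoViewf_v(f_v);
// so the view is opened with an explicit access mode (letting MemoryManager
// validate or move the data) and ViewClose() is guaranteed to run when the
// enclosing scope ends, e.g.
//
//   {
//     autoView( f_v, f, AcceleratorRead);   // open (may copy to device)
//     accelerator_for(ss, f_v.size(), Nsimd, {
//       auto tmp = f_v(ss);                 // coalesced device read
//     });
//   }                                       // closer destructs: view released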

/////////////////////////////////////////////////////////////////////////////////////////
// Lattice expression types used by ET to assemble the AST
//
// Need to be able to detect code paths according to whether we have a lattice object or not,
// so introduce some trait type things
/////////////////////////////////////////////////////////////////////////////////////////

class LatticeExpressionBase {};

template <typename T> using is_lattice = std::is_base_of<LatticeBase, T>;
template <typename T> using is_lattice_expr = std::is_base_of<LatticeExpressionBase,T >;

template<class T, bool isLattice> struct ViewMapBase { typedef T Type; };
template<class T> struct ViewMapBase<T,true> { typedef LatticeView<typename T::vector_object> Type; };
template<class T> using ViewMap = ViewMapBase<T,std::is_base_of<LatticeBase, T>::value >;
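// Editor's note, hedged illustration of ViewMap: when an expression node is
// instantiated with a Lattice argument, the argument is captured as a
// LatticeView (trivially copyable into device lambdas); non-lattice scalars
// are stored as-is:
//
//   ViewMap<Lattice<vComplex>>::Type  // -> LatticeView<vComplex>
//   ViewMap<RealD>::Type              // -> RealD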

template <typename Op, typename _T1>
class LatticeUnaryExpression : public LatticeExpressionBase
{
public:
typedef typename ViewMap<_T1>::Type T1;
Op op;
T1 arg1;
LatticeUnaryExpression(Op _op,const _T1 &_arg1) : op(_op), arg1(_arg1) {};
};

template <typename Op, typename _T1, typename _T2>
class LatticeBinaryExpression : public LatticeExpressionBase
{
public:
typedef typename ViewMap<_T1>::Type T1;
typedef typename ViewMap<_T2>::Type T2;
Op op;
T1 arg1;
T2 arg2;
LatticeBinaryExpression(Op _op,const _T1 &_arg1,const _T2 &_arg2) : op(_op), arg1(_arg1), arg2(_arg2) {};
};

template <typename Op, typename _T1, typename _T2, typename _T3>
class LatticeTrinaryExpression : public LatticeExpressionBase
{
public:
typedef typename ViewMap<_T1>::Type T1;
typedef typename ViewMap<_T2>::Type T2;
typedef typename ViewMap<_T3>::Type T3;
Op op;
T1 arg1;
T2 arg2;
T3 arg3;
LatticeTrinaryExpression(Op _op,const _T1 &_arg1,const _T2 &_arg2,const _T3 &_arg3) : op(_op), arg1(_arg1), arg2(_arg2), arg3(_arg3) {};
};
NAMESPACE_END(Grid);