mirror of https://github.com/paboyle/Grid.git synced 2025-06-11 03:46:55 +01:00

Threading support rework.

Placed parallel pragmas behind macros; implemented deterministic thread
reduction in the style of BFM.
Peter Boyle
2015-05-12 07:51:41 +01:00
parent 6e6843ac69
commit c6baa3e657
26 changed files with 276 additions and 184 deletions
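
The PARALLEL_FOR_LOOP macro definition itself is not shown in this diff; a
minimal sketch of what it plausibly expands to (the GRID_OMP guard name is an
assumption here) is:

// Hedged sketch: expands to the OpenMP pragma when threading is enabled and
// to nothing otherwise, so the annotated loops remain valid serial code.
#ifdef GRID_OMP
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for")
#else
#define PARALLEL_FOR_LOOP
#endif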

View File

@@ -10,7 +10,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -21,7 +21,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -32,7 +32,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -42,7 +42,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const Lattice<obj3> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
add(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -56,7 +56,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
conformable(lhs,ret);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
mult(&tmp,&lhs._odata[ss],&rhs);
@@ -67,7 +67,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
conformable(lhs,ret);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
mac(&tmp,&lhs._odata[ss],&rhs);
@@ -78,7 +78,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
conformable(lhs,ret);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
sub(&tmp,&lhs._odata[ss],&rhs);
@@ -88,7 +88,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
conformable(lhs,ret);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
obj1 tmp;
add(&tmp,&lhs._odata[ss],&rhs);
@@ -102,7 +102,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
conformable(ret,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites();ss++){
obj1 tmp;
mult(&tmp,&lhs,&rhs._odata[ss]);
@@ -113,7 +113,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
conformable(ret,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites();ss++){
obj1 tmp;
mac(&tmp,&lhs,&rhs._odata[ss]);
@@ -124,7 +124,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
conformable(ret,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites();ss++){
obj1 tmp;
sub(&tmp,&lhs,&rhs._odata[ss]);
@@ -134,7 +134,7 @@ namespace Grid {
template<class obj1,class obj2,class obj3>
void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
conformable(ret,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites();ss++){
obj1 tmp;
add(&tmp,&lhs,&rhs._odata[ss]);
@@ -145,7 +145,7 @@ namespace Grid {
template<class sobj,class vobj>
inline void axpy(Lattice<vobj> &ret,sobj a,const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
vobj tmp = a*lhs._odata[ss];
vstream(ret._odata[ss],tmp+rhs._odata[ss]);

View File

@@ -64,7 +64,7 @@ public:
////////////////////////////////////////////////////////////////////////////////
template <typename Op, typename T1> inline Lattice<vobj> & operator=(const LatticeUnaryExpression<Op,T1> &expr)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
vobj tmp= eval(ss,expr);
vstream(_odata[ss] ,tmp);
@@ -73,7 +73,7 @@ public:
}
template <typename Op, typename T1,typename T2> inline Lattice<vobj> & operator=(const LatticeBinaryExpression<Op,T1,T2> &expr)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
vobj tmp= eval(ss,expr);
vstream(_odata[ss] ,tmp);
@@ -82,7 +82,7 @@ public:
}
template <typename Op, typename T1,typename T2,typename T3> inline Lattice<vobj> & operator=(const LatticeTrinaryExpression<Op,T1,T2,T3> &expr)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
vobj tmp= eval(ss,expr);
vstream(_odata[ss] ,tmp);
@@ -95,7 +95,7 @@ public:
GridFromExpression(_grid,expr);
assert(_grid!=nullptr);
_odata.resize(_grid->oSites());
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
_odata[ss] = eval(ss,expr);
}
@@ -105,7 +105,7 @@ public:
GridFromExpression(_grid,expr);
assert(_grid!=nullptr);
_odata.resize(_grid->oSites());
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
_odata[ss] = eval(ss,expr);
}
@@ -115,7 +115,7 @@ public:
GridFromExpression(_grid,expr);
assert(_grid!=nullptr);
_odata.resize(_grid->oSites());
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
_odata[ss] = eval(ss,expr);
}
@@ -133,7 +133,7 @@ public:
}
template<class sobj> inline Lattice<vobj> & operator = (const sobj & r){
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
this->_odata[ss]=r;
}
@@ -142,7 +142,7 @@ public:
template<class robj> inline Lattice<vobj> & operator = (const Lattice<robj> & r){
conformable(*this,r);
std::cout<<"Lattice operator ="<<std::endl;
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<_grid->oSites();ss++){
this->_odata[ss]=r._odata[ss];
}
@@ -167,7 +167,7 @@ public:
inline friend Lattice<vobj> operator / (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs){
conformable(lhs,rhs);
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = lhs._odata[ss]/rhs._odata[ss];
}

View File

@@ -15,7 +15,7 @@ namespace Grid {
inline Lattice<vInteger> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
{
Lattice<vInteger> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]);
}
@@ -25,7 +25,7 @@ namespace Grid {
inline Lattice<vInteger> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
{
Lattice<vInteger> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites(); ss++){
ret._odata[ss]=op(lhs._odata[ss],rhs);
}
@@ -35,7 +35,7 @@ namespace Grid {
inline Lattice<vInteger> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
{
Lattice<vInteger> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=op(lhs,rhs._odata[ss]);
}

View File

@@ -16,7 +16,7 @@ namespace Grid {
inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
{
Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]);
}
@@ -29,7 +29,7 @@ namespace Grid {
-> Lattice<typename vobj::tensor_reduced>
{
Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=innerProduct(lhs._odata[ss],rhs._odata[ss]);
}
@@ -42,7 +42,7 @@ namespace Grid {
inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))>
{
Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]);
}

View File

@@ -10,7 +10,7 @@ namespace Grid {
inline Lattice<vobj> operator -(const Lattice<vobj> &r)
{
Lattice<vobj> ret(r._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<r._grid->oSites();ss++){
vstream(ret._odata[ss], -r._odata[ss]);
}
@@ -47,7 +47,7 @@ namespace Grid {
inline auto operator * (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
{
Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
decltype(lhs*rhs._odata[0]) tmp=lhs*rhs._odata[ss];
vstream(ret._odata[ss],tmp);
@@ -59,7 +59,7 @@ namespace Grid {
inline auto operator + (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs+rhs._odata[0])>
{
Lattice<decltype(lhs+rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
decltype(lhs+rhs._odata[0]) tmp =lhs+rhs._odata[ss];
vstream(ret._odata[ss],tmp);
@@ -71,7 +71,7 @@ namespace Grid {
inline auto operator - (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs-rhs._odata[0])>
{
Lattice<decltype(lhs-rhs._odata[0])> ret(rhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<rhs._grid->oSites(); ss++){
decltype(lhs-rhs._odata[0]) tmp=lhs-rhs._odata[ss];
vstream(ret._odata[ss],tmp);
@@ -83,7 +83,7 @@ namespace Grid {
inline auto operator * (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
{
Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites(); ss++){
decltype(lhs._odata[0]*rhs) tmp =lhs._odata[ss]*rhs;
vstream(ret._odata[ss],tmp);
@@ -95,7 +95,7 @@ namespace Grid {
inline auto operator + (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]+rhs)>
{
Lattice<decltype(lhs._odata[0]+rhs)> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites(); ss++){
decltype(lhs._odata[0]+rhs) tmp=lhs._odata[ss]+rhs;
vstream(ret._odata[ss],tmp);
@@ -107,7 +107,7 @@ namespace Grid {
inline auto operator - (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]-rhs)>
{
Lattice<decltype(lhs._odata[0]-rhs)> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites(); ss++){
decltype(lhs._odata[0]-rhs) tmp=lhs._odata[ss]-rhs;
vstream(ret._odata[ss],tmp);

View File

@@ -15,7 +15,7 @@ namespace Grid {
-> Lattice<decltype(peekIndex<Index>(lhs._odata[0]))>
{
Lattice<decltype(peekIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = peekIndex<Index>(lhs._odata[ss]);
}
@@ -26,7 +26,7 @@ namespace Grid {
-> Lattice<decltype(peekIndex<Index>(lhs._odata[0],i))>
{
Lattice<decltype(peekIndex<Index>(lhs._odata[0],i))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i);
}
@@ -37,7 +37,7 @@ namespace Grid {
-> Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))>
{
Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i,j);
}
@@ -50,7 +50,7 @@ namespace Grid {
template<int Index,class vobj> inline
void pokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0]))> & rhs)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss]);
}
@@ -58,7 +58,7 @@ namespace Grid {
template<int Index,class vobj> inline
void pokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0))> & rhs,int i)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i);
}
@@ -66,7 +66,7 @@ namespace Grid {
template<int Index,class vobj> inline
void pokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0,0))> & rhs,int i,int j)
{
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i,j);
}

View File

@@ -11,7 +11,7 @@ namespace Grid {
template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = adj(lhs._odata[ss]);
}
@@ -20,7 +20,7 @@ namespace Grid {
template<class vobj> inline Lattice<vobj> conj(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = conj(lhs._odata[ss]);
}
@@ -30,7 +30,7 @@ namespace Grid {
template<class vobj> inline auto real(const Lattice<vobj> &z) -> Lattice<decltype(real(z._odata[0]))>
{
Lattice<decltype(real(z._odata[0]))> ret(z._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<z._grid->oSites();ss++){
ret._odata[ss] = real(z._odata[ss]);
}
@@ -40,7 +40,7 @@ namespace Grid {
template<class vobj> inline auto imag(const Lattice<vobj> &z) -> Lattice<decltype(imag(z._odata[0]))>
{
Lattice<decltype(imag(z._odata[0]))> ret(z._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<z._grid->oSites();ss++){
ret._odata[ss] = imag(z._odata[ss]);
}

View File

@@ -7,69 +7,85 @@ namespace Grid {
#endif
////////////////////////////////////////////////////////////////////////////////////////////////////
// Reduction operations
// Deterministic Reduction operations
////////////////////////////////////////////////////////////////////////////////////////////////////
template<class vobj>
inline RealD norm2(const Lattice<vobj> &arg){
typedef typename vobj::scalar_type scalar;
typedef typename vobj::vector_type vector;
decltype(innerProduct(arg._odata[0],arg._odata[0])) vnrm;
scalar nrm;
//FIXME make this loop parallelisable
vnrm=zero;
for(int ss=0;ss<arg._grid->oSites(); ss++){
vnrm = vnrm + innerProduct(arg._odata[ss],arg._odata[ss]);
}
vector vvnrm =TensorRemove(vnrm) ;
nrm = Reduce(vvnrm);
arg._grid->GlobalSum(nrm);
return real(nrm);
}
template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
ComplexD nrm = innerProduct(arg,arg);
return real(nrm);
}
template<class vobj>
inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right)
// inline auto innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right)
//->decltype(innerProduct(left._odata[0],right._odata[0]))
{
typedef typename vobj::scalar_type scalar;
decltype(innerProduct(left._odata[0],right._odata[0])) vnrm;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
vector_type vnrm;
scalar_type nrm;
scalar nrm;
//FIXME make this loop parallelisable
vnrm=zero;
for(int ss=0;ss<left._grid->oSites(); ss++){
vnrm = vnrm + innerProduct(left._odata[ss],right._odata[ss]);
GridBase *grid = left._grid;
std::vector<vector_type,alignedAllocator<vector_type> > sumarray(grid->SumArraySize());
for(int i=0;i<grid->SumArraySize();i++){
sumarray[i]=zero;
}
nrm = Reduce(vnrm);
PARALLEL_FOR_LOOP
for(int thr=0;thr<grid->SumArraySize();thr++){
int nwork, mywork, myoff;
GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);
decltype(innerProduct(left._odata[0],right._odata[0])) vnrm=zero; // private to thread; sub summation
for(int ss=myoff;ss<mywork+myoff; ss++){
vnrm = vnrm + innerProduct(left._odata[ss],right._odata[ss]);
}
sumarray[thr]=TensorRemove(vnrm) ;
}
vector_type vvnrm; vvnrm=zero; // sum across threads
for(int i=0;i<grid->SumArraySize();i++){
vvnrm = vvnrm+sumarray[i];
}
nrm = Reduce(vvnrm);// sum across simd
right._grid->GlobalSum(nrm);
return nrm;
}
template<class vobj>
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg){
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg){
GridBase *grid=arg._grid;
int Nsimd = grid->Nsimd();
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
vobj vsum;
sobj ssum;
vsum=zero;
ssum=zero;
//FIXME make this loop parallelisable
for(int ss=0;ss<arg._grid->oSites(); ss++){
vsum = vsum + arg._odata[ss];
std::vector<vobj,alignedAllocator<vobj> > sumarray(grid->SumArraySize());
for(int i=0;i<grid->SumArraySize();i++){
sumarray[i]=zero;
}
PARALLEL_FOR_LOOP
for(int thr=0;thr<grid->SumArraySize();thr++){
int nwork, mywork, myoff;
GridThread::GetWork(grid->oSites(),thr,mywork,myoff);
vobj vvsum=zero;
for(int ss=myoff;ss<mywork+myoff; ss++){
vvsum = vvsum + arg._odata[ss];
}
sumarray[thr]=vvsum;
}
vobj vsum=zero; // sum across threads
for(int i=0;i<grid->SumArraySize();i++){
vsum = vsum+sumarray[i];
}
typedef typename vobj::scalar_object sobj;
sobj ssum=zero;
std::vector<sobj> buf(Nsimd);
extract(vsum,buf);
for(int i=0;i<Nsimd;i++) ssum = ssum + buf[i];
arg._grid->GlobalSum(ssum);
return ssum;
@@ -77,13 +93,15 @@ namespace Grid {
template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
typedef typename vobj::scalar_object sobj;
GridBase *grid = Data._grid;
assert(grid!=NULL);
// FIXME
std::cout<<"WARNING ! SliceSum is unthreaded "<<grid->SumArraySize()<<" threads "<<std::endl;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
@@ -94,10 +112,8 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
sobj szero; szero=zero;
std::vector<vobj,alignedAllocator<vobj> > lvSum(rd); // will locally sum vectors first
std::vector<sobj> lsSum(ld,szero); // sum across these down to scalars
std::vector<sobj> lsSum(ld,zero); // sum across these down to scalars
std::vector<sobj> extracted(Nsimd); // splitting the SIMD
result.resize(fd); // And then global sum to return the same vector to every node for IO to file
@@ -108,6 +124,7 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
std::vector<int> coor(Nd);
// sum over reduced dimension planes, breaking out orthog dir
for(int ss=0;ss<grid->oSites();ss++){
GridBase::CoorFromIndex(coor,ss,grid->_rdimensions);
int r = coor[orthogdim];
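
The rewritten reductions above are what make the result deterministic: each
thread accumulates a private partial sum over a fixed, contiguous slab of
sites, the partials land in sumarray[], and both the cross-thread sum and the
final cross-SIMD-lane Reduce run serially in a fixed order, so floating-point
round-off no longer depends on thread scheduling (unlike an OpenMP reduction
clause). GridThread::GetWork's body is not shown in this diff; a hedged
sketch of the slab split it is assumed to perform:

#include <algorithm>
// Hedged sketch, not the real GridThread::GetWork: split nwork sites into
// one contiguous slab per thread, so thread 'me' gets the same range every run.
inline void GetWorkSketch(int nwork,int me,int &mywork,int &myoff,int threads){
  int chunk = nwork/threads;
  int extra = nwork%threads;         // first 'extra' threads take one more site
  mywork = chunk + (me<extra ? 1 : 0);
  myoff  = me*chunk + std::min(me,extra);
}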

View File

@@ -15,7 +15,7 @@ namespace Grid {
-> Lattice<decltype(trace(lhs._odata[0]))>
{
Lattice<decltype(trace(lhs._odata[0]))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = trace(lhs._odata[ss]);
}
@@ -30,7 +30,7 @@ namespace Grid {
-> Lattice<decltype(traceIndex<Index>(lhs._odata[0]))>
{
Lattice<decltype(traceIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = traceIndex<Index>(lhs._odata[ss]);
}

View File

@@ -23,7 +23,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
half.checkerboard = cb;
int ssh=0;
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<full._grid->oSites();ss++){
std::vector<int> coor;
int cbos;
@@ -41,7 +41,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
int cb = half.checkerboard;
int ssh=0;
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<full._grid->oSites();ss++){
std::vector<int> coor;
int cbos;

View File

@@ -13,7 +13,7 @@ namespace Grid {
template<class vobj>
inline Lattice<vobj> transpose(const Lattice<vobj> &lhs){
Lattice<vobj> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = transpose(lhs._odata[ss]);
}
@@ -28,7 +28,7 @@ namespace Grid {
-> Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))>
{
Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
#pragma omp parallel for
PARALLEL_FOR_LOOP
for(int ss=0;ss<lhs._grid->oSites();ss++){
ret._odata[ss] = transposeIndex<Index>(lhs._odata[ss]);
}

View File

@@ -0,0 +1,58 @@
#ifndef GRID_LATTICE_WHERE_H
#define GRID_LATTICE_WHERE_H
namespace Grid {
// Must implement the predicate gating the
// Must be able to reduce the predicate down to a single vInteger per site.
// Must be able to require the type be iScalar x iScalar x ....
// give a GetVtype method in iScalar
// and blow away the tensor structures.
//
template<class vobj,class iobj>
inline void where(Lattice<vobj> &ret,const Lattice<iobj> &predicate,Lattice<vobj> &iftrue,Lattice<vobj> &iffalse)
{
conformable(iftrue,iffalse);
conformable(iftrue,predicate);
conformable(iftrue,ret);
GridBase *grid=iftrue._grid;
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef typename iobj::vector_type mask_type;
const int Nsimd = grid->Nsimd();
const int words = sizeof(vobj)/sizeof(vector_type);
std::vector<Integer> mask(Nsimd);
std::vector<scalar_object> truevals (Nsimd);
std::vector<scalar_object> falsevals(Nsimd);
PARALLEL_FOR_LOOP
for(int ss=0;ss<iftrue._grid->oSites(); ss++){
extract(iftrue._odata[ss] ,truevals);
extract(iffalse._odata[ss] ,falsevals);
extract<vInteger,Integer>(TensorRemove(predicate._odata[ss]),mask);
for(int s=0;s<Nsimd;s++){
if (mask[s]) falsevals[s]=truevals[s];
}
merge(ret._odata[ss],falsevals);
}
}
template<class vobj,class iobj>
inline Lattice<vobj> where(const Lattice<iobj> &predicate,Lattice<vobj> &iftrue,Lattice<vobj> &iffalse)
{
conformable(iftrue,iffalse);
conformable(iftrue,predicate);
Lattice<vobj> ret(iftrue._grid);
where(ret,predicate,iftrue,iffalse);
return ret;
}
}
#endif
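
A hedged usage sketch for the new where(); the LatticeComplex/LatticeInteger
typedefs and the LatticeCoordinate helper are assumed from the rest of the
library, and the predicate relies on the lattice comparison operators touched
earlier in this commit:

// Hypothetical example, not part of this commit: take sites from 'a' where
// the x coordinate is below 4, and from 'b' everywhere else.
LatticeComplex whereExample(GridBase *grid,LatticeComplex &a,LatticeComplex &b){
  LatticeInteger x(grid);
  LatticeCoordinate(x,0);            // x coordinate at every site
  return where(x<(Integer)4,a,b);    // 'a' where x<4, 'b' elsewhere
}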