#include <Grid/Grid.h>
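
// Benchmark for Grid's sliceSum: a scalar reference implementation
// (sliceSumCPU below) is timed against the library's accelerated sliceSum,
// and the two results are compared slice by slice.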

template<class vobj> inline void sliceSumCPU(const Grid::Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
  using namespace Grid;
  ///////////////////////////////////////////////////////
  // FIXME precision promoted summation
  // may be important for correlation functions
  // But easily avoided by using double precision fields
  ///////////////////////////////////////////////////////
  typedef typename vobj::scalar_object sobj;
  typedef typename vobj::scalar_object::scalar_type scalar_type;
  GridBase *grid = Data.Grid();
  assert(grid!=NULL);

  const int Nd    = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  assert(orthogdim >= 0);
  assert(orthogdim < Nd);

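  // fd/ld/rd: full (global), local (per-rank) and reduced (local/SIMD) extent
  // of the orthogonal direction in Grid's cartesian geometry.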
  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  Vector<vobj> lvSum(rd);               // will locally sum vectors first
  Vector<sobj> lsSum(ld,Zero());        // sum across these down to scalars
  ExtractBuffer<sobj> extracted(Nsimd); // splitting the SIMD

  result.resize(fd); // And then global sum to return the same vector to every node
  for(int r=0;r<rd;r++){
    lvSum[r]=Zero();
  }

  int e1     = grid->_slice_nblock[orthogdim];
  int e2     = grid->_slice_block [orthogdim];
  int stride = grid->_slice_stride[orthogdim];
  int ostride= grid->_ostride[orthogdim];

  // Reduce Data down to lvSum
  sliceSumReduction_cpu(Data,lvSum,rd, e1,e2,stride,ostride,Nsimd);

  // Sum across simd lanes in the plane, breaking out orthog dir.
  Coordinate icoor(Nd);

  for(int rt=0;rt<rd;rt++){

    extract(lvSum[rt],extracted);

    for(int idx=0;idx<Nsimd;idx++){

      grid->iCoorFromIindex(icoor,idx);

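      // Lane idx sits at inner coordinate icoor; its local coordinate along
      // orthogdim is rt offset by icoor[orthogdim] blocks of size rd.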
      int ldx =rt+icoor[orthogdim]*rd;

      lsSum[ldx]=lsSum[ldx]+extracted[idx];

    }
  }

  // sum over nodes.
  for(int t=0;t<fd;t++){
    int pt = t/ld; // processor plane
    int lt = t%ld;
    if ( pt == grid->_processor_coor[orthogdim] ) {
      result[t]=lsSum[lt];
    } else {
      result[t]=Zero();
    }
  }

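  // Each rank has filled only the slices it owns; summing the flattened result
  // across all ranks gives every node the complete set of slice sums.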
  scalar_type * ptr = (scalar_type *) &result[0];
  int words = fd*sizeof(sobj)/sizeof(scalar_type);
  grid->GlobalSumVector(ptr, words);
}


int main (int argc, char ** argv) {

  using namespace Grid;

  Grid_init(&argc,&argv);

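  // 64^3 x 16 global lattice; GridDefaultSimd chooses the SIMD decomposition
  // for vComplexD and GridDefaultMpi takes the rank decomposition chosen at
  // Grid_init (e.g. via --mpi).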
  Coordinate latt_size({64,64,64,16});
  auto simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
  auto mpi_layout  = GridDefaultMpi();
  GridCartesian Grid(latt_size, simd_layout, mpi_layout);

  std::vector<int> seeds({1, 2, 3, 4});

  GridParallelRNG pRNG(&Grid);
  pRNG.SeedFixedIntegers(seeds);

  LatticeComplexD test_data(&Grid);
  gaussian(pRNG,test_data);

  std::vector<TComplexD> reduction_reference;
  std::vector<TComplexD> reduction_result;

  //warmup
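  // (a few untimed sweeps so that one-time setup costs, e.g. initial device
  //  buffer allocation, do not pollute the measurements below)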
  for (int sweeps = 0; sweeps < 5; sweeps++) {
    reduction_result = sliceSum(test_data,0);
  }

  int trace_id = traceStart("sliceSum benchmark");
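  // For each orthogonal direction, time the scalar reference (sliceSumCPU)
  // against the library sliceSum; tracePush/tracePop mark the timed regions
  // (useful when an external profiler is attached).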
  for (int i = 0; i < Nd; i++) {

    RealD t=-usecond();

    tracePush("sliceSum");
    sliceSumCPU(test_data,reduction_reference,i);
    tracePop("sliceSum");

    t+=usecond();
    std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
    std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;

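    // Same measurement for the library (potentially device-accelerated) sliceSum.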
    RealD tgpu=-usecond();

    tracePush("sliceSumGpu");
    reduction_result = sliceSum(test_data,i);
    tracePop("sliceSumGpu");

    tgpu+=usecond();

    std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;

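    // Check the accelerated result against the CPU reference for every slice.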
    for(int t=0;t<reduction_reference.size();t++) {

      auto diff = reduction_reference[t]-reduction_result[t];
      assert(abs(TensorRemove(diff)) < 1e-8 );

    }

  }
  traceStop(trace_id);

  Grid_finalize();
  return 0;
}