mirror of https://github.com/paboyle/Grid.git synced 2024-11-09 23:45:36 +00:00

Compare commits


34 Commits

Author SHA1 Message Date
D. A. Clarke      927f8b800e  Merge b02d022993 into ee1b8bbdbd                                                                      2024-02-28 14:08:55 -05:00
Peter Boyle       ee1b8bbdbd  Merge pull request #454 from edbennett/adjoint-broke (fix HMC for non-fundamental representations)   2024-02-28 14:05:27 -05:00
Peter Boyle       3f1636637d  Merge pull request #453 from dbollweg/feature/sliceSum_gpu (Feature/slice sum gpu)                    2024-02-28 14:04:43 -05:00
Peter Boyle       2e570f5300  Merge pull request #457 from lehner/feature/gpt (Import GPT-related updates)                          2024-02-28 13:59:04 -05:00
Christoph Lehner  9f89486df5  remove unnecessary code path                                                                          2024-02-28 19:56:23 +01:00
Christoph Lehner  22b43b86cb  Make GPT test suite work with SYCL                                                                    2024-02-28 12:57:17 +01:00
dbollweg          3c9012676a  CUDA cub refuses to reduce vSpinColourMatrix, breaking up into smaller parts like already done for HIP case.  2024-02-27 12:41:45 -05:00
Dennis Bollweg    b507fe209c  Added SpinColourMatrix case to sliceSum Test                                                          2024-02-27 11:28:32 -05:00
Dennis Bollweg    6cd2d8fcd5  Replace cuda/hip memcpy with Grid functions                                                           2024-02-26 09:55:07 -05:00
dbollweg          0a816b5509  Merge branch 'feature/sliceSum_gpu' of https://github.com/dbollweg/Grid into feature/sliceSum_gpu    2024-02-22 21:43:06 -05:00
dbollweg          1c8b807c2e  free malloc'd memory                                                                                  2024-02-22 21:42:44 -05:00
Christoph Lehner  66391f84f2  Merge branch 'feature/gpt' of ../Grid into develop                                                    2024-02-21 19:05:00 +01:00
Ed Bennett        97f7a9ecb3  fix HMC for non-fundamental representations                                                           2024-02-21 08:27:55 +00:00
Dennis Bollweg    15878f7613  sliceSumReduction_cub_large now also faster than CPU on Frontier                                      2024-02-16 13:55:21 -05:00
dbollweg          e0d5e3c6c7  Merge branch 'paboyle:develop' into feature/sliceSum_gpu                                              2024-02-16 13:16:37 -05:00
dbollweg          6f3455900e  Adding sliceSumReduction_cub_small/large since hipcub cannot deal with arb. large vobjs              2024-02-16 13:15:02 -05:00
dbollweg          b5659d106e  more test cases                                                                                       2024-02-09 13:37:14 -05:00
dbollweg          4b43307402  Undo include path changes for level zero api header                                                   2024-02-09 13:07:56 -05:00
dbollweg          09af8c25a2  Merge branch 'paboyle:develop' into feature/sliceSum_gpu                                              2024-02-09 13:02:59 -05:00
dbollweg          9514035b87  refactor slicesum: slicesum uses GPU version by default now                                           2024-02-09 13:02:28 -05:00
dbollweg          1514b4f137  slicesum_sycl passes test                                                                             2024-02-06 19:08:44 -05:00
dbollweg          ab2de131bd  work towards sliceSum for sycl backend                                                                2024-02-06 13:24:45 -05:00
Dennis Bollweg    5af8da76d7  Fix cuda compilation of Lattice_slicesum_gpu.h                                                        2024-02-01 18:02:30 -05:00
Dennis Bollweg    b8b9dc952d  Async memcpy's and cleanup                                                                            2024-02-01 17:55:35 -05:00
Dennis Bollweg    79a6ed32d8  Use accelerator_for2d and DeviceSegmentedRecude to avoid kernel launch latencies                      2024-02-01 16:41:03 -05:00
dbollweg          caa5f97723  Add sliceSum gpu using cub/hipcub                                                                     2024-01-31 16:50:06 -05:00
Christoph Lehner  f2648e94b9  getHostPointer added to Lattice                                                                       2023-10-23 13:47:41 +02:00
Christoph Lehner  e6ed516052  merged                                                                                                2023-10-08 09:00:37 +02:00
Christoph Lehner  e2a3dae1f2  Option for multiple simultaneous CartesianStencils                                                    2023-10-08 08:58:44 +02:00
Christoph Lehner  452bf2e907  Accelerator basisRotate also on HIP                                                                   2023-06-20 20:36:24 +03:00
Christoph Lehner  e8c29e2fe5  Merge pull request #31 from paboyle/develop (Sync)                                                    2023-05-28 16:13:12 +02:00
Christoph Lehner  da9cbfc7cc  Suppress BuildSurfaceList verbosity in Stencil.h                                                      2023-05-19 20:22:20 +02:00
Christoph Lehner  6b9f07c1ed  Merge pull request #30 from paboyle/develop (Merge upstream)                                          2023-05-19 20:20:58 +02:00
Christoph Lehner  5f75735dab  Add M and Mdag to WilsonTMFermion                                                                     2023-04-06 18:25:05 +02:00
12 changed files with 596 additions and 40 deletions

View File

@@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
     basis_v.push_back(basis[k].View(AcceleratorWrite));
   }
-#if ( (!defined(GRID_CUDA)) )
+#if ( !(defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)) )
   int max_threads = thread_max();
   Vector < vobj > Bt(Nm * max_threads);
   thread_region

View File

@ -31,6 +31,7 @@ Author: Christoph Lehner <christoph@lhnr.de>
#if defined(GRID_SYCL) #if defined(GRID_SYCL)
#include <Grid/lattice/Lattice_reduction_sycl.h> #include <Grid/lattice/Lattice_reduction_sycl.h>
#endif #endif
#include <Grid/lattice/Lattice_slicesum_core.h>
NAMESPACE_BEGIN(Grid); NAMESPACE_BEGIN(Grid);
@ -448,19 +449,10 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
int e1= grid->_slice_nblock[orthogdim]; int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim]; int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim]; int stride=grid->_slice_stride[orthogdim];
int ostride=grid->_ostride[orthogdim];
// sum over reduced dimension planes, breaking out orthog dir
// Parallel over orthog direction //Reduce Data down to lvSum
autoView( Data_v, Data, CpuRead); sliceSumReduction(Data,lvSum,rd, e1,e2,stride,ostride,Nsimd);
thread_for( r,rd, {
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int ss= so+n*stride+b;
lvSum[r]=lvSum[r]+Data_v[ss];
}
}
});
// Sum across simd lanes in the plane, breaking out orthog dir. // Sum across simd lanes in the plane, breaking out orthog dir.
Coordinate icoor(Nd); Coordinate icoor(Nd);
@ -504,6 +496,7 @@ sliceSum(const Lattice<vobj> &Data,int orthogdim)
return result; return result;
} }
template<class vobj> template<class vobj>
static void sliceInnerProductVector( std::vector<ComplexD> & result, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int orthogdim) static void sliceInnerProductVector( std::vector<ComplexD> & result, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int orthogdim)
{ {
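
Call sites are untouched by this refactor: sliceSum keeps its public signature and delegates the per-plane reduction to the backend-selected sliceSumReduction. A minimal usage sketch (not part of this compare; setup mirrors tests/core/Test_sliceSum.cc below):

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  // Small 4d lattice; the layout helpers are standard Grid.
  Coordinate latt({8,8,8,8});
  GridCartesian grid(latt, GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
  GridParallelRNG rng(&grid);
  rng.SeedFixedIntegers(std::vector<int>({1,2,3,4}));
  LatticeComplexD field(&grid);
  gaussian(rng, field);
  // One scalar per slice orthogonal to direction 3; runs on GPU when so configured.
  std::vector<TComplexD> sums = sliceSum(field, 3);
  Grid_finalize();
  return 0;
}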

View File: Grid/lattice/Lattice_slicesum_core.h (new file; path inferred from the include added above)

@@ -0,0 +1,213 @@
#pragma once
#include <type_traits>
#if defined(GRID_CUDA)
#include <cub/cub.cuh>
#define gpucub cub
#define gpuError_t cudaError_t
#define gpuSuccess cudaSuccess

#elif defined(GRID_HIP)
#include <hipcub/hipcub.hpp>
#define gpucub hipcub
#define gpuError_t hipError_t
#define gpuSuccess hipSuccess

#endif

NAMESPACE_BEGIN(Grid);

#if defined(GRID_CUDA) || defined(GRID_HIP)
template<class vobj> inline void sliceSumReduction_cub_small(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) {
  size_t subvol_size = e1*e2;
  commVector<vobj> reduction_buffer(rd*subvol_size);
  auto rb_p = &reduction_buffer[0];
  vobj zero_init;
  zeroit(zero_init);

  void *temp_storage_array = NULL;
  size_t temp_storage_bytes = 0;
  vobj *d_out;
  int* d_offsets;

  std::vector<int> offsets(rd+1,0);

  for (int i = 0; i < offsets.size(); i++) {
    offsets[i] = i*subvol_size;
  }

  //Allocate memory for output and offset arrays on device
  d_out = static_cast<vobj*>(acceleratorAllocDevice(rd*sizeof(vobj)));

  d_offsets = static_cast<int*>(acceleratorAllocDevice((rd+1)*sizeof(int)));

  //copy offsets to device
  acceleratorCopyToDeviceAsync(&offsets[0],d_offsets,sizeof(int)*(rd+1),computeStream);

  gpuError_t gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p,d_out, rd, d_offsets, d_offsets+1, ::gpucub::Sum(), zero_init, computeStream);
  if (gpuErr!=gpuSuccess) {
    std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce (setup)! Error: " << gpuErr <<std::endl;
    exit(EXIT_FAILURE);
  }

  //allocate memory for temp_storage_array
  temp_storage_array = acceleratorAllocDevice(temp_storage_bytes);

  //prepare buffer for reduction
  //use non-blocking accelerator_for to avoid syncs (ok because we submit to same computeStream)
  //use 2d accelerator_for to avoid launch latencies found when serially looping over rd
  accelerator_for2dNB( s,subvol_size, r,rd, Nsimd,{
    int n = s / e2;
    int b = s % e2;
    int so=r*ostride; // base offset for start of plane
    int ss= so+n*stride+b;

    coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data[ss]));
  });

  //issue segmented reductions in computeStream
  gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p, d_out, rd, d_offsets, d_offsets+1,::gpucub::Sum(), zero_init, computeStream);
  if (gpuErr!=gpuSuccess) {
    std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce! Error: " << gpuErr <<std::endl;
    exit(EXIT_FAILURE);
  }

  acceleratorCopyFromDeviceAsync(d_out,&lvSum[0],rd*sizeof(vobj),computeStream);

  //sync after copy
  accelerator_barrier();

  acceleratorFreeDevice(temp_storage_array);
  acceleratorFreeDevice(d_out);
  acceleratorFreeDevice(d_offsets);
}

template<class vobj> inline void sliceSumReduction_cub_large(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) {
  typedef typename vobj::vector_type vector;
  const int words = sizeof(vobj)/sizeof(vector);
  const int osites = rd*e1*e2;
  commVector<vector> buffer(osites);
  vector *dat = (vector *)Data;
  vector *buf = &buffer[0];
  Vector<vector> lvSum_small(rd);
  vector *lvSum_ptr = (vector *)&lvSum[0];

  for (int w = 0; w < words; w++) {
    accelerator_for(ss,osites,1,{
      buf[ss] = dat[ss*words+w];
    });

    sliceSumReduction_cub_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd);

    for (int r = 0; r < rd; r++) {
      lvSum_ptr[w+words*r]=lvSum_small[r];
    }
  }
}

template<class vobj> inline void sliceSumReduction_cub(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd)
{
  autoView(Data_v, Data, AcceleratorRead); //hipcub/cub cannot deal with large vobjs so we split into small/large case.
  if constexpr (sizeof(vobj) <= 256) {
    sliceSumReduction_cub_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
  }
  else {
    sliceSumReduction_cub_large(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
  }
}
#endif

#if defined(GRID_SYCL)
template<class vobj> inline void sliceSumReduction_sycl(const Lattice<vobj> &Data, Vector <vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
  typedef typename vobj::scalar_object sobj;
  size_t subvol_size = e1*e2;

  vobj *mysum = (vobj *) malloc_shared(sizeof(vobj),*theGridAccelerator);
  vobj vobj_zero;
  zeroit(vobj_zero);

  commVector<vobj> reduction_buffer(rd*subvol_size);

  auto rb_p = &reduction_buffer[0];

  autoView(Data_v, Data, AcceleratorRead);

  //prepare reduction buffer
  accelerator_for2d( s,subvol_size, r,rd, (size_t)Nsimd,{
    int n = s / e2;
    int b = s % e2;
    int so=r*ostride; // base offset for start of plane
    int ss= so+n*stride+b;

    coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data_v[ss]));
  });

  for (int r = 0; r < rd; r++) {
    mysum[0] = vobj_zero; //dirty hack: cannot pass vobj_zero as identity to sycl::reduction as its not device_copyable
    theGridAccelerator->submit([&](cl::sycl::handler &cgh) {
      auto Reduction = cl::sycl::reduction(mysum,std::plus<>());
      cgh.parallel_for(cl::sycl::range<1>{subvol_size},
                       Reduction,
                       [=](cl::sycl::id<1> item, auto &sum) {
                         auto s = item[0];
                         sum += rb_p[r*subvol_size+s];
                       });
    });
    theGridAccelerator->wait();
    lvSum[r] = mysum[0];
  }

  free(mysum,*theGridAccelerator);
}
#endif

template<class vobj> inline void sliceSumReduction_cpu(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
  // sum over reduced dimension planes, breaking out orthog dir
  // Parallel over orthog direction
  autoView( Data_v, Data, CpuRead);
  thread_for( r,rd, {
    int so=r*ostride; // base offset for start of plane
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int ss= so+n*stride+b;
        lvSum[r]=lvSum[r]+Data_v[ss];
      }
    }
  });
}

template<class vobj> inline void sliceSumReduction(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
#if defined(GRID_CUDA) || defined(GRID_HIP)
  sliceSumReduction_cub(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#elif defined(GRID_SYCL)
  sliceSumReduction_sycl(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#else
  sliceSumReduction_cpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#endif
}

NAMESPACE_END(Grid);
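
The pattern above linearizes each of the rd orthogonal planes into a contiguous segment of e1*e2 sites and lets cub/hipcub sum all segments in a single launch; objects larger than 256 bytes are first split word-by-word via sliceSumReduction_cub_large. A standalone sketch of the same DeviceSegmentedReduce idiom (plain CUDA, not Grid code; error checks elided):

#include <cub/cub.cuh>
#include <cuda_runtime.h>

// d_in holds rd*subvol_size values, plane r starting at d_offsets[r];
// d_offsets = {0, subvol, 2*subvol, ..., rd*subvol} lives on the device.
void segmented_sums(const float *d_in, float *d_out, const int *d_offsets,
                    int rd, cudaStream_t stream) {
  void  *d_temp = nullptr;
  size_t temp_bytes = 0;
  // First call (d_temp == nullptr) only computes the scratch size...
  cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, rd,
                                  d_offsets, d_offsets + 1, stream);
  cudaMalloc(&d_temp, temp_bytes);
  // ...the second performs rd independent sums in one kernel launch.
  cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, rd,
                                  d_offsets, d_offsets + 1, stream);
  cudaFree(d_temp);
}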

View File

@@ -469,15 +469,13 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
   Coordinate fine_rdimensions = fine->_rdimensions;
   Coordinate coarse_rdimensions = coarse->_rdimensions;
 
-  vobj zz = Zero();
-
   accelerator_for(sc,coarse->oSites(),1,{
 
     // One thread per sub block
     Coordinate coor_c(_ndimension);
     Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions);  // Block coordinate
 
-    vobj cd = zz;
+    vobj cd = Zero();
 
     for(int sb=0;sb<blockVol;sb++){

View File

@ -45,6 +45,7 @@ public:
}; };
// Host only // Host only
GridBase * getGrid(void) const { return _grid; }; GridBase * getGrid(void) const { return _grid; };
vobj* getHostPointer(void) const { return _odata; };
}; };
///////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////
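
A hedged sketch of how the new accessor can be used (not part of this compare; the GPT-side caller is not shown here):

#include <Grid/Grid.h>
using namespace Grid;

// getHostPointer aliases the lattice's host-resident _odata storage without a
// copy; the pointer remains valid only while `field` is alive.
vComplexD *alias_host_storage(LatticeComplexD &field) {
  return field.getHostPointer();
}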

View File

@@ -34,7 +34,7 @@ class GridTracer {
 };
 inline void tracePush(const char *name) { roctxRangePushA(name); }
 inline void tracePop(const char *name) { roctxRangePop(); }
-inline int traceStart(const char *name) { roctxRangeStart(name); }
+inline int traceStart(const char *name) { return roctxRangeStart(name); }
 inline void traceStop(int ID) { roctxRangeStop(ID); }
 #endif
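
The added return matters because the roctx range ID was previously discarded, so traceStop could never close the range it opened. The intended pairing (sketch of the same pattern used in tests/core/Test_sliceSum.cc below):

#include <Grid/Grid.h>
using namespace Grid;

void traced_region() {
  int id = traceStart("region-of-interest"); // opens a named range, returns its ID
  /* ... work being profiled ... */
  traceStop(id);                             // closes exactly that range
}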

View File

@@ -63,7 +63,9 @@ public:
   virtual void MooeeDag(const FermionField &in, FermionField &out) ;
   virtual void MooeeInv(const FermionField &in, FermionField &out) ;
   virtual void MooeeInvDag(const FermionField &in, FermionField &out) ;
+  virtual void M(const FermionField &in, FermionField &out) ;
+  virtual void Mdag(const FermionField &in, FermionField &out) ;
 
 private:
   RealD mu; // TwistedMass parameter

View File

@@ -93,5 +93,25 @@ void WilsonTMFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &ou
   RealD b = tm /sq;
   axpibg5x(out,in,a,b);
 }
+template<class Impl>
+void WilsonTMFermion<Impl>::M(const FermionField &in, FermionField &out) {
+  out.Checkerboard() = in.Checkerboard();
+  this->Dhop(in, out, DaggerNo);
+  FermionField tmp(out.Grid());
+  RealD a = 4.0+this->mass;
+  RealD b = this->mu;
+  axpibg5x(tmp,in,a,b);
+  axpy(out, 1.0, tmp, out);
+}
+template<class Impl>
+void WilsonTMFermion<Impl>::Mdag(const FermionField &in, FermionField &out) {
+  out.Checkerboard() = in.Checkerboard();
+  this->Dhop(in, out, DaggerYes);
+  FermionField tmp(out.Grid());
+  RealD a = 4.0+this->mass;
+  RealD b = -this->mu;
+  axpibg5x(tmp,in,a,b);
+  axpy(out, 1.0, tmp, out);
+}
 NAMESPACE_END(Grid);
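
For reference, the operator these two methods implement, as read off the code above (axpibg5x(tmp,in,a,b) forms tmp = a*in + i*b*gamma_5*in with a = 4+m and b = +/-mu, and the axpy adds the hopping term):

% Reading of M/Mdag above, standard twisted-mass Wilson convention
M          = D_{\mathrm{hop}} + (4+m)\,\mathbb{1} + i\,\mu\,\gamma_5 ,
\qquad
M^{\dagger} = D_{\mathrm{hop}}^{\dagger} + (4+m)\,\mathbb{1} - i\,\mu\,\gamma_5 .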

View File

@@ -237,7 +237,7 @@ public:
 for (int level = 0; level < as.size(); ++level) {
   int multiplier = as.at(level).multiplier;
-  ActionLevel<Field> * Level = new ActionLevel<Field>(multiplier);
+  ActionLevel<Field, RepresentationPolicy> * Level = new ActionLevel<Field, RepresentationPolicy>(multiplier);
   Level->push_back(new EmptyAction<Field>);
   LevelForces.push_back(*Level);
   // does it copy by value or reference??

View File

@@ -706,7 +706,7 @@ public:
       }
     }
   }
-  std::cout << GridLogDebug << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
+  //std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
 }
 /// Introduce a block structure and switch off comms on boundaries
 void DirichletBlock(const Coordinate &dirichlet_block)
@@ -761,7 +761,8 @@ public:
   int checkerboard,
   const std::vector<int> &directions,
   const std::vector<int> &distances,
-  Parameters p=Parameters())
+  Parameters p=Parameters(),
+  bool preserve_shm=false)
 {
   face_table_computed=0;
   _grid = grid;
@@ -855,7 +856,9 @@ public:
   /////////////////////////////////////////////////////////////////////////////////
   const int Nsimd = grid->Nsimd();
 
-  _grid->ShmBufferFreeAll();
+  // Allow for multiple stencils to exist simultaneously
+  if (!preserve_shm)
+    _grid->ShmBufferFreeAll();
 
   int maxl=2;
   u_simd_send_buf.resize(maxl);
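
A hedged sketch of what the new flag enables: a second stencil constructed with preserve_shm=true no longer frees the shared-memory buffers registered by the first. The template arguments and leading constructor parameters below are assumed from CartesianStencil's declaration, which this compare only partially shows; dirs, disps, and params are placeholders:

// Sketch only: two coexisting stencils; only the trailing flag is new.
template<class vobj, class cobj, class Parameters>
void make_two_stencils(GridBase *grid, int npoints,
                       const std::vector<int> &dirs,
                       const std::vector<int> &disps,
                       Parameters params) {
  CartesianStencil<vobj,cobj,Parameters> A(grid, npoints, Even, dirs, disps, params);
  CartesianStencil<vobj,cobj,Parameters> B(grid, npoints, Even, dirs, disps, params,
                                           /*preserve_shm=*/true); // keep A's SHM buffers alive
}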

View File

@@ -225,6 +225,8 @@ inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
 inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
 inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
 inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
+inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyHostToDevice, stream);}
+inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToHost, stream);}
 inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
 inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
 {
@@ -287,23 +289,24 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
 #define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... ) \
   theGridAccelerator->submit([&](cl::sycl::handler &cgh) { \
     unsigned long nt=acceleratorThreads(); \
-    unsigned long unum1 = num1; \
-    unsigned long unum2 = num2; \
     if(nt < 8)nt=8; \
+    unsigned long unum1 = num1; \
+    unsigned long unum2 = num2; \
+    unsigned long unum1_divisible_by_nt = ((unum1 + nt - 1) / nt) * nt; \
     cl::sycl::range<3> local {nt,1,nsimd}; \
-    cl::sycl::range<3> global{unum1,unum2,nsimd}; \
+    cl::sycl::range<3> global{unum1_divisible_by_nt,unum2,nsimd}; \
     cgh.parallel_for( \
       cl::sycl::nd_range<3>(global,local), \
       [=] (cl::sycl::nd_item<3> item) /*mutable*/ \
       [[intel::reqd_sub_group_size(16)]] \
       { \
         auto iter1 = item.get_global_id(0); \
         auto iter2 = item.get_global_id(1); \
         auto lane = item.get_global_id(2); \
-        { __VA_ARGS__ }; \
+        { if (iter1 < unum1){ __VA_ARGS__ } }; \
       }); \
   });
 
 #define accelerator_barrier(dummy) { theGridAccelerator->wait(); }
@@ -442,6 +445,8 @@ inline void acceleratorFreeShared(void *ptr){ auto r=hipFree(ptr);};
 inline void acceleratorFreeDevice(void *ptr){ auto r=hipFree(ptr);};
 inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { auto r=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
 inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto r=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
+inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyHostToDevice, stream);}
+inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) { auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToHost, stream);}
 //inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes)  { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
 //inline void acceleratorCopySynchronise(void) {  }
 inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto r=hipMemset(base,value,bytes);}
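
The SYCL change is needed because an nd_range global size must be a multiple of the local size; the padding threads are masked by the new `if (iter1 < unum1)` guard. The arithmetic, worked for a small case (illustration only):

#include <cassert>

// Round-up-to-multiple used for unum1_divisible_by_nt above.
int main() {
  unsigned long nt = 8, unum1 = 10;                     // 10 work items, groups of 8
  unsigned long padded = ((unum1 + nt - 1) / nt) * nt;  // = 16
  assert(padded == 16); // items 10..15 are launched but skipped by the guard
  return 0;
}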

View File: tests/core/Test_sliceSum.cc (new file, 321 lines)

@@ -0,0 +1,321 @@
#include <Grid/Grid.h>

template<class vobj> inline void sliceSumCPU(const Grid::Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
  using namespace Grid;
  ///////////////////////////////////////////////////////
  // FIXME precision promoted summation
  // may be important for correlation functions
  // But easily avoided by using double precision fields
  ///////////////////////////////////////////////////////
  typedef typename vobj::scalar_object sobj;
  typedef typename vobj::scalar_object::scalar_type scalar_type;

  GridBase *grid = Data.Grid();
  assert(grid!=NULL);

  const int Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  assert(orthogdim >= 0);
  assert(orthogdim < Nd);

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  Vector<vobj> lvSum(rd); // will locally sum vectors first
  Vector<sobj> lsSum(ld,Zero()); // sum across these down to scalars
  ExtractBuffer<sobj> extracted(Nsimd); // splitting the SIMD

  result.resize(fd); // And then global sum to return the same vector to every node

  for(int r=0;r<rd;r++){
    lvSum[r]=Zero();
  }

  int e1= grid->_slice_nblock[orthogdim];
  int e2= grid->_slice_block [orthogdim];
  int stride=grid->_slice_stride[orthogdim];
  int ostride=grid->_ostride[orthogdim];

  //Reduce Data down to lvSum
  sliceSumReduction_cpu(Data,lvSum,rd, e1,e2,stride,ostride,Nsimd);

  // Sum across simd lanes in the plane, breaking out orthog dir.
  Coordinate icoor(Nd);

  for(int rt=0;rt<rd;rt++){
    extract(lvSum[rt],extracted);
    for(int idx=0;idx<Nsimd;idx++){
      grid->iCoorFromIindex(icoor,idx);
      int ldx =rt+icoor[orthogdim]*rd;
      lsSum[ldx]=lsSum[ldx]+extracted[idx];
    }
  }

  // sum over nodes.
  for(int t=0;t<fd;t++){
    int pt = t/ld; // processor plane
    int lt = t%ld;
    if ( pt == grid->_processor_coor[orthogdim] ) {
      result[t]=lsSum[lt];
    } else {
      result[t]=Zero();
    }
  }
  scalar_type * ptr = (scalar_type *) &result[0];
  int words = fd*sizeof(sobj)/sizeof(scalar_type);
  grid->GlobalSumVector(ptr, words);
}

int main (int argc, char ** argv) {
  using namespace Grid;

  Grid_init(&argc,&argv);

  Coordinate latt_size({64,64,64,16});
  auto simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
  auto mpi_layout = GridDefaultMpi();
  GridCartesian Grid(latt_size, simd_layout, mpi_layout);

  std::vector<int> seeds({1, 2, 3, 4});

  GridParallelRNG pRNG(&Grid);
  pRNG.SeedFixedIntegers(seeds);

  LatticeComplexD test_data(&Grid);
  gaussian(pRNG,test_data);

  std::vector<TComplexD> reduction_reference;
  std::vector<TComplexD> reduction_result;

  //warmup
  for (int sweeps = 0; sweeps < 5; sweeps++) {
    reduction_result = sliceSum(test_data,0);
  }

  int trace_id = traceStart("sliceSum benchmark - ComplexD");
  std::cout << GridLogMessage << "Testing ComplexD" << std::endl;
  std::cout << GridLogMessage << "sizeof(ComplexD) = " << sizeof(ComplexD) << std::endl;
  std::cout << GridLogMessage << "sizeof(vComplexD) = " << sizeof(vComplexD) << std::endl;
  for (int i = 0; i < Nd; i++) {

    RealD t=-usecond();

    tracePush("sliceSum");
    sliceSumCPU(test_data,reduction_reference,i);
    tracePop("sliceSum");

    t+=usecond();

    std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
    std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;

    RealD tgpu=-usecond();

    tracePush("sliceSumGpu");
    reduction_result = sliceSum(test_data,i);
    tracePop("sliceSumGpu");

    tgpu+=usecond();

    std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;

    for(int t=0;t<reduction_reference.size();t++) {
      auto diff = reduction_reference[t]-reduction_result[t];
      assert(abs(TensorRemove(diff)) < 1e-8 );
    }
  }
  traceStop(trace_id);

  LatticeSpinVectorD test_data_cv(&Grid);
  gaussian(pRNG,test_data_cv);

  std::vector<SpinVectorD> reduction_reference_cv;
  std::vector<SpinVectorD> reduction_result_cv;

  //warmup
  for (int sweeps = 0; sweeps < 5; sweeps++) {
    reduction_result_cv = sliceSum(test_data_cv,0);
  }
  trace_id = traceStart("sliceSum benchmark - SpinVectorD");

  std::cout << GridLogMessage << "Testing SpinVectorD" << std::endl;
  std::cout << GridLogMessage << "sizeof(SpinVectorD) = " << sizeof(SpinVectorD) << std::endl;
  std::cout << GridLogMessage << "sizeof(vSpinVectorD) = " << sizeof(vSpinVectorD) << std::endl;
  for (int i = 0; i < Nd; i++) {

    RealD t=-usecond();

    tracePush("sliceSum");
    sliceSumCPU(test_data_cv,reduction_reference_cv,i);
    tracePop("sliceSum");

    t+=usecond();

    std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
    std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;

    RealD tgpu=-usecond();

    tracePush("sliceSumGpu");
    reduction_result_cv = sliceSum(test_data_cv,i);
    tracePop("sliceSumGpu");

    tgpu+=usecond();

    std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;

    for(int t=0;t<reduction_reference_cv.size();t++) {
      auto diff = reduction_reference_cv[t]-reduction_result_cv[t];
      assert(abs(diff()(0)()) < 1e-8 );
      assert(abs(diff()(1)()) < 1e-8 );
      assert(abs(diff()(2)()) < 1e-8 );
      assert(abs(diff()(3)()) < 1e-8 );
    }
  }
  traceStop(trace_id);

  LatticeSpinColourVectorD test_data_scv(&Grid);
  gaussian(pRNG,test_data_scv);

  std::vector<SpinColourVectorD> reduction_reference_scv;
  std::vector<SpinColourVectorD> reduction_result_scv;

  //warmup
  for (int sweeps = 0; sweeps < 5; sweeps++) {
    reduction_result_scv = sliceSum(test_data_scv,0);
  }
  trace_id = traceStart("sliceSum benchmark - SpinColourVectorD");

  std::cout << GridLogMessage << "Testing SpinColourVectorD" << std::endl;
  std::cout << GridLogMessage << "sizeof(SpinColourVectorD) = " << sizeof(SpinColourVectorD) << std::endl;
  std::cout << GridLogMessage << "sizeof(vSpinColourVectorD) = " << sizeof(vSpinColourVectorD) << std::endl;
  for (int i = 0; i < Nd; i++) {

    RealD t=-usecond();

    tracePush("sliceSum");
    sliceSumCPU(test_data_scv,reduction_reference_scv,i);
    tracePop("sliceSum");

    t+=usecond();

    std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
    std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;

    RealD tgpu=-usecond();

    tracePush("sliceSumGpu");
    reduction_result_scv = sliceSum(test_data_scv,i);
    tracePop("sliceSumGpu");

    tgpu+=usecond();

    std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;

    for(int t=0;t<reduction_reference_scv.size();t++) {
      auto diff = reduction_reference_scv[t]-reduction_result_scv[t];
      // std::cout << diff <<std::endl;
      assert(abs(diff()(0)(0)) < 1e-8 );
      assert(abs(diff()(0)(1)) < 1e-8 );
      assert(abs(diff()(0)(2)) < 1e-8 );
      assert(abs(diff()(1)(0)) < 1e-8 );
      assert(abs(diff()(1)(1)) < 1e-8 );
      assert(abs(diff()(1)(2)) < 1e-8 );
      assert(abs(diff()(2)(0)) < 1e-8 );
      assert(abs(diff()(2)(1)) < 1e-8 );
      assert(abs(diff()(2)(2)) < 1e-8 );
      assert(abs(diff()(3)(0)) < 1e-8 );
      assert(abs(diff()(3)(1)) < 1e-8 );
      assert(abs(diff()(3)(2)) < 1e-8 );
    }
  }
  traceStop(trace_id);

  LatticeSpinColourMatrixD test_data_scm(&Grid);
  gaussian(pRNG,test_data_scm);

  std::vector<SpinColourMatrixD> reduction_reference_scm;
  std::vector<SpinColourMatrixD> reduction_result_scm;

  //warmup
  for (int sweeps = 0; sweeps < 5; sweeps++) {
    reduction_result_scm = sliceSum(test_data_scm,0);
  }
  trace_id = traceStart("sliceSum benchmark - SpinColourMatrixD");

  std::cout << GridLogMessage << "Testing SpinColourMatrixD" << std::endl;
  std::cout << GridLogMessage << "sizeof(SpinColourMatrixD) = " << sizeof(SpinColourMatrixD) << std::endl;
  std::cout << GridLogMessage << "sizeof(vSpinColourMatrixD) = " << sizeof(vSpinColourMatrixD) << std::endl;
  for (int i = 0; i < Nd; i++) {

    RealD t=-usecond();

    tracePush("sliceSum");
    sliceSumCPU(test_data_scm,reduction_reference_scm,i);
    tracePop("sliceSum");

    t+=usecond();

    std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
    std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;

    RealD tgpu=-usecond();

    tracePush("sliceSumGpu");
    reduction_result_scm = sliceSum(test_data_scm,i);
    tracePop("sliceSumGpu");

    tgpu+=usecond();

    std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;

    for(int t=0;t<reduction_reference_scm.size();t++) {
      auto diff = reduction_reference_scm[t]-reduction_result_scm[t];
      // std::cout << diff <<std::endl;
      for (int is = 0; is < Ns; is++) {
        for (int js = 0; js < Ns; js++) {
          for (int ic = 0; ic < Nc; ic++) {
            for (int jc = 0; jc < Nc; jc++) {
              assert(abs(diff()(is,js)(ic,jc)) < 1e-8);
            }
          }
        }
      }
    }
  }
  traceStop(trace_id);

  Grid_finalize();
  return 0;
}