Mirror of https://github.com/paboyle/Grid.git
Use accelerator_for2d and DeviceSegmentedReduce to avoid kernel launch latencies
commit 79a6ed32d8 (parent caa5f97723)
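For context before the hunks: the old sliceSumGpu looped over the rd reduced-dimension slices on the host and paid one cub::DeviceReduce::Sum launch per slice; the new code stages all slices contiguously and issues a single cub::DeviceSegmentedReduce call for the whole set. A minimal standalone CUDA/CUB sketch of that replacement pattern (the names d_in/d_out/d_offsets and the float payload are illustrative, not Grid's; error checking omitted):

    #include <cub/cub.cuh>
    #include <vector>

    // Sum rd segments of `size` elements each with ONE launch,
    // instead of rd separate DeviceReduce::Sum launches.
    void segmented_sum(const float *d_in, float *d_out, int rd, int size,
                       cudaStream_t stream) {
      // Segment i covers [i*size, (i+1)*size).
      std::vector<int> h_offsets(rd + 1);
      for (int i = 0; i <= rd; i++) h_offsets[i] = i * size;

      int *d_offsets;
      cudaMalloc(&d_offsets, sizeof(int) * (rd + 1));
      cudaMemcpy(d_offsets, h_offsets.data(), sizeof(int) * (rd + 1),
                 cudaMemcpyHostToDevice);

      // CUB's two-phase convention: a null temp pointer only queries the
      // scratch size; the second call does the actual reduction.
      void *d_temp = nullptr;
      size_t temp_bytes = 0;
      cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, rd,
                                      d_offsets, d_offsets + 1, stream);
      cudaMalloc(&d_temp, temp_bytes);
      cub::DeviceSegmentedReduce::Sum(d_temp, temp_bytes, d_in, d_out, rd,
                                      d_offsets, d_offsets + 1, stream);

      cudaFree(d_temp);
      cudaFree(d_offsets);
    }

The segment boundaries are just the rd+1 multiples of the slice size, which is exactly what the offsets array introduced in the hunks below encodes.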
@@ -6,6 +6,7 @@
 #define gpuMalloc cudaMalloc
 #define gpuMemcpy cudaMemcpy
 #define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
+#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
 #define gpuError_t cudaError_t
 #define gpuSuccess cudaSuccess
 
@@ -16,6 +17,7 @@
 #define gpuMalloc hipMalloc
 #define gpuMemcpy hipMemcpy
 #define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
+#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
 #define gpuError_t hipError_t
 #define gpuSuccess hipSuccess
 
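Both preprocessor hunks extend the same vendor shim: each gpu*-prefixed alias maps onto the matching CUDA or HIP symbol, so the reduction code compiles unchanged on either backend. A minimal sketch of the pattern (the GRID_CUDA/GRID_HIP guards are an assumption about the surrounding context, which the excerpt does not show):

    #if defined(GRID_CUDA)
      #define gpuMemcpy             cudaMemcpy
      #define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
    #elif defined(GRID_HIP)
      #define gpuMemcpy             hipMemcpy
      #define gpuMemcpyHostToDevice hipMemcpyHostToDevice
    #endif

    // Call sites stay backend-agnostic:
    // gpuMemcpy(d_offsets, &offsets[0], sizeof(int)*(rd+1), gpuMemcpyHostToDevice);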
@@ -49,14 +51,15 @@ template<class vobj> inline void sliceSumGpu(const Lattice<vobj> &Data,std::vect
   int ostride=grid->_ostride[orthogdim];
   Vector<vobj> lvSum(rd);
   Vector<sobj> lsSum(ld,Zero());
-  commVector<vobj> reduction_buffer(e1*e2);
+  commVector<vobj> reduction_buffer(rd*e1*e2);
   ExtractBuffer<sobj> extracted(Nsimd);
 
   result.resize(fd);
   for(int r=0;r<rd;r++){
     lvSum[r]=Zero();
   }
-
+  vobj identity;
+  zeroit(identity);
 
   autoView( Data_v, Data, AcceleratorRead);
   auto rb_p = &reduction_buffer[0];
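Two things change in this hunk: reduction_buffer grows from one plane (e1*e2) to all rd planes so a single segmented reduction can sweep it, and identity, a zeroed vobj, is introduced as the initial value that the segmented reduce folds into each segment. The layout, as a toy comment-only illustration (the concrete numbers are made up):

    // rd = 3 segments of e1*e2 = 4 elements, staged contiguously:
    // index:    0  1  2  3    4  5  6  7    8  9 10 11
    // segment: [--- r=0 ---] [--- r=1 ---] [--- r=2 ---]
    // offsets = {0, 4, 8, 12};  // rd+1 entries, offsets[i] = i*e1*e2
    // Segment r reduces into d_out[r], seeded with the zero `identity`.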
@@ -65,39 +68,59 @@ template<class vobj> inline void sliceSumGpu(const Lattice<vobj> &Data,std::vect
   vobj *d_out;
   size_t temp_storage_bytes = 0;
   size_t size = e1*e2;
-  gpuMalloc(&d_out,rd*sizeof(vobj));
-  gpuError_t gpuErr =gpucub::DeviceReduce::Sum(helperArray, temp_storage_bytes, rb_p,d_out, size, computeStream);
+  std::vector<int> offsets(rd+1,0);
+  for (int i = 0; i < offsets.size(); i++) {
+    offsets[i] = i*size;
+  }
+  int* d_offsets;
+
+  gpuError_t gpuErr = gpuMalloc(&d_out,rd*sizeof(vobj));
+  if (gpuErr != gpuSuccess) {
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during gpuMalloc(1) Error: " << gpuErr <<std::endl;
+  }
+  gpuErr = gpuMalloc(&d_offsets,sizeof(int)*(rd+1));
+  if (gpuErr != gpuSuccess) {
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during gpuMalloc(2) Error: " << gpuErr <<std::endl;
+  }
+  gpuErr = gpuMemcpy(d_offsets,&offsets[0],sizeof(int)*(rd+1),gpuMemcpyHostToDevice);
+  if (gpuErr != gpuSuccess) {
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during gpuMemcpy(1) Error: " << gpuErr <<std::endl;
+  }
+
+  gpuErr = gpucub::DeviceSegmentedReduce::Reduce(helperArray, temp_storage_bytes, rb_p,d_out, rd, d_offsets, d_offsets+1, ::gpucub::Sum(), identity, computeStream);
   if (gpuErr!=gpuSuccess) {
-    std::cout << "Encountered error during cub::DeviceReduce::Sum(1)! Error: " << gpuErr <<std::endl;
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during cub::DeviceReduce::Sum(1)! Error: " << gpuErr <<std::endl;
   }
   gpuErr = gpuMalloc(&helperArray,temp_storage_bytes);
   if (gpuErr!=gpuSuccess) {
-    std::cout << "Encountered error during gpuMalloc Error: " << gpuErr <<std::endl;
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during gpuMalloc Error: " << gpuErr <<std::endl;
   }
-  for (int r = 0; r < rd; r++) {
-    //prepare buffer for reduction
-    accelerator_forNB( s,e1*e2, grid->Nsimd(),{ //use non-blocking accelerator_for to avoid syncs (ok because we submit to same computeStream)
-
-      int n = s / e2;
-      int b = s % e2;
-      int so=r*ostride; // base offset for start of plane
-      int ss= so+n*stride+b;
-
-      coalescedWrite(rb_p[s], coalescedRead(Data_v[ss]));
-    });
-
-    //issue reductions in computeStream
-    gpuErr =gpucub::DeviceReduce::Sum(helperArray, temp_storage_bytes, rb_p, &d_out[r], size, computeStream);
-    if (gpuErr!=gpuSuccess) {
-      std::cout << "Encountered error during cub::DeviceReduce::Sum(2)! Error: " << gpuErr <<std::endl;
-    }
-  }
+  //prepare buffer for reduction
+  accelerator_for2dNB( s,e1*e2, r,rd, grid->Nsimd(),{ //use non-blocking accelerator_for to avoid syncs (ok because we submit to same computeStream)
+    //use 2d accelerator_for to avoid launch latencies found when looping over rd
+    int n = s / e2;
+    int b = s % e2;
+    int so=r*ostride; // base offset for start of plane
+    int ss= so+n*stride+b;
+
+    coalescedWrite(rb_p[r*e1*e2+s], coalescedRead(Data_v[ss]));
+  });
+
+  //issue reductions in computeStream
+  gpuErr =gpucub::DeviceSegmentedReduce::Reduce(helperArray, temp_storage_bytes, rb_p, d_out, rd, d_offsets, d_offsets+1,::gpucub::Sum(), identity, computeStream);
+  if (gpuErr!=gpuSuccess) {
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during cub::DeviceReduce::Sum(2)! Error: " << gpuErr <<std::endl;
+  }
 
   //sync before copy
   accelerator_barrier();
-  gpuMemcpy(&lvSum[0],d_out,rd*sizeof(vobj),gpuMemcpyDeviceToHost);
+  gpuErr = gpuMemcpy(&lvSum[0],d_out,rd*sizeof(vobj),gpuMemcpyDeviceToHost);
+  if (gpuErr!=gpuSuccess) {
+    std::cout << "Lattice_slicesum_gpu.h: Encountered error during gpuMemcpy(2) Error: " << gpuErr <<std::endl;
+  }
 
   // Sum across simd lanes in the plane, breaking out orthog dir.
   Coordinate icoor(Nd);
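The staging side applies the same launch-latency argument: accelerator_for2dNB folds the former host-side loop over r into a second grid dimension, so the rd copy kernels of the old code collapse into one non-blocking launch on computeStream. In plain CUDA the shape is roughly the following (a hand-written sketch, not what Grid's macro actually expands to; float stands in for vobj):

    __global__ void stage_planes(const float *in, float *out,
                                 int e1, int e2, int rd,
                                 int ostride, int stride) {
      int s = blockIdx.x * blockDim.x + threadIdx.x;  // site within a plane
      int r = blockIdx.y;                             // plane (segment) index
      if (s >= e1 * e2 || r >= rd) return;
      int n = s / e2, b = s % e2;
      int ss = r * ostride + n * stride + b;          // source offset, as in the hunk
      out[r * e1 * e2 + s] = in[ss];                  // contiguous per-segment layout
    }

    // One launch covers all rd planes:
    // dim3 grid((e1*e2 + 255) / 256, rd);
    // stage_planes<<<grid, 256, 0, computeStream>>>(in, out, e1, e2, rd, ostride, stride);

Because the staging kernel and the segmented reduce are enqueued on the same stream, no intermediate synchronization is needed until the final accelerator_barrier before the copy back to the host.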
@@ -34,7 +34,7 @@ class GridTracer {
 };
 inline void tracePush(const char *name) { roctxRangePushA(name); }
 inline void tracePop(const char *name) { roctxRangePop(); }
-inline int traceStart(const char *name) { roctxRangeStart(name); }
+inline int traceStart(const char *name) { return roctxRangeStart(name); }
 inline void traceStop(int ID) { roctxRangeStop(ID); }
 #endif
 
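The tracer hunk is an unrelated one-line correctness fix: traceStart is declared to return int, but the old body discarded the ID from roctxRangeStart, which is undefined behavior for a non-void function and hands traceStop a garbage handle. With the return in place the intended pairing works:

    int id = traceStart("sliceSum");  // now actually yields the roctx range ID
    // ... region being profiled ...
    traceStop(id);                    // closes the matching range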
@@ -44,8 +44,9 @@ int main (int argc, char ** argv) {
   std::cout <<" sliceSumGpu took "<<tgpu<<" usecs"<<std::endl;
 
   for(int t=0;t<reduction_reference.size();t++){
 
     auto diff = reduction_reference[t]-reduction_result[t];
+    // std::cout << "Difference = " << diff <<std::endl;
 
     assert(abs(TensorRemove(diff)) < 1e-8 );
   }