Mirror of https://github.com/paboyle/Grid.git

Merge pull request #384 from jdmaia/hip_launchbounds

Changing thread block order and adding launch_bounds
Commit 48772f0976: Peter Boyle, 2022-02-14 11:08:28 -05:00 (committed by GitHub)

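Background note on the second half of the change: __launch_bounds__(maxThreadsPerBlock, minBlocksPerMultiprocessor) is a standard CUDA/HIP kernel attribute. It promises the compiler that the kernel will never be launched with more threads per block than the first argument, so registers can be budgeted per thread more aggressively. A minimal standalone sketch of the attribute, compiled with hipcc; the kernel scaleSmall and its arguments are illustrative and not part of this patch:

#include <hip/hip_runtime.h>

// Illustrative kernel, not from Grid: bounded to at most 64 threads per block,
// i.e. one wavefront on CDNA hardware, so the compiler may keep more values in registers.
__global__ __launch_bounds__(64, 1)
void scaleSmall(float *out, const float *in, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = 2.0f * in[i];
}

int main()
{
  const int n = 256;
  float *in = nullptr, *out = nullptr;
  hipMalloc(&in,  n * sizeof(float));
  hipMalloc(&out, n * sizeof(float));
  // The launch must respect the declared bound: no more than 64 threads per block.
  hipLaunchKernelGGL(scaleSmall, dim3((n + 63) / 64), dim3(64), 0, 0, out, in, n);
  hipDeviceSynchronize();
  hipFree(in);
  hipFree(out);
  return 0;
}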

@@ -342,7 +342,7 @@ extern hipStream_t copyStream;
 /*These routines define mapping from thread grid to loop & vector lane indexing */
 accelerator_inline int acceleratorSIMTlane(int Nsimd) {
 #ifdef GRID_SIMT
-  return hipThreadIdx_z;
+  return hipThreadIdx_x;
 #else
   return 0;
 #endif
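Why the SIMT lane moves from hipThreadIdx_z to hipThreadIdx_x: further down in this file the launch macro now builds the block as dim3 hip_threads(nsimd, nt, 1), so the SIMD lane sits in the fastest-varying x dimension and the Nsimd lanes of one site occupy consecutive hardware threads of a wavefront. A small standalone sketch of that mapping under the new ordering; the kernel showLaneMapping and its buffer are illustrative, not part of Grid:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

// Illustrative kernel, not from Grid: record which SIMD lane each thread sees
// when the block is laid out as dim3(nsimd, nt, 1), as in the patched macro.
__global__ void showLaneMapping(int *lane_of_item)
{
  int lane = threadIdx.x;                             // SIMD lane: now the fastest-varying index
  int item = threadIdx.y + blockDim.y * blockIdx.x;   // outer "site" index, as in LambdaApply
  lane_of_item[item * blockDim.x + lane] = lane;
}

int main()
{
  const int nsimd = 8, nt = 8, num1 = 32;
  int *d_buf = nullptr;
  hipMalloc(&d_buf, num1 * nsimd * sizeof(int));

  dim3 hip_threads(nsimd, nt, 1);                     // new ordering: lane in x, work item in y
  dim3 hip_blocks((num1 + nt - 1) / nt, 1, 1);
  hipLaunchKernelGGL(showLaneMapping, hip_blocks, hip_threads, 0, 0, d_buf);

  std::vector<int> h_buf(num1 * nsimd);
  hipMemcpy(h_buf.data(), d_buf, h_buf.size() * sizeof(int), hipMemcpyDeviceToHost);
  // Lanes 0..nsimd-1 of one site are adjacent in threadIdx.x, hence within one wavefront.
  for (int l = 0; l < nsimd; l++) printf("site 0, lane %d -> %d\n", l, h_buf[l]);

  hipFree(d_buf);
  return 0;
}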
@@ -356,19 +356,41 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
       { __VA_ARGS__;}                                           \
     };                                                          \
     int nt=acceleratorThreads();                                \
-    dim3 hip_threads(nt,1,nsimd);                                \
+    dim3 hip_threads(nsimd, nt, 1);                              \
     dim3 hip_blocks ((num1+nt-1)/nt,num2,1);                     \
-    hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads,       \
-                       0,0,                                      \
-                       num1,num2,nsimd,lambda);                  \
+    if(hip_threads.x * hip_threads.y * hip_threads.z <= 64){     \
+      hipLaunchKernelGGL(LambdaApply64,hip_blocks,hip_threads,   \
+                         0,0,                                    \
+                         num1,num2,nsimd, lambda);               \
+    } else {                                                     \
+      hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads,     \
+                         0,0,                                    \
+                         num1,num2,nsimd, lambda);               \
+    }                                                            \
   }
 
 template<typename lambda> __global__
+__launch_bounds__(64,1)
+void LambdaApply64(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
+{
+  // Following the same scheme as CUDA for now
+  uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
+  uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
+  uint64_t z = threadIdx.x;
+  if ( (x < numx) && (y<numy) && (z<numz) ) {
+    Lambda(x,y,z);
+  }
+}
+
+template<typename lambda> __global__
+__launch_bounds__(1024,1)
 void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
 {
-  uint64_t x = hipThreadIdx_x + hipBlockDim_x*hipBlockIdx_x;
-  uint64_t y = hipThreadIdx_y + hipBlockDim_y*hipBlockIdx_y;
-  uint64_t z = hipThreadIdx_z ;//+ hipBlockDim_z*hipBlockIdx_z;
+  // Following the same scheme as CUDA for now
+  uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
+  uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
+  uint64_t z = threadIdx.x;
   if ( (x < numx) && (y<numy) && (z<numz) ) {
     Lambda(x,y,z);
   }
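Taken together, the macro now picks the kernel instantiation by total block size, so the common case of a block fitting in one 64-thread wavefront is compiled under the tighter __launch_bounds__(64,1) budget while larger blocks fall back to the 1024-thread variant. A self-contained sketch of that dispatch pattern, assuming hipcc's support for host/device lambdas (which the Grid macro also relies on); launchApply, applySmall and applyLarge are illustrative names, not Grid's accelerator_for machinery:

#include <hip/hip_runtime.h>
#include <cstdint>

// Small-block variant: at most 64 threads per block (one wavefront), tighter register budget.
template<typename lambda> __global__ __launch_bounds__(64, 1)
void applySmall(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
{
  uint64_t x = threadIdx.y + blockDim.y * blockIdx.x;
  uint64_t y = threadIdx.z + blockDim.z * blockIdx.y;
  uint64_t z = threadIdx.x;
  if ((x < numx) && (y < numy) && (z < numz)) Lambda(x, y, z);
}

// General variant: allows up to 1024 threads per block.
template<typename lambda> __global__ __launch_bounds__(1024, 1)
void applyLarge(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
{
  uint64_t x = threadIdx.y + blockDim.y * blockIdx.x;
  uint64_t y = threadIdx.z + blockDim.z * blockIdx.y;
  uint64_t z = threadIdx.x;
  if ((x < numx) && (y < numy) && (z < numz)) Lambda(x, y, z);
}

// Host-side dispatch mirroring the patched macro: choose the 64-bounded kernel
// whenever the whole block fits in one wavefront.
template<typename lambda>
void launchApply(uint64_t num1, uint64_t num2, int nsimd, int nt, lambda l)
{
  dim3 hip_threads(nsimd, nt, 1);
  dim3 hip_blocks((num1 + nt - 1) / nt, num2, 1);
  if (hip_threads.x * hip_threads.y * hip_threads.z <= 64)
    hipLaunchKernelGGL(applySmall, hip_blocks, hip_threads, 0, 0, num1, num2, (uint64_t)nsimd, l);
  else
    hipLaunchKernelGGL(applyLarge, hip_blocks, hip_threads, 0, 0, num1, num2, (uint64_t)nsimd, l);
}

int main()
{
  const uint64_t num1 = 128, num2 = 1;
  const int nsimd = 8, nt = 8;
  float *d = nullptr;
  hipMalloc(&d, num1 * nsimd * sizeof(float));

  // The lambda plays the role of the loop body captured by the accelerator_for macro.
  auto body = [=] __host__ __device__ (uint64_t x, uint64_t y, uint64_t z) {
    (void)y;                                 // only one outer dimension is used in this sketch
    d[x * nsimd + z] = float(x) + 0.1f * float(z);
  };
  launchApply(num1, num2, nsimd, nt, body);  // nsimd*nt = 64 here, so the 64-bounded kernel runs
  hipDeviceSynchronize();

  hipFree(d);
  return 0;
}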