Mirror of https://github.com/paboyle/Grid.git

Test of using a lane variable instead of repeated reference to threadIdx.y

commit 29a244e423
parent 18cbfecf02
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Date:   2019-06-08 13:46:26 +01:00


@@ -31,18 +31,28 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 NAMESPACE_BEGIN(Grid);
+//accelerator_inline void SIMTsynchronise(void)
+accelerator_inline void synchronise(void)
+{
+#ifdef __CUDA_ARCH__
+  __syncthreads();
+#endif
+  return;
+}
 #ifndef __CUDA_ARCH__
 //////////////////////////////////////////
 // Trivial mapping of vectors on host
 //////////////////////////////////////////
+accelerator_inline int SIMTlane(int Nsimd) { return 0; } // CUDA specific
 template<class vobj> accelerator_inline
-vobj coalescedRead(const vobj & __restrict__ vec)
+vobj coalescedRead(const vobj & __restrict__ vec,int lane=0)
 {
   return vec;
 }
 template<class vobj> accelerator_inline
-vobj coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm)
+vobj coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm,int lane=0)
 {
   if ( doperm ) {
     vobj ret;
@@ -53,37 +63,31 @@ vobj coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm)
   }
 }
 template<class vobj> accelerator_inline
-void coalescedWrite(vobj & __restrict__ vec,const vobj & __restrict__ extracted)
+void coalescedWrite(vobj & __restrict__ vec,const vobj & __restrict__ extracted,int lane=0)
 {
   vstream(vec, extracted);
 }
 #else
-accelerator_inline int SIMTlane(int Nsimd){ return threadIdx.x % Nsimd; } // CUDA specific
+accelerator_inline int SIMTlane(int Nsimd) { return threadIdx.y; } // CUDA specific
 //////////////////////////////////////////
 // Extract and insert slices on the GPU
 //////////////////////////////////////////
 template<class vobj> accelerator_inline
-typename vobj::scalar_object coalescedRead(const vobj & __restrict__ vec)
+typename vobj::scalar_object coalescedRead(const vobj & __restrict__ vec,int lane=SIMTlane(vobj::Nsimd()))
 {
-  const int Nsimd = vobj::Nsimd();
-  int lane = SIMTlane(Nsimd);
   return extractLane(lane,vec);
 }
 template<class vobj> accelerator_inline
-typename vobj::scalar_object coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm)
+typename vobj::scalar_object coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm,int lane=SIMTlane(vobj::Nsimd()))
 {
-  const int Nsimd = vobj::Nsimd();
-  int lane = SIMTlane(Nsimd);
-  int mask = Nsimd >> (ptype + 1);
+  int mask = vobj::Nsimd() >> (ptype + 1);
   int plane= doperm ? lane ^ mask : lane;
   return extractLane(plane,vec);
 }
 template<class vobj> accelerator_inline
-void coalescedWrite(vobj & __restrict__ vec,const typename vobj::scalar_object & __restrict__ extracted)
+void coalescedWrite(vobj & __restrict__ vec,const typename vobj::scalar_object & __restrict__ extracted,int lane=SIMTlane(vobj::Nsimd()))
 {
-  const int Nsimd = vobj::Nsimd();
-  int lane = SIMTlane(Nsimd);
   insertLane(lane,vec,extracted);
 }
 #endif
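
The effect of the change: under __CUDA_ARCH__ the lane index is now supplied once as a default argument (SIMTlane(vobj::Nsimd()), i.e. threadIdx.y) rather than being recomputed inside every accessor, while the host build keeps lane=0 and operates on whole vectors. Below is a minimal usage sketch, assuming the header in the diff is included; the helper name copySite and the worked numbers are illustrative only and are not part of the commit.

// Minimal sketch (not from the commit): the same source compiles for both builds.
template<class vobj> accelerator_inline
void copySite(vobj &out, const vobj &in)
{
  // Host: lane defaults to 0 and coalescedRead returns the whole vobj.
  // CUDA: lane defaults to SIMTlane(vobj::Nsimd()) == threadIdx.y, so each
  //       thread in the y-dimension reads and writes exactly one SIMD lane.
  auto tmp = coalescedRead(in);   // vobj on host, vobj::scalar_object on GPU
  coalescedWrite(out, tmp);       // inserts back into the same lane
}

// Worked example of the permuting read for Nsimd = 8, ptype = 1, doperm = 1:
//   mask  = vobj::Nsimd() >> (ptype + 1) = 8 >> 2 = 2
//   plane = lane ^ mask, so lane 0 is served the slice held by lane 2.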