Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-19 08:17:05 +01:00)
Imported coalescedReadGeneralPermute GPU implementation from Christoph
Fixed a bug in the padded staple code where extract was being called on the result before the GPU view was closed (see the ordering sketch below). Fixed a compile issue with a pointer cast in the padded staple code. Added timing summaries of the padded staple code and a timing breakdown of the staple implementation to Test_padded_cell_staple.
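The padded-staple fix is an ordering constraint rather than a one-line change, so here is a minimal, Grid-independent sketch of the pattern. The names DeviceView, openView and hostExtract are hypothetical stand-ins invented for illustration, not Grid's API; the point is only that the device view has to be closed, flushing results back to host-visible storage, before any host-side extract of the result is attempted.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for illustration only; not Grid's API.
struct DeviceView {
  std::vector<double>* host;     // host-side field the view was opened on
  std::vector<double>  staging;  // pretend device-resident copy
  bool open;
  void close() {                 // write device results back, then invalidate
    *host = staging;
    open = false;
  }
};

DeviceView openView(std::vector<double>& field) {
  return DeviceView{&field, field, true};
}

double hostExtract(const DeviceView& v, std::size_t i) {
  // The bug described above: extracting while the view is still open reads
  // stale host data. Closing the view first makes the result visible.
  assert(!v.open && "close the GPU view before extracting the result");
  return (*v.host)[i];
}

int main() {
  std::vector<double> staple(4, 0.0);
  DeviceView view = openView(staple);
  for (double& x : view.staging) x = 1.0;      // "kernel" writes through the view
  view.close();                                // 1) close the view ...
  return hostExtract(view, 0) == 1.0 ? 0 : 1;  // 2) ... then extract the result
}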
@@ -93,7 +93,7 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__
 {
   vstream(vec, extracted);
 }
-#else
+#else //==GRID_SIMT
 
 
 //#ifndef GRID_SYCL
@@ -176,6 +176,14 @@ typename vobj::scalar_object coalescedReadPermute(const vobj & __restrict__ vec,
   return extractLane(plane,vec);
 }
+template<class vobj> accelerator_inline
+typename vobj::scalar_object coalescedReadGeneralPermute(const vobj & __restrict__ vec,int perm_mask,int nd,int lane=acceleratorSIMTlane(vobj::Nsimd()))
+{
+  int plane = lane;
+  for (int d=0;d<nd;d++)
+    plane = (perm_mask & (0x1 << d)) ? plane ^ (vobj::Nsimd() >> (d + 1)) : plane;
+  return extractLane(plane,vec);
+}
 template<class vobj> accelerator_inline
 void coalescedWrite(vobj & __restrict__ vec,const typename vobj::scalar_object & __restrict__ extracted,int lane=acceleratorSIMTlane(vobj::Nsimd()))
 {
   insertLane(lane,vec,extracted);
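To make the lane arithmetic in the imported coalescedReadGeneralPermute concrete, here is a standalone sketch of the same remapping outside Grid. The SIMD width, number of directions and mask below are illustrative values chosen for the example, not taken from the commit: bit d of perm_mask XORs the lane index with Nsimd >> (d+1), so each set bit pairs a lane with its permutation partner in that direction, letting several single-direction permutes combine into one call.

#include <cstdio>

// Same lane remapping as coalescedReadGeneralPermute above, written as a
// free function so it can be run on the host; Nsimd, nd and perm_mask are
// illustrative values, not taken from any particular Grid build.
int generalPermuteLane(int lane, int perm_mask, int nd, int Nsimd) {
  int plane = lane;
  for (int d = 0; d < nd; d++)
    plane = (perm_mask & (0x1 << d)) ? plane ^ (Nsimd >> (d + 1)) : plane;
  return plane;
}

int main() {
  const int Nsimd = 8;      // assume an 8-lane SIMD object
  const int nd    = 3;      // three candidate permute directions
  const int mask  = 0b101;  // permute in directions 0 and 2 only
  for (int lane = 0; lane < Nsimd; lane++)
    std::printf("lane %d reads from plane %d\n",
                lane, generalPermuteLane(lane, mask, nd, Nsimd));
  return 0;
}

With Nsimd = 8 and mask = 0b101 every lane index is XORed with 4 and then 1, i.e. with 5, so lane 0 reads from plane 5, lane 1 from plane 4, lane 2 from plane 7, and so on.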