mirror of https://github.com/paboyle/Grid.git, synced 2025-06-18 07:47:06 +01:00
fixed conflicts after merging paboyle develop
@@ -1,14 +1,23 @@
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
int world_rank; // Use to control world rank for print guarding
int acceleratorAbortOnGpuError=1;
uint32_t accelerator_threads=2;
uint32_t acceleratorThreads(void) {return accelerator_threads;};
void acceleratorThreads(uint32_t t) {accelerator_threads = t;};
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_OMPI "OMPI_COMM_WORLD_RANK"
#define ENV_LOCAL_RANK_SLURM "SLURM_LOCALID"
#define ENV_RANK_SLURM "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH "MV2_COMM_WORLD_RANK"
#ifdef GRID_CUDA
cudaDeviceProp *gpu_props;
cudaStream_t copyStream;
cudaStream_t computeStream;
void acceleratorInit(void)
{
int nDevices = 1;
@@ -16,13 +25,8 @@ void acceleratorInit(void)
gpu_props = new cudaDeviceProp[nDevices];
char * localRankStr = NULL;
int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_OMPI "OMPI_COMM_WORLD_RANK"
#define ENV_LOCAL_RANK_SLURM "SLURM_LOCALID"
#define ENV_RANK_SLURM "SLURM_PROCID"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_MVAPICH "MV2_COMM_WORLD_RANK"
int rank = 0;
world_rank=0;
if ((localRankStr = getenv(ENV_RANK_OMPI )) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_SLURM )) != NULL) { world_rank = atoi(localRankStr);}
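The hunk above drops the per-function copies of the launcher environment variables in favour of the file-scope #defines and a single global world_rank. A minimal standalone sketch of the probing idea (illustration only, not repository code; the variable names follow the #defines above):

#include <cstdio>
#include <cstdlib>

int main(void)
{
  int world_rank = 0;
  char *localRankStr = NULL;
  // Each launcher exports a different variable; try them in turn and keep whichever is set.
  if ((localRankStr = getenv("OMPI_COMM_WORLD_RANK")) != NULL) world_rank = atoi(localRankStr);
  if ((localRankStr = getenv("MV2_COMM_WORLD_RANK"))  != NULL) world_rank = atoi(localRankStr);
  if ((localRankStr = getenv("SLURM_PROCID"))         != NULL) world_rank = atoi(localRankStr);
  printf("world_rank %d\n", world_rank);
  return 0;
}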
@@ -97,6 +101,7 @@ void acceleratorInit(void)
cudaSetDevice(device);
cudaStreamCreate(&copyStream);
cudaStreamCreate(&computeStream);
const int len=64;
char busid[len];
if( rank == world_rank ) {
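This hunk adds a second stream alongside copyStream so that kernel launches and memory copies can be queued independently. A hedged sketch of the same setup in isolation (initStreams is a hypothetical helper, not a Grid API):

#include <cuda_runtime.h>

cudaStream_t copyStream;     // intended for asynchronous memcpy traffic
cudaStream_t computeStream;  // intended for kernel launches

void initStreams(int device)
{
  cudaSetDevice(device);
  cudaStreamCreate(&copyStream);
  cudaStreamCreate(&computeStream);
}

Work queued on the two streams may overlap; each can be drained separately with cudaStreamSynchronize.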
@@ -111,6 +116,7 @@ void acceleratorInit(void)
#ifdef GRID_HIP
hipDeviceProp_t *gpu_props;
hipStream_t copyStream;
hipStream_t computeStream;
void acceleratorInit(void)
{
int nDevices = 1;
@@ -118,11 +124,8 @@ void acceleratorInit(void)
gpu_props = new hipDeviceProp_t[nDevices];
char * localRankStr = NULL;
int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_OMPI "OMPI_COMM_WORLD_RANK"
#define ENV_RANK_MVAPICH "MV2_COMM_WORLD_RANK"
int rank = 0;
world_rank=0;
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL)
{
@@ -134,8 +137,10 @@ void acceleratorInit(void)
}
if ((localRankStr = getenv(ENV_RANK_OMPI )) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_MVAPICH)) != NULL) { world_rank = atoi(localRankStr);}
if ((localRankStr = getenv(ENV_RANK_SLURM )) != NULL) { world_rank = atoi(localRankStr);}
printf("world_rank %d has %d devices\n",world_rank,nDevices);
if ( world_rank == 0 )
printf("world_rank %d has %d devices\n",world_rank,nDevices);
size_t totalDeviceMem=0;
for (int i = 0; i < nDevices; i++) {
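The device report is now printed only when world_rank is 0, using the global added at the top of the file. For orientation, a standalone sketch of the kind of enumeration the loop at the end of this hunk continues with (illustration only, not repository code; the fields used are the standard HIP device properties):

#include <hip/hip_runtime.h>
#include <cstdio>

int main(void)
{
  int nDevices = 0;
  hipGetDeviceCount(&nDevices);
  size_t totalDeviceMem = 0;
  for (int i = 0; i < nDevices; i++) {
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, i);
    printf("device %d: %s, %zu bytes\n", i, prop.name, prop.totalGlobalMem);
    totalDeviceMem += prop.totalGlobalMem;
  }
  printf("total device memory %zu bytes\n", totalDeviceMem);
  return 0;
}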
@@ -181,6 +186,7 @@ void acceleratorInit(void)
#endif
hipSetDevice(device);
hipStreamCreate(&copyStream);
hipStreamCreate(&computeStream);
const int len=64;
char busid[len];
if( rank == world_rank ) {
@@ -210,11 +216,9 @@ void acceleratorInit(void)
#endif
char * localRankStr = NULL;
int rank = 0, world_rank=0;
#define ENV_LOCAL_RANK_OMPI "OMPI_COMM_WORLD_LOCAL_RANK"
#define ENV_LOCAL_RANK_MVAPICH "MV2_COMM_WORLD_LOCAL_RANK"
#define ENV_RANK_OMPI "OMPI_COMM_WORLD_RANK"
#define ENV_RANK_MVAPICH "MV2_COMM_WORLD_RANK"
int rank = 0;
world_rank=0;
// We extract the local rank initialization using an environment variable
if ((localRankStr = getenv(ENV_LOCAL_RANK_OMPI)) != NULL)
{
@@ -110,6 +110,7 @@ void acceleratorInit(void);
extern int acceleratorAbortOnGpuError;
extern cudaStream_t copyStream;
extern cudaStream_t computeStream;
accelerator_inline int acceleratorSIMTlane(int Nsimd) {
#ifdef GRID_SIMT
@@ -137,7 +138,7 @@ inline void cuda_mem(void)
}; \
dim3 cu_threads(nsimd,acceleratorThreads(),1); \
dim3 cu_blocks ((num1+nt-1)/nt,num2,1); \
LambdaApply<<<cu_blocks,cu_threads>>>(num1,num2,nsimd,lambda); \
LambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda); \
}
#define accelerator_for6dNB(iter1, num1, \
@@ -156,7 +157,7 @@ inline void cuda_mem(void)
}; \
dim3 cu_blocks (num1,num2,num3); \
dim3 cu_threads(num4,num5,num6); \
Lambda6Apply<<<cu_blocks,cu_threads>>>(num1,num2,num3,num4,num5,num6,lambda); \
Lambda6Apply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,num3,num4,num5,num6,lambda); \
}
template<typename lambda> __global__
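Both launch macros now pass computeStream explicitly instead of using the default stream. A hedged, self-contained sketch of the same four-argument launch syntax (scale and launch are hypothetical names, not Grid code):

#include <cuda_runtime.h>

__global__ void scale(double *x, double a, size_t n)
{
  size_t i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch(double *x, double a, size_t n, cudaStream_t computeStream)
{
  dim3 cu_threads(128,1,1);
  dim3 cu_blocks((n+127)/128,1,1);
  // <<<grid, block, dynamic shared memory bytes, stream>>>
  scale<<<cu_blocks,cu_threads,0,computeStream>>>(x,a,n);
}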
@@ -192,7 +193,7 @@ void Lambda6Apply(uint64_t num1, uint64_t num2, uint64_t num3,
#define accelerator_barrier(dummy) \
{ \
cudaDeviceSynchronize(); \
cudaStreamSynchronize(computeStream); \
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err ) { \
printf("accelerator_barrier(): Cuda error %s \n", \
@@ -250,17 +251,23 @@ inline int acceleratorIsCommunicable(void *ptr)
//////////////////////////////////////////////
// SyCL acceleration
//////////////////////////////////////////////
#ifdef GRID_SYCL
NAMESPACE_END(Grid);
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#ifdef GRID_SYCL
#define GRID_SYCL_LEVEL_ZERO_IPC
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
NAMESPACE_END(Grid);
#if 0
#include <CL/sycl.hpp>
#include <CL/sycl/usm.hpp>
#include <level_zero/ze_api.h>
#include <CL/sycl/backend/level_zero.hpp>
#else
#include <sycl/CL/sycl.hpp>
#include <sycl/usm.hpp>
#include <level_zero/ze_api.h>
#include <sycl/ext/oneapi/backend/level_zero.hpp>
#endif
NAMESPACE_BEGIN(Grid);
extern cl::sycl::queue *theGridAccelerator;
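The hunk above enables Level Zero IPC and switches to the newer sycl/ header layout, keeping the old CL/sycl paths behind #if 0. For orientation, a hedged sketch of how work is typically submitted to a queue like the one theGridAccelerator points to (assumes a SYCL 2020 / oneAPI DPC++ compiler; axpy is a hypothetical example, not a Grid routine):

#include <sycl/sycl.hpp>

void axpy(sycl::queue &q, double *y, const double *x, double a, size_t n)
{
  // one work-item per element; y and x are assumed to be USM device allocations
  q.parallel_for(sycl::range<1>(n), [=](sycl::id<1> i) { y[i] += a * x[i]; });
  q.wait();   // blocking, roughly what accelerator_barrier() amounts to on this target
}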
@@ -342,6 +349,7 @@ NAMESPACE_BEGIN(Grid);
#define accelerator_inline __host__ __device__ inline
extern hipStream_t copyStream;
extern hipStream_t computeStream;
/*These routines define mapping from thread grid to loop & vector lane indexing */
accelerator_inline int acceleratorSIMTlane(int Nsimd) {
#ifdef GRID_SIMT
@@ -363,16 +371,15 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
dim3 hip_blocks ((num1+nt-1)/nt,num2,1); \
if(hip_threads.x * hip_threads.y * hip_threads.z <= 64){ \
hipLaunchKernelGGL(LambdaApply64,hip_blocks,hip_threads, \
0,0, \
num1,num2,nsimd, lambda); \
0,computeStream, \
num1,num2,nsimd, lambda); \
} else { \
hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads, \
0,0, \
num1,num2,nsimd, lambda); \
0,computeStream, \
num1,num2,nsimd, lambda); \
} \
}
template<typename lambda> __global__
__launch_bounds__(64,1)
void LambdaApply64(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
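The HIP launches are likewise moved off the default stream: hipLaunchKernelGGL takes the dynamic shared-memory size and the stream as its fourth and fifth arguments. A hedged standalone sketch of that call shape (scale and launch are hypothetical names, not Grid code):

#include <hip/hip_runtime.h>

__global__ void scale(double *x, double a, size_t n)
{
  size_t i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void launch(double *x, double a, size_t n, hipStream_t computeStream)
{
  dim3 hip_threads(64,1,1);
  dim3 hip_blocks((n+63)/64,1,1);
  // kernel, grid, block, dynamic shared memory bytes, stream, kernel arguments...
  hipLaunchKernelGGL(scale, hip_blocks, hip_threads, 0, computeStream, x, a, n);
}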
@@ -401,7 +408,7 @@ void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
#define accelerator_barrier(dummy) \
{ \
hipDeviceSynchronize(); \
hipStreamSynchronize(computeStream); \
auto err = hipGetLastError(); \
if ( err != hipSuccess ) { \
printf("After hipDeviceSynchronize() : HIP error %s \n", hipGetErrorString( err )); \
@@ -444,7 +451,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
{
hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToDevice,copyStream);
hipMemcpyDtoDAsync(to,from,bytes, copyStream);
}
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
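Because the copy is queued on copyStream while kernels run on computeStream, the two can overlap and only the copy stream needs draining afterwards. A hedged sketch of that pattern (overlap is a hypothetical helper; the hipDeviceptr_t casts assume HIP's usual definition of that type as a raw pointer):

#include <hip/hip_runtime.h>

void overlap(void *to, void *from, size_t bytes, hipStream_t copyStream)
{
  // asynchronous device-to-device copy, queued on the copy stream
  hipMemcpyDtoDAsync((hipDeviceptr_t)to, (hipDeviceptr_t)from, bytes, copyStream);
  // ... kernels previously queued on computeStream keep running here ...
  hipStreamSynchronize(copyStream);   // what acceleratorCopySynchronise() does above
}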
@@ -453,8 +460,9 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
//////////////////////////////////////////////
// Common on all GPU targets
//////////////////////////////////////////////
#if defined(GRID_SYCL) || defined(GRID_CUDA) || defined(GRID_HIP)
#define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );
#if defined(GRID_SYCL) || defined(GRID_CUDA) || defined(GRID_HIP)
// FIXME -- the non-blocking nature got broken March 30 2023 by PAB
#define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );
#define accelerator_for( iter, num, nsimd, ... ) \
accelerator_forNB(iter, num, nsimd, { __VA_ARGS__ } ); \
@@ -464,6 +472,8 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
accelerator_for2dNB(iter1, num1, iter2, num2, nsimd, { __VA_ARGS__ } ); \
accelerator_barrier(dummy);
#define GRID_ACCELERATED
#endif
//////////////////////////////////////////////
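Taken together, accelerator_forNB queues the loop body on computeStream and returns, while accelerator_for is the blocking form that appends accelerator_barrier(), which after this change waits only on computeStream rather than the whole device. A hedged usage fragment (assumes the macros above and a device-resident array a of length N with vector width nsimd; not taken from the repository):

accelerator_forNB(i, N, nsimd, { a[i] = 2.0*a[i]; });  // enqueue, return immediately
accelerator_barrier(dummy);                            // wait for computeStream to drain

accelerator_for(i, N, nsimd, { a[i] = 2.0*a[i]; });    // equivalent blocking form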
@@ -649,7 +659,7 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
//////////////////////////////////////////////
#ifdef GRID_SYCL
inline void acceleratorFenceComputeStream(void){ accelerator_barrier();};
inline void acceleratorFenceComputeStream(void){ theGridAccelerator->ext_oneapi_submit_barrier(); };
#else
// Ordering within a stream guaranteed on Nvidia & AMD
inline void acceleratorFenceComputeStream(void){ };
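On SYCL the fence becomes an enqueued barrier rather than a full accelerator_barrier(): the oneAPI extension orders later submissions after earlier ones on the queue without blocking the host. A hedged sketch of the distinction (fence is a hypothetical free function; assumes a oneAPI DPC++ compiler where ext_oneapi_submit_barrier is available):

#include <sycl/sycl.hpp>

void fence(sycl::queue &q)
{
  q.ext_oneapi_submit_barrier();  // device-side ordering only; host continues
  // q.wait();                    // would additionally block the host, like the old fence
}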