Mirror of https://github.com/paboyle/Grid.git (synced 2025-08-15 18:51:53 +01:00)

Commit: merge upstream develop
@@ -114,6 +114,7 @@ public:
  void GlobalSumVector(RealD *,int N);
  void GlobalSum(uint32_t &);
  void GlobalSum(uint64_t &);
  void GlobalSumVector(uint64_t*,int N);
  void GlobalSum(ComplexF &c);
  void GlobalSumVector(ComplexF *c,int N);
  void GlobalSum(ComplexD &c);
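The new vector overload mirrors the existing scalar GlobalSum(uint64_t &). A minimal usage sketch, not part of the commit, assuming a constructed CartesianCommunicator (or a Grid object deriving from it) named comm; the variable names are illustrative only:

    uint64_t counts[4] = {0, 1, 2, 3};   // per-rank contributions
    comm.GlobalSumVector(counts, 4);     // element-wise sum over all ranks
    uint64_t local = counts[0];
    comm.GlobalSum(local);               // scalar variant, same semantics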
@@ -275,6 +275,10 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(uint64_t* u,int N){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,u,N,MPI_UINT64_T,MPI_SUM,communicator);
  assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
  assert(ierr==0);
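For reference, the in-place reduction pattern behind these methods can be exercised directly against MPI. A self-contained sketch (not part of the commit), compiled with mpicxx:

    #include <mpi.h>
    #include <cstdint>
    #include <cassert>
    #include <vector>

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Element-wise sum of a uint64_t vector across all ranks,
      // reusing the send buffer as the receive buffer (MPI_IN_PLACE).
      std::vector<uint64_t> u(8, (uint64_t)rank);
      int ierr = MPI_Allreduce(MPI_IN_PLACE, u.data(), (int)u.size(),
                               MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
      assert(ierr == MPI_SUCCESS);

      // Bitwise XOR reduction of a 32-bit word, as in GlobalXOR.
      uint32_t word = (uint32_t)rank;
      ierr = MPI_Allreduce(MPI_IN_PLACE, &word, 1,
                           MPI_UINT32_T, MPI_BXOR, MPI_COMM_WORLD);
      assert(ierr == MPI_SUCCESS);

      MPI_Finalize();
      return 0;
    }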
@@ -70,9 +70,10 @@ CartesianCommunicator::~CartesianCommunicator(){}
void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}
void CartesianCommunicator::GlobalSumVector(double *,int N){}
void CartesianCommunicator::GlobalSum(uint32_t &){}
void CartesianCommunicator::GlobalSum(uint64_t &){}
void CartesianCommunicator::GlobalSumVector(double *,int N){}
void CartesianCommunicator::GlobalSumVector(uint64_t *,int N){}
void CartesianCommunicator::GlobalXOR(uint32_t &){}
void CartesianCommunicator::GlobalXOR(uint64_t &){}
@@ -74,7 +74,9 @@ void *SharedMemory::ShmBufferMalloc(size_t bytes){
  if (heap_bytes >= heap_size) {
    std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm <MB> flag" <<std::endl;
    std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl;
    std::cout<< " Current value is " << (heap_size/(1024*1024)) <<std::endl;
    std::cout<< " Current alloc is " << (bytes/(1024*1024)) <<"MB"<<std::endl;
    std::cout<< " Current bytes is " << (heap_bytes/(1024*1024)) <<"MB"<<std::endl;
    std::cout<< " Current heap is " << (heap_size/(1024*1024)) <<"MB"<<std::endl;
    assert(heap_bytes<heap_size);
  }
  //std::cerr << "ShmBufferMalloc "<<std::hex<< ptr<<" - "<<((uint64_t)ptr+bytes)<<std::dec<<std::endl;
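The surrounding allocator is a simple bump allocator over a pre-mapped shared region; when the running total passes the mapped size, the new diagnostics report the request and the heap state in MB before asserting. A minimal sketch of the pattern, not the Grid implementation itself; heap_bytes and heap_size are names taken from the diff, heap_top is a hypothetical offset initialised when the region is mapped:

    #include <cstdint>
    #include <cstddef>
    #include <iostream>
    #include <cassert>

    // Hypothetical allocator state for a pre-mapped shared region of heap_size bytes.
    static uint64_t heap_top   = 0;   // next free address inside the mapped region
    static uint64_t heap_bytes = 0;   // bytes handed out so far
    static uint64_t heap_size  = 0;   // total size of the mapped region

    void *ShmBufferMallocSketch(std::size_t bytes) {
      void *ptr = (void *)heap_top;   // bump allocation: hand out the next slice
      heap_top   += bytes;
      heap_bytes += bytes;
      if (heap_bytes >= heap_size) {  // report in MB, as the added diagnostics do
        std::cout << " shared heap exhausted: this request " << (bytes >> 20) << "MB,"
                  << " used " << (heap_bytes >> 20) << "MB of "
                  << (heap_size >> 20) << "MB; increase with --shm <MB>" << std::endl;
        assert(heap_bytes < heap_size);
      }
      return ptr;
    }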
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/GridCore.h>
#include <pwd.h>

#ifdef GRID_NVCC
#ifdef GRID_CUDA
#include <cuda_runtime_api.h>
#endif
@@ -170,17 +170,24 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmD
  std::vector<int> primes({2,3,5});

  int dim = 0;
  int last_dim = ndimension - 1;
  int AutoShmSize = 1;
  while(AutoShmSize != WorldShmSize) {
    for(int p=0;p<primes.size();p++) {
    int p;
    for(p=0;p<primes.size();p++) {
      int prime=primes[p];
      if ( divides(prime,WorldDims[dim]/ShmDims[dim])
        && divides(prime,WorldShmSize/AutoShmSize) ) {
        AutoShmSize*=prime;
        ShmDims[dim]*=prime;
        last_dim = dim;
        break;
      }
    }
    if (p == primes.size() && last_dim == dim) {
      std::cerr << "GlobalSharedMemory::GetShmDims failed" << std::endl;
      exit(EXIT_FAILURE);
    }
    dim=(dim+1) %ndimension;
  }
}
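The loop factors the node-local (shared-memory) rank count into the world dimensions one small prime at a time; the added bookkeeping (last_dim and the post-loop check) turns a previously silent infinite loop into a clean failure when no prime can be placed anywhere. A self-contained sketch of the same idea, not the Grid code, using std::vector<int> in place of Grid's Coordinate and ShmSize for WorldShmSize:

    #include <vector>
    #include <cstdlib>
    #include <iostream>

    // Split ShmSize across the dimensions so that ShmDims[d] divides WorldDims[d].
    std::vector<int> GetShmDimsSketch(const std::vector<int> &WorldDims, int ShmSize) {
      const std::vector<int> primes({2, 3, 5});
      int ndim = (int)WorldDims.size();
      std::vector<int> ShmDims(ndim, 1);

      int dim = 0;
      int last_dim = ndim - 1;       // last dimension where a prime was placed
      int AutoShmSize = 1;
      while (AutoShmSize != ShmSize) {
        std::size_t p;
        for (p = 0; p < primes.size(); p++) {
          int prime = primes[p];
          if ((WorldDims[dim] / ShmDims[dim]) % prime == 0 &&
              (ShmSize / AutoShmSize)         % prime == 0) {
            AutoShmSize  *= prime;
            ShmDims[dim] *= prime;
            last_dim = dim;
            break;
          }
        }
        // A full sweep back to the last successful dimension with no prime placed
        // means the requested split is impossible: fail instead of looping forever.
        if (p == primes.size() && last_dim == dim) {
          std::cerr << "GetShmDimsSketch failed" << std::endl;
          exit(EXIT_FAILURE);
        }
        dim = (dim + 1) % ndim;
      }
      return ShmDims;
    }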
@@ -413,7 +420,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_NVCC
#ifdef GRID_CUDA
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
  void * ShmCommBuf ;
@@ -433,13 +440,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
  // cudaDeviceGetP2PAttribute(&perfRank, cudaDevP2PAttrPerformanceRank, device1, device2);

#ifdef GRID_IBM_SUMMIT
  // IBM Jsrun makes cuda Device numbering screwy and not match rank
  std::cout << "IBM Summit or similar - NOT setting device to WorldShmRank"<<std::endl;
#else
  std::cout << "setting device to WorldShmRank"<<std::endl;
  cudaSetDevice(WorldShmRank);
#endif
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Each MPI rank should allocate our own buffer
  ///////////////////////////////////////////////////////////////////////////////////////////////////////////
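Outside of jsrun-managed systems, each rank binds a GPU by its node-local (shared-memory) rank. A minimal CUDA sketch of that binding, not part of the commit; WorldShmRank here is a placeholder for the node-local MPI rank, and the modulo guard against the visible device count is an added assumption:

    #include <cuda_runtime_api.h>
    #include <cstdio>

    // Bind this rank to a GPU on the node. WorldShmRank is assumed to be the
    // node-local rank, obtained e.g. from an MPI shared-memory split communicator.
    void BindDeviceSketch(int WorldShmRank) {
      int ndevices = 0;
      cudaGetDeviceCount(&ndevices);
      if (ndevices > 0) {
        cudaError_t err = cudaSetDevice(WorldShmRank % ndevices);
        if (err != cudaSuccess) {
          std::fprintf(stderr, "cudaSetDevice failed: %s\n", cudaGetErrorString(err));
        }
      }
    }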
@@ -677,7 +677,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
/////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
{
#ifdef GRID_NVCC
#ifdef GRID_CUDA
  cudaMemset(dest,0,bytes);
#else
  bzero(dest,bytes);
@@ -685,7 +685,7 @@ void GlobalSharedMemory::SharedMemoryZero(void *dest,size_t bytes)
}
void GlobalSharedMemory::SharedMemoryCopy(void *dest,const void *src,size_t bytes)
{
#ifdef GRID_NVCC
#ifdef GRID_CUDA
  cudaMemcpy(dest,src,bytes,cudaMemcpyDefault);
#else
  bcopy(src,dest,bytes);
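Both helpers dispatch on the build: buffers that may live in device or unified memory must be touched through the CUDA runtime, while plain host memory can use libc routines. A minimal sketch of the same dispatch, written here against std::memset/std::memcpy rather than the bzero/bcopy used in the file:

    #include <cstring>
    #include <cstddef>
    #ifdef GRID_CUDA
    #include <cuda_runtime_api.h>
    #endif

    void SharedMemoryZeroSketch(void *dest, std::size_t bytes) {
    #ifdef GRID_CUDA
      cudaMemset(dest, 0, bytes);                        // dest may be device/UVM memory
    #else
      std::memset(dest, 0, bytes);                       // plain host memory
    #endif
    }

    void SharedMemoryCopySketch(void *dest, const void *src, std::size_t bytes) {
    #ifdef GRID_CUDA
      cudaMemcpy(dest, src, bytes, cudaMemcpyDefault);   // direction inferred from pointers
    #else
      std::memcpy(dest, src, bytes);
    #endif
    }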