
MPI3 working with a bounce through shared memory on my laptop.

Longer term plan: make the "u_comm_buf" in Stencil point to the shared region and avoid the send between ranks on the same node.
paboyle
2016-10-21 09:03:26 +01:00
parent 5b5925b8e5
commit a762b1fb71
8 changed files with 208 additions and 116 deletions
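The "bounce through shared memory" described above amounts to a two-hop copy through a node-wide MPI-3 shared window: the sender stages its data into its own segment, and the receiver copies it back out. The sketch below only illustrates that pattern under assumed names; the helper ShmBounce, the ShmCommBufs segment layout and the fence-based synchronisation are illustrative, not code from this commit.

#include <mpi.h>
#include <cstring>
#include <vector>

// Hypothetical helper (not from this commit): "bounce" bytes to a peer on the same
// node by staging them in this rank's slice of a node-wide MPI-3 shared window.
// ShmCommBufs[r] is assumed to hold the base pointer of on-node rank r's segment.
void ShmBounce(const void *src, void *dst, std::size_t bytes,
               int my_shm_rank, int peer_shm_rank, bool i_am_sender,
               MPI_Win ShmWindow, std::vector<void *> &ShmCommBufs)
{
  if (i_am_sender) {
    std::memcpy(ShmCommBufs[my_shm_rank], src, bytes);   // hop 1: copy into my shared segment
  }
  MPI_Win_fence(0, ShmWindow);                           // collective over the node: publish staged data
  if (!i_am_sender) {
    std::memcpy(dst, ShmCommBufs[peer_shm_rank], bytes); // hop 2: peer copies the data back out
  }
  MPI_Win_fence(0, ShmWindow);                           // sender may now reuse its segment
}

The longer-term plan quoted above removes hop 2 by having Stencil's u_comm_buf point straight at the peer's shared segment.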


@@ -45,7 +45,7 @@ class CartesianCommunicator {
public:
// Communicator should know nothing of the physics grid, only processor grid.
int _Nprocessors; // How many in all
std::vector<int> _processors; // Which dimensions get relayed out over processor lanes.
int _processor; // linear processor rank
@@ -56,10 +56,13 @@ class CartesianCommunicator {
MPI_Comm communicator;
typedef MPI_Request CommsRequest_t;
#elif GRID_COMMS_MPI3
int shm_mode;
MPI_Comm communicator;
typedef MPI_Request CommsRequest_t;
const int MAXLOG2RANKSPERNODE = 16; // 65536 ranks per node adequate for now
const uint64_t MAX_MPI_SHM_BYTES = 256*1024*1024; // 256MB shared memory for comms, enough for 48^4 local vol comms
std::vector<int> WorldDims;
std::vector<int> GroupDims;
@@ -69,14 +72,23 @@ class CartesianCommunicator {
std::vector<int> ShmCoor;
std::vector<int> WorldCoor;
int GroupRank;
int ShmRank;
int WorldRank;
static std::vector<int> GroupRanks;
static std::vector<int> MyGroup;
static int ShmSetup;
static MPI_Win ShmWindow;
static MPI_Comm ShmComm;
int GroupSize;
int ShmSize;
void * ShmCommBuf;
std::vector<void *> ShmCommBufs;
int WorldRank;
int WorldSize;
static int ShmRank;
static int ShmSize;
static int GroupSize;
static int GroupRank;
std::vector<int> LexicographicToWorldRank;
#else
typedef int CommsRequest_t;
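For background on the new static members (ShmComm, ShmWindow, ShmCommBufs, ShmRank, ShmSize), the usual MPI-3 recipe is to split the world communicator by node, allocate one shared window over the node communicator, and query every on-node rank's base pointer. The helper below is a sketch of that recipe with names chosen to mirror the diff; it is not this commit's initialisation code.

#include <mpi.h>
#include <vector>

// Illustrative setup (assumed, not taken from this commit): split the world by node,
// allocate one shared window per node, and record every on-node rank's base pointer.
void SetupNodeSharedMemory(MPI_Comm world,
                           MPI_Comm &ShmComm, MPI_Win &ShmWindow,
                           std::vector<void *> &ShmCommBufs,
                           int &ShmRank, int &ShmSize)
{
  const MPI_Aint MAX_MPI_SHM_BYTES = 256LL*1024*1024;   // matches the 256MB constant above

  // Ranks that can share memory (i.e. on the same node) land in the same ShmComm.
  MPI_Comm_split_type(world, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &ShmComm);
  MPI_Comm_rank(ShmComm, &ShmRank);
  MPI_Comm_size(ShmComm, &ShmSize);

  // One segment per rank, all backed by a single node-wide window.
  void *base = nullptr;
  MPI_Win_allocate_shared(MAX_MPI_SHM_BYTES, 1, MPI_INFO_NULL, ShmComm, &base, &ShmWindow);

  // Any rank can resolve a direct load/store pointer into any other on-node rank's segment.
  ShmCommBufs.resize(ShmSize);
  for (int r = 0; r < ShmSize; r++) {
    MPI_Aint sz;
    int disp;
    MPI_Win_shared_query(ShmWindow, r, &sz, &disp, &ShmCommBufs[r]);
  }
}

Because MPI_Win_allocate_shared returns memory that is directly load/store accessible across the node, pointing u_comm_buf at a neighbour's segment would turn the intra-node send into a plain pointer dereference, which is the longer-term plan stated in the commit message.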