mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Splitting communicators first cut

paboyle
2017-06-22 08:14:34 +01:00
parent 1d7aa673a4
commit 6ebf9f15b7
34 changed files with 215 additions and 65 deletions

View File

@@ -60,7 +60,7 @@ void CartesianCommunicator::ShmBufferFreeAll(void) {
/////////////////////////////////
// Grid information queries
/////////////////////////////////
int CartesianCommunicator::Dimensions(void) { return _ndimension; };
int CartesianCommunicator::IsBoss(void) { return _processor==0; };
int CartesianCommunicator::BossRank(void) { return 0; };
int CartesianCommunicator::ThisRank(void) { return _processor; };

View File

@@ -68,6 +68,7 @@ class CartesianCommunicator {
static MPI_Comm communicator_world;
MPI_Comm communicator;
typedef MPI_Request CommsRequest_t;
#else
typedef int CommsRequest_t;
#endif
@@ -135,11 +136,24 @@ class CartesianCommunicator {
// Must call in Grid startup
////////////////////////////////////////////////
static void Init(int *argc, char ***argv);
////////////////////////////////////////////////
// Constructor of any given grid
// Constructors to sub-divide a parent communicator
// and default to comm world
////////////////////////////////////////////////
CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent);
CartesianCommunicator(const std::vector<int> &pdimensions_in);
private:
#if defined (GRID_COMMS_MPI)
//|| defined (GRID_COMMS_MPI3)
////////////////////////////////////////////////
// Private initialise from an MPI communicator
// Can use after an MPI_Comm_split, but hidden from user so private
////////////////////////////////////////////////
void InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base);
#endif
public:
////////////////////////////////////////////////////////////////////////////////////////
// Wraps MPI_Cart routines, or implements equivalent on other impls
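
For orientation, a minimal usage sketch of the two constructors declared above. The geometry, the variable names, and the assumption of a 16-rank job are illustrative only, not part of this commit:

#include <Grid/Grid.h>

int main(int argc, char **argv)
{
  Grid::Grid_init(&argc, &argv);   // assumes the job was launched with 4*4 = 16 MPI ranks

  // Build a 4x4 processor grid over the world communicator,
  // then carve it into 2x2 sub-grids using the new overload.
  std::vector<int> world_dims = {4, 4};
  std::vector<int> sub_dims   = {2, 2};

  Grid::CartesianCommunicator world(world_dims);      // default: spans comm world
  Grid::CartesianCommunicator sub(sub_dims, world);   // sub-divides the parent

  Grid::Grid_finalize();
  return 0;
}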

View File

@@ -53,24 +53,80 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
ShmInitGeneric();
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
InitFromMPICommunicator(processors,communicator_world);
std::cout << "Constructed a Cartesian communicator from the world communicator" <<std::endl;
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
{
_ndimension = processors.size();
std::vector<int> periodic(_ndimension,1);
assert(_ndimension == parent._ndimension); // the sub-grid must have the same dimensionality as the parent
//////////////////////////////////////////////////////////////////////////////////////////////////////
// split the communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
std::vector<int> ratio(_ndimension);
std::vector<int> rcoor(_ndimension);
std::vector<int> scoor(_ndimension);
int Nsubcomm=1;
int Nsubrank=1;
for(int d=0;d<_ndimension;d++) {
ratio[d] = parent._processors[d] / processors[d];
rcoor[d] = parent._processor_coor[d] / processors[d];
scoor[d] = parent._processor_coor[d] % processors[d];
assert(ratio[d] * processors[d] == parent._processors[d]); // must exactly subdivide
Nsubcomm *= ratio[d];
Nsubrank *= processors[d];
}
int rlex, slex;
Lexicographic::IndexFromCoor(rcoor,rlex,ratio);
Lexicographic::IndexFromCoor(scoor,slex,processors);
MPI_Comm comm_split;
int ierr= MPI_Comm_split(parent.communicator, rlex, slex,&comm_split); // split the parent communicator, not the world, so nested splits remain valid
assert(ierr==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Set up from the new split communicator
//////////////////////////////////////////////////////////////////////////////////////////////////////
InitFromMPICommunicator(processors,comm_split);
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Declare victory
//////////////////////////////////////////////////////////////////////////////////////////////////////
std::cout << "Divided communicator "<< parent._Nprocessors<<" into "
<<Nsubcomm <<" communicators with " << Nsubrank << " ranks"<<std::endl;
}
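
To make the colour/key arithmetic above concrete, here is a small standalone sketch. The 4x4 parent grid, the 2x2 sub-grid, and the rank at parent coordinate (3,1) are hypothetical; it reimplements the first-index-fastest lexicographic ordering rather than calling Grid's Lexicographic helper:

#include <cassert>
#include <iostream>
#include <vector>

// First-index-fastest lexicographic index, same convention as Lexicographic::IndexFromCoor.
static int lex(const std::vector<int> &coor, const std::vector<int> &dims) {
  int index = 0, stride = 1;
  for (size_t d = 0; d < dims.size(); d++) { index += stride * coor[d]; stride *= dims[d]; }
  return index;
}

int main() {
  // Illustrative: 4x4 parent grid, requested 2x2 sub-grids, rank sitting at parent coordinate (3,1).
  std::vector<int> parent = {4,4}, sub = {2,2}, pcoor = {3,1};
  std::vector<int> ratio(2), rcoor(2), scoor(2);
  for (int d = 0; d < 2; d++) {
    ratio[d] = parent[d] / sub[d];   // {2,2}: number of blocks per dimension
    rcoor[d] = pcoor[d]  / sub[d];   // {1,0}: which block this rank belongs to
    scoor[d] = pcoor[d]  % sub[d];   // {1,1}: coordinate inside that block
  }
  int colour = lex(rcoor, ratio);    // 1: the MPI_Comm_split colour
  int key    = lex(scoor, sub);      // 3: the MPI_Comm_split key (rank order inside the block)
  std::cout << "colour " << colour << " key " << key << std::endl;
  assert(colour == 1 && key == 3);
  return 0;
}

All ranks sharing a colour land in the same sub-communicator; the key fixes their rank order within it.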
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Take an MPI_Comm and self assemble
//////////////////////////////////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
{
if ( communicator_base != communicator_world ) {
std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
}
_ndimension = processors.size();
_processor_coor.resize(_ndimension);
/////////////////////////////////
// Count the requested nodes
/////////////////////////////////
_Nprocessors=1;
_processors = processors;
for(int i=0;i<_ndimension;i++){
_Nprocessors*=_processors[i];
}
/////////////////////////////////
// Lay a Cartesian topology over the supplied base communicator
/////////////////////////////////
std::vector<int> periodic(_ndimension,1);
MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
MPI_Comm_rank(communicator,&_processor);
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
int Size;
MPI_Comm_size(communicator,&Size);
assert(Size==_Nprocessors);
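
The same split-then-Cartesian pattern in bare MPI, independent of Grid. This is a sketch assuming a 16-rank job carved into four blocks of four ranks; the names and geometry are illustrative:

#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int world_rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  // Illustrative: assume 16 ranks, carved into 4 blocks of 4.
  int colour = world_rank / 4;   // which sub-communicator this rank joins
  int key    = world_rank % 4;   // rank order inside it

  MPI_Comm split;
  MPI_Comm_split(MPI_COMM_WORLD, colour, key, &split);

  // Lay a 2x2 Cartesian topology over the split communicator,
  // as InitFromMPICommunicator does with its communicator_base argument.
  int dims[2] = {2, 2}, periodic[2] = {1, 1}, coor[2];
  MPI_Comm cart;
  MPI_Cart_create(split, 2, dims, periodic, 1, &cart);

  int cart_rank;
  MPI_Comm_rank(cart, &cart_rank);
  MPI_Cart_coords(cart, cart_rank, 2, coor);
  printf("world %d -> colour %d cart rank %d coor (%d,%d)\n",
         world_rank, colour, cart_rank, coor[0], coor[1]);

  MPI_Comm_free(&cart);
  MPI_Comm_free(&split);
  MPI_Finalize();
  return 0;
}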

View File

@@ -371,6 +371,15 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &c
assert(lr!=-1);
Lexicographic::CoorFromIndex(coor,lr,_processors);
}
//////////////////////////////////
// Try to subdivide communicator
//////////////////////////////////
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
: CartesianCommunicator(processors)
{
std::cout << "Attempts to split MPI3 communicators will fail until implemented" <<std::endl;
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
int ierr;

View File

@@ -38,6 +38,9 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
ShmInitGeneric();
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
: CartesianCommunicator(processors) {}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
_processors = processors;

View File

@@ -75,6 +75,11 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
ShmInitGeneric();
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
: CartesianCommunicator(processors)
{
std::cout << "Attempts to split SHMEM communicators will fail " <<std::endl;
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
_ndimension = processors.size();