/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/communicator/Communicator_base.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
|
2015-05-11 19:09:49 +01:00
|
|
|
#ifndef GRID_COMMUNICATOR_BASE_H
|
|
|
|
#define GRID_COMMUNICATOR_BASE_H
|
|
|
|
|
|
|
|
///////////////////////////////////
|
|
|
|
// Processor layout information
|
|
|
|
///////////////////////////////////
|
|
|
|
#ifdef GRID_COMMS_MPI
|
|
|
|
#include <mpi.h>
|
|
|
|
#endif
|
2016-10-20 16:58:01 +01:00
|
|
|
#ifdef GRID_COMMS_MPI3
|
|
|
|
#include <mpi.h>
|
|
|
|
#endif
|
2017-06-24 23:10:24 +01:00
|
|
|
#ifdef GRID_COMMS_MPIT
|
2016-11-01 11:35:43 +00:00
|
|
|
#include <mpi.h>
|
|
|
|
#endif
|
2016-02-11 00:50:32 +00:00
|
|
|
#ifdef GRID_COMMS_SHMEM
|
|
|
|
#include <mpp/shmem.h>
|
|
|
|
#endif
|
2016-10-24 17:30:43 +01:00
|
|
|
|
2015-05-11 19:09:49 +01:00
|
|
|
namespace Grid {
|
2016-10-24 17:30:43 +01:00
|
|
|
|
2015-05-11 19:09:49 +01:00
|
|
|
class CartesianCommunicator {
|
|
|
|
public:
|
|
|
|
|
2017-08-20 01:10:50 +01:00
|
|
|
|
|
|
|
////////////////////////////////////////////
|
|
|
|
// Isend/Irecv/Wait, or Sendrecv blocking
|
|
|
|
////////////////////////////////////////////
|
|
|
|
enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
|
|
|
|
static CommunicatorPolicy_t CommunicatorPolicy;
|
|
|
|
static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }
|
|
|
|
|
|
|
|
///////////////////////////////////////////
|
|
|
|
// Up to 65536 ranks per node adequate for now
|
2016-10-24 17:30:43 +01:00
|
|
|
// 128MB shared memory for comms enought for 48^4 local vol comms
|
|
|
|
// Give external control (command line override?) of this
|
2017-08-20 01:10:50 +01:00
|
|
|
///////////////////////////////////////////
|
2017-07-29 18:06:53 +01:00
|
|
|
static const int MAXLOG2RANKSPERNODE = 16;
|
|
|
|
static uint64_t MAX_MPI_SHM_BYTES;
|
|
|
|
static int nCommThreads;
|
2017-08-20 01:10:50 +01:00
|
|
|
// use explicit huge pages
|
|
|
|
static int Hugepages;
|
2016-10-20 16:58:01 +01:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
// Communicator should know nothing of the physics grid, only processor grid.
|
|
|
|
int _Nprocessors; // How many in all
|
|
|
|
std::vector<int> _processors; // Which dimensions get relayed out over processors lanes.
|
|
|
|
int _processor; // linear processor rank
|
|
|
|
std::vector<int> _processor_coor; // linear processor coordinate
|
|
|
|
unsigned long _ndimension;
|
|
|
|
|
2017-06-24 23:10:24 +01:00
|
|
|
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT)
|
2016-10-24 17:30:43 +01:00
|
|
|
static MPI_Comm communicator_world;
|
2017-08-20 02:39:10 +01:00
|
|
|
|
|
|
|
MPI_Comm communicator;
|
|
|
|
std::vector<MPI_Comm> communicator_halo;
|
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
typedef MPI_Request CommsRequest_t;
|
2017-06-22 08:14:34 +01:00
|
|
|
|
2015-05-11 19:09:49 +01:00
|
|
|
#else
|
2016-10-24 17:30:43 +01:00
|
|
|
typedef int CommsRequest_t;
|
2015-05-11 19:09:49 +01:00
|
|
|
#endif
|
|
|
|
|
2017-06-24 23:10:24 +01:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
////////////////////////////////////////////////////////////////////
|
|
|
|
// Helper functionality for SHM Windows common to all other impls
|
|
|
|
////////////////////////////////////////////////////////////////////
|
|
|
|
// Longer term; drop this in favour of a master / slave model with
|
|
|
|
// cartesian communicator on a subset of ranks, slave ranks controlled
|
|
|
|
// by group leader with data xfer via shared memory
|
|
|
|
////////////////////////////////////////////////////////////////////
|
2016-11-01 11:35:43 +00:00
|
|
|
#ifdef GRID_COMMS_MPI3
|
|
|
|
|
|
|
|
static int ShmRank;
|
|
|
|
static int ShmSize;
|
|
|
|
static int GroupRank;
|
|
|
|
static int GroupSize;
|
|
|
|
static int WorldRank;
|
|
|
|
static int WorldSize;
|
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
std::vector<int> WorldDims;
|
|
|
|
std::vector<int> GroupDims;
|
|
|
|
std::vector<int> ShmDims;
|
|
|
|
|
|
|
|
std::vector<int> GroupCoor;
|
|
|
|
std::vector<int> ShmCoor;
|
|
|
|
std::vector<int> WorldCoor;
|
2016-11-01 11:35:43 +00:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
static std::vector<int> GroupRanks;
|
|
|
|
static std::vector<int> MyGroup;
|
|
|
|
static int ShmSetup;
|
|
|
|
static MPI_Win ShmWindow;
|
|
|
|
static MPI_Comm ShmComm;
|
|
|
|
|
|
|
|
std::vector<int> LexicographicToWorldRank;
|
|
|
|
|
|
|
|
static std::vector<void *> ShmCommBufs;
|
2016-11-01 11:35:43 +00:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
#else
|
|
|
|
static void ShmInitGeneric(void);
|
|
|
|
static commVector<uint8_t> ShmBufStorageVector;
|
|
|
|
#endif
|
2016-11-01 11:35:43 +00:00
|
|
|
|
|
|
|
/////////////////////////////////
|
|
|
|
// Grid information and queries
|
|
|
|
// Implemented in Communicator_base.C
|
|
|
|
/////////////////////////////////
|
2016-10-24 17:30:43 +01:00
|
|
|
static void * ShmCommBuf;
|
2017-02-07 06:22:53 +00:00
|
|
|
|
2017-07-29 18:06:53 +01:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
size_t heap_top;
|
|
|
|
size_t heap_bytes;
|
2016-11-01 11:35:43 +00:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
void *ShmBufferSelf(void);
|
|
|
|
void *ShmBuffer(int rank);
|
|
|
|
void *ShmBufferTranslate(int rank,void * local_p);
|
|
|
|
void *ShmBufferMalloc(size_t bytes);
|
2016-10-25 01:45:53 +01:00
|
|
|
void ShmBufferFreeAll(void) ;
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
////////////////////////////////////////////////
|
|
|
|
// Must call in Grid startup
|
|
|
|
////////////////////////////////////////////////
|
|
|
|
static void Init(int *argc, char ***argv);
|
2017-06-22 08:14:34 +01:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
////////////////////////////////////////////////
|
2017-06-22 08:14:34 +01:00
|
|
|
// Constructors to sub-divide a parent communicator
|
|
|
|
// and default to comm world
|
2016-10-24 17:30:43 +01:00
|
|
|
////////////////////////////////////////////////
|
2017-10-30 00:16:12 +00:00
|
|
|
CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank);
|
2016-10-24 17:30:43 +01:00
|
|
|
CartesianCommunicator(const std::vector<int> &pdimensions_in);
|
2017-10-25 10:24:14 +01:00
|
|
|
virtual ~CartesianCommunicator();
|
2017-06-22 08:14:34 +01:00
|
|
|
|
|
|
|
private:
|
2017-10-02 23:14:56 +01:00
|
|
|
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
|
2017-06-22 08:14:34 +01:00
|
|
|
////////////////////////////////////////////////
|
|
|
|
// Private initialise from an MPI communicator
|
|
|
|
// Can use after an MPI_Comm_split, but hidden from user so private
|
|
|
|
////////////////////////////////////////////////
|
|
|
|
void InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base);
|
|
|
|
#endif
|
|
|
|
public:
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Wraps MPI_Cart routines, or implements equivalent on other impls
|
|
|
|
////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
void ShiftedRanks(int dim,int shift,int & source, int & dest);
|
|
|
|
int RankFromProcessorCoor(std::vector<int> &coor);
|
|
|
|
void ProcessorCoorFromRank(int rank,std::vector<int> &coor);
|
|
|
|
|
2017-05-30 23:39:16 +01:00
|
|
|
int Dimensions(void) ;
|
2016-10-24 17:30:43 +01:00
|
|
|
int IsBoss(void) ;
|
|
|
|
int BossRank(void) ;
|
|
|
|
int ThisRank(void) ;
|
|
|
|
const std::vector<int> & ThisProcessorCoor(void) ;
|
|
|
|
const std::vector<int> & ProcessorGrid(void) ;
|
|
|
|
int ProcessorCount(void) ;
|
2017-02-07 06:22:53 +00:00
|
|
|
int NodeCount(void) ;
|
2017-05-30 23:39:16 +01:00
|
|
|
int RankCount(void) ;
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// very VERY rarely (Log, serial RNG) we need world without a grid
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
static int RankWorld(void) ;
|
|
|
|
static void BroadcastWorld(int root,void* data, int bytes);
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
// Reduction
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
void GlobalSum(RealF &);
|
|
|
|
void GlobalSumVector(RealF *,int N);
|
|
|
|
void GlobalSum(RealD &);
|
|
|
|
void GlobalSumVector(RealD *,int N);
|
|
|
|
void GlobalSum(uint32_t &);
|
|
|
|
void GlobalSum(uint64_t &);
|
|
|
|
void GlobalSum(ComplexF &c);
|
|
|
|
void GlobalSumVector(ComplexF *c,int N);
|
|
|
|
void GlobalSum(ComplexD &c);
|
|
|
|
void GlobalSumVector(ComplexD *c,int N);
|
2017-06-11 23:14:10 +01:00
|
|
|
void GlobalXOR(uint32_t &);
|
|
|
|
void GlobalXOR(uint64_t &);
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
template<class obj> void GlobalSum(obj &o){
|
|
|
|
typedef typename obj::scalar_type scalar_type;
|
|
|
|
int words = sizeof(obj)/sizeof(scalar_type);
|
|
|
|
scalar_type * ptr = (scalar_type *)& o;
|
|
|
|
GlobalSumVector(ptr,words);
|
|
|
|
}
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
// Face exchange, buffer swap in translational invariant way
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
void SendToRecvFrom(void *xmit,
|
|
|
|
int xmit_to_rank,
|
|
|
|
void *recv,
|
|
|
|
int recv_from_rank,
|
|
|
|
int bytes);
|
|
|
|
|
|
|
|
void SendRecvPacket(void *xmit,
|
|
|
|
void *recv,
|
|
|
|
int xmit_to_rank,
|
|
|
|
int recv_from_rank,
|
|
|
|
int bytes);
|
|
|
|
|
|
|
|
void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
|
|
|
void *xmit,
|
|
|
|
int xmit_to_rank,
|
|
|
|
void *recv,
|
|
|
|
int recv_from_rank,
|
|
|
|
int bytes);
|
|
|
|
|
|
|
|
void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
|
|
|
|
|
2017-07-29 18:06:53 +01:00
|
|
|
double StencilSendToRecvFrom(void *xmit,
|
|
|
|
int xmit_to_rank,
|
|
|
|
void *recv,
|
|
|
|
int recv_from_rank,
|
|
|
|
int bytes,int dir);
|
|
|
|
|
2017-02-20 22:47:40 +00:00
|
|
|
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
2017-06-24 23:10:24 +01:00
|
|
|
void *xmit,
|
|
|
|
int xmit_to_rank,
|
|
|
|
void *recv,
|
|
|
|
int recv_from_rank,
|
|
|
|
int bytes,int dir);
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
|
2017-06-24 23:10:24 +01:00
|
|
|
void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int i);
|
2016-10-24 17:30:43 +01:00
|
|
|
void StencilBarrier(void);
|
2015-05-11 19:09:49 +01:00
|
|
|
|
2016-10-24 17:30:43 +01:00
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
// Barrier
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
void Barrier(void);
|
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
// Broadcast a buffer and composite larger
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
void Broadcast(int root,void* data, int bytes);
|
2017-10-09 23:16:51 +01:00
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
// All2All down one dimension
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
template<class T> void AllToAll(int dim,std::vector<T> &in, std::vector<T> &out){
|
|
|
|
assert(dim>=0);
|
|
|
|
assert(dim<_ndimension);
|
|
|
|
int numnode = _processors[dim];
|
|
|
|
// std::cerr << " AllToAll in.size() "<<in.size()<<std::endl;
|
|
|
|
// std::cerr << " AllToAll out.size() "<<out.size()<<std::endl;
|
|
|
|
assert(in.size()==out.size());
|
2017-10-27 14:20:35 +01:00
|
|
|
uint64_t bytes=sizeof(T);
|
|
|
|
uint64_t words=in.size()/numnode;
|
|
|
|
|
|
|
|
assert(numnode * words == in.size());
|
|
|
|
assert(words < (1ULL<<32));
|
|
|
|
|
|
|
|
AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes);
|
2017-10-09 23:16:51 +01:00
|
|
|
}
|
2017-10-27 14:20:35 +01:00
|
|
|
void AllToAll(int dim ,void *in,void *out,uint64_t words,uint64_t bytes);
|
|
|
|
void AllToAll(void *in,void *out,uint64_t words ,uint64_t bytes);
|
2016-10-24 17:30:43 +01:00
|
|
|
|
|
|
|
template<class obj> void Broadcast(int root,obj &data)
|
2015-05-11 19:09:49 +01:00
|
|
|
{
|
|
|
|
Broadcast(root,(void *)&data,sizeof(data));
|
|
|
|
};
|
|
|
|
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|