Mirror of https://github.com/paboyle/Grid.git
Synced 2024-11-10 07:55:35 +00:00

Commit: MPIT works with split grid now

This commit is contained in:
  parent 5625b47c7d
  commit d54807b8c0
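
This commit makes the GRID_COMMS_MPIT target work with split grids: the split constructor CartesianCommunicator(processors, parent) and InitFromMPICommunicator are now shared between the MPI and MPIT targets, and the old per-target copies are deleted. A minimal usage sketch of the split constructor (the 8-rank parent geometry and the 2-way split are illustrative assumptions, not taken from the diff; assumes Grid has already been initialised):

    // Sketch: divide an 8-rank parent grid into two 4-rank child grids.
    std::vector<int> parentGeom({1,1,2,4});        // 1x1x2x4 = 8 ranks in the parent
    CartesianCommunicator parent(parentGeom);
    std::vector<int> childGeom({1,1,2,2});         // 1x1x2x2 = 4 ranks per child
    CartesianCommunicator child(childGeom,parent); // the split constructor added below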
@@ -96,6 +96,105 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
   GlobalSumVector((double *)c,2*N);
 }
 
+
+#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
+
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+{
+  _ndimension = processors.size();
+  assert(_ndimension == parent._ndimension); // child grid must have the same dimensionality as its parent
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // split the communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  int Nparent;
+  MPI_Comm_size(parent.communicator,&Nparent);
+
+  int childsize=1;
+  for(int d=0;d<processors.size();d++) {
+    childsize *= processors[d];
+  }
+  int Nchild = Nparent/childsize;
+  assert (childsize * Nchild == Nparent);
+
+  int prank; MPI_Comm_rank(parent.communicator,&prank);
+  int crank = prank % childsize;
+  int ccomm = prank / childsize;
+
+  MPI_Comm comm_split;
+  if ( Nchild > 1 ) {
+
+    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
+    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    ";
+    for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " ";
+    std::cout<<std::endl;
+
+    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    ";
+    for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
+    std::cout<<std::endl;
+
+    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
+    assert(ierr==0);
+    //////////////////////////////////////////////////////////////////////////////////////////////////////
+    // Declare victory
+    //////////////////////////////////////////////////////////////////////////////////////////////////////
+    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
+              <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
+  } else {
+    comm_split=parent.communicator;
+    //    std::cout << "Passed parental communicator to a new communicator" <<std::endl;
+  }
+
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Set up from the new split communicator
+  //////////////////////////////////////////////////////////////////////////////////////////////////////
+  InitFromMPICommunicator(processors,comm_split);
+}
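
For concreteness, the colour/key arithmetic above (ccomm and crank feeding MPI_Comm_split) maps parent ranks to child communicators block-wise. A standalone sketch, with Nparent=8 and childsize=4 assumed purely for illustration:

    #include <cassert>
    #include <cstdio>

    int main(void) {
      const int Nparent = 8, childsize = 4;  // assumed: 8 parent ranks, 4 ranks per child
      assert(Nparent % childsize == 0);
      for (int prank = 0; prank < Nparent; prank++) {
        int crank = prank % childsize;       // key: this rank's id inside its child
        int ccomm = prank / childsize;       // colour: which child communicator it joins
        printf("parent rank %d -> child %d, child rank %d\n", prank, ccomm, crank);
      }
      return 0;
    }

With these values MPI_Comm_split places parent ranks 0-3 in child 0 and ranks 4-7 in child 1, each child ordering its members by crank.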
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+// Take an MPI_Comm and self assemble
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
+{
+  //  if ( communicator_base != communicator_world ) {
+  //    std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
+  //  }
+  _ndimension = processors.size();
+  _processor_coor.resize(_ndimension);
+
+  /////////////////////////////////
+  // Count the requested nodes
+  /////////////////////////////////
+  _Nprocessors=1;
+  _processors = processors;
+  for(int i=0;i<_ndimension;i++){
+    _Nprocessors*=_processors[i];
+  }
+
+  std::vector<int> periodic(_ndimension,1);
+  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
+  MPI_Comm_rank(communicator,&_processor);
+  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
+
+  int Size;
+  MPI_Comm_size(communicator,&Size);
+
+#ifdef GRID_COMMS_MPIT
+  communicator_halo.resize (2*_ndimension);
+  for(int i=0;i<_ndimension*2;i++){
+    MPI_Comm_dup(communicator,&communicator_halo[i]);
+  }
+#endif
+
+  assert(Size==_Nprocessors);
+}
+
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
+{
+  InitFromMPICommunicator(processors,communicator_world);
+}
+
+#endif
+
 #if !defined( GRID_COMMS_MPI3)
 
 int CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};
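
One detail worth noting in the hunk above: under GRID_COMMS_MPIT, InitFromMPICommunicator also fills communicator_halo with 2*_ndimension duplicates of the Cartesian communicator, apparently one per face so that halo traffic in different directions can proceed on independent communicators. A minimal standalone sketch of the same duplication pattern (the 4-d geometry and the use of MPI_COMM_WORLD are assumptions here, not part of the commit):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      const int ndim = 4;                        // assumed 4-d grid
      std::vector<MPI_Comm> halo(2 * ndim);      // one communicator per face
      for (int i = 0; i < 2 * ndim; i++)
        MPI_Comm_dup(MPI_COMM_WORLD, &halo[i]);  // independent comm per direction/sense
      for (int i = 0; i < 2 * ndim; i++)
        MPI_Comm_free(&halo[i]);
      MPI_Finalize();
      return 0;
    }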
@@ -157,8 +157,7 @@ class CartesianCommunicator {
   CartesianCommunicator(const std::vector<int> &pdimensions_in);
 
  private:
-#if defined (GRID_COMMS_MPI)
-//|| defined (GRID_COMMS_MPI3)
+#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
 ////////////////////////////////////////////////
 // Private initialise from an MPI communicator
 // Can use after an MPI_Comm_split, but hidden from user so private
@@ -52,95 +52,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
   ShmInitGeneric();
 }
 
-CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
-{
-  InitFromMPICommunicator(processors,communicator_world);
-  //  std::cout << "Passed communicator world to a new communicator" <<communicator<<std::endl;
-}
-CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
-{
-  _ndimension = processors.size();
-  assert(_ndimension = parent._ndimension);
-
-  //////////////////////////////////////////////////////////////////////////////////////////////////////
-  // split the communicator
-  //////////////////////////////////////////////////////////////////////////////////////////////////////
-  int Nparent;
-  MPI_Comm_size(parent.communicator,&Nparent);
-
-  int childsize=1;
-  for(int d=0;d<processors.size();d++) {
-    childsize *= processors[d];
-  }
-  int Nchild = Nparent/childsize;
-  assert (childsize * Nchild == Nparent);
-
-  int prank; MPI_Comm_rank(parent.communicator,&prank);
-  int crank = prank % childsize;
-  int ccomm = prank / childsize;
-
-  MPI_Comm comm_split;
-  if ( Nchild > 1 ) {
-
-    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
-    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"]    ";
-    for(int d=0;d<parent._processors.size();d++)  std::cout << parent._processors[d] << " ";
-    std::cout<<std::endl;
-
-    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"]    ";
-    for(int d=0;d<processors.size();d++)  std::cout << processors[d] << " ";
-    std::cout<<std::endl;
-
-    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
-    assert(ierr==0);
-    //////////////////////////////////////////////////////////////////////////////////////////////////////
-    // Declare victory
-    //////////////////////////////////////////////////////////////////////////////////////////////////////
-    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
-              <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
-  } else {
-    comm_split=parent.communicator;
-    //    std::cout << "Passed parental communicator to a new communicator" <<std::endl;
-  }
-
-  //////////////////////////////////////////////////////////////////////////////////////////////////////
-  // Set up from the new split communicator
-  //////////////////////////////////////////////////////////////////////////////////////////////////////
-  InitFromMPICommunicator(processors,comm_split);
-
-}
-
-//////////////////////////////////////////////////////////////////////////////////////////////////////
-// Take an MPI_Comm and self assemble
-//////////////////////////////////////////////////////////////////////////////////////////////////////
-void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
-{
-  //  if ( communicator_base != communicator_world ) {
-  //    std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
-  //  }
-  _ndimension = processors.size();
-  _processor_coor.resize(_ndimension);
-
-  /////////////////////////////////
-  // Count the requested nodes
-  /////////////////////////////////
-  _Nprocessors=1;
-  _processors = processors;
-  for(int i=0;i<_ndimension;i++){
-    _Nprocessors*=_processors[i];
-  }
-
-  std::vector<int> periodic(_ndimension,1);
-  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
-  MPI_Comm_rank(communicator,&_processor);
-  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
-
-  int Size;
-  MPI_Comm_size(communicator,&Size);
-
-  assert(Size==_Nprocessors);
-}
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
@@ -53,33 +53,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmInitGeneric();
 }
 
-CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
-{
-  _ndimension = processors.size();
-  std::vector<int> periodic(_ndimension,1);
-
-  _Nprocessors=1;
-  _processors = processors;
-  _processor_coor.resize(_ndimension);
-
-  MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
-  MPI_Comm_rank(communicator,&_processor);
-  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
-
-  for(int i=0;i<_ndimension;i++){
-    _Nprocessors*=_processors[i];
-  }
-
-  communicator_halo.resize (2*_ndimension);
-  for(int i=0;i<_ndimension*2;i++){
-    MPI_Comm_dup(communicator,&communicator_halo[i]);
-  }
-
-  int Size;
-  MPI_Comm_size(communicator,&Size);
-
-  assert(Size==_Nprocessors);
-}
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
@@ -77,7 +77,6 @@ void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi
   }
 }
-
 
 template<class Impl> void CayleyFermion5D<Impl>::CayleyReport(void)
 {
   this->Report();
@@ -119,7 +118,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
   MooeeInvTime=0;
 }
-
 
 template<class Impl>
 void CayleyFermion5D<Impl>::M5D   (const FermionField &psi, FermionField &chi)
 {