Mirror of https://github.com/paboyle/Grid.git

Commit 0afa22747d: Merge remote-tracking branch 'upstream/develop' into feature/new-solver-algorithms
@@ -317,11 +317,23 @@ namespace Grid {
     }
     virtual RealD Mpc (const Field &in, Field &out) {
       Field tmp(in._grid);
+      Field tmp2(in._grid);
+
+      _Mat.Mooee(in,out);
+      _Mat.Mooee(out,tmp);
+
+      _Mat.Meooe(in,out);
+      _Mat.Meooe(out,tmp2);
+
+      return axpy_norm(out,-1.0,tmp2,tmp);
+#if 0
+      //... much prefer conventional Schur norm
       _Mat.Meooe(in,tmp);
       _Mat.MooeeInv(tmp,out);
       _Mat.Meooe(out,tmp);
       _Mat.Mooee(in,out);
       return axpy_norm(out,-1.0,tmp,out);
+#endif
     }
     virtual RealD MpcDag (const Field &in, Field &out){
       return Mpc(in,out);
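Reading the new Mpc body against the one now disabled under #if 0: the operator is applied as two Mooee calls and two Meooe calls, so no MooeeInv is needed. Schematically, and assuming the input vector lives on the odd checkerboard with M_oo, M_oe, M_eo the usual even/odd blocks (this labelling is an assumption of this note, not stated in the diff):

\[
\hat M_{\rm new}\,\phi_o = \bigl(M_{oo}M_{oo} - M_{oe}M_{eo}\bigr)\,\phi_o ,
\qquad
\hat M_{\rm old}\,\phi_o = \bigl(M_{oo} - M_{oe}M_{ee}^{-1}M_{eo}\bigr)\,\phi_o ,
\]

so \(\hat M_{\rm new} = M_{oo}\,\hat M_{\rm old}\) whenever the even and odd diagonal blocks are the same multiple of the identity, as for the staggered mass term; this is the "extra factor of Mee" referred to in the solver hunks below.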
@@ -168,8 +168,8 @@ void basisDeflate(const std::vector<Field> &_v,const std::vector<RealD>& eval,co
 template<class Field> class ImplicitlyRestartedLanczosTester
 {
  public:
-  virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox);
-  virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox);
+  virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
+  virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0;
 };

 enum IRLdiagonalisation {
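Making TestConvergence and ReconstructEval pure virtual turns ImplicitlyRestartedLanczosTester into an abstract interface, so any tester handed to the Lanczos driver must now override both methods. A minimal sketch of such a derived tester follows; the class name and the fixed-tolerance policy are illustrative, not part of this commit.

// Hypothetical tester: treats a return value of 1 as "converged" (assumption).
template<class Field>
class SimpleLanczosTester : public ImplicitlyRestartedLanczosTester<Field> {
 public:
  RealD tol;
  SimpleLanczosTester(RealD _tol) : tol(_tol) {}

  virtual int TestConvergence(int j, RealD resid, Field &evec, RealD &eval, RealD evalMaxApprox) {
    return (resid < tol) ? 1 : 0;      // accept the eigenpair once the residual beats tol
  }
  virtual int ReconstructEval(int j, RealD resid, Field &evec, RealD &eval, RealD evalMaxApprox) {
    return TestConvergence(j, resid, evec, eval, evalMaxApprox);  // reuse the same criterion
  }
};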
@@ -90,7 +90,7 @@ namespace Grid {
   // Take a matrix and form a Red Black solver calling a Herm solver
   // Use of RB info prevents making SchurRedBlackSolve conform to standard interface
   ///////////////////////////////////////////////////////////////////////////////////////////////////////
-
+  // Now make the norm reflect extra factor of Mee
   template<class Field> class SchurRedBlackStaggeredSolve {
    private:
     OperatorFunction<Field> & _HermitianRBSolver;
@@ -136,8 +136,8 @@ namespace Grid {
       _Matrix.Meooe   (tmp,Mtmp);     assert( Mtmp.checkerboard ==Odd);
       tmp=src_o-Mtmp;                 assert( tmp.checkerboard ==Odd);

-      src_o = tmp;                    assert(src_o.checkerboard ==Odd);
-      // _Matrix.Mooee(tmp,src_o);    // Extra factor of "m" in source
+      //src_o = tmp;                  assert(src_o.checkerboard ==Odd);
+      _Matrix.Mooee(tmp,src_o);       // Extra factor of "m" in source from dumb choice of matrix norm.

       //////////////////////////////////////////////////////////////
       // Call the red-black solver
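The uncommented _Matrix.Mooee(tmp,src_o) is the source-side counterpart of the Mpc change above: with the red-black operator carrying an extra factor of the even/odd diagonal block, the prepared odd-checkerboard source must carry it too. Schematically, under the same assumptions as the earlier note (staggered mass term, odd-site solve):

\[
\hat b_o = M_{oo}\bigl(b_o - M_{oe}M_{ee}^{-1} b_e\bigr),
\qquad
\bigl(M_{oo}M_{oo} - M_{oe}M_{eo}\bigr)\,x_o = \hat b_o ,
\]

which is the standard Schur-preconditioned system multiplied through by M_oo.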
@@ -98,7 +98,39 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)


 #if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3)
+void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
+{
+  std::vector<int> row(_ndimension,1);
+  assert(dim>=0 && dim<_ndimension);
+
+  //  Split the communicator
+  row[dim] = _processors[dim];
+
+  int me;
+  CartesianCommunicator Comm(row,*this,me);
+  Comm.AllToAll(in,out,words,bytes);
+}
+void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes)
+{
+  // MPI is a pain and uses "int" arguments
+  // 64*64*64*128*16 == 500Million elements of data.
+  // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.
+  // (Turns up on 32^3 x 64 Gparity too)
+  MPI_Datatype object;
+  int iwords;
+  int ibytes;
+  iwords = words;
+  ibytes = bytes;
+  assert(words == iwords); // safe to cast to int ?
+  assert(bytes == ibytes); // safe to cast to int ?
+  MPI_Type_contiguous(ibytes,MPI_BYTE,&object);
+  MPI_Type_commit(&object);
+  MPI_Alltoall(in,iwords,object,out,iwords,object,communicator);
+  MPI_Type_free(&object);
+}
+#endif
+
+#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)
 {
   _ndimension = processors.size();
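With the dimension-wise AllToAll now compiled for the MPI, MPIT and MPI3 builds, a call site needs only the dimension, the two buffers and the element count and size. A minimal sketch of such a call is given below; the buffer sizing, element type and processor layout are assumptions for illustration and not taken from the commit.

// Hypothetical call site for the new CartesianCommunicator::AllToAll(dim,in,out,words,bytes).
// Assumes the Grid headers are included and an initialised communicator is passed in.
void alltoall_example(CartesianCommunicator &comm)
{
  const uint64_t words = 1024;                     // element count handed to AllToAll (assumed)
  const uint64_t bytes = sizeof(ComplexD);         // size of one element
  std::vector<ComplexD> send(words), recv(words);  // schematic sizing; real layouts follow MPI_Alltoall semantics

  // Redistribute data among the ranks spanning dimension 0 of the processor grid;
  // internally this splits the communicator along that dimension and then calls
  // the flat AllToAll shown in the hunk above.
  comm.AllToAll(0, send.data(), recv.data(), words, bytes);
}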
@@ -176,6 +208,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,
   //////////////////////////////////////////////////////////////////////////////////////////////////////
   InitFromMPICommunicator(processors,comm_split);
 }

 //////////////////////////////////////////////////////////////////////////////////////////////////////
 // Take an MPI_Comm and self assemble
 //////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -199,7 +232,7 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &proc
   MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);

   if ( communicator_base != communicator_world ) {
-    std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
+    std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"<<std::endl;

     std::cout << " new communicator rank "<<_processor<< " coor ["<<_ndimension<<"] ";
     for(int d=0;d<_processors.size();d++){
@@ -211,7 +244,7 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &proc
   int Size;
   MPI_Comm_size(communicator,&Size);

-#ifdef GRID_COMMS_MPIT
+#if defined(GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3)
   communicator_halo.resize (2*_ndimension);
   for(int i=0;i<_ndimension*2;i++){
     MPI_Comm_dup(communicator,&communicator_halo[i]);
@@ -220,7 +253,9 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &proc

   assert(Size==_Nprocessors);
 }
+#endif

+#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   InitFromMPICommunicator(processors,communicator_world);
@@ -229,10 +264,10 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 #endif

 #if !defined( GRID_COMMS_MPI3)

 int CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};
 int CartesianCommunicator::RankCount(void)    { return ProcessorCount();};
 #endif

 #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT)
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
                                                      int xmit_to_rank,
@@ -158,7 +158,7 @@ class CartesianCommunicator {
   virtual ~CartesianCommunicator();

  private:
-#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
+#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3)
   ////////////////////////////////////////////////
   // Private initialise from an MPI communicator
   // Can use after an MPI_Comm_split, but hidden from user so private
|
@ -57,7 +57,7 @@ CartesianCommunicator::~CartesianCommunicator()
|
|||||||
{
|
{
|
||||||
int MPI_is_finalised;
|
int MPI_is_finalised;
|
||||||
MPI_Finalized(&MPI_is_finalised);
|
MPI_Finalized(&MPI_is_finalised);
|
||||||
if (communicator && MPI_is_finalised)
|
if (communicator && !MPI_is_finalised)
|
||||||
MPI_Comm_free(&communicator);
|
MPI_Comm_free(&communicator);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -196,36 +196,6 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
                   root,
                   communicator);
   assert(ierr==0);
-}
-void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
-{
-  std::vector<int> row(_ndimension,1);
-  assert(dim>=0 && dim<_ndimension);
-
-  //  Split the communicator
-  row[dim] = _processors[dim];
-
-  int me;
-  CartesianCommunicator Comm(row,*this,me);
-  Comm.AllToAll(in,out,words,bytes);
-}
-void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes)
-{
-  // MPI is a pain and uses "int" arguments
-  // 64*64*64*128*16 == 500Million elements of data.
-  // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug.
-  // (Turns up on 32^3 x 64 Gparity too)
-  MPI_Datatype object;
-  int iwords;
-  int ibytes;
-  iwords = words;
-  ibytes = bytes;
-  assert(words == iwords); // safe to cast to int ?
-  assert(bytes == ibytes); // safe to cast to int ?
-  MPI_Type_contiguous(ibytes,MPI_BYTE,&object);
-  MPI_Type_commit(&object);
-  MPI_Alltoall(in,iwords,object,out,iwords,object,communicator);
-  MPI_Type_free(&object);
 }
 ///////////////////////////////////////////////////////
 // Should only be used prior to Grid Init finished.
@@ -454,11 +454,15 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &c
 //////////////////////////////////
 // Try to subdivide communicator
 //////////////////////////////////
-CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
+/*
+ * Use default in MPI compile
+ */
+CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent,int &srank)
   : CartesianCommunicator(processors)
 {
   std::cout << "Attempts to split MPI3 communicators will fail until implemented" <<std::endl;
 }

 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   int ierr;
@@ -596,6 +600,17 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
     }
   }
 };
+CartesianCommunicator::~CartesianCommunicator()
+{
+  int MPI_is_finalised;
+  MPI_Finalized(&MPI_is_finalised);
+  if (communicator && !MPI_is_finalised) {
+    MPI_Comm_free(&communicator);
+    for(int i=0;i< communicator_halo.size();i++){
+      MPI_Comm_free(&communicator_halo[i]);
+    }
+  }
+}
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
|
@ -55,11 +55,16 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
|
|||||||
|
|
||||||
CartesianCommunicator::~CartesianCommunicator()
|
CartesianCommunicator::~CartesianCommunicator()
|
||||||
{
|
{
|
||||||
if (communicator && !MPI::Is_finalized())
|
int MPI_is_finalised;
|
||||||
|
MPI_Finalized(&MPI_is_finalised);
|
||||||
|
if (communicator && !MPI_is_finalised){
|
||||||
MPI_Comm_free(&communicator);
|
MPI_Comm_free(&communicator);
|
||||||
|
for(int i=0;i< communicator_halo.size();i++){
|
||||||
|
MPI_Comm_free(&communicator_halo[i]);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
||||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
||||||
assert(ierr==0);
|
assert(ierr==0);
|
||||||
@@ -241,7 +246,7 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsReque
 {
   int nreq=waitall.size();
   MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE);
-};
+}
 double CartesianCommunicator::StencilSendToRecvFrom(void *xmit,
                                                     int xmit_to_rank,
                                                     void *recv,
@@ -75,6 +75,8 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ShmInitGeneric();
 }

+CartesianCommunicator::~CartesianCommunicator(){}
+
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
   : CartesianCommunicator(processors)
 {