mirror of https://github.com/paboyle/Grid.git synced 2024-09-20 09:15:38 +01:00

Merge branch 'feature/scalar_adjointFT' of https://github.com/paboyle/Grid into feature/scalar_adjointFT

Guido Cossu 2017-10-04 09:44:27 +01:00
commit 27caff92c6
8 changed files with 114 additions and 147 deletions


@@ -96,6 +96,105 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
  GlobalSumVector((double *)c,2*N);
}
#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
{
  _ndimension = processors.size();
  assert(_ndimension == parent._ndimension);
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // split the communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  int Nparent;
  MPI_Comm_size(parent.communicator,&Nparent);
  int childsize=1;
  for(int d=0;d<processors.size();d++) {
    childsize *= processors[d];
  }
  int Nchild = Nparent/childsize;
  assert (childsize * Nchild == Nparent);
  int prank; MPI_Comm_rank(parent.communicator,&prank);
  int crank = prank % childsize;
  int ccomm = prank / childsize;
  MPI_Comm comm_split;
  if ( Nchild > 1 ) {
    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
    for(int d=0;d<parent._processors.size();d++) std::cout << parent._processors[d] << " ";
    std::cout<<std::endl;
    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
    for(int d=0;d<processors.size();d++) std::cout << processors[d] << " ";
    std::cout<<std::endl;
    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
    assert(ierr==0);
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // Declare victory
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
              <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
  } else {
    comm_split=parent.communicator;
    // std::cout << "Passed parental communicator to a new communicator" <<std::endl;
  }
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // Set up from the new split communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  InitFromMPICommunicator(processors,comm_split);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Take an MPI_Comm and self assemble
//////////////////////////////////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
{
  // if ( communicator_base != communicator_world ) {
  //   std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
  // }
  _ndimension = processors.size();
  _processor_coor.resize(_ndimension);
  /////////////////////////////////
  // Count the requested nodes
  /////////////////////////////////
  _Nprocessors=1;
  _processors = processors;
  for(int i=0;i<_ndimension;i++){
    _Nprocessors*=_processors[i];
  }
  std::vector<int> periodic(_ndimension,1);
  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
  MPI_Comm_rank(communicator,&_processor);
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
  int Size;
  MPI_Comm_size(communicator,&Size);
#ifdef GRID_COMMS_MPIT
  communicator_halo.resize (2*_ndimension);
  for(int i=0;i<_ndimension*2;i++){
    MPI_Comm_dup(communicator,&communicator_halo[i]);
  }
#endif
  assert(Size==_Nprocessors);
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
  InitFromMPICommunicator(processors,communicator_world);
}
#endif
#if !defined( GRID_COMMS_MPI3)
int CartesianCommunicator::NodeCount(void) { return ProcessorCount();};
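For readers unfamiliar with the rank arithmetic above: each parent rank joins child communicator number prank/childsize with rank prank%childsize inside it, and MPI_Comm_split then carves the parent accordingly. The following stand-alone sketch is not Grid code; the 4-rank world and the child size of 2 are assumptions chosen purely for illustration.

// Minimal sketch of the parent -> child split used above (illustration only).
// Assumption: launched with e.g. "mpirun -n 4 ./split_demo"; child size chosen as 2.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int Nparent, prank;
  MPI_Comm_size(MPI_COMM_WORLD, &Nparent);
  MPI_Comm_rank(MPI_COMM_WORLD, &prank);

  const int childsize = 2;            // product of the child processor grid
  const int Nchild    = Nparent / childsize;
  const int crank     = prank % childsize;   // rank within the child communicator
  const int ccomm     = prank / childsize;   // which child communicator we join

  MPI_Comm comm_split;
  MPI_Comm_split(MPI_COMM_WORLD, ccomm, crank, &comm_split);

  int newrank;
  MPI_Comm_rank(comm_split, &newrank);
  std::printf("world rank %d -> child %d of %d, child rank %d\n",
              prank, ccomm, Nchild, newrank);

  MPI_Comm_free(&comm_split);
  MPI_Finalize();
  return 0;
}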


@@ -155,13 +155,10 @@ class CartesianCommunicator {
  ////////////////////////////////////////////////
  CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent);
  CartesianCommunicator(const std::vector<int> &pdimensions_in);
  virtual ~CartesianCommunicator();
private:
-#if defined (GRID_COMMS_MPI)
+#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT)
//|| defined (GRID_COMMS_MPI3)
  ////////////////////////////////////////////////
  // Private initialise from an MPI communicator
  // Can use after an MPI_Comm_split, but hidden from user so private
@@ -169,7 +166,7 @@ class CartesianCommunicator {
  void InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base);
#endif
public:
>>>>>>> develop
  ////////////////////////////////////////////////////////////////////////////////////////
  // Wraps MPI_Cart routines, or implements equivalent on other impls
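The split constructor declared above can be driven directly. A hypothetical usage sketch follows; the processor-grid dimensions and the direct construction of CartesianCommunicator (rather than going through a Grid object) are choices made only for illustration, and it assumes a build with GRID_COMMS_MPI or GRID_COMMS_MPIT and 16 MPI ranks.

#include <Grid/Grid.h>
using namespace Grid;

int main(int argc, char **argv) {
  Grid_init(&argc, &argv);

  // Illustrative only: 16 MPI ranks arranged as a 2x2x2x2 parent grid.
  std::vector<int> parent_procs({2, 2, 2, 2});
  CartesianCommunicator parent(parent_procs);

  // Carve the parent into two 2x2x2x1 children via the new constructor.
  std::vector<int> child_procs({2, 2, 2, 1});
  CartesianCommunicator child(child_procs, parent);

  Grid_finalize();
  return 0;
}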


@@ -52,102 +52,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
  MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
  ShmInitGeneric();
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
  InitFromMPICommunicator(processors,communicator_world);
  // std::cout << "Passed communicator world to a new communicator" <<communicator<<std::endl;
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors,const CartesianCommunicator &parent)
{
  _ndimension = processors.size();
  assert(_ndimension == parent._ndimension);
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // split the communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  int Nparent;
  MPI_Comm_size(parent.communicator,&Nparent);
  int childsize=1;
  for(int d=0;d<processors.size();d++) {
    childsize *= processors[d];
  }
  int Nchild = Nparent/childsize;
  assert (childsize * Nchild == Nparent);
  int prank; MPI_Comm_rank(parent.communicator,&prank);
  int crank = prank % childsize;
  int ccomm = prank / childsize;
  MPI_Comm comm_split;
  if ( Nchild > 1 ) {
    std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec<<std::endl;
    std::cout << GridLogMessage<<" parent grid["<< parent._ndimension<<"] ";
    for(int d=0;d<parent._processors.size();d++) std::cout << parent._processors[d] << " ";
    std::cout<<std::endl;
    std::cout << GridLogMessage<<" child grid["<< _ndimension <<"] ";
    for(int d=0;d<processors.size();d++) std::cout << processors[d] << " ";
    std::cout<<std::endl;
    int ierr= MPI_Comm_split(parent.communicator, ccomm,crank,&comm_split);
    assert(ierr==0);
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    // Declare victory
    //////////////////////////////////////////////////////////////////////////////////////////////////////
    std::cout << GridLogMessage<<"Divided communicator "<< parent._Nprocessors<<" into "
              <<Nchild <<" communicators with " << childsize << " ranks"<<std::endl;
  } else {
    comm_split=parent.communicator;
    // std::cout << "Passed parental communicator to a new communicator" <<std::endl;
  }
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // Set up from the new split communicator
  //////////////////////////////////////////////////////////////////////////////////////////////////////
  InitFromMPICommunicator(processors,comm_split);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Take an MPI_Comm and self assemble
//////////////////////////////////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::InitFromMPICommunicator(const std::vector<int> &processors, MPI_Comm communicator_base)
{
  // if ( communicator_base != communicator_world ) {
  //   std::cout << "Cartesian communicator created with a non-world communicator"<<std::endl;
  // }
  _ndimension = processors.size();
  _processor_coor.resize(_ndimension);
  /////////////////////////////////
  // Count the requested nodes
  /////////////////////////////////
  _Nprocessors=1;
  _processors = processors;
  for(int i=0;i<_ndimension;i++){
    _Nprocessors*=_processors[i];
  }
  std::vector<int> periodic(_ndimension,1);
  MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator);
  MPI_Comm_rank(communicator,&_processor);
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
  int Size;
  MPI_Comm_size(communicator,&Size);
  assert(Size==_Nprocessors);
}
CartesianCommunicator::~CartesianCommunicator(){
  if (communicator && !MPI::Is_finalized())
    MPI_Comm_free(&communicator);
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
  assert(ierr==0);


@@ -53,36 +53,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
  ShmInitGeneric();
}
CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
  _ndimension = processors.size();
  std::vector<int> periodic(_ndimension,1);
  _Nprocessors=1;
  _processors = processors;
  _processor_coor.resize(_ndimension);
  MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
  MPI_Comm_rank(communicator,&_processor);
  MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);
  for(int i=0;i<_ndimension;i++){
    _Nprocessors*=_processors[i];
  }
  communicator_halo.resize (2*_ndimension);
  for(int i=0;i<_ndimension*2;i++){
    MPI_Comm_dup(communicator,&communicator_halo[i]);
  }
  int Size;
  MPI_Comm_size(communicator,&Size);
  assert(Size==_Nprocessors);
}
CartesianCommunicator::~CartesianCommunicator() = default;
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
  assert(ierr==0);


@@ -56,8 +56,6 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
  }
}
CartesianCommunicator::~CartesianCommunicator() = default;
void CartesianCommunicator::GlobalSum(float &){}
void CartesianCommunicator::GlobalSumVector(float *,int N){}
void CartesianCommunicator::GlobalSum(double &){}


@@ -77,7 +77,6 @@ void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
  }
}
template<class Impl> void CayleyFermion5D<Impl>::CayleyReport(void) template<class Impl> void CayleyFermion5D<Impl>::CayleyReport(void)
{
  this->Report();
@@ -119,7 +118,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
  MooeeInvTime=0;
}
template<class Impl>
void CayleyFermion5D<Impl>::M5D (const FermionField &psi, FermionField &chi)
{


@@ -89,12 +89,11 @@ class ScalarImplTypes {
};
-#define USE_FFT_ACCELERATION
-#ifdef USE_FFT_ACCELERATION
-#define FFT_MASS 0.707
-#endif
+#ifdef USE_FFT_ACCELERATION
+#ifndef FFT_MASS
+#error "USE_FFT_ACCELERATION is defined but not FFT_MASS"
+#endif
+#endif
template <class S, unsigned int N>
class ScalarAdjMatrixImplTypes {
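With the hard-coded #define gone, both macros now have to be supplied by the build. The following stand-alone sketch (not Grid code) assumes they are passed as compiler flags, e.g. -DUSE_FFT_ACCELERATION -DFFT_MASS=0.707, where 0.707 is only the old in-header default and not a requirement:

// Stand-alone illustration of the guard above.
// Assumption: macros come from the command line, e.g.
//   g++ -DUSE_FFT_ACCELERATION -DFFT_MASS=0.707 demo.cc
#include <iostream>

#ifdef USE_FFT_ACCELERATION
#ifndef FFT_MASS
#error "USE_FFT_ACCELERATION is defined but not FFT_MASS"
#endif
#endif

int main() {
#ifdef USE_FFT_ACCELERATION
  std::cout << "FFT acceleration enabled, FFT_MASS = " << FFT_MASS << "\n";
#else
  std::cout << "FFT acceleration disabled\n";
#endif
  return 0;
}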


@@ -46,6 +46,7 @@ public:
private:
  RealD mass_square;
  RealD lambda;
  RealD g;
  const unsigned int N = Impl::Group::Dimension;
  typedef typename Field::vector_object vobj;
@@ -57,7 +58,7 @@ private:
  std::vector<int> displacements; // = {1,1,1,1, -1,-1,-1,-1};
public:
-  ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2 * Ndim, 0), directions(2 * Ndim, 0)
+  ScalarInteractionAction(RealD ms, RealD l, RealD gval) : mass_square(ms), lambda(l), g(gval), displacements(2 * Ndim, 0), directions(2 * Ndim, 0)
  {
    for (int mu = 0; mu < Ndim; mu++)
    {
@@ -73,6 +74,7 @@ public:
    std::stringstream sstream;
    sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl;
    sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl;
    sstream << GridLogMessage << "[ScalarAction] g : " << g << std::endl;
    return sstream.str();
  }
@@ -86,8 +88,8 @@ public:
    static Stencil phiStencil(p._grid, npoint, 0, directions, displacements);
    phiStencil.HaloExchange(p, compressor);
    Field action(p._grid), pshift(p._grid), phisquared(p._grid);
    phisquared = p * p;
-    action = (2.*Ndim + mass_square) * phisquared - phisquared * phisquared;
+    action = (2.0 * Ndim + mass_square) * phisquared - lambda * phisquared * phisquared;
    for (int mu = 0; mu < Ndim; mu++)
    {
      // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils
@@ -122,13 +124,13 @@ public:
    }
    // NB the trace in the algebra is normalised to 1/2
    // minus sign coming from the antihermitian fields
-    return -(TensorRemove(sum(trace(action)))).real()*N/lambda;
+    return -(TensorRemove(sum(trace(action)))).real()*N/g;
  };
  virtual void deriv(const Field &p, Field &force)
  {
    assert(p._grid->Nd() == Ndim);
-    force = (2.0 * Ndim + mass_square) * p - 2. * p * p * p;
+    force = (2. * Ndim + mass_square) * p - 2. * lambda * p * p * p;
    // move this outside
    static Stencil phiStencil(p._grid, npoint, 0, directions, displacements);
    phiStencil.HaloExchange(p, compressor);
@@ -163,7 +165,7 @@ public:
        }
      }
    }
-    force *= N/lambda;
+    force *= N/g;
  }
};
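Taken together, the changes in this file separate the quartic coupling lambda from the overall normalisation g: lambda now multiplies the phi^4 term in both the action and the force, while N/g only rescales the result. Schematically, and only as a sketch (the neighbour/hopping contributions live in lines outside the hunks shown above, and the sign conventions follow the comments about the antihermitian fields and the 1/2-normalised trace), the expressions being evaluated are:

S[\phi] \;=\; -\frac{N}{g}\,\mathrm{Re}\sum_x \mathrm{tr}\Big[(2N_d + m^2)\,\phi_x^2 \;-\; \lambda\,\phi_x^4 \;-\; (\text{neighbour terms})\Big],
\qquad
\frac{\partial S}{\partial \phi_x} \;\approx\; \frac{N}{g}\Big[(2N_d + m^2)\,\phi_x \;-\; 2\lambda\,\phi_x^3 \;-\; (\text{neighbour terms})\Big],

with N_d = Ndim, m^2 = mass_square, and N the group dimension Impl::Group::Dimension.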