mirror of https://github.com/paboyle/Grid.git synced 2025-04-09 21:50:45 +01:00

Removed SliceShare as a reusable routine

Michael Marshall 2019-11-01 20:10:51 +00:00
parent ada0a7a83b
commit 52d8d576d0
3 changed files with 107 additions and 153 deletions

View File

@@ -38,136 +38,13 @@
#include <Hadrons/A2AVectors.hpp>
#include <Hadrons/DilutedNoise.hpp>
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MDistil)
/******************************************************************************
 This potentially belongs in CartesianCommunicator
 ******************************************************************************/
BEGIN_MODULE_NAMESPACE(Grid)
inline void SliceShare( GridBase * gridLowDim, GridBase * gridHighDim, void * Buffer, int BufferSize )
{
// Work out which dimension is the spread-out dimension
assert(gridLowDim);
assert(gridHighDim);
const int iNumDims{(const int)gridHighDim->_gdimensions.size()};
assert(iNumDims == gridLowDim->_gdimensions.size());
int dimSpreadOut = -1;
Coordinate coor(iNumDims);
for( int i = 0 ; i < iNumDims ; i++ ) {
coor[i] = gridHighDim->_processor_coor[i];
if( gridLowDim->_gdimensions[i] != gridHighDim->_gdimensions[i] ) {
assert( dimSpreadOut == -1 );
assert( gridLowDim->_processors[i] == 1 ); // easiest assumption to make for now
dimSpreadOut = i;
}
}
if( dimSpreadOut != -1 && gridHighDim->_processors[dimSpreadOut] != gridLowDim->_processors[dimSpreadOut] ) {
// Make sure the same number of data elements exist on each slice
const int NumSlices{gridHighDim->_processors[dimSpreadOut] / gridLowDim->_processors[dimSpreadOut]};
assert(gridHighDim->_processors[dimSpreadOut] == gridLowDim->_processors[dimSpreadOut] * NumSlices);
const int SliceSize{BufferSize/NumSlices};
//CCC_DEBUG_DUMP(Buffer, NumSlices, SliceSize);
assert(BufferSize == SliceSize * NumSlices);
//#ifndef USE_LOCAL_SLICES
// assert(0); // Can't do this without MPI (should really test whether MPI is defined)
//#else
const auto MyRank = gridHighDim->ThisRank();
std::vector<CommsRequest_t> reqs(0);
int MySlice{coor[dimSpreadOut]};
char * const _buffer{(char *)Buffer};
char * const MyData{_buffer + MySlice * SliceSize};
for(int i = 1; i < NumSlices ; i++ ){
int SendSlice = ( MySlice + i ) % NumSlices;
int RecvSlice = ( MySlice - i + NumSlices ) % NumSlices;
char * const RecvData{_buffer + RecvSlice * SliceSize};
coor[dimSpreadOut] = SendSlice;
const auto SendRank = gridHighDim->RankFromProcessorCoor(coor);
coor[dimSpreadOut] = RecvSlice;
const auto RecvRank = gridHighDim->RankFromProcessorCoor(coor);
std::cout << GridLogMessage << "Send slice " << MySlice << " (" << MyRank << ") to " << SendSlice << " (" << SendRank
<< "), receive slice from " << RecvSlice << " (" << RecvRank << ")" << std::endl;
gridHighDim->SendToRecvFromBegin(reqs,MyData,SendRank,RecvData,RecvRank,SliceSize);
//memcpy(RecvData,MyData,SliceSize); // Debug
}
gridHighDim->SendToRecvFromComplete(reqs);
std::cout << GridLogMessage << "Slice data shared." << std::endl;
//CCC_DEBUG_DUMP(Buffer, NumSlices, SliceSize);
//#endif
}
}
/*************************************************************************************
Not sure where the right home for this is? But presumably in Grid
-Grad^2 (Peardon, 2009, pg 2, equation 3, https://arxiv.org/abs/0905.2160)
Field Type of field the operator will be applied to
GaugeField Gauge field the operator will smear using
*************************************************************************************/
template<typename Field, typename GaugeField=LatticeGaugeField>
class Laplacian3D : public LinearOperatorBase<Field>, public LinearFunction<Field> {
typedef typename GaugeField::vector_type vCoeff_t;
protected: // I don't really mind if _gf is messed with ... so make this public?
//GaugeField & _gf;
int nd; // number of spatial dimensions
std::vector<Lattice<iColourMatrix<vCoeff_t> > > U;
public:
// Construct this operator given a gauge field and the number of dimensions it should act on
Laplacian3D( GaugeField& gf, int dimSpatial = Tdir ) : /*_gf(gf),*/ nd{dimSpatial} {
assert(dimSpatial>=1);
for( int mu = 0 ; mu < nd ; mu++ )
U.push_back(PeekIndex<LorentzIndex>(gf,mu));
}
// Apply this operator to "in", return result in "out"
void operator()(const Field& in, Field& out) {
assert( nd <= in.Grid()->Nd() );
conformable( in, out );
out = ( ( Real ) ( 2 * nd ) ) * in;
Field _tmp(in.Grid());
typedef typename GaugeField::vector_type vCoeff_t;
//Lattice<iColourMatrix<vCoeff_t> > U(in.Grid());
for( int mu = 0 ; mu < nd ; mu++ ) {
//U = PeekIndex<LorentzIndex>(_gf,mu);
out -= U[mu] * Cshift( in, mu, 1);
_tmp = adj( U[mu] ) * in;
out -= Cshift(_tmp,mu,-1);
}
}
void OpDiag (const Field &in, Field &out) { assert(0); };
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); };
void Op (const Field &in, Field &out) { assert(0); };
void AdjOp (const Field &in, Field &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2) { assert(0); };
void HermOp(const Field &in, Field &out) { operator()(in,out); };
};
template<typename Field>
class Laplacian3DHerm : public LinearFunction<Field> {
public:
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;
Laplacian3DHerm(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop)
: _poly{poly}, _Linop{linop} {}
void operator()(const Field& in, Field& out) {
_poly(_Linop,in,out);
}
};
END_MODULE_NAMESPACE // Grid
/******************************************************************************
 Common elements for distillation
 ******************************************************************************/
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MDistil)
using LapEvecs = Grid::Hadrons::EigenPack<LatticeColourVector>;
// Noise vector index order: nnoise, nt, nvec, ns
@@ -182,7 +59,7 @@ struct DistilParameters: Serializable {
std::string, SI )
DistilParameters() = default;
template <class ReaderClass> DistilParameters(Reader<ReaderClass>& Reader){read(Reader,"Distil",*this);}
// Numeric parameter is allowed to be empty (in which case it = Default),
// but assert during setup() if specified but not numeric
@@ -213,7 +90,7 @@ const int Nt_inv{ full_tdil ? 1 : TI }
/******************************************************************************
 Default for distillation file operations. For now only used by NamedTensor
 ******************************************************************************/
#ifdef HAVE_HDF5
using Default_Reader = Grid::Hdf5Reader;
@@ -235,7 +112,7 @@ static const char * FileExtension = ".dat";
Configuration number
Noise unique string
Distillation parameters
 ******************************************************************************/
template<typename Scalar_, int NumIndices_>
@@ -271,7 +148,7 @@ public:
assert(sizeof...(otherDimensions) + 1 == NumIndices_ && "NamedTensor: dimensions != tensor rank");
return tensor.operator()(std::array<Eigen::Index, NumIndices_>{{firstDimension, otherDimensions...}});
}
// Construct a named tensor explicitly specifying size of each dimension
template<typename... IndexTypes>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NamedTensor(const std::array<std::string,NumIndices_> &IndexNames_, Eigen::Index firstDimension, IndexTypes... otherDimensions)
@@ -281,8 +158,8 @@ public:
assert(sizeof...(otherDimensions) + 1 == NumIndices_ && "NamedTensor: dimensions != tensor rank");
for( int i = 0; i < NumIndices_; i++ )
IndexNames[i] = IndexNames_[i];
}
// Default constructor (assumes tensor will be loaded from file)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE NamedTensor() : IndexNames{NumIndices_} {}
@@ -292,15 +169,10 @@ public:
{
for( int i = 0; i < NumIndices_; i++ )
IndexNames[i] = IndexNames_[i];
}
// Share data for timeslices we calculated with other nodes
inline void SliceShare( GridCartesian * gridLowDim, GridCartesian * gridHighDim ) {
Grid::SliceShare( gridLowDim, gridHighDim, tensor.data(), (int) (tensor.size() * sizeof(Scalar_)));
}
bool ValidateIndexNames( std::size_t NumNames, const std::string * MatchNames ) const;
// Read/Write in any format
template<typename Reader> inline void read (Reader &r, const char * pszTag = nullptr);
template<typename Writer> inline void write(Writer &w, const char * pszTag = nullptr) const;
@@ -330,7 +202,7 @@ template<typename Writer>
void NamedTensor<Scalar_, NumIndices_>::write(Writer &w, const char * pszTag)const{
if( pszTag == nullptr )
pszTag = "NamedTensor";
LOG(Message) << "Writing NamedTensor to tag " << pszTag << std::endl;
write(w, pszTag, *this);
}
@@ -353,8 +225,8 @@ bool NamedTensor<Scalar_, NumIndices_>::ValidateIndexNames( std::size_t NumNames
for( std::size_t i = 0; bSame && i < NumIndices_; i++ )
{
bSame = MatchNames[i].size() == IndexNames[i].size()
&& std::equal( MatchNames[i].begin(), MatchNames[i].end(), IndexNames[i].begin(),
[](const char & c1, const char & c2){ return c1 == c2 || std::toupper(c1) == std::toupper(c2); });
}
return bSame;
}
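The constructor and write() signatures kept above are enough to show typical NamedTensor usage. A minimal, hypothetical sketch (the wrapper function, index names, sizes and file name are illustrative, not part of this commit), assuming HDF5 support is configured:

#include <Hadrons/Distil.hpp>

// Hypothetical usage sketch only: names, sizes and the file name below are illustrative.
void writeExampleNamedTensor(void)
{
    using namespace Grid;
    using namespace Grid::Hadrons::MDistil;
    // Index names are illustrative; ValidateIndexNames() compares the stored
    // names case-insensitively when the tensor is read back.
    const std::array<std::string, 3> IndexNames{{"nNoise", "nT", "nVec"}};
    NamedTensor<Complex, 3> t(IndexNames, 2, 8, 4); // nNoise=2, nT=8, nVec=4
#ifdef HAVE_HDF5
    Hdf5Writer w("ExampleTensor.h5");
    t.write(w, "ExampleTensor"); // tag defaults to "NamedTensor" when omitted
#endif
}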

View File

@@ -186,6 +186,67 @@ void TLapEvec<GImpl>::Cleanup(void)
gridHD = nullptr;
}
/*************************************************************************************
-Grad^2 (Peardon, 2009, pg 2, equation 3, https://arxiv.org/abs/0905.2160)
Field Type of field the operator will be applied to
GaugeField Gauge field the operator will smear using
*************************************************************************************/
template<typename Field, typename GaugeField=LatticeGaugeField>
class Laplacian3D : public LinearOperatorBase<Field>, public LinearFunction<Field> {
typedef typename GaugeField::vector_type vCoeff_t;
protected: // I don't really mind if _gf is messed with ... so make this public?
//GaugeField & _gf;
int nd; // number of spatial dimensions
std::vector<Lattice<iColourMatrix<vCoeff_t> > > U;
public:
// Construct this operator given a gauge field and the number of dimensions it should act on
Laplacian3D( GaugeField& gf, int dimSpatial = Tdir ) : /*_gf(gf),*/ nd{dimSpatial} {
assert(dimSpatial>=1);
for( int mu = 0 ; mu < nd ; mu++ )
U.push_back(PeekIndex<LorentzIndex>(gf,mu));
}
// Apply this operator to "in", return result in "out"
void operator()(const Field& in, Field& out) {
assert( nd <= in.Grid()->Nd() );
conformable( in, out );
out = ( ( Real ) ( 2 * nd ) ) * in;
Field _tmp(in.Grid());
typedef typename GaugeField::vector_type vCoeff_t;
//Lattice<iColourMatrix<vCoeff_t> > U(in.Grid());
for( int mu = 0 ; mu < nd ; mu++ ) {
//U = PeekIndex<LorentzIndex>(_gf,mu);
out -= U[mu] * Cshift( in, mu, 1);
_tmp = adj( U[mu] ) * in;
out -= Cshift(_tmp,mu,-1);
}
}
void OpDiag (const Field &in, Field &out) { assert(0); };
void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); };
void Op (const Field &in, Field &out) { assert(0); };
void AdjOp (const Field &in, Field &out) { assert(0); };
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2) { assert(0); };
void HermOp(const Field &in, Field &out) { operator()(in,out); };
};
template<typename Field>
class Laplacian3DHerm : public LinearFunction<Field> {
public:
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;
Laplacian3DHerm(OperatorFunction<Field> & poly,LinearOperatorBase<Field>& linop)
: _poly{poly}, _Linop{linop} {}
void operator()(const Field& in, Field& out) {
_poly(_Linop,in,out);
}
};
/******************************************************************************
 Calculate low-mode eigenvalues of the Laplacian
 ******************************************************************************/
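For reference, the stencil that Laplacian3D::operator() applies above is the negative gauge-covariant Laplacian over the nd smearing directions (Peardon et al. 2009, pg 2, eq. 3, arXiv:0905.2160); for a colour field \(\psi\) and gauge links \(U_\mu\):

\[
  (-\nabla^2 \psi)(x) \;=\; 2\,n_d\,\psi(x)
  \;-\; \sum_{\mu=0}^{n_d-1}\Big[\, U_\mu(x)\,\psi(x+\hat\mu)
  \;+\; U_\mu^\dagger(x-\hat\mu)\,\psi(x-\hat\mu)\Big]
\]

This is exactly the 2*nd*in term followed by the two Cshift subtractions in the loop over mu.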

View File

@@ -33,12 +33,11 @@
#include <Hadrons/Distil.hpp>
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MDistil)
/******************************************************************************
 *                             Perambulator                                   *
 ******************************************************************************/
BEGIN_MODULE_NAMESPACE(MDistil)
class PerambulatorPar: Serializable
{
@@ -254,20 +253,43 @@ void TPerambulator<FImpl>::execute(void)
}
}
}
LOG(Message) << "perambulator done" << std::endl;
perambulator.SliceShare( grid3d, grid4d );
if(grid4d->IsBoss())
{
std::string sPerambName{par().PerambFileName};
if( sPerambName.length() == 0 )
// Now share my timeslice data with other members of the grid
const int NumSlices{grid4d->_processors[Tdir] / grid3d->_processors[Tdir]};
if (NumSlices > 1)
{
LOG(Debug) << "Sharing perambulator data with other nodes" << std::endl;
const int MySlice {grid4d->_processor_coor[Tdir]};
const int SliceCount {static_cast<int>(perambulator.tensor.size()/NumSlices)};
PerambTensor::Scalar * const MyData {perambulator.tensor.data()+MySlice*SliceCount};
Coordinate coor(Nd);
for (int i = 0 ; i < Tdir ; i++) coor[i] = grid4d->_processor_coor[i];
std::vector<CommsRequest_t> reqs(0);
for (int i = 1; i < NumSlices ; i++)
{
coor[Tdir] = (MySlice+i)%NumSlices;
const int SendRank { grid4d->RankFromProcessorCoor(coor) };
const int RecvSlice { ( MySlice - i + NumSlices ) % NumSlices };
coor[Tdir] = RecvSlice;
const auto RecvRank = grid4d->RankFromProcessorCoor(coor);
grid4d->SendToRecvFromBegin(reqs,MyData,SendRank, perambulator.tensor.data()
+ RecvSlice*SliceCount,RecvRank,SliceCount*sizeof(PerambTensor::Scalar));
}
grid4d->SendToRecvFromComplete(reqs);
}
// Save the perambulator to disk from the boss node
if (grid4d->IsBoss())
{
std::string sPerambName {par().PerambFileName};
if (sPerambName.empty())
sPerambName = getName();
sPerambName.append(".");
sPerambName.append(std::to_string(vm().getTrajectory()));
perambulator.write(sPerambName.c_str());
}
//Save the unsmeared sinks if filename specified
if (!UnsmearedSinkFileName.empty())
{
bool bMulti = ( Hadrons::MDistil::DistilParameters::ParameterDefault( par().UnsmearedSinkMultiFile, 1, false ) != 0 );
LOG(Message) << "Writing unsmeared sink to " << UnsmearedSinkFileName << std::endl;
@@ -276,7 +298,6 @@ void TPerambulator<FImpl>::execute(void)
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MDistil_Perambulator_hpp_
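The sharing loop inlined into TPerambulator::execute() keeps the ring-exchange pattern of the removed SliceShare routine: in round i each rank sends its own timeslice block to the rank holding slice (MySlice+i) mod NumSlices and receives slice (MySlice-i+NumSlices) mod NumSlices, so after NumSlices-1 rounds every rank holds the full tensor. A minimal, Grid-free sketch of that index arithmetic (plain arrays stand in for ranks and the MPI exchange; all names are illustrative):

#include <cassert>
#include <iostream>
#include <vector>

// Simulate NumSlices ranks, each starting with only its own slice of data.
// After NumSlices-1 rounds of the ring exchange every rank holds every slice.
int main()
{
    const int NumSlices = 4;
    // buffer[rank][slice]; -1 marks "not received yet"
    std::vector<std::vector<int>> buffer(NumSlices, std::vector<int>(NumSlices, -1));
    for (int rank = 0; rank < NumSlices; ++rank)
        buffer[rank][rank] = 100 + rank;                 // this rank's own slice

    for (int i = 1; i < NumSlices; ++i)
        for (int MySlice = 0; MySlice < NumSlices; ++MySlice)
        {
            const int SendSlice = (MySlice + i) % NumSlices;             // rank I send my slice to
            const int RecvSlice = (MySlice - i + NumSlices) % NumSlices; // rank/slice I receive from
            // The rank I send to stores my slice in its slot MySlice ...
            buffer[SendSlice][MySlice] = buffer[MySlice][MySlice];
            // ... and I store the slice arriving from RecvSlice in my slot RecvSlice.
            buffer[MySlice][RecvSlice] = buffer[RecvSlice][RecvSlice];
        }

    for (int rank = 0; rank < NumSlices; ++rank)
        for (int slice = 0; slice < NumSlices; ++slice)
            assert(buffer[rank][slice] == 100 + slice);  // every rank now has every slice
    std::cout << "All " << NumSlices << " slices shared on every rank" << std::endl;
    return 0;
}

In the committed code each round's send and receive are posted together with SendToRecvFromBegin and completed once with SendToRecvFromComplete, so the rounds overlap rather than serialise.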