From 91eaace19de25f45db52420a10350b36c548ad5e Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Fri, 15 Sep 2017 11:33:45 +0100 Subject: [PATCH 001/145] Added support for FFT accelerated updates --- lib/qcd/action/scalar/ScalarImpl.h | 129 +++++++++++++++++++++++++---- 1 file changed, 114 insertions(+), 15 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarImpl.h b/lib/qcd/action/scalar/ScalarImpl.h index f85ab840..3755d0ee 100644 --- a/lib/qcd/action/scalar/ScalarImpl.h +++ b/lib/qcd/action/scalar/ScalarImpl.h @@ -16,12 +16,12 @@ class ScalarImplTypes { typedef iImplField SiteField; typedef SiteField SitePropagator; typedef SiteField SiteComplex; - + typedef Lattice Field; typedef Field ComplexField; typedef Field FermionField; typedef Field PropagatorField; - + static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){ gaussian(pRNG, P); } @@ -47,54 +47,58 @@ class ScalarImplTypes { static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) { U = 1.0; } - + static void MomentumSpacePropagator(Field &out, RealD m) { GridBase *grid = out._grid; Field kmu(grid), one(grid); const unsigned int nd = grid->_ndimension; std::vector &l = grid->_fdimensions; - + one = Complex(1.0,0.0); out = m*m; for(int mu = 0; mu < nd; mu++) { Real twoPiL = M_PI*2./l[mu]; - + LatticeCoordinate(kmu,mu); kmu = 2.*sin(.5*twoPiL*kmu); out = out + kmu*kmu; } out = one/out; } - + static void FreePropagator(const Field &in, Field &out, const Field &momKernel) { FFT fft((GridCartesian *)in._grid); Field inFT(in._grid); - + fft.FFT_all_dim(inFT, in, FFT::forward); inFT = inFT*momKernel; fft.FFT_all_dim(out, inFT, FFT::backward); } - + static void FreePropagator(const Field &in, Field &out, RealD m) { Field momKernel(in._grid); - + MomentumSpacePropagator(momKernel, m); FreePropagator(in, out, momKernel); } - + }; + + #define USE_FFT_ACCELERATION + + template class ScalarAdjMatrixImplTypes { public: typedef S Simd; typedef QCD::SU Group; - + template using iImplField = iScalar>>; template @@ -103,24 +107,119 @@ class ScalarImplTypes { typedef iImplField SiteField; typedef SiteField SitePropagator; typedef iImplComplex SiteComplex; - + typedef Lattice Field; typedef Lattice ComplexField; typedef Field FermionField; typedef Field PropagatorField; + + static void MomentaSquare(ComplexField& out){ + GridBase *grid = out._grid; + const std::vector &l = grid->FullDimensions(); + ComplexField kmu(grid); + + for(int mu = 0; mu < grid->Nd(); mu++) + { + Real twoPiL = M_PI*2.0/l[mu]; + LatticeCoordinate(kmu,mu); + kmu = 2.0*sin(0.5*twoPiL*kmu); + out += kmu*kmu; + } + } + + static void MomentumSpacePropagator(ComplexField &out, RealD m) + { + GridBase *grid = out._grid; + ComplexField one(grid); one = Complex(1.0,0.0); + out = m*m; + MomentaSquare(out); + out = one/out; + } + + static inline void generate_momenta(Field& P, GridParallelRNG& pRNG) { + #ifndef USE_FFT_ACCELERATION Group::GaussianFundamentalLieAlgebraMatrix(pRNG, P); + #else + + Field Ptmp(P._grid), Pp(P._grid); + Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Ptmp); + // if we change the mass I need a renormalization here + // transform and multiply by (M*M+p*p)^-1 + GridCartesian *Grid = dynamic_cast(P._grid); + FFT theFFT(Grid); + ComplexField p2(Grid); + RealD M = 1.0; + p2= zero; + + theFFT.FFT_all_dim(Pp,Ptmp,FFT::forward); + MomentaSquare(p2); + p2 += M*M; + p2 = sqrt(p2); + Pp *= p2; + theFFT.FFT_all_dim(P,Pp,FFT::backward); + + #endif //USE_FFT_ACCELERATION } static inline Field projectForce(Field& P) {return P;} static inline void 
update_field(Field& P, Field& U, double ep) { + #ifndef USE_FFT_ACCELERATION U += P*ep; + #else + // Here we can eventually add the Fourier acceleration + // FFT transform P(x) -> P(p) + // divide by (M^2+p^2) M external parameter (how to pass?) + // P'(p) = P(p)/(M^2+p^2) + // Transform back -> P'(x) + // U += P'(x)*ep + + // the dynamic cast is safe + GridCartesian *Grid = dynamic_cast(U._grid); + FFT theFFT(Grid); + Field Pp(Grid), Pnew(Grid); + std::vector full_dim = Grid->FullDimensions(); + + theFFT.FFT_all_dim(Pp,P,FFT::forward); + RealD M = 1.0; + static bool first_call = true; + static ComplexField p2(Grid); + if (first_call){ + MomentumSpacePropagator(p2,M); + first_call = false; + } + Pp *= p2; + theFFT.FFT_all_dim(Pnew,Pp,FFT::backward); + U += Pnew * ep; + + #endif //USE_FFT_ACCELERATION } - static inline RealD FieldSquareNorm(Field& U) { + static inline RealD FieldSquareNorm(Field &U) + { + #ifndef USE_FFT_ACCELERATION return (TensorRemove(sum(trace(U*U))).real()); + #else + // In case of Fourier acceleration we have to: + // compute U(p)*U(p)/(M^2+p^2)) Parseval theorem + // 1 FFT needed U(x) -> U(p) + // M to be passed + + GridCartesian *Grid = dynamic_cast(U._grid); + FFT theFFT(Grid); + Field Up(Grid), Utilde(Grid); + std::vector full_dim = Grid->FullDimensions(); + + theFFT.FFT_all_dim(Up, U, FFT::forward); + RealD M = 1.0; + ComplexField p2(Grid); + MomentumSpacePropagator(p2,M); + Field Up2 = Up*p2; + // from the definition of the DFT we need to divide by the volume + return (-TensorRemove(sum(trace(adj(Up)*Up2))).real()/U._grid->gSites()); + #endif //USE_FFT_ACCELERATION } static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { @@ -146,7 +245,7 @@ class ScalarImplTypes { typedef ScalarImplTypes ScalarImplCR; typedef ScalarImplTypes ScalarImplCF; typedef ScalarImplTypes ScalarImplCD; - + // Hardcoding here the size of the matrices typedef ScalarAdjMatrixImplTypes ScalarAdjImplR; typedef ScalarAdjMatrixImplTypes ScalarAdjImplF; @@ -155,7 +254,7 @@ class ScalarImplTypes { template using ScalarNxNAdjImplR = ScalarAdjMatrixImplTypes; template using ScalarNxNAdjImplF = ScalarAdjMatrixImplTypes; template using ScalarNxNAdjImplD = ScalarAdjMatrixImplTypes; - + //} } From b542d349b8784fdd47339977e94575a7fdef5a58 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Fri, 15 Sep 2017 11:48:36 +0100 Subject: [PATCH 002/145] Minor cosmetic changes --- lib/qcd/action/scalar/ScalarImpl.h | 137 ++++++++++++++--------------- 1 file changed, 68 insertions(+), 69 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarImpl.h b/lib/qcd/action/scalar/ScalarImpl.h index 3755d0ee..3dd3cc70 100644 --- a/lib/qcd/action/scalar/ScalarImpl.h +++ b/lib/qcd/action/scalar/ScalarImpl.h @@ -91,6 +91,9 @@ class ScalarImplTypes { #define USE_FFT_ACCELERATION + #ifdef USE_FFT_ACCELERATION + #define FFT_MASS 0.707 + #endif template @@ -113,113 +116,109 @@ class ScalarImplTypes { typedef Field FermionField; typedef Field PropagatorField; + static void MomentaSquare(ComplexField &out) + { + GridBase *grid = out._grid; + const std::vector &l = grid->FullDimensions(); + ComplexField kmu(grid); - static void MomentaSquare(ComplexField& out){ - GridBase *grid = out._grid; - const std::vector &l = grid->FullDimensions(); - ComplexField kmu(grid); - - for(int mu = 0; mu < grid->Nd(); mu++) + for (int mu = 0; mu < grid->Nd(); mu++) { - Real twoPiL = M_PI*2.0/l[mu]; - LatticeCoordinate(kmu,mu); - kmu = 2.0*sin(0.5*twoPiL*kmu); - out += kmu*kmu; + Real twoPiL = M_PI * 2.0 / l[mu]; + LatticeCoordinate(kmu, 
mu); + kmu = 2.0 * sin(0.5 * twoPiL * kmu); + out += kmu * kmu; } } static void MomentumSpacePropagator(ComplexField &out, RealD m) { - GridBase *grid = out._grid; - ComplexField one(grid); one = Complex(1.0,0.0); - out = m*m; + GridBase *grid = out._grid; + ComplexField one(grid); + one = Complex(1.0, 0.0); + out = m * m; MomentaSquare(out); - out = one/out; + out = one / out; } - - static inline void generate_momenta(Field& P, GridParallelRNG& pRNG) { - #ifndef USE_FFT_ACCELERATION + static inline void generate_momenta(Field &P, GridParallelRNG &pRNG) + { +#ifndef USE_FFT_ACCELERATION Group::GaussianFundamentalLieAlgebraMatrix(pRNG, P); - #else - - Field Ptmp(P._grid), Pp(P._grid); - Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Ptmp); - // if we change the mass I need a renormalization here - // transform and multiply by (M*M+p*p)^-1 - GridCartesian *Grid = dynamic_cast(P._grid); - FFT theFFT(Grid); - ComplexField p2(Grid); - RealD M = 1.0; - p2= zero; +#else - theFFT.FFT_all_dim(Pp,Ptmp,FFT::forward); + Field Pgaussian(P._grid), Pp(P._grid); + ComplexField p2(P._grid); p2 = zero; + RealD M = FFT_MASS; + + Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Pgaussian); + + FFT theFFT((GridCartesian*)P._grid); + theFFT.FFT_all_dim(Pp, Pgaussian, FFT::forward); MomentaSquare(p2); - p2 += M*M; + p2 += M * M; p2 = sqrt(p2); Pp *= p2; - theFFT.FFT_all_dim(P,Pp,FFT::backward); - - #endif //USE_FFT_ACCELERATION + theFFT.FFT_all_dim(P, Pp, FFT::backward); + +#endif //USE_FFT_ACCELERATION } static inline Field projectForce(Field& P) {return P;} - static inline void update_field(Field& P, Field& U, double ep) { - #ifndef USE_FFT_ACCELERATION - U += P*ep; - #else - // Here we can eventually add the Fourier acceleration + static inline void update_field(Field &P, Field &U, double ep) + { +#ifndef USE_FFT_ACCELERATION + U += P * ep; +#else // FFT transform P(x) -> P(p) // divide by (M^2+p^2) M external parameter (how to pass?) 
// P'(p) = P(p)/(M^2+p^2) // Transform back -> P'(x) // U += P'(x)*ep - - // the dynamic cast is safe - GridCartesian *Grid = dynamic_cast(U._grid); - FFT theFFT(Grid); - Field Pp(Grid), Pnew(Grid); - std::vector full_dim = Grid->FullDimensions(); - theFFT.FFT_all_dim(Pp,P,FFT::forward); - RealD M = 1.0; + Field Pp(U._grid), P_FFT(U._grid); + static ComplexField p2(U._grid); + RealD M = FFT_MASS; + + FFT theFFT((GridCartesian*)U._grid); + theFFT.FFT_all_dim(Pp, P, FFT::forward); + static bool first_call = true; - static ComplexField p2(Grid); - if (first_call){ - MomentumSpacePropagator(p2,M); - first_call = false; + if (first_call) + { + // avoid recomputing + MomentumSpacePropagator(p2, M); + first_call = false; } Pp *= p2; - theFFT.FFT_all_dim(Pnew,Pp,FFT::backward); - U += Pnew * ep; - - #endif //USE_FFT_ACCELERATION + theFFT.FFT_all_dim(P_FFT, Pp, FFT::backward); + U += P_FFT * ep; + +#endif //USE_FFT_ACCELERATION } static inline RealD FieldSquareNorm(Field &U) { - #ifndef USE_FFT_ACCELERATION - return (TensorRemove(sum(trace(U*U))).real()); - #else +#ifndef USE_FFT_ACCELERATION + return (TensorRemove(sum(trace(U * U))).real()); +#else // In case of Fourier acceleration we have to: // compute U(p)*U(p)/(M^2+p^2)) Parseval theorem // 1 FFT needed U(x) -> U(p) // M to be passed - - GridCartesian *Grid = dynamic_cast(U._grid); - FFT theFFT(Grid); - Field Up(Grid), Utilde(Grid); - std::vector full_dim = Grid->FullDimensions(); - + + FFT theFFT((GridCartesian*)U._grid); + Field Up(U._grid); + theFFT.FFT_all_dim(Up, U, FFT::forward); - RealD M = 1.0; - ComplexField p2(Grid); - MomentumSpacePropagator(p2,M); - Field Up2 = Up*p2; + RealD M = FFT_MASS; + ComplexField p2(U._grid); + MomentumSpacePropagator(p2, M); + Field Up2 = Up * p2; // from the definition of the DFT we need to divide by the volume - return (-TensorRemove(sum(trace(adj(Up)*Up2))).real()/U._grid->gSites()); - #endif //USE_FFT_ACCELERATION + return (-TensorRemove(sum(trace(adj(Up) * Up2))).real() / U._grid->gSites()); +#endif //USE_FFT_ACCELERATION } static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { From 999c62359046674117c0e0e1348072e002622c15 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Mon, 18 Sep 2017 14:39:04 +0100 Subject: [PATCH 003/145] Solving a memory leak in Communicator_mpi --- lib/cartesian/Cartesian_base.h | 3 + lib/cartesian/Cartesian_full.h | 3 + lib/communicator/Communicator_base.h | 1 + lib/communicator/Communicator_mpi.cc | 7 + .../action/scalar/ScalarInteractionAction.h | 220 ++++++++++-------- 5 files changed, 135 insertions(+), 99 deletions(-) diff --git a/lib/cartesian/Cartesian_base.h b/lib/cartesian/Cartesian_base.h index f4f9a269..0c67e951 100644 --- a/lib/cartesian/Cartesian_base.h +++ b/lib/cartesian/Cartesian_base.h @@ -50,6 +50,9 @@ public: GridBase(const std::vector & processor_grid) : CartesianCommunicator(processor_grid) {}; + virtual ~GridBase() = default; + + // Physics Grid information. std::vector _simd_layout;// Which dimensions get relayed out over simd lanes. 
std::vector _fdimensions;// (full) Global dimensions of array prior to cb removal diff --git a/lib/cartesian/Cartesian_full.h b/lib/cartesian/Cartesian_full.h index 815e3b22..62481bb8 100644 --- a/lib/cartesian/Cartesian_full.h +++ b/lib/cartesian/Cartesian_full.h @@ -93,6 +93,7 @@ public: // Use a reduced simd grid _ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions + //std::cout << _ldimensions[d] << " " << _gdimensions[d] << " " << _processors[d] << std::endl; assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); _rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition @@ -137,6 +138,8 @@ public: block = block * _rdimensions[d]; } }; + + virtual ~GridCartesian() = default; }; } #endif diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index ac866ced..ada017b0 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -152,6 +152,7 @@ class CartesianCommunicator { // Constructor of any given grid //////////////////////////////////////////////// CartesianCommunicator(const std::vector &pdimensions_in); + virtual ~CartesianCommunicator(); //////////////////////////////////////////////////////////////////////////////////////// // Wraps MPI_Cart routines, or implements equivalent on other impls diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index bd2a62fb..a3427b00 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -75,6 +75,13 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(Size==_Nprocessors); } + +CartesianCommunicator::~CartesianCommunicator(){ + if (communicator && !MPI::Is_finalized()) + MPI_Comm_free(&communicator); +} + + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/qcd/action/scalar/ScalarInteractionAction.h b/lib/qcd/action/scalar/ScalarInteractionAction.h index 4d189352..a681b62c 100644 --- a/lib/qcd/action/scalar/ScalarInteractionAction.h +++ b/lib/qcd/action/scalar/ScalarInteractionAction.h @@ -30,119 +30,141 @@ directory #ifndef SCALAR_INT_ACTION_H #define SCALAR_INT_ACTION_H - // Note: this action can completely absorb the ScalarAction for real float fields // use the scalarObjs to generalise the structure -namespace Grid { - // FIXME drop the QCD namespace everywhere here +namespace Grid +{ +// FIXME drop the QCD namespace everywhere here - template - class ScalarInteractionAction : public QCD::Action { - public: - INHERIT_FIELD_TYPES(Impl); - private: - RealD mass_square; - RealD lambda; +template +class ScalarInteractionAction : public QCD::Action +{ +public: + INHERIT_FIELD_TYPES(Impl); +private: + RealD mass_square; + RealD lambda; - typedef typename Field::vector_object vobj; - typedef CartesianStencil Stencil; + typedef typename Field::vector_object vobj; + typedef CartesianStencil Stencil; - SimpleCompressor compressor; - int npoint = 2*Ndim; - std::vector directions;// = {0,1,2,3,0,1,2,3}; // forcing 4 dimensions - std::vector displacements;// = {1,1,1,1, -1,-1,-1,-1}; + SimpleCompressor compressor; + int npoint = 2 * Ndim; + std::vector directions; // = {0,1,2,3,0,1,2,3}; // forcing 4 dimensions + std::vector displacements; // = {1,1,1,1, -1,-1,-1,-1}; - - public: - - ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2*Ndim,0), directions(2*Ndim,0){ - for (int mu = 0 ; mu < 
Ndim; mu++){ - directions[mu] = mu; directions[mu+Ndim] = mu; - displacements[mu] = 1; displacements[mu+Ndim] = -1; - } +public: + ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2 * Ndim, 0), directions(2 * Ndim, 0) + { + for (int mu = 0; mu < Ndim; mu++) + { + directions[mu] = mu; + directions[mu + Ndim] = mu; + displacements[mu] = 1; + displacements[mu + Ndim] = -1; } + } - virtual std::string LogParameters() { - std::stringstream sstream; - sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl; - sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl; - return sstream.str(); - } + virtual std::string LogParameters() + { + std::stringstream sstream; + sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl; + sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl; + return sstream.str(); + } - virtual std::string action_name() {return "ScalarAction";} + virtual std::string action_name() { return "ScalarAction"; } - virtual void refresh(const Field &U, GridParallelRNG &pRNG) {} + virtual void refresh(const Field &U, GridParallelRNG &pRNG) {} - virtual RealD S(const Field &p) { - assert(p._grid->Nd() == Ndim); - static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); - phiStencil.HaloExchange(p, compressor); - Field action(p._grid), pshift(p._grid), phisquared(p._grid); - phisquared = p*p; - action = (2.0*Ndim + mass_square)*phisquared - lambda/24.*phisquared*phisquared; - for (int mu = 0; mu < Ndim; mu++) { - // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils - parallel_for (int i = 0; i < p._grid->oSites(); i++) { - int permute_type; - StencilEntry *SE; - vobj temp2; - const vobj *temp, *t_p; - - SE = phiStencil.GetEntry(permute_type, mu, i); - t_p = &p._odata[i]; - if ( SE->_is_local ) { - temp = &p._odata[SE->_offset]; - if ( SE->_permute ) { - permute(temp2, *temp, permute_type); - action._odata[i] -= temp2*(*t_p) + (*t_p)*temp2; - } else { - action._odata[i] -= (*temp)*(*t_p) + (*t_p)*(*temp); - } - } else { - action._odata[i] -= phiStencil.CommBuf()[SE->_offset]*(*t_p) + (*t_p)*phiStencil.CommBuf()[SE->_offset]; - } - } - // action -= pshift*p + p*pshift; - } - // NB the trace in the algebra is normalised to 1/2 - // minus sign coming from the antihermitian fields - return -(TensorRemove(sum(trace(action)))).real(); - }; - - virtual void deriv(const Field &p, Field &force) { - assert(p._grid->Nd() == Ndim); - force = (2.0*Ndim + mass_square)*p - lambda/12.*p*p*p; - // move this outside - static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); - phiStencil.HaloExchange(p, compressor); - - //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1); - for (int point = 0; point < npoint; point++) { - parallel_for (int i = 0; i < p._grid->oSites(); i++) { - const vobj *temp; - vobj temp2; - int permute_type; - StencilEntry *SE; - SE = phiStencil.GetEntry(permute_type, point, i); - - if ( SE->_is_local ) { - temp = &p._odata[SE->_offset]; - if ( SE->_permute ) { - permute(temp2, *temp, permute_type); - force._odata[i] -= temp2; - } else { - force._odata[i] -= *temp; - } - } else { - force._odata[i] -= phiStencil.CommBuf()[SE->_offset]; - } - } + virtual RealD S(const Field &p) + { + assert(p._grid->Nd() == Ndim); + static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); + phiStencil.HaloExchange(p, compressor); + Field action(p._grid), 
pshift(p._grid), phisquared(p._grid); + phisquared = p * p; + action = (2.0 * Ndim + mass_square) * phisquared - lambda / 24. * phisquared * phisquared; + for (int mu = 0; mu < Ndim; mu++) + { + // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils + parallel_for(int i = 0; i < p._grid->oSites(); i++) + { + int permute_type; + StencilEntry *SE; + vobj temp2; + const vobj *temp, *t_p; + + SE = phiStencil.GetEntry(permute_type, mu, i); + t_p = &p._odata[i]; + if (SE->_is_local) + { + temp = &p._odata[SE->_offset]; + if (SE->_permute) + { + permute(temp2, *temp, permute_type); + action._odata[i] -= temp2 * (*t_p) + (*t_p) * temp2; + } + else + { + action._odata[i] -= (*temp) * (*t_p) + (*t_p) * (*temp); + } + } + else + { + action._odata[i] -= phiStencil.CommBuf()[SE->_offset] * (*t_p) + (*t_p) * phiStencil.CommBuf()[SE->_offset]; + } } + // action -= pshift*p + p*pshift; } + // NB the trace in the algebra is normalised to 1/2 + // minus sign coming from the antihermitian fields + return -(TensorRemove(sum(trace(action)))).real(); }; - -} // namespace Grid -#endif // SCALAR_INT_ACTION_H + virtual void deriv(const Field &p, Field &force) + { + assert(p._grid->Nd() == Ndim); + force = (2.0 * Ndim + mass_square) * p - lambda / 12. * p * p * p; + // move this outside + static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); + phiStencil.HaloExchange(p, compressor); + + //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1); + for (int point = 0; point < npoint; point++) + { + parallel_for(int i = 0; i < p._grid->oSites(); i++) + { + const vobj *temp; + vobj temp2; + int permute_type; + StencilEntry *SE; + SE = phiStencil.GetEntry(permute_type, point, i); + + if (SE->_is_local) + { + temp = &p._odata[SE->_offset]; + if (SE->_permute) + { + permute(temp2, *temp, permute_type); + force._odata[i] -= temp2; + } + else + { + force._odata[i] -= *temp; + } + } + else + { + force._odata[i] -= phiStencil.CommBuf()[SE->_offset]; + } + } + } + } +}; + +} // namespace Grid + +#endif // SCALAR_INT_ACTION_H From 9a827d0242f7164a4bc02c5b8cefe606878fcb84 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Mon, 18 Sep 2017 14:55:51 +0100 Subject: [PATCH 004/145] Fixing a compilation error --- lib/communicator/Communicator_mpi3.cc | 3 +++ lib/communicator/Communicator_mpi3_leader.cc | 3 +++ lib/communicator/Communicator_mpit.cc | 3 +++ lib/communicator/Communicator_none.cc | 2 ++ lib/communicator/Communicator_shmem.cc | 3 +++ 5 files changed, 14 insertions(+) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 44aa1024..bb256e79 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -587,6 +587,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } }; + +CartesianCommunicator::~CartesianCommunicator() = default; + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_mpi3_leader.cc b/lib/communicator/Communicator_mpi3_leader.cc index 6e26bd3e..da863508 100644 --- a/lib/communicator/Communicator_mpi3_leader.cc +++ b/lib/communicator/Communicator_mpi3_leader.cc @@ -830,6 +830,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); }; +CartesianCommunicator::~CartesianCommunicator() = default; + + void 
CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index eb6ef87d..7b7ec14c 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -80,6 +80,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(Size==_Nprocessors); } + +CartesianCommunicator::~CartesianCommunicator() = default; + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 5319ab93..a4e6cf54 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -53,6 +53,8 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } +CartesianCommunicator::~CartesianCommunicator() = default; + void CartesianCommunicator::GlobalSum(float &){} void CartesianCommunicator::GlobalSumVector(float *,int N){} void CartesianCommunicator::GlobalSum(double &){} diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index 3c76c808..826471c4 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -98,6 +98,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(Size==_Nprocessors); } +CartesianCommunicator::~CartesianCommunicator() = default; + + void CartesianCommunicator::GlobalSum(uint32_t &u){ static long long source ; static long long dest ; From df21668f2c6d25b2c8c79e353514956517ed7682 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 22 Sep 2017 14:21:18 +0100 Subject: [PATCH 005/145] memory profiler update --- lib/allocator/AlignedAllocator.cc | 4 +- lib/allocator/AlignedAllocator.h | 72 +++++++++++++++++++++++++++---- 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/lib/allocator/AlignedAllocator.cc b/lib/allocator/AlignedAllocator.cc index 967b2571..944e287f 100644 --- a/lib/allocator/AlignedAllocator.cc +++ b/lib/allocator/AlignedAllocator.cc @@ -3,9 +3,11 @@ namespace Grid { +MemoryStats *MemoryProfiler::stats = nullptr; + int PointerCache::victim; - PointerCache::PointerCacheEntry PointerCache::Entries[PointerCache::Ncache]; +PointerCache::PointerCacheEntry PointerCache::Entries[PointerCache::Ncache]; void *PointerCache::Insert(void *ptr,size_t bytes) { diff --git a/lib/allocator/AlignedAllocator.h b/lib/allocator/AlignedAllocator.h index e64a5949..68fad0d2 100644 --- a/lib/allocator/AlignedAllocator.h +++ b/lib/allocator/AlignedAllocator.h @@ -63,6 +63,18 @@ namespace Grid { static void *Lookup(size_t bytes) ; }; + + struct MemoryStats + { + size_t totalAllocated{0}, maxAllocated{0}, + currentlyAllocated{0}, totalFreed{0}; + }; + + class MemoryProfiler + { + public: + static MemoryStats *stats; + }; void check_huge_pages(void *Buf,uint64_t BYTES); @@ -93,6 +105,13 @@ public: { size_type bytes = __n*sizeof(_Tp); + if (auto s = MemoryProfiler::stats) + { + s->totalAllocated += bytes; + s->currentlyAllocated += bytes; + s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); + } + _Tp *ptr = (_Tp *) PointerCache::Lookup(bytes); // if ( ptr != NULL ) // std::cout << "alignedAllocator "<<__n << " cache hit "<< std::hex << ptr <totalFreed += bytes; + s->currentlyAllocated -= bytes; + } + pointer 
__freeme = (pointer)PointerCache::Insert((void *)__p,bytes); #ifdef HAVE_MM_MALLOC_H @@ -172,10 +197,18 @@ public: #ifdef GRID_COMMS_SHMEM pointer allocate(size_type __n, const void* _p= 0) { + size_type bytes = __n*sizeof(_Tp); + + if (auto s = MemoryProfiler::stats) + { + s->totalAllocated += bytes; + s->currentlyAllocated += bytes; + s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); + } #ifdef CRAY - _Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64); + _Tp *ptr = (_Tp *) shmem_align(bytes,64); #else - _Tp *ptr = (_Tp *) shmem_align(64,__n*sizeof(_Tp)); + _Tp *ptr = (_Tp *) shmem_align(64,bytes); #endif #ifdef PARANOID_SYMMETRIC_HEAP static void * bcast; @@ -193,18 +226,32 @@ public: #endif return ptr; } - void deallocate(pointer __p, size_type) { + void deallocate(pointer __p, size_type __n) { + size_type bytes = __n*sizeof(_Tp); + + if (auto s = MemoryProfiler::stats) + { + s->totalFreed += bytes; + s->currentlyAllocated -= bytes; + } shmem_free((void *)__p); } #else pointer allocate(size_type __n, const void* _p= 0) { -#ifdef HAVE_MM_MALLOC_H - _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN); -#else - _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp)); -#endif size_type bytes = __n*sizeof(_Tp); + + if (auto s = MemoryProfiler::stats) + { + s->totalAllocated += bytes; + s->currentlyAllocated += bytes; + s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); + } +#ifdef HAVE_MM_MALLOC_H + _Tp * ptr = (_Tp *) _mm_malloc(bytes, GRID_ALLOC_ALIGN); +#else + _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN, bytes); +#endif uint8_t *cp = (uint8_t *)ptr; // One touch per 4k page, static OMP loop to catch same loop order #pragma omp parallel for schedule(static) @@ -213,7 +260,14 @@ public: } return ptr; } - void deallocate(pointer __p, size_type) { + void deallocate(pointer __p, size_type __n) { + size_type bytes = __n*sizeof(_Tp); + + if (auto s = MemoryProfiler::stats) + { + s->totalFreed += bytes; + s->currentlyAllocated -= bytes; + } #ifdef HAVE_MM_MALLOC_H _mm_free((void *)__p); #else From a02193300234b91a756710fa4b7cc99e96302a4d Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 29 Sep 2017 16:09:34 +0100 Subject: [PATCH 006/145] Scalar: SU(N) action change to t'Hooft scaling --- lib/qcd/action/scalar/ScalarInteractionAction.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarInteractionAction.h b/lib/qcd/action/scalar/ScalarInteractionAction.h index a681b62c..9d855137 100644 --- a/lib/qcd/action/scalar/ScalarInteractionAction.h +++ b/lib/qcd/action/scalar/ScalarInteractionAction.h @@ -44,8 +44,9 @@ public: INHERIT_FIELD_TYPES(Impl); private: - RealD mass_square; - RealD lambda; + RealD mass_square; + RealD lambda; + const unsigned int N = Impl::Group::Dimension; typedef typename Field::vector_object vobj; typedef CartesianStencil Stencil; @@ -85,8 +86,8 @@ public: static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); phiStencil.HaloExchange(p, compressor); Field action(p._grid), pshift(p._grid), phisquared(p._grid); - phisquared = p * p; - action = (2.0 * Ndim + mass_square) * phisquared - lambda / 24. 
* phisquared * phisquared; + phisquared = p*p; + action = (2.*Ndim + mass_square) * phisquared - phisquared * phisquared; for (int mu = 0; mu < Ndim; mu++) { // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils @@ -121,13 +122,13 @@ public: } // NB the trace in the algebra is normalised to 1/2 // minus sign coming from the antihermitian fields - return -(TensorRemove(sum(trace(action)))).real(); + return -(TensorRemove(sum(trace(action)))).real()*N/lambda; }; virtual void deriv(const Field &p, Field &force) { assert(p._grid->Nd() == Ndim); - force = (2.0 * Ndim + mass_square) * p - lambda / 12. * p * p * p; + force = (2.0 * Ndim + mass_square) * p - 2. * p * p * p; // move this outside static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); phiStencil.HaloExchange(p, compressor); @@ -162,6 +163,7 @@ public: } } } + force *= N/lambda; } }; From 05c1c88440a9b00c4a35e8487ab92a27afb48aea Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 3 Oct 2017 14:26:20 +0100 Subject: [PATCH 007/145] Scalar: more action generalisation --- lib/qcd/action/scalar/ScalarInteractionAction.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarInteractionAction.h b/lib/qcd/action/scalar/ScalarInteractionAction.h index 9d855137..3848751d 100644 --- a/lib/qcd/action/scalar/ScalarInteractionAction.h +++ b/lib/qcd/action/scalar/ScalarInteractionAction.h @@ -46,6 +46,7 @@ public: private: RealD mass_square; RealD lambda; + RealD g; const unsigned int N = Impl::Group::Dimension; typedef typename Field::vector_object vobj; @@ -57,7 +58,7 @@ private: std::vector displacements; // = {1,1,1,1, -1,-1,-1,-1}; public: - ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2 * Ndim, 0), directions(2 * Ndim, 0) + ScalarInteractionAction(RealD ms, RealD l, RealD gval) : mass_square(ms), lambda(l), g(gval), displacements(2 * Ndim, 0), directions(2 * Ndim, 0) { for (int mu = 0; mu < Ndim; mu++) { @@ -73,6 +74,7 @@ public: std::stringstream sstream; sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl; sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl; + sstream << GridLogMessage << "[ScalarAction] g : " << g << std::endl; return sstream.str(); } @@ -86,8 +88,8 @@ public: static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); phiStencil.HaloExchange(p, compressor); Field action(p._grid), pshift(p._grid), phisquared(p._grid); - phisquared = p*p; - action = (2.*Ndim + mass_square) * phisquared - phisquared * phisquared; + phisquared = p * p; + action = (2.0 * Ndim + mass_square) * phisquared - lambda * phisquared * phisquared; for (int mu = 0; mu < Ndim; mu++) { // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils @@ -122,13 +124,13 @@ public: } // NB the trace in the algebra is normalised to 1/2 // minus sign coming from the antihermitian fields - return -(TensorRemove(sum(trace(action)))).real()*N/lambda; + return -(TensorRemove(sum(trace(action)))).real()*N/g; }; virtual void deriv(const Field &p, Field &force) { assert(p._grid->Nd() == Ndim); - force = (2.0 * Ndim + mass_square) * p - 2. * p * p * p; + force = (2. * Ndim + mass_square) * p - 2. 
* lambda * p * p * p; // move this outside static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); phiStencil.HaloExchange(p, compressor); @@ -163,7 +165,7 @@ public: } } } - force *= N/lambda; + force *= N/g; } }; From 8784f2a88d780c7134574cf452d7c5550bda5769 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 3 Oct 2017 14:38:10 +0100 Subject: [PATCH 008/145] post-merge fix --- lib/communicator/Communicator_none.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index c97a181b..40feefec 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -56,8 +56,6 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } -CartesianCommunicator::~CartesianCommunicator() = default; - void CartesianCommunicator::GlobalSum(float &){} void CartesianCommunicator::GlobalSumVector(float *,int N){} void CartesianCommunicator::GlobalSum(double &){} From d38cee73bf1a9cc14bfa0e1f8aefcb2b99bdbb8d Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 3 Oct 2017 17:29:34 +0100 Subject: [PATCH 009/145] Scalar: easier Fourier acceleration parametrisation through -D flags --- lib/qcd/action/scalar/ScalarImpl.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarImpl.h b/lib/qcd/action/scalar/ScalarImpl.h index 3dd3cc70..650f4d17 100644 --- a/lib/qcd/action/scalar/ScalarImpl.h +++ b/lib/qcd/action/scalar/ScalarImpl.h @@ -89,13 +89,12 @@ class ScalarImplTypes { }; - - #define USE_FFT_ACCELERATION - #ifdef USE_FFT_ACCELERATION - #define FFT_MASS 0.707 + #ifdef USE_FFT_ACCELERATION + #ifndef FFT_MASS + #error "USE_FFT_ACCELERATION is defined but not FFT_MASS" #endif - - + #endif + template class ScalarAdjMatrixImplTypes { public: From 1e54882f7145bd38db8ac1681cb7d4f9bceb2297 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Wed, 4 Oct 2017 10:51:06 +0100 Subject: [PATCH 010/145] Stagger --- tests/solver/Test_staggered_cg_prec.cc | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/solver/Test_staggered_cg_prec.cc b/tests/solver/Test_staggered_cg_prec.cc index 66f11d3d..0a803c21 100644 --- a/tests/solver/Test_staggered_cg_prec.cc +++ b/tests/solver/Test_staggered_cg_prec.cc @@ -83,5 +83,14 @@ int main (int argc, char ** argv) ConjugateGradient CG(1.0e-8,10000); CG(HermOpEO,src_o,res_o); + FermionField tmp(&RBGrid); + + HermOpEO.Mpc(res_o,tmp); + std::cout << "check Mpc resid " << axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o) << "\n"; + + RealD n1,n2; + HermOpEO.MpcDagMpc(res_o,tmp,n1,n2); + std::cout << "check MpcDagMpc resid " << axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o) << "\n"; + Grid_finalize(); } From 15d690e9b9bd79e3ee9b6dae1a12753f131c024f Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Mon, 9 Oct 2017 09:59:58 +0100 Subject: [PATCH 011/145] Adding the cartesian communicator destructor --- lib/communicator/Communicator_none.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 40feefec..4b9029d6 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -56,6 +56,9 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } +CartesianCommunicator::~CartesianCommunicator(){} + + void CartesianCommunicator::GlobalSum(float &){} void CartesianCommunicator::GlobalSumVector(float *,int N){} void 
CartesianCommunicator::GlobalSum(double &){} From 07009c569a206b9e633e5ab01bdef386f10050c5 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 9 Oct 2017 23:16:51 +0100 Subject: [PATCH 012/145] Comms splitting improvements --- lib/communicator/Communicator_base.cc | 36 ++++++++++++++++----------- lib/communicator/Communicator_base.h | 17 +++++++++++++ lib/communicator/Communicator_mpi.cc | 17 +++++++++++++ lib/communicator/Communicator_none.cc | 4 +++ 4 files changed, 60 insertions(+), 14 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index bcf429ab..ce9a3cf0 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -117,32 +117,40 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors, int Nchild = Nparent/childsize; assert (childsize * Nchild == Nparent); - int prank; MPI_Comm_rank(parent.communicator,&prank); - int crank = prank % childsize; - int ccomm = prank / childsize; + std::vector ccoor(_ndimension); // coor within subcommunicator + std::vector scoor(_ndimension); // coor of split within parent + std::vector ssize(_ndimension); // coor of split within parent + + for(int d=0;d<_ndimension;d++){ + ccoor[d] = parent._processor_coor[d] % processors[d]; + scoor[d] = parent._processor_coor[d] / processors[d]; + ssize[d] = parent._processors[d]/ processors[d]; + } + int crank,srank; // rank within subcomm ; rank of subcomm within blocks of subcomms + Lexicographic::IndexFromCoor(ccoor,crank,processors); + Lexicographic::IndexFromCoor(scoor,srank,ssize); MPI_Comm comm_split; if ( Nchild > 1 ) { - std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< void AllToAll(int dim,std::vector &in, std::vector &out){ + assert(dim>=0); + assert(dim<_ndimension); + int numnode = _processors[dim]; + // std::cerr << " AllToAll in.size() "< void Broadcast(int root,obj &data) { diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index a55c0164..678e4517 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -187,6 +187,21 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) root, communicator); assert(ierr==0); +} +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +{ + std::vector row(_ndimension,1); + assert(dim>=0 && dim<_ndimension); + + // Split the communicator + row[dim] = _processors[dim]; + + CartesianCommunicator Comm(row,*this); + Comm.AllToAll(in,out,bytes); +} +void CartesianCommunicator::AllToAll(void *in,void *out,int bytes) +{ + MPI_Alltoall(in ,bytes,MPI_BYTE,out,bytes,MPI_BYTE,communicator); } /////////////////////////////////////////////////////// // Should only be used prior to Grid Init finished. 
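// ---------------------------------------------------------------------------
// Editorial sketch (not part of this patch): the dimension-wise AllToAll above
// works by carving the Cartesian communicator into 1-D rows along `dim` and
// handing each row to MPI_Alltoall, which exchanges one equal-sized block per
// peer (here `bytes` MPI_BYTEs each). A minimal, self-contained illustration
// of that pattern follows; the ranks-per-row constant P and the demo function
// name are assumptions for illustration, not Grid API.
#include <mpi.h>
#include <vector>
int alltoall_row_demo(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank, P = 2;                                       // P ranks per row (assumption)
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm row;
  MPI_Comm_split(MPI_COMM_WORLD, rank / P, rank, &row);  // 1-D sub-communicator along one dim
  std::vector<double> send(P, rank), recv(P, 0.0);
  MPI_Alltoall(send.data(), 1, MPI_DOUBLE,               // one element sent to each row peer
               recv.data(), 1, MPI_DOUBLE, row);         // recv[i] = world rank of row peer i
  MPI_Comm_free(&row);
  MPI_Finalize();
  return 0;
}
// ---------------------------------------------------------------------------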
@@ -207,5 +222,7 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) assert(ierr==0); } + + } diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 40feefec..e9d71a15 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -98,6 +98,10 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector & { assert(0); } +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +{ + bcopy(in,out,bytes); +} int CartesianCommunicator::RankWorld(void){return 0;} void CartesianCommunicator::Barrier(void){} From f7cbf82c0487be5a7be37fd6b7be148b74029884 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 9 Oct 2017 23:18:48 +0100 Subject: [PATCH 013/145] Better stdout/err debug --- lib/util/Init.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 3232d32f..1266d34d 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -243,6 +243,12 @@ void Grid_init(int *argc,char ***argv) fname< Date: Mon, 9 Oct 2017 23:19:45 +0100 Subject: [PATCH 014/145] Split grid communication --- lib/lattice/Lattice_transfer.h | 301 +++++++++++++++++++++++++++++++++ 1 file changed, 301 insertions(+) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index cbf31f86..713a8788 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -684,6 +684,307 @@ void precisionChange(Lattice &out, const Lattice &in){ merge(out._odata[out_oidx], ptrs, 0); } } + +//////////////////////////////////////////////////////////////////////////////// +// Communicate between grids +//////////////////////////////////////////////////////////////////////////////// +// +// All to all plan +// +// Subvolume on fine grid is v. Vectors a,b,c,d +// +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SIMPLEST CASE: +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Mesh of nodes (2) ; subdivide to 1 subdivisions +// +// Lex ord: +// N0 va0 vb0 N1 va1 vb1 +// +// For each dimension do an all to all +// +// full AllToAll(0) +// N0 va0 va1 N1 vb0 vb1 +// +// REARRANGE +// N0 va01 N1 vb01 +// +// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". +// NB: Easiest to programme if keep in lex order. +// +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SIMPLE CASE: +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Mesh of nodes (2x2) ; subdivide to 1x1 subdivisions +// +// Lex ord: +// N0 va0 vb0 vc0 vd0 N1 va1 vb1 vc1 vd1 +// N2 va2 vb2 vc2 vd2 N3 va3 vb3 vc3 vd3 +// +// Ratio = full[dim] / split[dim] +// +// For each dimension do an all to all; get Nvec -> Nvec / ratio +// Ldim -> Ldim * ratio +// LocalVol -> LocalVol * ratio +// full AllToAll(0) +// N0 va0 vb0 va1 vb1 N1 vc0 vd0 vc1 vd1 +// N2 va2 vb2 va3 vb3 N3 vc2 vd2 vc3 vd3 +// +// REARRANGE +// N0 va01 vb01 N1 vc01 vd01 +// N2 va23 vb23 N3 vc23 vd23 +// +// full AllToAll(1) // Not what is wanted. FIXME +// N0 va01 va23 N1 vc01 vc23 +// N2 vb01 vb23 N3 vd01 vd23 +// +// REARRANGE +// N0 va0123 N1 vc0123 +// N2 vb0123 N3 vd0123 +// +// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". 
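//
// Editorial aside (not part of the patch): in terms of the Grid_split /
// Grid_unsplit routines defined just below, the intended call pattern mirrors
// the Test_dwf_mrhs_cg* drivers later in this series -- pack
// nvector = full_nproc/split_nproc right-hand sides from the full grid into a
// single field on each split grid, solve there independently, then scatter the
// results back. Names follow those tests; treat this as a usage sketch only.
//
//   std::vector<FermionField> src(nrhs, FGrid);  // nrhs fields on the full grid
//   FermionField s_src(SFGrid);                  // one field on the split grid
//   Grid_split  (src, s_src);                    // full -> split (forward plan above)
//   // ... independent solves on each small communicator ...
//   Grid_unsplit(src, s_src);                    // split -> full (reverse of the plan)
//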
+// NB: Easiest to programme if keep in lex order. +// +///////////////////////////////////////////////////////// +template +void Grid_split(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc =split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + int lsites = full_grid->lSites(); + Integer sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + for(int v=0;v ldims = full_grid->_ldimensions; + std::vector lcoor(ndim); + + for(int d=0;dAllToAll(d,alldata,tmpdata); + + ////////////////////////////////////////// + //Local volume for this dimension is expanded by ratio of processor extents + // Number of vectors is decreased by same factor + // Rearrange to lexico for bigger volume + ////////////////////////////////////////// + nvec /= ratio[d]; + auto rdims = ldims; rdims[d] *= ratio[d]; + auto rsites= lsites*ratio[d]; + for(int v=0;v_processors[d] > 1 ) { + tmpdata = alldata; + split_grid->AllToAll(d,tmpdata,alldata); + } + } + } + + vectorizeFromLexOrdArray(alldata,split); +} + +template +void Grid_split(Lattice &full,Lattice & split) +{ + int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors; + std::vector > full_v(nvector,full._grid); + for(int n=0;n +void Grid_unsplit(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc =split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + int lsites = full_grid->lSites(); + Integer sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + + unvectorizeToLexOrdArray(alldata,split); + + ///////////////////////////////////////////////////////////////// + // Start from split grid and work towards full grid + 
///////////////////////////////////////////////////////////////// + std::vector lcoor(ndim); + std::vector rcoor(ndim); + + int nvec = 1; + lsites = split_grid->lSites(); + std::vector ldims = split_grid->_ldimensions; + + for(int d=ndim-1;d>=0;d--){ + + if ( ratio[d] != 1 ) { + + if ( split_grid->_processors[d] > 1 ) { + tmpdata = alldata; + split_grid->AllToAll(d,tmpdata,alldata); + } + + ////////////////////////////////////////// + //Local volume for this dimension is expanded by ratio of processor extents + // Number of vectors is decreased by same factor + // Rearrange to lexico for bigger volume + ////////////////////////////////////////// + auto rsites= lsites/ratio[d]; + auto rdims = ldims; rdims[d]/=ratio[d]; + + for(int v=0;v smaller local volume + // lsite, lcoor --> bigger original (single node?) volume + // For loop over each site within smaller subvol + for(int rsite=0;rsiteAllToAll(d,tmpdata,alldata); + } + } + + lsites = full_grid->lSites(); + for(int v=0;v Date: Mon, 9 Oct 2017 23:20:58 +0100 Subject: [PATCH 015/145] Split CG testing --- tests/solver/Test_dwf_mrhs_cg.cc | 64 +++++++--- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 144 ++++++++++++++++++++++ tests/solver/Test_dwf_mrhs_cg_mpieo.cc | 163 +++++++++++++++++++++++++ 3 files changed, 356 insertions(+), 15 deletions(-) create mode 100644 tests/solver/Test_dwf_mrhs_cg_mpi.cc create mode 100644 tests/solver/Test_dwf_mrhs_cg_mpieo.cc diff --git a/tests/solver/Test_dwf_mrhs_cg.cc b/tests/solver/Test_dwf_mrhs_cg.cc index 2d2cfcb1..d9215db2 100644 --- a/tests/solver/Test_dwf_mrhs_cg.cc +++ b/tests/solver/Test_dwf_mrhs_cg.cc @@ -38,7 +38,7 @@ int main (int argc, char ** argv) typedef typename DomainWallFermionR::ComplexField ComplexField; typename DomainWallFermionR::ImplParams params; - const int Ls=8; + const int Ls=4; Grid_init(&argc,&argv); @@ -47,29 +47,24 @@ int main (int argc, char ** argv) std::vector mpi_layout = GridDefaultMpi(); std::vector mpi_split (mpi_layout.size(),1); - std::cout << "UGrid (world root)"<RankCount() ; ///////////////////////////////////////////// // Split into 1^4 mpi communicators ///////////////////////////////////////////// - std::cout << "SGrid (world root)"< src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); for(int s=0;sThisRank(); LatticeGaugeField s_Umu(SGrid); FermionField s_src(SFGrid); + FermionField s_src_split(SFGrid); + FermionField s_tmp(SFGrid); FermionField s_res(SFGrid); { @@ -157,6 +156,24 @@ int main (int argc, char ** argv) FGrid->Barrier(); } + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + std::cout << GridLogMessage << " Splitting the grid data "<Barrier(); + if ( n==me ) { + std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<Barrier(); + } + /////////////////////////////////////////////////////////////// // Set up N-solvers as trivially parallel @@ -164,6 +181,7 @@ int main (int argc, char ** argv) RealD mass=0.01; RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); ConjugateGradient CG((1.0e-8/(me+1)),10000); s_res = zero; CG(HermOp,s_src,s_res); - 
/////////////////////////////////////// - // Share the information - /////////////////////////////////////// + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// std::vector iterations(nrhs,0); iterations[me] = CG.IterationsToComplete; for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + 
/////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Set up N-solvers as trivially parallel + /////////////////////////////////////////////////////////////// + RealD mass=0.01; + RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); + DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + 
/////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + std::vector src_e(nrhs,FrbGrid); + std::vector src_o(nrhs,FrbGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_src_e(SFrbGrid); + FermionField s_src_o(SFrbGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Check even odd cases + /////////////////////////////////////////////////////////////// + for(int s=0;s HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< Date: Tue, 10 Oct 2017 10:00:43 +0100 Subject: [PATCH 016/145] Schur staggered --- lib/algorithms/LinearOperator.h | 104 +++++++++- lib/algorithms/iterative/SchurRedBlack.h | 240 +++++++++++++++++++++++ 2 files changed, 342 insertions(+), 2 deletions(-) diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 6cb77296..6e4da248 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -192,10 +192,10 @@ namespace Grid { ni=Mpc(in,tmp); no=MpcDag(tmp,out); } - void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ MpcDagMpc(in,out,n1,n2); } - void HermOp(const Field &in, Field &out){ + virtual void HermOp(const Field &in, Field &out){ RealD n1,n2; HermOpAndNorm(in,out,n1,n2); } @@ -300,6 +300,106 @@ namespace Grid { } }; + // + template + class SchurStaggeredOperator : public SchurOperatorBase { + protected: + Matrix &_Mat; + public: + SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + ComplexD dot; + n2=Mpc(in,out); + dot= innerProduct(in,out); + n1= real(dot); + } + virtual void HermOp(const Field &in, Field &out){ + Mpc(in,out); + } + virtual RealD Mpc (const Field &in, Field &out) { + Field tmp(in._grid); + _Mat.Meooe(in,tmp); + _Mat.MooeeInv(tmp,out); + _Mat.MeooeDag(out,tmp); + _Mat.Mooee(in,out); + return axpy_norm(out,-1.0,tmp,out); + } + virtual RealD MpcDag (const Field &in, Field &out){ + return Mpc(in,out); + } + virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) { + assert(0);// Never need with staggered + } + }; + template using SchurStagOperator = SchurStaggeredOperator; + + // This is specific to (Z)mobius fermions + template + class KappaSimilarityTransform { + public: + + typedef typename Matrix::Coeff_t Coeff_t; + std::vector kappa, kappaDag, kappaInv, kappaInvDag; + + KappaSimilarityTransform (Matrix &zmob) { + for (int 
i=0;i<(int)zmob.bs.size();i++) { + Coeff_t k = 1.0 / ( 2.0 * (zmob.bs[i] *(4 - zmob.M5) + 1.0) ); + kappa.push_back( k ); + kappaDag.push_back( conj(k) ); + kappaInv.push_back( 1.0 / k ); + kappaInvDag.push_back( 1.0 / conj(k) ); + } + } + + template + void sscale(const Lattice& in, Lattice& out, Coeff_t* s) { + GridBase *grid=out._grid; + out.checkerboard = in.checkerboard; + assert(grid->_simd_layout[0] == 1); // should be fine for ZMobius for now + int Ls = grid->_rdimensions[0]; + parallel_for(int ss=0;ssoSites();ss++){ + vobj tmp = s[ss % Ls]*in._odata[ss]; + vstream(out._odata[ss],tmp); + } + } + + RealD sscale_norm(const Field& in, Field& out, Coeff_t* s) { + sscale(in,out,s); + return norm2(out); + } + + virtual RealD M (const Field& in, Field& out) { return sscale_norm(in,out,&kappa[0]); } + virtual RealD MDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaDag[0]);} + virtual RealD MInv (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInv[0]);} + virtual RealD MInvDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInvDag[0]);} + + }; + + template + class SchurDiagTwoKappaOperator : public SchurOperatorBase { + public: + KappaSimilarityTransform _S; + SchurDiagTwoOperator _Mat; + + SchurDiagTwoKappaOperator (Matrix &Mat): _S(Mat), _Mat(Mat) {}; + + virtual RealD Mpc (const Field &in, Field &out) { + Field tmp(in._grid); + + _S.MInv(in,out); + _Mat.Mpc(out,tmp); + return _S.M(tmp,out); + + } + virtual RealD MpcDag (const Field &in, Field &out){ + Field tmp(in._grid); + + _S.MDag(in,out); + _Mat.MpcDag(out,tmp); + return _S.MInvDag(tmp,out); + } + }; + ///////////////////////////////////////////////////////////// // Base classes for functions of operators diff --git a/lib/algorithms/iterative/SchurRedBlack.h b/lib/algorithms/iterative/SchurRedBlack.h index 5caabb4b..b6eab762 100644 --- a/lib/algorithms/iterative/SchurRedBlack.h +++ b/lib/algorithms/iterative/SchurRedBlack.h @@ -63,6 +63,85 @@ Author: Peter Boyle */ namespace Grid { + /////////////////////////////////////////////////////////////////////////////////////////////////////// + // Take a matrix and form a Red Black solver calling a Herm solver + // Use of RB info prevents making SchurRedBlackSolve conform to standard interface + /////////////////////////////////////////////////////////////////////////////////////////////////////// + + template class SchurRedBlackStaggeredSolve { + private: + OperatorFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackStaggeredSolve(OperatorFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurStaggeredOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + 
///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + _Matrix.Mooee(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve; + /////////////////////////////////////////////////////////////////////////////////////////////////////// // Take a matrix and form a Red Black solver calling a Herm solver // Use of RB info prevents making SchurRedBlackSolve conform to standard interface @@ -141,5 +220,166 @@ namespace Grid { } }; + + /////////////////////////////////////////////////////////////////////////////////////////////////////// + // Take a matrix and form a Red Black solver calling a Herm solver + // Use of RB info prevents making SchurRedBlackSolve conform to standard interface + /////////////////////////////////////////////////////////////////////////////////////////////////////// + template class SchurRedBlackDiagTwoSolve { + private: + OperatorFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoSolve(OperatorFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< class SchurRedBlackDiagTwoMixed { + private: + LinearFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoMixed(LinearFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator 
_HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< Date: Tue, 10 Oct 2017 12:02:18 +0100 Subject: [PATCH 017/145] Schur for staggered --- lib/algorithms/LinearOperator.h | 80 +----------- tests/solver/Test_staggered_block_cg_prec.cc | 130 +++++++++++++++++++ tests/solver/Test_staggered_cg_prec.cc | 6 +- 3 files changed, 135 insertions(+), 81 deletions(-) create mode 100644 tests/solver/Test_staggered_block_cg_prec.cc diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 6e4da248..d402c5b7 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -162,15 +162,10 @@ namespace Grid { _Mat.M(in,out); } void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ - ComplexD dot; - _Mat.M(in,out); - dot= innerProduct(in,out); - n1=real(dot); - - dot = innerProduct(out,out); - n2=real(dot); + ComplexD dot= innerProduct(in,out); n1=real(dot); + n2=norm2(out); } void HermOp(const Field &in, Field &out){ _Mat.M(in,out); @@ -309,9 +304,9 @@ namespace Grid { SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ ComplexD dot; - n2=Mpc(in,out); + n2 = Mpc(in,out); dot= innerProduct(in,out); - n1= real(dot); + n1 = real(dot); } virtual void HermOp(const Field &in, Field &out){ Mpc(in,out); @@ -333,73 +328,6 @@ namespace Grid { }; template using SchurStagOperator = SchurStaggeredOperator; - // This is specific to (Z)mobius fermions - template - class KappaSimilarityTransform { - public: - - typedef typename Matrix::Coeff_t Coeff_t; - std::vector kappa, kappaDag, kappaInv, kappaInvDag; - - KappaSimilarityTransform (Matrix &zmob) { - for (int i=0;i<(int)zmob.bs.size();i++) { - Coeff_t k = 1.0 / ( 2.0 * (zmob.bs[i] *(4 - zmob.M5) + 1.0) ); - kappa.push_back( k ); - kappaDag.push_back( conj(k) ); - kappaInv.push_back( 1.0 / k ); - kappaInvDag.push_back( 1.0 / conj(k) ); - } - } - - template - void sscale(const Lattice& in, Lattice& out, Coeff_t* s) { - GridBase *grid=out._grid; - out.checkerboard = in.checkerboard; - assert(grid->_simd_layout[0] == 1); // should be fine for ZMobius for now - int Ls = grid->_rdimensions[0]; - parallel_for(int ss=0;ssoSites();ss++){ - vobj tmp = s[ss % Ls]*in._odata[ss]; - vstream(out._odata[ss],tmp); - } - } - - RealD sscale_norm(const Field& in, Field& out, Coeff_t* s) { - sscale(in,out,s); - return norm2(out); - } - - virtual RealD M (const Field& in, Field& out) { return sscale_norm(in,out,&kappa[0]); } - virtual RealD MDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaDag[0]);} - virtual RealD MInv (const Field& in, Field& out) { return 
sscale_norm(in,out,&kappaInv[0]);} - virtual RealD MInvDag (const Field& in, Field& out) { return sscale_norm(in,out,&kappaInvDag[0]);} - - }; - - template - class SchurDiagTwoKappaOperator : public SchurOperatorBase { - public: - KappaSimilarityTransform _S; - SchurDiagTwoOperator _Mat; - - SchurDiagTwoKappaOperator (Matrix &Mat): _S(Mat), _Mat(Mat) {}; - - virtual RealD Mpc (const Field &in, Field &out) { - Field tmp(in._grid); - - _S.MInv(in,out); - _Mat.Mpc(out,tmp); - return _S.M(tmp,out); - - } - virtual RealD MpcDag (const Field &in, Field &out){ - Field tmp(in._grid); - - _S.MDag(in,out); - _Mat.MpcDag(out,tmp); - return _S.MInvDag(tmp,out); - } - }; - ///////////////////////////////////////////////////////////// // Base classes for functions of operators diff --git a/tests/solver/Test_staggered_block_cg_prec.cc b/tests/solver/Test_staggered_block_cg_prec.cc new file mode 100644 index 00000000..1d0117e0 --- /dev/null +++ b/tests/solver/Test_staggered_block_cg_prec.cc @@ -0,0 +1,130 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_wilson_cg_unprec.cc + + Copyright (C) 2015 + +Author: Azusa Yamaguchi +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + + Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT + }; + +int main (int argc, char ** argv) +{ + typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField; + typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField; + typename ImprovedStaggeredFermion5DR::ImplParams params; + + const int Ls=8; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector seeds({1,2,3,4}); + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + + FermionField src(FGrid); random(pRNG5,src); + FermionField src_o(FrbGrid); pickCheckerboard(Odd,src_o,src); + FermionField result_o(FrbGrid); result_o=zero; + RealD nrm = norm2(src); + + LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu); + + RealD mass=0.003; + ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass); + SchurDiagMooeeOperator HermOp(Ds); + + ConjugateGradient CG(1.0e-8,10000); + int blockDim = 0; + BlockConjugateGradient BCGrQ(BlockCGrQ,blockDim,1.0e-8,10000); + BlockConjugateGradient BCG (BlockCG,blockDim,1.0e-8,10000); + BlockConjugateGradient mCG (CGmultiRHS,blockDim,1.0e-8,10000); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp4d(Ds4d); + FermionField src4d(UGrid); random(pRNG,src4d); + FermionField src4d_o(UrbGrid); pickCheckerboard(Odd,src4d_o,src4d); + FermionField result4d_o(UrbGrid); + + result4d_o=zero; + CG(HermOp4d,src4d_o,result4d_o); + std::cout << GridLogMessage << "************************************************************************ "< Date: Tue, 10 Oct 2017 13:48:51 +0100 Subject: [PATCH 018/145] Christop mods --- lib/algorithms/approx/Chebyshev.h | 42 +++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/lib/algorithms/approx/Chebyshev.h b/lib/algorithms/approx/Chebyshev.h index 2793f138..f8c21a05 100644 --- a/lib/algorithms/approx/Chebyshev.h +++ b/lib/algorithms/approx/Chebyshev.h @@ -8,6 +8,7 @@ Author: Peter Boyle Author: paboyle +Author: Christoph Lehner This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -193,6 +194,47 @@ namespace Grid { return sum; }; + RealD approxD(RealD x) + { + RealD Un; + RealD Unm; + RealD Unp; + + RealD y=( x-0.5*(hi+lo))/(0.5*(hi-lo)); + + RealD U0=1; + RealD U1=2*y; + + RealD sum; + sum = Coeffs[1]*U0; + sum+= Coeffs[2]*U1*2.0; + + Un =U1; + Unm=U0; + for(int i=2;i::quiet_NaN(); + } + // Implement the required interface void operator() (LinearOperatorBase &Linop, const Field &in, Field 
&out) { From a1d80282eca8df8c1c7eb521c48c3aa78ccdb389 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 10 Oct 2017 13:49:31 +0100 Subject: [PATCH 019/145] cb factorise --- lib/algorithms/iterative/SchurRedBlack.h | 27 ++++++++++++++++++------ 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/lib/algorithms/iterative/SchurRedBlack.h b/lib/algorithms/iterative/SchurRedBlack.h index b6eab762..a309386b 100644 --- a/lib/algorithms/iterative/SchurRedBlack.h +++ b/lib/algorithms/iterative/SchurRedBlack.h @@ -53,13 +53,28 @@ Author: Peter Boyle * M psi = eta *********************** *Odd - * i) (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o + * i) D_oo psi_o = L^{-1} eta_o * eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) + * (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o *Even * ii) Mee psi_e + Meo psi_o = src_e * * => sol_e = M_ee^-1 * ( src_e - Meo sol_o )... * + * + * TODO: Other options: + * + * a) change checkerboards for Schur e<->o + * + * Left precon by Moo^-1 + * b) Doo^{dag} M_oo^-dag Moo^-1 Doo psi_0 = (D_oo)^dag M_oo^-dag Moo^-1 L^{-1} eta_o + * eta_o' = (D_oo)^dag M_oo^-dag Moo^-1 (eta_o - Moe Mee^{-1} eta_e) + * + * Right precon by Moo^-1 + * c) M_oo^-dag Doo^{dag} Doo Moo^-1 phi_0 = M_oo^-dag (D_oo)^dag L^{-1} eta_o + * eta_o' = M_oo^-dag (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) + * psi_o = M_oo^-1 phi_o + * TODO: Deflation */ namespace Grid { @@ -155,12 +170,10 @@ namespace Grid { ///////////////////////////////////////////////////// // Wrap the usual normal equations Schur trick ///////////////////////////////////////////////////// - SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver) : - _HermitianRBSolver(HermitianRBSolver) - { - CBfactorise=0; - }; - + SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver,int cb=0) : _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=cb; + }; template void operator() (Matrix & _Matrix,const Field &in, Field &out){ From 1374c943d4cbb493a6a909a54b7c55471b677a32 Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Tue, 10 Oct 2017 13:59:50 +0100 Subject: [PATCH 020/145] Correct Schur operator called --- tests/solver/Test_staggered_block_cg_prec.cc | 4 ++-- tests/solver/Test_staggered_cg_prec.cc | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/solver/Test_staggered_block_cg_prec.cc b/tests/solver/Test_staggered_block_cg_prec.cc index 1d0117e0..0076e5a0 100644 --- a/tests/solver/Test_staggered_block_cg_prec.cc +++ b/tests/solver/Test_staggered_block_cg_prec.cc @@ -76,7 +76,7 @@ int main (int argc, char ** argv) RealD mass=0.003; ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass); - SchurDiagMooeeOperator HermOp(Ds); + SchurStaggeredOperator HermOp(Ds); ConjugateGradient CG(1.0e-8,10000); int blockDim = 0; @@ -88,7 +88,7 @@ int main (int argc, char ** argv) std::cout << GridLogMessage << " Calling 4d CG "< HermOp4d(Ds4d); + SchurStaggeredOperator HermOp4d(Ds4d); FermionField src4d(UGrid); random(pRNG,src4d); FermionField src4d_o(UrbGrid); pickCheckerboard(Odd,src4d_o,src4d); FermionField result4d_o(UrbGrid); diff --git a/tests/solver/Test_staggered_cg_prec.cc b/tests/solver/Test_staggered_cg_prec.cc index 97251435..9a458f1f 100644 --- a/tests/solver/Test_staggered_cg_prec.cc +++ b/tests/solver/Test_staggered_cg_prec.cc @@ -79,7 +79,7 @@ int main (int argc, char ** argv) pickCheckerboard(Odd,src_o,src); res_o=zero; - SchurDiagMooeeOperator HermOpEO(Ds); + SchurStaggeredOperator HermOpEO(Ds); ConjugateGradient CG(1.0e-8,10000); CG(HermOpEO,src_o,res_o); 
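The staggered Schur pieces introduced above fit together as follows. SchurStaggeredOperator exposes the even-odd preconditioned operator on the odd checkerboard, Mpc(in) = Mooee(in) - MeooeDag(MooeeInv(Meooe(in))), and treats it as Hermitian: HermOp applies Mpc directly and MpcDag is identical to Mpc, so plain CG runs on Mpc rather than on MpcDag*Mpc. SchurRedBlackStaggeredSolve wraps a Hermitian solver around it; it splits the checkerboards, prepares the odd source src_o' = Moo (src_o - Moe Mee^{-1} src_e), solves on the odd sites, and reconstructs sol_e = Mee^{-1} (src_e - Meo sol_o). A minimal usage sketch follows; it assumes the 4d gauge field Umu, the grids UGrid/UrbGrid and a full-grid source src are set up as in the tests above, and it mirrors the test code rather than defining anything new.

    // Minimal sketch: staggered even-odd preconditioned CG solve.
    // Assumes Umu, UGrid, UrbGrid and src exist as in Test_staggered_cg_prec.cc.
    typedef ImprovedStaggeredFermionR::FermionField FermionField;

    RealD mass = 0.003;
    ImprovedStaggeredFermionR Ds(Umu, Umu, *UGrid, *UrbGrid, mass);

    // Even-odd preconditioned operator on the odd checkerboard; CG acts on it directly.
    SchurStaggeredOperator<ImprovedStaggeredFermionR, FermionField> HermOp(Ds);
    ConjugateGradient<FermionField> CG(1.0e-8, 10000);

    // Splits checkerboards, builds src_o' = Moo (src_o - Moe Mee^{-1} src_e),
    // solves on the odd sites, reconstructs sol_e = Mee^{-1} (src_e - Meo sol_o).
    SchurRedBlackStaggeredSolve<FermionField> SchurSolver(CG);

    FermionField result(UGrid);
    result = zero;
    SchurSolver(Ds, src, result);  // full-grid source in, full-grid solution out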
From bf58557fb1ec710c766e19c9a8809b0a352de239 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 10 Oct 2017 14:15:11 +0100 Subject: [PATCH 021/145] Block compressed Lanczos --- lib/algorithms/LinearOperator.h | 16 +- .../BlockImplicitlyRestartedLanczos.h | 754 ++++++++++++ .../BlockProjector.h | 143 +++ .../BlockedGrid.h | 401 ++++++ .../FieldBasisVector.h | 163 +++ .../FieldVectorIO.h | 1085 +++++++++++++++++ .../action/fermion/DomainWallEOFAFermion.cc | 12 +- lib/qcd/action/fermion/MobiusEOFAFermion.cc | 14 +- tests/solver/Params.h | 136 +++ tests/solver/Test_dwf_compressed_lanczos.cc | 727 +++++++++++ 10 files changed, 3432 insertions(+), 19 deletions(-) create mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h create mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h create mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h create mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h create mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldVectorIO.h create mode 100644 tests/solver/Params.h create mode 100644 tests/solver/Test_dwf_compressed_lanczos.cc diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index d402c5b7..f1b8820e 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -207,7 +207,6 @@ namespace Grid { void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); } - }; template class SchurDiagMooeeOperator : public SchurOperatorBase { @@ -265,7 +264,6 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; - template class SchurDiagTwoOperator : public SchurOperatorBase { protected: @@ -294,8 +292,15 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; - - // + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta + // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi + /////////////////////////////////////////////////////////////////////////////////////////////////// + template using SchurDiagOneRH = SchurDiagTwoOperator ; + template using SchurDiagOneLH = SchurDiagOneOperator ; + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Staggered use + /////////////////////////////////////////////////////////////////////////////////////////////////// template class SchurStaggeredOperator : public SchurOperatorBase { protected: @@ -303,9 +308,8 @@ namespace Grid { public: SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ - ComplexD dot; n2 = Mpc(in,out); - dot= innerProduct(in,out); + ComplexD dot= innerProduct(in,out); n1 = real(dot); } virtual void HermOp(const Field &in, Field &out){ diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h new file mode 100644 index 00000000..82a00efa --- /dev/null +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h @@ -0,0 +1,754 @@ + /************************************************************************************* + + Grid physics library, 
www.github.com/paboyle/Grid + + Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h + + Copyright (C) 2015 + +Author: Peter Boyle +Author: paboyle +Author: Chulwoo Jung +Author: Christoph Lehner + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_BIRL_H +#define GRID_BIRL_H + +#include //memset + +#include +#include + +#include +#include +#include +#include + +namespace Grid { + +///////////////////////////////////////////////////////////// +// Implicitly restarted lanczos +///////////////////////////////////////////////////////////// + + template + class BlockImplicitlyRestartedLanczos { + + const RealD small = 1.0e-16; +public: + int lock; + int get; + int Niter; + int converged; + + int Nminres; // Minimum number of restarts; only check for convergence after + int Nstop; // Number of evecs checked for convergence + int Nk; // Number of converged sought + int Np; // Np -- Number of spare vecs in kryloc space + int Nm; // Nm -- total number of vectors + + int orth_period; + + RealD OrthoTime; + + RealD eresid, betastp; + SortEigen _sort; + LinearFunction &_HermOp; + LinearFunction &_HermOpTest; + ///////////////////////// + // Constructor + ///////////////////////// + + BlockImplicitlyRestartedLanczos( + LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _Niter, // Max iterations + int _Nminres, int _orth_period = 1) : + _HermOp(HermOp), + _HermOpTest(HermOpTest), + Nstop(_Nstop), + Nk(_Nk), + Nm(_Nm), + eresid(_eresid), + betastp(_betastp), + Niter(_Niter), + Nminres(_Nminres), + orth_period(_orth_period) + { + Np = Nm-Nk; assert(Np>0); + }; + + BlockImplicitlyRestartedLanczos( + LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _Niter, // Max iterations + int _Nminres, + int _orth_period = 1) : + _HermOp(HermOp), + _HermOpTest(HermOpTest), + Nstop(_Nk), + Nk(_Nk), + Nm(_Nm), + eresid(_eresid), + betastp(_betastp), + Niter(_Niter), + Nminres(_Nminres), + orth_period(_orth_period) + { + Np = Nm-Nk; assert(Np>0); + }; + + +/* Saad PP. 195 +1. Choose an initial vector v1 of 2-norm unity. Set β1 ≡ 0, v0 ≡ 0 +2. For k = 1,2,...,m Do: +3. wk:=Avk−βkv_{k−1} +4. αk:=(wk,vk) // +5. wk:=wk−αkvk // wk orthog vk +6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop +7. vk+1 := wk/βk+1 +8. 
EndDo + */ + void step(std::vector& lmd, + std::vector& lme, + BasisFieldVector& evec, + Field& w,int Nm,int k) + { + assert( k< Nm ); + + GridStopWatch gsw_op,gsw_o; + + Field& evec_k = evec[k]; + + gsw_op.Start(); + _HermOp(evec_k,w); + gsw_op.Stop(); + + if(k>0){ + w -= lme[k-1] * evec[k-1]; + } + + ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) + RealD alph = real(zalph); + + w = w - alph * evec_k;// 5. wk:=wk−αkvk + + RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop + // 7. vk+1 := wk/βk+1 + + std::cout<0 && k % orth_period == 0) { + orthogonalize(w,evec,k); // orthonormalise + } + gsw_o.Stop(); + + if(k < Nm-1) { + evec[k+1] = w; + } + + std::cout << GridLogMessage << "Timing: operator=" << gsw_op.Elapsed() << + " orth=" << gsw_o.Elapsed() << std::endl; + + } + + void qr_decomp(std::vector& lmd, + std::vector& lme, + int Nk, + int Nm, + std::vector& Qt, + RealD Dsh, + int kmin, + int kmax) + { + int k = kmin-1; + RealD x; + + RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]); + RealD c = ( lmd[k] -Dsh) *Fden; + RealD s = -lme[k] *Fden; + + RealD tmpa1 = lmd[k]; + RealD tmpa2 = lmd[k+1]; + RealD tmpb = lme[k]; + + lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb; + lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb; + lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb; + x =-s*lme[k+1]; + lme[k+1] = c*lme[k+1]; + + for(int i=0; i& lmd, + std::vector& lme, + int N1, + int N2, + std::vector& Qt, + GridBase *grid){ + + std::cout << GridLogMessage << "diagonalize_lapack start\n"; + GridStopWatch gsw; + + const int size = Nm; + // tevals.resize(size); + // tevecs.resize(size); + LAPACK_INT NN = N1; + std::vector evals_tmp(NN); + std::vector evec_tmp(NN*NN); + memset(&evec_tmp[0],0,sizeof(double)*NN*NN); + // double AA[NN][NN]; + std::vector DD(NN); + std::vector EE(NN); + for (int i = 0; i< NN; i++) + for (int j = i - 1; j <= i + 1; j++) + if ( j < NN && j >= 0 ) { + if (i==j) DD[i] = lmd[i]; + if (i==j) evals_tmp[i] = lmd[i]; + if (j==(i-1)) EE[j] = lme[j]; + } + LAPACK_INT evals_found; + LAPACK_INT lwork = ( (18*NN) > (1+4*NN+NN*NN)? 
(18*NN):(1+4*NN+NN*NN)) ; + LAPACK_INT liwork = 3+NN*10 ; + std::vector iwork(liwork); + std::vector work(lwork); + std::vector isuppz(2*NN); + char jobz = 'V'; // calculate evals & evecs + char range = 'I'; // calculate all evals + // char range = 'A'; // calculate all evals + char uplo = 'U'; // refer to upper half of original matrix + char compz = 'I'; // Compute eigenvectors of tridiagonal matrix + std::vector ifail(NN); + LAPACK_INT info; + // int total = QMP_get_number_of_nodes(); + // int node = QMP_get_node_number(); + // GridBase *grid = evec[0]._grid; + int total = grid->_Nprocessors; + int node = grid->_processor; + int interval = (NN/total)+1; + double vl = 0.0, vu = 0.0; + LAPACK_INT il = interval*node+1 , iu = interval*(node+1); + if (iu > NN) iu=NN; + double tol = 0.0; + if (1) { + memset(&evals_tmp[0],0,sizeof(double)*NN); + if ( il <= NN){ + std::cout << GridLogMessage << "dstegr started" << std::endl; + gsw.Start(); + dstegr(&jobz, &range, &NN, + (double*)&DD[0], (double*)&EE[0], + &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A' + &tol, // tolerance + &evals_found, &evals_tmp[0], (double*)&evec_tmp[0], &NN, + &isuppz[0], + &work[0], &lwork, &iwork[0], &liwork, + &info); + gsw.Stop(); + std::cout << GridLogMessage << "dstegr completed in " << gsw.Elapsed() << std::endl; + for (int i = iu-1; i>= il-1; i--){ + evals_tmp[i] = evals_tmp[i - (il-1)]; + if (il>1) evals_tmp[i-(il-1)]=0.; + for (int j = 0; j< NN; j++){ + evec_tmp[i*NN + j] = evec_tmp[(i - (il-1)) * NN + j]; + if (il>1) evec_tmp[(i-(il-1)) * NN + j]=0.; + } + } + } + { + // QMP_sum_double_array(evals_tmp,NN); + // QMP_sum_double_array((double *)evec_tmp,NN*NN); + grid->GlobalSumVector(&evals_tmp[0],NN); + grid->GlobalSumVector(&evec_tmp[0],NN*NN); + } + } + // cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order. 
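+      // Illustration (arbitrary values): with NN = 3 and LAPACK returning
+      // evals_tmp = {0.1, 0.5, 2.3} in ascending order, the copy-back below
+      // leaves lmd = {2.3, 0.5, 0.1} and reverses the corresponding rows of Qt,
+      // so each eigenvalue stays paired with its eigenvector.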
+ for(int i=0;i& lmd, + std::vector& lme, + int N2, + int N1, + std::vector& Qt, + GridBase *grid) + { + +#ifdef USE_LAPACK_IRL + const int check_lapack=0; // just use lapack if 0, check against lapack if 1 + + if(!check_lapack) + return diagonalize_lapack(lmd,lme,N2,N1,Qt,grid); + + std::vector lmd2(N1); + std::vector lme2(N1); + std::vector Qt2(N1*N1); + for(int k=0; k= kmin; --j){ + RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); + if(fabs(lme[j-1])+dds > dds){ + kmax = j+1; + goto continued; + } + } + Niter = iter; +#ifdef USE_LAPACK_IRL + if(check_lapack){ + const double SMALL=1e-8; + diagonalize_lapack(lmd2,lme2,N2,N1,Qt2,grid); + std::vector lmd3(N2); + for(int k=0; kSMALL) std::cout<SMALL) std::cout<SMALL) std::cout< dds){ + kmin = j+1; + break; + } + } + } + std::cout< + static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void orthogonalize(Field& w, + BasisFieldVector& evec, + int k) + { + double t0=-usecond()/1e6; + + evec.orthogonalize(w,k); + + normalise(w); + t0+=usecond()/1e6; + OrthoTime +=t0; + } + + void setUnit_Qt(int Nm, std::vector &Qt) { + for(int i=0; i K P = M − K † +Compute the factorization AVM = VM HM + fM eM +repeat + Q=I + for i = 1,...,P do + QiRi =HM −θiI Q = QQi + H M = Q †i H M Q i + end for + βK =HM(K+1,K) σK =Q(M,K) + r=vK+1βK +rσK + VK =VM(1:M)Q(1:M,1:K) + HK =HM(1:K,1:K) + →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM +until convergence +*/ + + void calc(std::vector& eval, + BasisFieldVector& evec, + const Field& src, + int& Nconv, + bool reverse, + int SkipTest) + { + + GridBase *grid = evec._v[0]._grid;//evec.get(0 + evec_offset)._grid; + assert(grid == src._grid); + + std::cout< lme(Nm); + std::vector lme2(Nm); + std::vector eval2(Nm); + std::vector eval2_copy(Nm); + std::vector Qt(Nm*Nm); + + + Field f(grid); + Field v(grid); + + int k1 = 1; + int k2 = Nk; + + Nconv = 0; + + RealD beta_k; + + // Set initial vector + evec[0] = src; + normalise(evec[0]); + std:: cout<0); + evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm); + + t1=usecond()/1e6; + std::cout<= Nminres) { + std::cout << GridLogMessage << "Rotation to test convergence " << std::endl; + + Field ev0_orig(grid); + ev0_orig = evec[0]; + + evec.rotate(Qt,0,Nk,0,Nk,Nm); + + { + std::cout << GridLogMessage << "Test convergence" << std::endl; + Field B(grid); + + for(int j = 0; j=Nstop || beta_k < betastp){ + goto converged; + } + + std::cout << GridLogMessage << "Rotate back" << std::endl; + //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; + { + Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); + for (int k=0;k QtI(Nm*Nm); + for (int k=0;k +class BlockProjector { +public: + + BasisFieldVector& _evec; + BlockedGrid& _bgrid; + + BlockProjector(BasisFieldVector& evec, BlockedGrid& bgrid) : _evec(evec), _bgrid(bgrid) { + } + + void createOrthonormalBasis(RealD thres = 0.0) { + + GridStopWatch sw; + sw.Start(); + + int cnt = 0; + +#pragma omp parallel shared(cnt) + { + int lcnt = 0; + +#pragma omp for + for (int b=0;b<_bgrid._o_blocks;b++) { + + for (int i=0;i<_evec._Nm;i++) { + + auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]); + + // |i> -= |j> + for (int j=0;j + void coarseToFine(const CoarseField& in, Field& out) { + + out = zero; + out.checkerboard = _evec._v[0].checkerboard; + + int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + 
_bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out); + } + } + + } + + template + void fineToCoarse(const Field& in, CoarseField& out) { + + out = zero; + + int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + + + Field tmp(_bgrid._grid); + tmp = in; + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + // |rhs> -= |j> + auto c = _bgrid.block_sp(b,_evec._v[j],tmp); + _bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable + out._odata[b]._internal._internal[j] = c; + } + } + + } + + template + void deflateFine(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + result = zero; + for (int i=0;i + void deflateCoarse(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + CoarseField src_coarse(_coef._v[0]._grid); + CoarseField result_coarse = src_coarse; + result_coarse = zero; + fineToCoarse(src_orig,src_coarse); + for (int i=0;i + void deflate(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + // Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now. + deflateCoarse(_coef,eval,N,src_orig,result); + } + +}; +} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h new file mode 100644 index 00000000..821272de --- /dev/null +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h @@ -0,0 +1,401 @@ +namespace Grid { + +template +class BlockedGrid { +public: + GridBase* _grid; + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + + std::vector _bs; // block size + std::vector _nb; // number of blocks + std::vector _l; // local dimensions irrespective of cb + std::vector _l_cb; // local dimensions of checkerboarded vector + std::vector _l_cb_o; // local dimensions of inner checkerboarded vector + std::vector _bs_cb; // block size in checkerboarded vector + std::vector _nb_o; // number of blocks of simd o-sites + + int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites; + + BlockedGrid(GridBase* grid, const std::vector& block_size) : + _grid(grid), _bs(block_size), _nd((int)_bs.size()), + _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size), + _l_cb_o(block_size), _bs_cb(block_size) { + + _blocks = 1; + _o_blocks = 1; + _l = grid->FullDimensions(); + _l_cb = grid->LocalDimensions(); + _l_cb_o = grid->_rdimensions; + + _cf_size = 1; + _block_sites = 1; + for (int i=0;i<_nd;i++) { + _l[i] /= grid->_processors[i]; + + assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize + + int r = _l[i] / _l_cb[i]; + assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize + _bs_cb[i] = _bs[i] / r; + _block_sites *= _bs_cb[i]; + _nb[i] = _l[i] / _bs[i]; + _nb_o[i] = _nb[i] / _grid->_simd_layout[i]; + if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize + std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl; + assert(0); + } + _blocks *= _nb[i]; + _o_blocks *= _nb_o[i]; + _cf_size *= _l[i]; + } + + _cf_size *= 12 / 2; + _cf_block_size = _cf_size / _blocks; + 
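+    // _cf_size is the number of complex spin-colour components of a fermion
+    // field on one checkerboard of the node-local volume (12 per site, as in
+    // the peek/poke routines below, halved for the checkerboard, hence the
+    // 12/2 factor above); _cf_block_size is that count per coordinate block,
+    // and _cf_o_block_size the count per SIMD-coalesced o-block.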
_cf_o_block_size = _cf_size / _o_blocks; + + std::cout << GridLogMessage << "BlockedGrid:" << std::endl; + std::cout << GridLogMessage << " _l = " << _l << std::endl; + std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl; + std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl; + std::cout << GridLogMessage << " _bs = " << _bs << std::endl; + std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl; + + std::cout << GridLogMessage << " _nb = " << _nb << std::endl; + std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl; + std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl; + std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl; + std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl; + std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl; + std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl; + std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl; + std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl; + + // _grid->Barrier(); + //abort(); + } + + void block_to_coor(int b, std::vector& x0) { + + std::vector bcoor; + bcoor.resize(_nd); + x0.resize(_nd); + assert(b < _o_blocks); + Lexicographic::CoorFromIndex(bcoor,b,_nb_o); + int i; + + for (i=0;i<_nd;i++) { + x0[i] = bcoor[i]*_bs_cb[i]; + } + + //std::cout << GridLogMessage << "Map block b -> " << x0 << std::endl; + + } + + void block_site_to_o_coor(const std::vector& x0, std::vector& coor, int i) { + Lexicographic::CoorFromIndex(coor,i,_bs_cb); + for (int j=0;j<_nd;j++) + coor[j] += x0[j]; + } + + int block_site_to_o_site(const std::vector& x0, int i) { + std::vector coor; coor.resize(_nd); + block_site_to_o_coor(x0,coor,i); + Lexicographic::IndexFromCoor(coor,i,_l_cb_o); + return i; + } + + vCoeff_t block_sp(int b, const Field& x, const Field& y) { + + std::vector x0; + block_to_coor(b,x0); + + vCoeff_t ret = 0.0; + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss])); + } + + return ret; + + } + + vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) { + + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + std::vector< ComplexD > ret(nsimd); + for (int i=0;i + void vcaxpy(iScalar& r,const vCoeff_t& a,const iScalar& x,const iScalar& y) { + vcaxpy(r._internal,a,x._internal,y._internal); + } + + template + void vcaxpy(iVector& r,const vCoeff_t& a,const iVector& x,const iVector& y) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]); + } + + } + + void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) { + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + int n = lsize / nsimd; + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int 
l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l + void vcscale(iScalar& r,const vCoeff_t& a,const iScalar& x) { + vcscale(r._internal,a,x._internal); + } + + template + void vcscale(iVector& r,const vCoeff_t& a,const iVector& x) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcscale(ret._odata[ss],a,ret._odata[ss]); + } + } + + void getCanonicalBlockOffset(int cb, std::vector& x0) { + const int ndim = 5; + assert(_nb.size() == ndim); + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + x0.resize(ndim); + + assert(cb >= 0); + assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]); + + Lexicographic::CoorFromIndex(x0,cb,_nbc); + int i; + + for (i=0;i& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]); + } + } + } + } + + void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + + buf.resize(_cf_block_size * 2); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + buf[2*ti+0] = ld.real(); + buf[2*ti+1] = ld.imag(); + } + } + } + } + + int globalToLocalCanonicalBlock(int slot,const std::vector& src_nodes,int nb) { + // processor 
coordinate + int _nd = (int)src_nodes.size(); + std::vector _src_nodes = src_nodes; + std::vector pco(_nd); + Lexicographic::CoorFromIndex(pco,slot,_src_nodes); + std::vector cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] }; + + // get local block + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + assert(_nd == 5); + std::vector c_src_local_blocks(_nd); + for (int i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0); + c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i]; + } + std::vector cbcoor(_nd); // coordinate of block in slot in canonical form + Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks); + + // cpco, cbcoor + std::vector clbcoor(_nd); + for (int i=0;i<_nd;i++) { + int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate + int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid + int tpcoor = _grid->_processor_coor[(i+1)%5]; + if (pcoor != tpcoor) + return -1; + clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i + } + + int lnb; + Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc); + //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl; + return lnb; + } + + + }; + +} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h new file mode 100644 index 00000000..e715fc25 --- /dev/null +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h @@ -0,0 +1,163 @@ +namespace Grid { + +template +class BasisFieldVector { + public: + int _Nm; + + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + + std::vector _v; // _Nfull vectors + + void report(int n,GridBase* value) { + + std::cout << GridLogMessage << "BasisFieldVector allocated:\n"; + std::cout << GridLogMessage << " Delta N = " << n << "\n"; + std::cout << GridLogMessage << " Size of full vectors (size) = " << + ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) 
<< " GB\n"; + std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl; + + value->Barrier(); + + if (value->IsBoss()) { + system("cat /proc/meminfo"); + } + + value->Barrier(); + + } + + BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) { + report(Nm,value); + } + + ~BasisFieldVector() { + } + + Field& operator[](int i) { + return _v[i]; + } + + void orthogonalize(Field& w, int k) { + for(int j=0; j& Qt,int j0, int j1, int k0,int k1,int Nm) { + + GridBase* grid = _v[0]._grid; + +#pragma omp parallel + { + std::vector < vobj > B(Nm); + +#pragma omp for + for(int ss=0;ss < grid->oSites();ss++){ + for(int j=j0; j _Nm) + _v.reserve(n); + + _v.resize(n,_v[0]._grid); + + if (n < _Nm) + _v.shrink_to_fit(); + + report(n - _Nm,_v[0]._grid); + + _Nm = n; + } + + std::vector getIndex(std::vector& sort_vals) { + + std::vector idx(sort_vals.size()); + iota(idx.begin(), idx.end(), 0); + + // sort indexes based on comparing values in v + sort(idx.begin(), idx.end(), + [&sort_vals](int i1, int i2) {return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);}); + + return idx; + } + + void reorderInPlace(std::vector& sort_vals, std::vector& idx) { + GridStopWatch gsw; + gsw.Start(); + + int nswaps = 0; + for (size_t i=0;i& sort_vals, bool reverse) { + + std::vector idx = getIndex(sort_vals); + if (reverse) + std::reverse(idx.begin(), idx.end()); + + reorderInPlace(sort_vals,idx); + + } + + void deflate(const std::vector& eval,const Field& src_orig,Field& result) { + result = zero; + int N = (int)_v.size(); + for (int i=0;i step) { + crc = crc32(crc,&data[blk],step); + blk += step; + len -= step; + } + + crc = crc32(crc,&data[blk],len); + return crc; + + } + + static int get_bfm_index( int* pos, int co, int* s ) { + + int ls = s[0]; + int NtHalf = s[4] / 2; + int simd_coor = pos[4] / NtHalf; + int regu_coor = (pos[1] + s[1] * (pos[2] + s[2] * ( pos[3] + s[3] * (pos[4] % NtHalf) ) )) / 2; + + return regu_coor * ls * 48 + pos[0] * 48 + co * 4 + simd_coor * 2; + } + + static void get_read_geometry(const GridBase* _grid,const std::vector& cnodes, + std::map >& slots, + std::vector& slot_lvol, + std::vector& lvol, + int64_t& slot_lsites,int& ntotal) { + + int _nd = (int)cnodes.size(); + std::vector nodes = cnodes; + + slots.clear(); + slot_lvol.clear(); + lvol.clear(); + + int i; + ntotal = 1; + int64_t lsites = 1; + slot_lsites = 1; + for (i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % nodes[i] == 0); + slot_lvol.push_back(_grid->_fdimensions[i] / nodes[i]); + lvol.push_back(_grid->_fdimensions[i] / _grid->_processors[i]); + lsites *= lvol.back(); + slot_lsites *= slot_lvol.back(); + ntotal *= nodes[i]; + } + + std::vector lcoor, gcoor, scoor; + lcoor.resize(_nd); gcoor.resize(_nd); scoor.resize(_nd); + + // create mapping of indices to slots + for (int lidx = 0; lidx < lsites; lidx++) { + Lexicographic::CoorFromIndex(lcoor,lidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + } + int slot; + Lexicographic::IndexFromCoor(scoor,slot,nodes); + auto sl = slots.find(slot); + if (sl == slots.end()) + slots[slot] = std::vector(); + slots[slot].push_back(lidx); + } + } + + static void canonical_block_to_coarse_coordinates(GridBase* _coarsegrid,int nb,int& ii,int& oi) { + // canonical nb needs to be mapped in a coordinate on my coarsegrid (ii,io) + std::vector _l = _coarsegrid->LocalDimensions(); + std::vector _cl = { _l[1], _l[2], _l[3], _l[4], _l[0] }; + std::vector 
_cc(_l.size()); + Lexicographic::CoorFromIndex(_cc,nb,_cl); + std::vector _c = { _cc[4], _cc[0], _cc[1], _cc[2], _cc[3] }; + ii = _coarsegrid->iIndex(_c); + oi = _coarsegrid->oIndex(_c); + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir, const std::vector& cnodes) { + + GridBase* _grid = ret._v[0]._grid; + + std::map > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + get_read_geometry(_grid,cnodes, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // this is slow code to read the argonne file format for debugging purposes + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Read " << dir << " nodes = " << cnodes << std::endl; + std::cout << GridLogMessage << " lvol = " << lvol << std::endl; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + // now load one slot at a time and fill the vector + for (auto sl=slots.begin();sl!=slots.end();sl++) { + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + sprintf(buf,"%s/checksums.txt",dir); printf("read_argonne: Reading from %s\n",buf); + FILE* f = fopen(buf,"rt"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + for (int l=0;l<3+slot;l++) + fgets(buf,sizeof(buf),f); + uint32_t crc_exp = strtol(buf, NULL, 16); + fclose(f); + + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + fseeko(f,0,SEEK_END); + off_t total_size = ftello(f); + fseeko(f,0,SEEK_SET); + + int64_t size = slot_lsites / 2 * 24*4; + rdata.resize(size); + + assert(total_size % size == 0); + + int _Nfull = total_size / size; + ret._v.resize(_Nfull,ret._v[0]); + ret._Nm = _Nfull; + + uint32_t crc = 0x0; + GridStopWatch gsw,gsw2; + for (int nev = 0;nev < _Nfull;nev++) { + + gsw.Start(); + assert(fread(&rdata[0],size,1,f) == 1); + gsw.Stop(); + + gsw2.Start(); + crc = crc32_threaded((unsigned char*)&rdata[0],size,crc); + gsw2.Stop(); + + for (int i=0;i lcoor, gcoor, scoor, slcoor; + lcoor.resize(_nd); gcoor.resize(_nd); + slcoor.resize(_nd); scoor.resize(_nd); + +#pragma omp for + for (int64_t lidx = 0; lidx < idx.size(); lidx++) { + int llidx = idx[lidx]; + Lexicographic::CoorFromIndex(lcoor,llidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + slcoor[i] = gcoor[i] - scoor[i]*slot_lvol[i]; + } + + if ((lcoor[1]+lcoor[2]+lcoor[3]+lcoor[4]) % 2 == 1) { + // poke + iScalar, 4> > sc; + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + sc()(s)(c) = *(std::complex*)&rdata[get_bfm_index(&slcoor[0],c+s*3, &slot_lvol[0] )]; + + pokeLocalSite(sc,ret._v[nev],lcoor); + } + + } + } + } + + fclose(f); + std::cout << GridLogMessage << "Loading slot " << slot << " with " << idx.size() << " points and " + << _Nfull << " vectors in " + << gsw.Elapsed() << " at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw.useconds()*1000.*1000. ) + << " GB/s " << " crc32 = " << std::hex << crc << " crc32_expected = " << crc_exp << std::dec + << " computed at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw2.useconds()*1000.*1000. 
) + << " GB/s " + << std::endl; + + assert(crc == crc_exp); + } + + _grid->Barrier(); + std::cout << GridLogMessage << "Loading complete" << std::endl; + + return true; + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir) { + + + GridBase* _grid = ret._v[0]._grid; + + char buf[4096]; + sprintf(buf,"%s/nodes.txt",dir); + FILE* f = fopen(buf,"rt"); + if (!f) { + if (_grid->IsBoss()) { + fprintf(stderr,"Attempting to load eigenvectors without secifying node layout failed due to absence of nodes.txt\n"); + fflush(stderr); + } + return false; + } + + + std::vector nodes((int)_grid->_processors.size()); + for (int i =0;i<(int)_grid->_processors.size();i++) + assert(fscanf(f,"%d\n",&nodes[i])==1); + fclose(f); + + return read_argonne(ret,dir,nodes); + } + + static void flush_bytes(FILE* f, std::vector& fbuf) { + if (fbuf.size()) { + + if (fwrite(&fbuf[0],fbuf.size(),1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)fbuf.size() / 1024./1024./1024.); + exit(2); + } + + fbuf.resize(0); + + } + } + + static void write_bytes(void* buf, int64_t s, FILE* f, std::vector& fbuf, uint32_t& crc) { + static double data_counter = 0.0; + static GridStopWatch gsw_crc, gsw_flush1,gsw_flush2,gsw_write,gsw_memcpy; + if (s == 0) + return; + + // checksum + gsw_crc.Start(); + crc = crc32_threaded((unsigned char*)buf,s,crc); + gsw_crc.Stop(); + + if (s > fbuf.capacity()) { + // cannot buffer this, so first flush current buffer contents and then write this directly to file + gsw_flush1.Start(); + flush_bytes(f,fbuf); + gsw_flush1.Stop(); + + gsw_write.Start(); + if (fwrite(buf,s,1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)s / 1024./1024./1024.); + exit(2); + } + gsw_write.Stop(); + + } + + // no room left in buffer, flush to disk + if (fbuf.size() + s > fbuf.capacity()) { + gsw_flush2.Start(); + flush_bytes(f,fbuf); + gsw_flush2.Stop(); + } + + // then fill buffer again + { + gsw_memcpy.Start(); + size_t t = fbuf.size(); + fbuf.resize(t + s); + memcpy(&fbuf[t],buf,s); + gsw_memcpy.Stop(); + } + + data_counter += (double)s; + if (data_counter > 1024.*1024.*20.) { + std::cout << GridLogMessage << "Writing " << ((double)data_counter / 1024./1024./1024.) 
<< " GB at" + " crc = " << gsw_crc.Elapsed() << " flush1 = " << gsw_flush1.Elapsed() << " flush2 = " << gsw_flush2.Elapsed() << + " write = " << gsw_write.Elapsed() << " memcpy = " << gsw_memcpy.Elapsed() << std::endl; + data_counter = 0.0; + gsw_crc.Reset(); + gsw_write.Reset(); + gsw_memcpy.Reset(); + gsw_flush1.Reset(); + gsw_flush2.Reset(); + } + } + + static void write_floats(FILE* f, std::vector& fbuf, uint32_t& crc, float* buf, int64_t n) { + write_bytes(buf,n*sizeof(float),f,fbuf,crc); + } + + static void read_floats(char* & ptr, float* out, int64_t n) { + float* in = (float*)ptr; + ptr += 4*n; + + for (int64_t i=0;i 0, [0,6] -> 1; reconstruct 0 -> -3, 1-> 3 + // + // N=2 + // [-6,-2] -> 0, [-2,2] -> 1, [2,6] -> 2; reconstruct 0 -> -4, 1->0, 2->4 + int ret = (int) ( (float)(N+1) * ( (in - min) / (max - min) ) ); + if (ret == N+1) { + ret = N; + } + return ret; + } + + static float fp_unmap(int val, float min, float max, int N) { + return min + (float)(val + 0.5) * (max - min) / (float)( N + 1 ); + } + +#define SHRT_UMAX 65535 +#define FP16_BASE 1.4142135623730950488 +#define FP16_COEF_EXP_SHARE_FLOATS 10 + static float unmap_fp16_exp(unsigned short e) { + float de = (float)((int)e - SHRT_UMAX / 2); + return ::pow( FP16_BASE, de ); + } + + // can assume that v >=0 and need to guarantee that unmap_fp16_exp(map_fp16_exp(v)) >= v + static unsigned short map_fp16_exp(float v) { + // float has exponents 10^{-44.85} .. 10^{38.53} + int exp = (int)ceil(::log(v) / ::log(FP16_BASE)) + SHRT_UMAX / 2; + if (exp < 0 || exp > SHRT_UMAX) { + fprintf(stderr,"Error in map_fp16_exp(%g,%d)\n",v,exp); + exit(3); + } + + return (unsigned short)exp; + } + + template + static void read_floats_fp16(char* & ptr, OPT* out, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* in = (unsigned short*)ptr; + ptr += 2*(n+nsites); + + // do for each site + for (int64_t site = 0;site + static void write_floats_fp16(FILE* f, std::vector& fbuf, uint32_t& crc, OPT* in, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* buf = (unsigned short*)malloc( sizeof(short) * (n + nsites) ); + if (!buf) { + fprintf(stderr,"Out of mem\n"); + exit(1); + } + + // do for each site +#pragma omp parallel for + for (int64_t site = 0;site max) + max = fabs(ev[i]); + } + + unsigned short exp = map_fp16_exp(max); + max = unmap_fp16_exp(exp); + min = -max; + + *bptr++ = exp; + + for (int i=0;i SHRT_UMAX) { + fprintf(stderr,"Assert failed: val = %d (%d), ev[i] = %.15g, max = %.15g, exp = %d\n",val,SHRT_UMAX,ev[i],max,(int)exp); + exit(48); + } + *bptr++ = (unsigned short)val; + } + + } + + write_bytes(buf,sizeof(short)*(n + nsites),f,fbuf,crc); + + free(buf); + } + + template + static bool read_compressed_vectors(const char* dir,BlockProjector& pr,BasisFieldVector& coef, int ngroups = 1) { + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + std::cout << GridLogMessage << "Ready on host " << hostname << " with " << ngroups << " reader groups" << std::endl; + + // first read metadata + char buf[4096]; + sprintf(buf,"%s/metadata.txt",dir); + + std::vector s,b,nb,nn,crc32; + s.resize(5); b.resize(5); nb.resize(5); nn.resize(5); + uint32_t neig, nkeep, nkeep_single, blocks, _FP16_COEF_EXP_SHARE_FLOATS; + uint32_t 
nprocessors = 1; + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(buf,"rb"); + status=f ? 1 : 0; + } + _grid->GlobalSum(status); + std::cout << GridLogMessage << "Read params status " << status << std::endl; + + if (!status) { + return false; + } + +#define _IRL_READ_INT(buf,p) if (f) { assert(fscanf(f,buf,p)==1); } else { *(p) = 0; } _grid->GlobalSum(*(p)); + + for (int i=0;i<5;i++) { + sprintf(buf,"s[%d] = %%d\n",i); + _IRL_READ_INT(buf,&s[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"b[%d] = %%d\n",i); + _IRL_READ_INT(buf,&b[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"nb[%d] = %%d\n",i); + _IRL_READ_INT(buf,&nb[(i+1)%5]); + } + _IRL_READ_INT("neig = %d\n",&neig); + _IRL_READ_INT("nkeep = %d\n",&nkeep); + _IRL_READ_INT("nkeep_single = %d\n",&nkeep_single); + _IRL_READ_INT("blocks = %d\n",&blocks); + _IRL_READ_INT("FP16_COEF_EXP_SHARE_FLOATS = %d\n",&_FP16_COEF_EXP_SHARE_FLOATS); + + for (int i=0;i<5;i++) { + assert(_grid->FullDimensions()[i] % s[i] == 0); + nn[i] = _grid->FullDimensions()[i] / s[i]; + nprocessors *= nn[i]; + } + + std::cout << GridLogMessage << "Reading data that was generated on node-layout " << nn << std::endl; + + crc32.resize(nprocessors); + for (int i =0;i > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + std::vector _nn(nn.begin(),nn.end()); + get_read_geometry(_grid,_nn, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // types + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + // slot layout + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + + // add read groups + for (int ngroup=0;ngroupThisRank() % ngroups == ngroup; + + std::cout << GridLogMessage << "Reading in group " << ngroup << " / " << ngroups << std::endl; + + // load all necessary slots and store them appropriately + for (auto sl=slots.begin();sl!=slots.end();sl++) { + + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + if (action) { + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + } + + uint32_t crc = 0x0; + off_t size; + + GridStopWatch gsw; + _grid->Barrier(); + gsw.Start(); + + std::vector raw_in(0); + if (action) { + fseeko(f,0,SEEK_END); + size = ftello(f); + fseeko(f,0,SEEK_SET); + + raw_in.resize(size); + assert(fread(&raw_in[0],size,1,f) == 1); + } + + _grid->Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)size / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + + if (action) { + std::cout << GridLogMessage << "[" << slot << "] Read " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s" << std::endl; + + uint32_t crc_comp = crc32_threaded((unsigned char*)&raw_in[0],size,0); + + if (crc_comp != crc32[slot]) { + std::cout << "Node " << hostname << " found crc mismatch for file " << buf << " (" << std::hex << crc_comp << " vs " << crc32[slot] << std::dec << ")" << std::endl; + std::cout << "Byte size: " << size << std::endl; + } + + assert(crc_comp == crc32[slot]); + } + + _grid->Barrier(); + + if (action) { + fclose(f); + } + + char* ptr = &raw_in[0]; + + GridStopWatch gsw2; + gsw2.Start(); + if (action) { + int nsingleCap = nkeep_single; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + int _cf_block_size = 
slot_lsites * 12 / 2 / blocks; + +#define FP_16_SIZE(a,b) (( (a) + (a/b) )*2) + + // first read single precision basis vectors +#pragma omp parallel + { + std::vector buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf1(nkeep_single*2); + std::vector buf2((nkeep - nkeep_single)*2); + +#pragma omp for + for (int j=0;j<(int)coef.size();j++) + for (int nb=0;nb + static void write_compressed_vectors(const char* dir,const BlockProjector& pr, + const BasisFieldVector& coef, + int nsingle,int writer_nodes = 0) { + + GridStopWatch gsw; + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + _grid->Barrier(); + gsw.Start(); + + char buf[4096]; + + // Making the directories is somewhat tricky. + // If we run on a joint filesystem we would just + // have the boss create the directories and then + // have a barrier. We also want to be able to run + // on local /scratch, so potentially all nodes need + // to create their own directories. So do the following + // for now. + for (int j=0;j<_grid->_Nprocessors;j++) { + if (j == _grid->ThisRank()) { + conditionalMkDir(dir); + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + conditionalMkDir(buf); + } + _grid->Barrier(); // make sure directories are ready + } + } + + + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + + int64_t off = 0x0; + uint32_t crc = 0x0; + if (writer_nodes < 1) + writer_nodes = _grid->_Nprocessors; + int groups = _grid->_Nprocessors / writer_nodes; + if (groups<1) + groups = 1; + + std::cout << GridLogMessage << " Write " << dir << " nodes = " << writer_nodes << std::endl; + + for (int group=0;groupBarrier(); + if (_grid->ThisRank() % groups == group) { + + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + //buffer does not seem to help + //assert(!setvbuf ( f , NULL , _IOFBF , 1024*1024*2 )); + + int nsingleCap = nsingle; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + GridStopWatch gsw1,gsw2,gsw3,gsw4,gsw5; + + gsw1.Start(); + + std::vector fbuf; + fbuf.reserve( 1024 * 1024 * 8 ); + + // first write single precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + +#if 0 + { + RealD nrm = 0.0; + for (int j=0;j<(int)buf.size();j++) + nrm += buf[j]*buf[j]; + std::cout << GridLogMessage << "Norm: " << nrm << std::endl; + } +#endif + write_floats(f,fbuf,crc, &buf[0], buf.size() ); + } + } + + gsw1.Stop(); + gsw2.Start(); + + // then write fixed precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + write_floats_fp16(f,fbuf,crc, &buf[0], buf.size(), 24); + } + } + + gsw2.Stop(); + assert(coef._v[0]._grid->_isites*coef._v[0]._grid->_osites == pr._bgrid._blocks); + + gsw3.Start(); + for (int j=0;j<(int)coef.size();j++) { + + int64_t size1 = nsingleCap*2; + int64_t size2 = 2*(pr._evec.size()-nsingleCap); + int64_t size = size1; + if (size2>size) + size=size2; + std::vector buf(size); + + //RealD nrmTest = 0.0; + for (int nb=0;nbGlobalSum(nrmTest); + //std::cout << GridLogMessage << "Test norm: " 
<< nrmTest << std::endl; + } + gsw3.Stop(); + + flush_bytes(f,fbuf); + + off = ftello(f); + fclose(f); + + std::cout<Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)off / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + std::cout << GridLogMessage << "Write " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s in " << seconds << " s" << std::endl; + + // gather crcs + std::vector crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/metadata.txt",dir); + FILE* f = fopen(buf,"wb"); + assert(f); + for (int i=0;i<5;i++) + fprintf(f,"s[%d] = %d\n",i,_grid->FullDimensions()[(i+1)%5] / _grid->_processors[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"b[%d] = %d\n",i,pr._bgrid._bs[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"nb[%d] = %d\n",i,pr._bgrid._nb[(i+1)%5]); + fprintf(f,"neig = %d\n",(int)coef.size()); + fprintf(f,"nkeep = %d\n",(int)pr._evec.size()); + fprintf(f,"nkeep_single = %d\n",nsingle); + fprintf(f,"blocks = %d\n",pr._bgrid._blocks); + fprintf(f,"FP16_COEF_EXP_SHARE_FLOATS = %d\n",FP16_COEF_EXP_SHARE_FLOATS); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"crc32[%d] = %X\n",i,crcs[i]); + fclose(f); + } + + } + + template + static void write_argonne(const BasisFieldVector& ret,const char* dir) { + + GridBase* _grid = ret._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + char buf[4096]; + + if (_grid->IsBoss()) { + mkdir(dir,ACCESSPERMS); + + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + mkdir(buf,ACCESSPERMS); + } + } + + _grid->Barrier(); // make sure directories are ready + + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Write " << dir << " nodes = " << _grid->_Nprocessors << std::endl; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + //printf("Slot: %d <> %d\n",slot, _grid->ThisRank()); + + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + int N = (int)ret._v.size(); + uint32_t crc = 0x0; + int64_t cf_size = _grid->oSites()*_grid->iSites()*12; + std::vector< float > rdata(cf_size*2); + + GridStopWatch gsw1,gsw2; + + for (int i=0;i coor(_l.size()); + for (coor[1] = 0;coor[1]<_l[1];coor[1]++) { + for (coor[2] = 0;coor[2]<_l[2];coor[2]++) { + for (coor[3] = 0;coor[3]<_l[3];coor[3]++) { + for (coor[4] = 0;coor[4]<_l[4];coor[4]++) { + for (coor[0] = 0;coor[0]<_l[0];coor[0]++) { + + if ((coor[1]+coor[2]+coor[3]+coor[4]) % 2 == 1) { + // peek + iScalar, 4> > sc; + peekLocalSite(sc,ret._v[i],coor); + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + *(std::complex*)&rdata[get_bfm_index(&coor[0],c+s*3, &_l[0] )] = sc()(s)(c); + } + } + } + } + } + } + + // endian flip + for (int i=0;i crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/checksums.txt",dir); + FILE* f = fopen(buf,"wt"); + assert(f); + fprintf(f,"00000000\n\n"); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"%X\n",crcs[i]); + fclose(f); + + sprintf(buf,"%s/nodes.txt",dir); + f = fopen(buf,"wt"); + assert(f); + for (int i 
=0;i<(int)_grid->_processors.size();i++) + fprintf(f,"%d\n",_grid->_processors[i]); + fclose(f); + } + + + std::cout << GridLogMessage << "Writing slot " << slot << " with " + << N << " vectors in " + << gsw2.Elapsed() << " at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw2.useconds()*1000.*1000. ) + << " GB/s with crc computed at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw1.useconds()*1000.*1000. ) + << " GB/s " + << std::endl; + + _grid->Barrier(); + std::cout << GridLogMessage << "Writing complete" << std::endl; + + } + } + +} diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermion.cc b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc index dd8a500d..37ab5fa6 100644 --- a/lib/qcd/action/fermion/DomainWallEOFAFermion.cc +++ b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc @@ -61,10 +61,10 @@ namespace QCD { } /*************************************************************** - /* Additional EOFA operators only called outside the inverter. - /* Since speed is not essential, simple axpby-style - /* implementations should be fine. - /***************************************************************/ + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. + ***************************************************************/ template void DomainWallEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) { @@ -116,8 +116,8 @@ namespace QCD { } /******************************************************************** - /* Performance critical fermion operators called inside the inverter - /********************************************************************/ + * Performance critical fermion operators called inside the inverter + ********************************************************************/ template void DomainWallEOFAFermion::M5D(const FermionField& psi, FermionField& chi) diff --git a/lib/qcd/action/fermion/MobiusEOFAFermion.cc b/lib/qcd/action/fermion/MobiusEOFAFermion.cc index 085fa988..0344afbf 100644 --- a/lib/qcd/action/fermion/MobiusEOFAFermion.cc +++ b/lib/qcd/action/fermion/MobiusEOFAFermion.cc @@ -77,11 +77,11 @@ namespace QCD { } } - /*************************************************************** - /* Additional EOFA operators only called outside the inverter. - /* Since speed is not essential, simple axpby-style - /* implementations should be fine. - /***************************************************************/ + /**************************************************************** + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. 
+ ***************************************************************/ template void MobiusEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) { @@ -194,8 +194,8 @@ namespace QCD { } /******************************************************************** - /* Performance critical fermion operators called inside the inverter - /********************************************************************/ + * Performance critical fermion operators called inside the inverter + ********************************************************************/ template void MobiusEOFAFermion::M5D(const FermionField& psi, FermionField& chi) diff --git a/tests/solver/Params.h b/tests/solver/Params.h new file mode 100644 index 00000000..d9a6d3b3 --- /dev/null +++ b/tests/solver/Params.h @@ -0,0 +1,136 @@ +/* + Params IO + + Author: Christoph Lehner + Date: 2017 +*/ + +#define PADD(p,X) p.get(#X,X); + +class Params { + protected: + + std::string trim(const std::string& sc) { + std::string s = sc; + s.erase(s.begin(), std::find_if(s.begin(), s.end(), + std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), + std::not1(std::ptr_fun(std::isspace))).base(), s.end()); + return s; + } + + public: + + std::map< std::string, std::string > lines; + std::string _fn; + + Params(const char* fn) : _fn(fn) { + FILE* f = fopen(fn,"rt"); + assert(f); + while (!feof(f)) { + char buf[4096]; + if (fgets(buf,sizeof(buf),f)) { + if (buf[0] != '#' && buf[0] != '\r' && buf[0] != '\n') { + char* sep = strchr(buf,'='); + assert(sep); + *sep = '\0'; + lines[trim(buf)] = trim(sep+1); + } + } + } + fclose(f); + } + + ~Params() { + } + + std::string loghead() { + return _fn + ": "; + } + + bool has(const char* name) { + auto f = lines.find(name); + return (f != lines.end()); + } + + const std::string& get(const char* name) { + auto f = lines.find(name); + if (f == lines.end()) { + std::cout << Grid::GridLogMessage << loghead() << "Could not find value for " << name << std::endl; + abort(); + } + return f->second; + } + + void parse(std::string& s, const std::string& cval) { + std::stringstream trimmer; + trimmer << cval; + s.clear(); + trimmer >> s; + } + + void parse(int& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%d",&i)==1); + } + + void parse(long long& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lld",&i)==1); + } + + void parse(double& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lf",&f)==1); + } + + void parse(float& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%f",&f)==1); + } + + void parse(bool& b, const std::string& cval) { + std::string lcval = cval; + std::transform(lcval.begin(), lcval.end(), lcval.begin(), ::tolower); + if (lcval == "true" || lcval == "yes") { + b = true; + } else if (lcval == "false" || lcval == "no") { + b = false; + } else { + std::cout << "Invalid value for boolean: " << b << std::endl; + assert(0); + } + } + + void parse(std::complex& f, const std::string& cval) { + double r,i; + assert(sscanf(cval.c_str(),"%lf %lf",&r,&i)==2); + f = std::complex(r,i); + } + + void parse(std::complex& f, const std::string& cval) { + float r,i; + assert(sscanf(cval.c_str(),"%f %f",&r,&i)==2); + f = std::complex(r,i); + } + + template + void get(const char* name, std::vector& v) { + int i = 0; + v.resize(0); + while (true) { + char buf[4096]; + sprintf(buf,"%s[%d]",name,i++); + if (!has(buf)) + break; + T val; + parse(val,get(buf)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << buf << " to " << 
val << std::endl; + v.push_back(val); + } + } + + template + void get(const char* name, T& f) { + parse(f,get(name)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << name << " to " << f << std::endl; + } + + +}; diff --git a/tests/solver/Test_dwf_compressed_lanczos.cc b/tests/solver/Test_dwf_compressed_lanczos.cc new file mode 100644 index 00000000..b42a2d55 --- /dev/null +++ b/tests/solver/Test_dwf_compressed_lanczos.cc @@ -0,0 +1,727 @@ +/* + Authors: Christoph Lehner + Date: 2017 + + Multigrid Lanczos + + + + TODO: + + High priority: + - Explore filtering of starting vector again, should really work: If cheby has 4 for low mode region and 1 for high mode, applying 15 iterations has 1e9 suppression + of high modes, which should create the desired invariant subspace already? Missing something here??? Maybe dynamic range dangerous, i.e., could also kill interesting + eigenrange if not careful. + + Better: Use all Cheby up to order N in order to approximate a step function; try this! Problem: width of step function. Can kill eigenspace > 1e-3 and have < 1e-5 equal + to 1 + + Low priority: + - Given that I seem to need many restarts and high degree poly to create the base and this takes about 1 day, seriously consider a simple method to create a basis + (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough) +*/ +#include +#include "Params.h" + +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +bool read_evals(GridBase* _grid, char* fn, std::vector& evals) { + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(fn,"rt"); + status = f ? 1 : 0; + } + _grid->GlobalSum(status); + + if (!status) + return false; + + uint32_t N; + if (f) + assert(fscanf(f,"%d\n",&N)==1); + else + N = 0; + _grid->GlobalSum(N); + + std::cout << "Reading " << N << " eigenvalues" << std::endl; + + evals.resize(N); + + for (int i=0;iGlobalSumVector(&evals[0],evals.size()); + + if (f) + fclose(f); + return true; +} + +void write_evals(char* fn, std::vector& evals) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)evals.size(); + fprintf(f,"%d\n",N); + + for (int i=0;i& hist) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)hist.size(); + for (int i=0;i +class FunctionHermOp : public LinearFunction { +public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + + FunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop) { + } + + void operator()(const Field& in, Field& out) { + _poly(_Linop,in,out); + } +}; + +template +class CheckpointedLinearFunction : public LinearFunction { +public: + LinearFunction& _op; + std::string _dir; + int _max_apply; + int _apply, _apply_actual; + GridBase* _grid; + FILE* _f; + + CheckpointedLinearFunction(GridBase* grid, LinearFunction& op, const char* dir,int max_apply) : _op(op), _dir(dir), _grid(grid), _f(0), + _max_apply(max_apply), _apply(0), _apply_actual(0) { + + FieldVectorIO::conditionalMkDir(dir); + + char fn[4096]; + sprintf(fn,"%s/ckpt_op.%4.4d",_dir.c_str(),_grid->ThisRank()); + printf("CheckpointLinearFunction:: file %s\n",fn); + _f = fopen(fn,"r+b"); + if (!_f) + _f = fopen(fn,"w+b"); + assert(_f); + fseek(_f,0,SEEK_CUR); + + } + + ~CheckpointedLinearFunction() { + if (_f) { + fclose(_f); + _f = 0; + } + } + + bool load_ckpt(const Field& in, Field& out) { + + off_t cur = ftello(_f); + fseeko(_f,0,SEEK_END); + if (cur == ftello(_f)) + return false; + fseeko(_f,cur,SEEK_SET); + 
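+    // Note (added for clarity): each checkpoint record stored in this file is a
+    // 4-byte CRC32 of the payload followed by the raw field data; the checksum
+    // is re-computed on load and must match before the cached operator result
+    // read below is accepted.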
+ size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc_exp; + assert(fread(&crc_exp,4,1,_f)==1); + assert(fread(&out._odata[0],sz,1,_f)==1); + assert(FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0)==crc_exp); + gsw.Stop(); + + printf("CheckpointLinearFunction:: reading %lld\n",(long long)sz); + std::cout << GridLogMessage << "Loading " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl; + return true; + } + + void save_ckpt(const Field& in, Field& out) { + + fseek(_f,0,SEEK_CUR); // switch to write + + size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc = FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0); + assert(fwrite(&crc,4,1,_f)==1); + assert(fwrite(&out._odata[0],sz,1,_f)==1); + fflush(_f); // try this on the GPFS to suppress OPA usage for disk during dslash; this is not needed at Lustre/JLAB + gsw.Stop(); + + printf("CheckpointLinearFunction:: writing %lld\n",(long long)sz); + std::cout << GridLogMessage << "Saving " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl; + } + + void operator()(const Field& in, Field& out) { + + _apply++; + + if (load_ckpt(in,out)) + return; + + _op(in,out); + + save_ckpt(in,out); + + if (_apply_actual++ >= _max_apply) { + std::cout << GridLogMessage << "Maximum application of operator reached, checkpoint and finish in future job" << std::endl; + if (_f) { fclose(_f); _f=0; } + in._grid->Barrier(); + Grid_finalize(); + exit(3); + } + } +}; + +template +class ProjectedFunctionHermOp : public LinearFunction { +public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedFunctionHermOp(BlockProjector& pr,OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + + GridStopWatch gsw1,gsw2,gsw3; + // fill fin + gsw1.Start(); + _pr.coarseToFine(in,fin); + gsw1.Stop(); + + // apply poly + gsw2.Start(); + _poly(_Linop,fin,fout); + gsw2.Stop(); + + // fill out + gsw3.Start(); + _pr.fineToCoarse(fout,out); + gsw3.Stop(); + + auto eps = innerProduct(in,out); + std::cout << GridLogMessage << "Operator timing details: c2f = " << gsw1.Elapsed() << " poly = " << gsw2.Elapsed() << " f2c = " << gsw3.Elapsed() << + " Complimentary Hermiticity check: " << eps.imag() / std::abs(eps) << std::endl; + + } +}; + +template +class ProjectedHermOp : public LinearFunction { +public: + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedHermOp(BlockProjector& pr,LinearOperatorBase& linop) : _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + _pr.coarseToFine(in,fin); + _Linop.HermOp(fin,fout); + _pr.fineToCoarse(fout,out); + + } +}; + +template +class PlainHermOp : public LinearFunction { +public: + LinearOperatorBase &_Linop; + + PlainHermOp(LinearOperatorBase& linop) : _Linop(linop) { + } + + void operator()(const Field& in, Field& out) { + _Linop.HermOp(in,out); + } +}; + +template using CoarseSiteFieldGeneral = iScalar< iVector >; +template using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >; +template using CoarseSiteFieldF = 
CoarseSiteFieldGeneral< vComplexF, N >; +template using CoarseSiteField = CoarseSiteFieldGeneral< vComplex, N >; +template using CoarseLatticeFermion = Lattice< CoarseSiteField >; +template using CoarseLatticeFermionD = Lattice< CoarseSiteFieldD >; + +template +void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npoly2, + int Nstop2,int Nk2,int Nm2,RealD resid2,RealD betastp2,int MaxIt,int MinRes2, + LinearOperatorBase& HermOp, std::vector& eval1, bool cg_test_enabled, + int cg_test_maxiter,int nsingle,int SkipTest2, int MaxApply2,bool smoothed_eval_enabled, + int smoothed_eval_inner,int smoothed_eval_outer,int smoothed_eval_begin, + int smoothed_eval_end,RealD smoothed_eval_inner_resid) { + + BlockedGrid& bgrid = pr._bgrid; + BasisFieldVector& basis = pr._evec; + + + std::vector coarseFourDimLatt; + for (int i=0;i<4;i++) + coarseFourDimLatt.push_back(bgrid._nb[1+i] * bgrid._grid->_processors[1+i]); + assert(bgrid._grid->_processors[0] == 1); + + std::cout << GridLogMessage << "CoarseGrid = " << coarseFourDimLatt << " with basis = " << Nstop1 << std::endl; + GridCartesian * UCoarseGrid = SpaceTimeGrid::makeFourDimGrid(coarseFourDimLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FCoarseGrid = SpaceTimeGrid::makeFiveDimGrid(bgrid._nb[0],UCoarseGrid); + + Chebyshev Cheb2(alpha2,beta,Npoly2); + CoarseLatticeFermion src_coarse(FCoarseGrid); + + // Second round of Lanczos in blocked space + std::vector eval2(Nm2); + std::vector eval3(Nm2); + BasisFieldVector > coef(Nm2,FCoarseGrid); + + ProjectedFunctionHermOp,LatticeFermion> Op2plain(pr,Cheb2,HermOp); + CheckpointedLinearFunction > Op2ckpt(src_coarse._grid,Op2plain,"checkpoint",MaxApply2); + LinearFunction< CoarseLatticeFermion >* Op2; + if (MaxApply2) { + Op2 = &Op2ckpt; + } else { + Op2 = &Op2plain; + } + ProjectedHermOp,LatticeFermion> Op2nopoly(pr,HermOp); + BlockImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2); + + + src_coarse = 1.0; + + // Precision test + { + Field tmp(bgrid._grid); + CoarseLatticeFermion tmp2(FCoarseGrid); + CoarseLatticeFermion tmp3(FCoarseGrid); + tmp2 = 1.0; + tmp3 = 1.0; + + pr.coarseToFine(tmp2,tmp); + pr.fineToCoarse(tmp,tmp2); + + tmp2 -= tmp3; + std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(tmp2) / norm2(tmp3) << std::endl; + + //bgrid._grid->Barrier(); + //return; + } + + int Nconv; + if (!FieldVectorIO::read_compressed_vectors("lanczos.output",pr,coef) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt",eval3) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.linear",eval1) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.poly",eval2) + ) { + + + IRL2.calc(eval2,coef,src_coarse,Nconv,true,SkipTest2); + + coef.resize(Nstop2); + eval2.resize(Nstop2); + eval3.resize(Nstop2); + + std::vector step3_cache; + + // reconstruct eigenvalues of original operator + for (int i=0;iIsBoss()) { + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt.linear",eval1); + write_evals((char *)"lanczos.output/eigen-values.txt.poly",eval2); + } + + } + + // fix up eigenvalues + if (!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.smoothed",eval3) && smoothed_eval_enabled) { + + ConjugateGradient CG(smoothed_eval_inner_resid, smoothed_eval_inner, false); + + LatticeFermion v_i(basis[0]._grid); + auto tmp = v_i; + auto tmp2 = v_i; + + for (int i=smoothed_eval_begin;iIsBoss()) { + 
write_evals((char *)"lanczos.output/eigen-values.txt.smoothed",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); // also reset this to the best ones we have available + } + } + + // do CG test with and without deflation + if (cg_test_enabled) { + ConjugateGradient CG(1.0e-8, cg_test_maxiter, false); + LatticeFermion src_orig(bgrid._grid); + src_orig.checkerboard = Odd; + src_orig = 1.0; + src_orig = src_orig * (1.0 / ::sqrt(norm2(src_orig)) ); + auto result = src_orig; + + // undeflated solve + result = zero; + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.undefl",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors + result = zero; + pr.deflate(coef,eval2,Nstop2,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.defl_all",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with non-blocked eigenvectors + result = zero; + pr.deflate(coef,eval1,Nstop1,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.defl_full",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors and original eigenvalues from proj + result = zero; + pr.deflate(coef,eval3,Nstop2,src_orig,result); + CG(HermOp, src_orig, result); + // if (UCoarseGrid->IsBoss()) + // write_history("cg_test.defl_all_ev3",CG.ResHistory); + // CG.ResHistory.clear(); + + } + +} + + +template +void quick_krylov_basis(BasisFieldVector& evec,Field& src,LinearFunction& Op,int Nstop) { + Field tmp = src; + Field tmp2 = tmp; + + for (int i=0;i HermOp(Ddwf); + + // Eigenvector storage + const int Nm1 = Np1 + Nk1; + const int Nm2 = Np2 + Nk2; // maximum number of vectors we need to keep + std::cout << GridLogMessage << "Keep " << Nm1 << " full vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << Nm2 << " total vectors" << std::endl; + assert(Nm2 >= Nm1); + BasisFieldVector evec(Nm1,FrbGrid); // start off with keeping full vectors + + // First and second cheby + Chebyshev Cheb1(alpha1,beta,Npoly1); + FunctionHermOp Op1(Cheb1,HermOp); + PlainHermOp Op1test(HermOp); + + // Eigenvalue storage + std::vector eval1(evec.size()); + + // Construct source vector + LatticeFermion src(FrbGrid); + { + src=1.0; + src.checkerboard = Odd; + + // normalize + RealD nn = norm2(src); + nn = Grid::sqrt(nn); + src = src * (1.0/nn); + } + + // Do a benchmark and a quick exit if performance is too little (ugly but needed due to performance fluctuations) + if (max_cheb_time_ms) { + // one round of warmup + auto tmp = src; + GridStopWatch gsw1,gsw2; + gsw1.Start(); + Cheb1(HermOp,src,tmp); + gsw1.Stop(); + Ddwf.ZeroCounters(); + gsw2.Start(); + Cheb1(HermOp,src,tmp); + gsw2.Stop(); + Ddwf.Report(); + std::cout << GridLogMessage << "Performance check; warmup = " << gsw1.Elapsed() << " test = " << gsw2.Elapsed() << std::endl; + int ms = (int)(gsw2.useconds()/1e3); + if (ms > max_cheb_time_ms) { + std::cout << GridLogMessage << "Performance too poor: " << ms << " ms, cutoff = " << max_cheb_time_ms << " ms" << std::endl; + Grid_finalize(); + return 2; + } + + } + + // First round of Lanczos to get low mode basis + BlockImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1); + int Nconv; + + char tag[1024]; + if (!FieldVectorIO::read_argonne(evec,(char *)"checkpoint") || !read_evals(UGrid,(char *)"checkpoint/eigen-values.txt",eval1)) { + + if (simple_krylov_basis) { + 
quick_krylov_basis(evec,src,Op1,Nstop1); + } else { + IRL1.calc(eval1,evec,src,Nconv,false,1); + } + evec.resize(Nstop1); // and throw away superfluous + eval1.resize(Nstop1); + if (checkpoint_basis) + FieldVectorIO::write_argonne(evec,(char *)"checkpoint"); + if (UGrid->IsBoss() && checkpoint_basis) + write_evals((char *)"checkpoint/eigen-values.txt",eval1); + + Ddwf.Report(); + + if (exit_after_basis_calculation) { + Grid_finalize(); + return 0; + } + } + + // now test eigenvectors + if (!simple_krylov_basis) { + for (int i=0;i Date: Wed, 11 Oct 2017 10:12:07 +0100 Subject: [PATCH 022/145] Starting reorg of Blocked lanczos --- configure.ac | 1 + .../BlockImplicitlyRestartedLanczos.h | 1 - tests/Makefile.am | 2 +- .../lanczos}/FieldVectorIO.h | 0 tests/lanczos/Makefile.am | 1 + tests/{solver => lanczos}/Params.h | 0 tests/{solver => lanczos}/Test_dwf_compressed_lanczos.cc | 4 ++-- tests/{solver => lanczos}/Test_dwf_lanczos.cc | 0 tests/{debug => lanczos}/Test_synthetic_lanczos.cc | 0 tests/{solver => lanczos}/Test_wilson_lanczos.cc | 0 10 files changed, 5 insertions(+), 4 deletions(-) rename {lib/algorithms/iterative/BlockImplicitlyRestartedLanczos => tests/lanczos}/FieldVectorIO.h (100%) create mode 100644 tests/lanczos/Makefile.am rename tests/{solver => lanczos}/Params.h (100%) rename tests/{solver => lanczos}/Test_dwf_compressed_lanczos.cc (99%) rename tests/{solver => lanczos}/Test_dwf_lanczos.cc (100%) rename tests/{debug => lanczos}/Test_synthetic_lanczos.cc (100%) rename tests/{solver => lanczos}/Test_wilson_lanczos.cc (100%) diff --git a/configure.ac b/configure.ac index b11d6b42..496f7fd7 100644 --- a/configure.ac +++ b/configure.ac @@ -550,6 +550,7 @@ AC_CONFIG_FILES(tests/forces/Makefile) AC_CONFIG_FILES(tests/hadrons/Makefile) AC_CONFIG_FILES(tests/hmc/Makefile) AC_CONFIG_FILES(tests/solver/Makefile) +AC_CONFIG_FILES(tests/lanczos/Makefile) AC_CONFIG_FILES(tests/smearing/Makefile) AC_CONFIG_FILES(tests/qdpxx/Makefile) AC_CONFIG_FILES(tests/testu01/Makefile) diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h index 82a00efa..55a85552 100644 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h @@ -39,7 +39,6 @@ Author: Christoph Lehner #include #include #include -#include namespace Grid { diff --git a/tests/Makefile.am b/tests/Makefile.am index a8935268..7928a7fe 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,4 +1,4 @@ -SUBDIRS = . core forces hmc solver debug smearing IO +SUBDIRS = . 
core forces hmc solver debug smearing IO lanczos if BUILD_CHROMA_REGRESSION SUBDIRS+= qdpxx diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldVectorIO.h b/tests/lanczos/FieldVectorIO.h similarity index 100% rename from lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldVectorIO.h rename to tests/lanczos/FieldVectorIO.h diff --git a/tests/lanczos/Makefile.am b/tests/lanczos/Makefile.am new file mode 100644 index 00000000..60b82dd7 --- /dev/null +++ b/tests/lanczos/Makefile.am @@ -0,0 +1 @@ +include Make.inc diff --git a/tests/solver/Params.h b/tests/lanczos/Params.h similarity index 100% rename from tests/solver/Params.h rename to tests/lanczos/Params.h diff --git a/tests/solver/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc similarity index 99% rename from tests/solver/Test_dwf_compressed_lanczos.cc rename to tests/lanczos/Test_dwf_compressed_lanczos.cc index b42a2d55..7fe37387 100644 --- a/tests/solver/Test_dwf_compressed_lanczos.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -21,9 +21,9 @@ (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough) */ #include -#include "Params.h" - #include +#include "FieldVectorIO.h" +#include "Params.h" using namespace std; using namespace Grid; diff --git a/tests/solver/Test_dwf_lanczos.cc b/tests/lanczos/Test_dwf_lanczos.cc similarity index 100% rename from tests/solver/Test_dwf_lanczos.cc rename to tests/lanczos/Test_dwf_lanczos.cc diff --git a/tests/debug/Test_synthetic_lanczos.cc b/tests/lanczos/Test_synthetic_lanczos.cc similarity index 100% rename from tests/debug/Test_synthetic_lanczos.cc rename to tests/lanczos/Test_synthetic_lanczos.cc diff --git a/tests/solver/Test_wilson_lanczos.cc b/tests/lanczos/Test_wilson_lanczos.cc similarity index 100% rename from tests/solver/Test_wilson_lanczos.cc rename to tests/lanczos/Test_wilson_lanczos.cc From cb9ff20249d90f528ba1b2609f4cbe3e62b1f437 Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 13 Oct 2017 11:30:50 +0100 Subject: [PATCH 023/145] Approx tests and lanczos improvement --- lib/algorithms/approx/Chebyshev.h | 6 +- .../BlockImplicitlyRestartedLanczos.h | 1399 +++++++++-------- .../FieldBasisVector.h | 5 +- .../iterative/ImplicitlyRestartedLanczos.h | 3 +- lib/log/Log.cc | 10 +- lib/log/Log.h | 13 +- lib/threads/Threads.h | 2 + tests/debug/Test_cheby.cc | 36 +- tests/hmc/Test_remez.cc | 61 +- 9 files changed, 823 insertions(+), 712 deletions(-) diff --git a/lib/algorithms/approx/Chebyshev.h b/lib/algorithms/approx/Chebyshev.h index f8c21a05..5088c51b 100644 --- a/lib/algorithms/approx/Chebyshev.h +++ b/lib/algorithms/approx/Chebyshev.h @@ -83,8 +83,10 @@ namespace Grid { public: void csv(std::ostream &out){ - RealD diff = hi-lo; - for (RealD x=lo-0.2*diff; x #define GRID_BIRL_H #include //memset - -#include +//#include #include #include @@ -42,420 +41,185 @@ Author: Christoph Lehner namespace Grid { +template +void basisOrthogonalize(std::vector &basis,Field &w,int k) +{ + for(int j=0; j +void basisRotate(std::vector &basis,Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) +{ + typedef typename Field::vector_object vobj; + GridBase* grid = basis[0]._grid; + + parallel_region + { + std::vector < vobj > B(Nm); // Thread private + + parallel_for_internal(int ss=0;ss < grid->oSites();ss++){ + for(int j=j0; j +void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, std::vector& idx) +{ + int vlen = idx.size(); + + assert(vlen>=1); + 
assert(vlen<=sort_vals.size()); + assert(vlen<=_v.size()); + + for (size_t i=0;i i); + ////////////////////////////////////// + // idx[i] is a table of desired sources giving a permutation. + // + // Swap v[i] with v[idx[i]]. + // + // Find j>i for which _vnew[j] = _vold[i], + // track the move idx[j] => idx[i] + // track the move idx[i] => i + ////////////////////////////////////// + size_t j; + for (j=i;j basisSortGetIndex(std::vector& sort_vals) +{ + std::vector idx(sort_vals.size()); + std::iota(idx.begin(), idx.end(), 0); + + // sort indexes based on comparing values in v + std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) { + return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]); + }); + return idx; +} + +template +void basisSortInPlace(std::vector & _v,std::vector& sort_vals, bool reverse) +{ + std::vector idx = basisSortGetIndex(sort_vals); + if (reverse) + std::reverse(idx.begin(), idx.end()); + + basisReorderInPlace(_v,sort_vals,idx); +} + +// PAB: faster to compute the inner products first then fuse loops. +// If performance critical can improve. +template +void basisDeflate(const std::vector &_v,const std::vector& eval,const Field& src_orig,Field& result) { + result = zero; + assert(_v.size()==eval.size()); + int N = (int)_v.size(); + for (int i=0;i - class BlockImplicitlyRestartedLanczos { - - const RealD small = 1.0e-16; +template +class BlockImplicitlyRestartedLanczos { + private: + const RealD small = 1.0e-8; + int MaxIter; + int MinRestart; // Minimum number of restarts; only check for convergence after + int Nstop; // Number of evecs checked for convergence + int Nk; // Number of converged sought + // int Np; // Np -- Number of spare vecs in krylov space // == Nm - Nk + int Nm; // Nm -- total number of vectors + IRLdiagonalisation diagonalisation; + int orth_period; + + RealD OrthoTime; + RealD eresid, betastp; + //////////////////////////////// + // Embedded objects + //////////////////////////////// + SortEigen _sort; + LinearFunction &_HermOp; + LinearFunction &_HermOpTest; + ///////////////////////// + // Constructor + ///////////////////////// public: - int lock; - int get; - int Niter; - int converged; + BlockImplicitlyRestartedLanczos(LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _MaxIter, // Max iterations + int _MinRestart, int _orth_period = 1, + IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : + _HermOp(HermOp), _HermOpTest(HermOpTest), + Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), + eresid(_eresid), betastp(_betastp), + MaxIter(_MaxIter) , MinRestart(_MinRestart), + orth_period(_orth_period), diagonalisation(_diagonalisation) { }; - int Nminres; // Minimum number of restarts; only check for convergence after - int Nstop; // Number of evecs checked for convergence - int Nk; // Number of converged sought - int Np; // Np -- Number of spare vecs in kryloc space - int Nm; // Nm -- total number of vectors + //////////////////////////////// + // Helpers + //////////////////////////////// + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = sqrt(nn); + v = v * (1.0/nn); + return nn; + } - int orth_period; - - RealD OrthoTime; - - RealD eresid, betastp; - SortEigen _sort; - LinearFunction &_HermOp; - LinearFunction &_HermOpTest; - ///////////////////////// - // Constructor - ///////////////////////// - - BlockImplicitlyRestartedLanczos( - 
LinearFunction & HermOp, - LinearFunction & HermOpTest, - int _Nstop, // sought vecs - int _Nk, // sought vecs - int _Nm, // spare vecs - RealD _eresid, // resid in lmdue deficit - RealD _betastp, // if beta(k) < betastp: converged - int _Niter, // Max iterations - int _Nminres, int _orth_period = 1) : - _HermOp(HermOp), - _HermOpTest(HermOpTest), - Nstop(_Nstop), - Nk(_Nk), - Nm(_Nm), - eresid(_eresid), - betastp(_betastp), - Niter(_Niter), - Nminres(_Nminres), - orth_period(_orth_period) - { - Np = Nm-Nk; assert(Np>0); - }; - - BlockImplicitlyRestartedLanczos( - LinearFunction & HermOp, - LinearFunction & HermOpTest, - int _Nk, // sought vecs - int _Nm, // spare vecs - RealD _eresid, // resid in lmdue deficit - RealD _betastp, // if beta(k) < betastp: converged - int _Niter, // Max iterations - int _Nminres, - int _orth_period = 1) : - _HermOp(HermOp), - _HermOpTest(HermOpTest), - Nstop(_Nk), - Nk(_Nk), - Nm(_Nm), - eresid(_eresid), - betastp(_betastp), - Niter(_Niter), - Nminres(_Nminres), - orth_period(_orth_period) - { - Np = Nm-Nk; assert(Np>0); - }; - - -/* Saad PP. 195 -1. Choose an initial vector v1 of 2-norm unity. Set β1 ≡ 0, v0 ≡ 0 -2. For k = 1,2,...,m Do: -3. wk:=Avk−βkv_{k−1} -4. αk:=(wk,vk) // -5. wk:=wk−αkvk // wk orthog vk -6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop -7. vk+1 := wk/βk+1 -8. EndDo - */ - void step(std::vector& lmd, - std::vector& lme, - BasisFieldVector& evec, - Field& w,int Nm,int k) - { - assert( k< Nm ); - - GridStopWatch gsw_op,gsw_o; - - Field& evec_k = evec[k]; - - gsw_op.Start(); - _HermOp(evec_k,w); - gsw_op.Stop(); - - if(k>0){ - w -= lme[k-1] * evec[k-1]; - } - - ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) - RealD alph = real(zalph); - - w = w - alph * evec_k;// 5. wk:=wk−αkvk - - RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop - // 7. vk+1 := wk/βk+1 - - std::cout<0 && k % orth_period == 0) { - orthogonalize(w,evec,k); // orthonormalise - } - gsw_o.Stop(); - - if(k < Nm-1) { - evec[k+1] = w; - } - - std::cout << GridLogMessage << "Timing: operator=" << gsw_op.Elapsed() << - " orth=" << gsw_o.Elapsed() << std::endl; - - } - - void qr_decomp(std::vector& lmd, - std::vector& lme, - int Nk, - int Nm, - std::vector& Qt, - RealD Dsh, - int kmin, - int kmax) - { - int k = kmin-1; - RealD x; - - RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]); - RealD c = ( lmd[k] -Dsh) *Fden; - RealD s = -lme[k] *Fden; - - RealD tmpa1 = lmd[k]; - RealD tmpa2 = lmd[k+1]; - RealD tmpb = lme[k]; - - lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb; - lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb; - lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb; - x =-s*lme[k+1]; - lme[k+1] = c*lme[k+1]; - - for(int i=0; i& lmd, - std::vector& lme, - int N1, - int N2, - std::vector& Qt, - GridBase *grid){ - - std::cout << GridLogMessage << "diagonalize_lapack start\n"; - GridStopWatch gsw; - - const int size = Nm; - // tevals.resize(size); - // tevecs.resize(size); - LAPACK_INT NN = N1; - std::vector evals_tmp(NN); - std::vector evec_tmp(NN*NN); - memset(&evec_tmp[0],0,sizeof(double)*NN*NN); - // double AA[NN][NN]; - std::vector DD(NN); - std::vector EE(NN); - for (int i = 0; i< NN; i++) - for (int j = i - 1; j <= i + 1; j++) - if ( j < NN && j >= 0 ) { - if (i==j) DD[i] = lmd[i]; - if (i==j) evals_tmp[i] = lmd[i]; - if (j==(i-1)) EE[j] = lme[j]; - } - LAPACK_INT evals_found; - LAPACK_INT lwork = ( (18*NN) > (1+4*NN+NN*NN)? 
(18*NN):(1+4*NN+NN*NN)) ; - LAPACK_INT liwork = 3+NN*10 ; - std::vector iwork(liwork); - std::vector work(lwork); - std::vector isuppz(2*NN); - char jobz = 'V'; // calculate evals & evecs - char range = 'I'; // calculate all evals - // char range = 'A'; // calculate all evals - char uplo = 'U'; // refer to upper half of original matrix - char compz = 'I'; // Compute eigenvectors of tridiagonal matrix - std::vector ifail(NN); - LAPACK_INT info; - // int total = QMP_get_number_of_nodes(); - // int node = QMP_get_node_number(); - // GridBase *grid = evec[0]._grid; - int total = grid->_Nprocessors; - int node = grid->_processor; - int interval = (NN/total)+1; - double vl = 0.0, vu = 0.0; - LAPACK_INT il = interval*node+1 , iu = interval*(node+1); - if (iu > NN) iu=NN; - double tol = 0.0; - if (1) { - memset(&evals_tmp[0],0,sizeof(double)*NN); - if ( il <= NN){ - std::cout << GridLogMessage << "dstegr started" << std::endl; - gsw.Start(); - dstegr(&jobz, &range, &NN, - (double*)&DD[0], (double*)&EE[0], - &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A' - &tol, // tolerance - &evals_found, &evals_tmp[0], (double*)&evec_tmp[0], &NN, - &isuppz[0], - &work[0], &lwork, &iwork[0], &liwork, - &info); - gsw.Stop(); - std::cout << GridLogMessage << "dstegr completed in " << gsw.Elapsed() << std::endl; - for (int i = iu-1; i>= il-1; i--){ - evals_tmp[i] = evals_tmp[i - (il-1)]; - if (il>1) evals_tmp[i-(il-1)]=0.; - for (int j = 0; j< NN; j++){ - evec_tmp[i*NN + j] = evec_tmp[(i - (il-1)) * NN + j]; - if (il>1) evec_tmp[(i-(il-1)) * NN + j]=0.; - } - } - } - { - // QMP_sum_double_array(evals_tmp,NN); - // QMP_sum_double_array((double *)evec_tmp,NN*NN); - grid->GlobalSumVector(&evals_tmp[0],NN); - grid->GlobalSumVector(&evec_tmp[0],NN*NN); - } - } - // cheating a bit. It is better to sort instead of just reversing it, but the document of the routine says evals are sorted in increasing order. qr gives evals in decreasing order. 
- for(int i=0;i& lmd, - std::vector& lme, - int N2, - int N1, - std::vector& Qt, - GridBase *grid) - { - -#ifdef USE_LAPACK_IRL - const int check_lapack=0; // just use lapack if 0, check against lapack if 1 - - if(!check_lapack) - return diagonalize_lapack(lmd,lme,N2,N1,Qt,grid); - - std::vector lmd2(N1); - std::vector lme2(N1); - std::vector Qt2(N1*N1); - for(int k=0; k= kmin; --j){ - RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); - if(fabs(lme[j-1])+dds > dds){ - kmax = j+1; - goto continued; - } - } - Niter = iter; -#ifdef USE_LAPACK_IRL - if(check_lapack){ - const double SMALL=1e-8; - diagonalize_lapack(lmd2,lme2,N2,N1,Qt2,grid); - std::vector lmd3(N2); - for(int k=0; kSMALL) std::cout<SMALL) std::cout<SMALL) std::cout< dds){ - kmin = j+1; - break; - } - } - } - std::cout< - static RealD normalise(T& v) - { - RealD nn = norm2(v); - nn = sqrt(nn); - v = v * (1.0/nn); - return nn; - } - - void orthogonalize(Field& w, - BasisFieldVector& evec, - int k) - { - double t0=-usecond()/1e6; - - evec.orthogonalize(w,k); - - normalise(w); - t0+=usecond()/1e6; - OrthoTime +=t0; - } - - void setUnit_Qt(int Nm, std::vector &Qt) { - for(int i=0; i& evec,int k) + { + OrthoTime-=usecond()/1e6; + //evec.orthogonalize(w,k); + basisOrthogonalize(evec._v,w,k); + normalise(w); + OrthoTime+=usecond()/1e6; + } /* Rudy Arthur's thesis pp.137 ------------------------ @@ -474,280 +238,555 @@ repeat →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM until convergence */ - - void calc(std::vector& eval, - BasisFieldVector& evec, - const Field& src, - int& Nconv, - bool reverse, - int SkipTest) - { - - GridBase *grid = evec._v[0]._grid;//evec.get(0 + evec_offset)._grid; - assert(grid == src._grid); - - std::cout< lme(Nm); - std::vector lme2(Nm); - std::vector eval2(Nm); - std::vector eval2_copy(Nm); - std::vector Qt(Nm*Nm); - - - Field f(grid); - Field v(grid); - - int k1 = 1; - int k2 = Nk; - - Nconv = 0; - - RealD beta_k; - - // Set initial vector - evec[0] = src; - normalise(evec[0]); - std:: cout<0); - evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm); - - t1=usecond()/1e6; - std::cout<= Nminres) { - std::cout << GridLogMessage << "Rotation to test convergence " << std::endl; - - Field ev0_orig(grid); - ev0_orig = evec[0]; - - evec.rotate(Qt,0,Nk,0,Nk,Nm); - - { - std::cout << GridLogMessage << "Test convergence" << std::endl; - Field B(grid); - - for(int j = 0; j=Nstop || beta_k < betastp){ - goto converged; - } - - std::cout << GridLogMessage << "Rotate back" << std::endl; - //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; - { - Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); - for (int k=0;k QtI(Nm*Nm); - for (int k=0;k lme(Nm); + std::vector lme2(Nm); + std::vector eval2(Nm); + std::vector eval2_copy(Nm); + Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); + + Field f(grid); + Field v(grid); + int k1 = 1; + int k2 = Nk; + RealD beta_k; + + Nconv = 0; + + // Set initial vector + evec[0] = src; + normalise(evec[0]); + + // Initial Nk steps + OrthoTime=0.; + for(int k=0; k0); + // evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + basisRotate(evec._v,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + + std::cout<= MinRestart) { + std::cout << GridLogIRL << "Rotation to test convergence " << std::endl; + + Field ev0_orig(grid); + ev0_orig = evec[0]; + + // evec.rotate(Qt,0,Nk,0,Nk,Nm); + basisRotate(evec._v,Qt,0,Nk,0,Nk,Nm); + + { + std::cout << GridLogIRL << "Test convergence" << std::endl; + Field B(grid); + + for(int j = 0; j=Nstop || beta_k < betastp){ + goto converged; + } + + 
std::cout << GridLogIRL << "Convergence testing: Rotating back" << std::endl; + //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; + { + Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); // Restrict Qt to Nk x Nk + for (int k=0;k& lmd, + std::vector& lme, + BasisFieldVector& evec, + Field& w,int Nm,int k) + { + const RealD tiny = 1.0e-20; + assert( k< Nm ); + + GridStopWatch gsw_op,gsw_o; + + Field& evec_k = evec[k]; + + _HermOp(evec_k,w); + std::cout<0) w -= lme[k-1] * evec[k-1]; + + ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) + RealD alph = real(zalph); + + w = w - alph * evec_k;// 5. wk:=wk−αkvk + + RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop + // 7. vk+1 := wk/βk+1 + + lmd[k] = alph; + lme[k] = beta; + + std::cout<0 && k % orth_period == 0) { + orthogonalize(w,evec,k); // orthonormalise + std::cout<& lmd, std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd & Qt, // Nm x Nm + GridBase *grid) + { + Eigen::MatrixXd TriDiag = Eigen::MatrixXd::Zero(Nk,Nk); + + for(int i=0;i eigensolver(TriDiag); + + for (int i = 0; i < Nk; i++) { + lmd[Nk-1-i] = eigensolver.eigenvalues()(i); + } + for (int i = 0; i < Nk; i++) { + for (int j = 0; j < Nk; j++) { + Qt(Nk-1-i,j) = eigensolver.eigenvectors()(j,i); + } + } + } + + /////////////////////////////////////////////////////////////////////////// + // File could end here if settle on Eigen ??? + /////////////////////////////////////////////////////////////////////////// + + void QR_decomp(std::vector& lmd, // Nm + std::vector& lme, // Nm + int Nk, int Nm, // Nk, Nm + Eigen::MatrixXd& Qt, // Nm x Nm matrix + RealD Dsh, int kmin, int kmax) + { + int k = kmin-1; + RealD x; + + RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]); + RealD c = ( lmd[k] -Dsh) *Fden; + RealD s = -lme[k] *Fden; + + RealD tmpa1 = lmd[k]; + RealD tmpa2 = lmd[k+1]; + RealD tmpb = lme[k]; + + lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb; + lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb; + lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb; + x =-s*lme[k+1]; + lme[k+1] = c*lme[k+1]; + + for(int i=0; i& lmd, std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd & Qt, + GridBase *grid) + { + Qt = Eigen::MatrixXd::Identity(Nm,Nm); + if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) { + diagonalize_lapack(lmd,lme,Nk,Nm,Qt,grid); + } else if ( diagonalisation == IRLdiagonaliseWithQR ) { + diagonalize_QR(lmd,lme,Nk,Nm,Qt,grid); + } else if ( diagonalisation == IRLdiagonaliseWithEigen ) { + diagonalize_Eigen(lmd,lme,Nk,Nm,Qt,grid); + } else { + assert(0); + } + } + +#ifdef USE_LAPACK +void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e, + double *vl, double *vu, int *il, int *iu, double *abstol, + int *m, double *w, double *z, int *ldz, int *isuppz, + double *work, int *lwork, int *iwork, int *liwork, + int *info); #endif - }; - +void diagonalize_lapack(std::vector& lmd, + std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd& Qt, + GridBase *grid) +{ +#ifdef USE_LAPACK + const int size = Nm; + int NN = Nk; + double evals_tmp[NN]; + double evec_tmp[NN][NN]; + memset(evec_tmp[0],0,sizeof(double)*NN*NN); + double DD[NN]; + double EE[NN]; + for (int i = 0; i< NN; i++) { + for (int j = i - 1; j <= i + 1; j++) { + if ( j < NN && j >= 0 ) { + if (i==j) DD[i] = lmd[i]; + if (i==j) evals_tmp[i] = lmd[i]; + if (j==(i-1)) EE[j] = lme[j]; + } + } + } + int evals_found; + int lwork = ( (18*NN) > (1+4*NN+NN*NN)? 
(18*NN):(1+4*NN+NN*NN)) ; + int liwork = 3+NN*10 ; + int iwork[liwork]; + double work[lwork]; + int isuppz[2*NN]; + char jobz = 'V'; // calculate evals & evecs + char range = 'I'; // calculate all evals + // char range = 'A'; // calculate all evals + char uplo = 'U'; // refer to upper half of original matrix + char compz = 'I'; // Compute eigenvectors of tridiagonal matrix + int ifail[NN]; + int info; + int total = grid->_Nprocessors; + int node = grid->_processor; + int interval = (NN/total)+1; + double vl = 0.0, vu = 0.0; + int il = interval*node+1 , iu = interval*(node+1); + if (iu > NN) iu=NN; + double tol = 0.0; + if (1) { + memset(evals_tmp,0,sizeof(double)*NN); + if ( il <= NN){ + LAPACK_dstegr(&jobz, &range, &NN, + (double*)DD, (double*)EE, + &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A' + &tol, // tolerance + &evals_found, evals_tmp, (double*)evec_tmp, &NN, + isuppz, + work, &lwork, iwork, &liwork, + &info); + for (int i = iu-1; i>= il-1; i--){ + evals_tmp[i] = evals_tmp[i - (il-1)]; + if (il>1) evals_tmp[i-(il-1)]=0.; + for (int j = 0; j< NN; j++){ + evec_tmp[i][j] = evec_tmp[i - (il-1)][j]; + if (il>1) evec_tmp[i-(il-1)][j]=0.; + } + } + } + { + grid->GlobalSumVector(evals_tmp,NN); + grid->GlobalSumVector((double*)evec_tmp,NN*NN); + } + } + // Safer to sort instead of just reversing it, + // but the document of the routine says evals are sorted in increasing order. + // qr gives evals in decreasing order. + for(int i=0;i& lmd, std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd & Qt, + GridBase *grid) + { + int QRiter = 100*Nm; + int kmin = 1; + int kmax = Nk; + + // (this should be more sophisticated) + for(int iter=0; iter= kmin; --j){ + RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); + if(fabs(lme[j-1])+dds > dds){ + kmax = j+1; + goto continued; + } + } + QRiter = iter; + return; + + continued: + for(int j=0; j dds){ + kmin = j+1; + break; + } + } + } + std::cout << GridLogError << "[QL method] Error - Too many iteration: "<& Qt,int j0, int j1, int k0,int k1,int Nm) { + void rotate(Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) { GridBase* grid = _v[0]._grid; @@ -62,7 +62,7 @@ class BasisFieldVector { for(int j=j0; j &logstreams) { GridLogError.Active(0); diff --git a/lib/log/Log.h b/lib/log/Log.h index 74d080bb..8db83266 100644 --- a/lib/log/Log.h +++ b/lib/log/Log.h @@ -85,6 +85,7 @@ class Logger { protected: Colours &Painter; int active; + int timing_mode; static int timestamp; std::string name, topName; std::string COLOUR; @@ -101,20 +102,24 @@ public: name(nm), topName(topNm), Painter(col_class), + timing_mode(0), COLOUR(col) {} ; void Active(int on) {active = on;}; int isActive(void) {return active;}; static void Timestamp(int on) {timestamp = on;}; - + void Reset(void) { StopWatch.Reset(); } + void TimingMode(int on) { timing_mode = on; if(on) Reset(); } + friend std::ostream& operator<< (std::ostream& stream, Logger& log){ if ( log.active ) { - stream << log.background()<< std::setw(8) << std::left << log.topName << log.background()<< " : "; - stream << log.colour() << std::setw(10) << std::left << log.name << log.background() << " : "; + stream << log.background()<< std::left << log.topName << log.background()<< " : "; + stream << log.colour() << std::left << log.name << log.background() << " : "; if ( log.timestamp ) { StopWatch.Stop(); GridTime now = StopWatch.Elapsed(); + if ( log.timing_mode==1 ) StopWatch.Reset(); StopWatch.Start(); stream << log.evidence()<< now << log.background() << " : " ; } @@ -135,6 +140,8 @@ public: void 
GridLogConfigure(std::vector &logstreams); +extern GridLogger GridLogIRL; +extern GridLogger GridLogSolver; extern GridLogger GridLogError; extern GridLogger GridLogWarning; extern GridLogger GridLogMessage; diff --git a/lib/threads/Threads.h b/lib/threads/Threads.h index d15f15ce..36daf2af 100644 --- a/lib/threads/Threads.h +++ b/lib/threads/Threads.h @@ -51,7 +51,9 @@ Author: paboyle #define PARALLEL_CRITICAL #endif +#define parallel_region PARALLEL_REGION #define parallel_for PARALLEL_FOR_LOOP for +#define parallel_for_internal PARALLEL_FOR_LOOP_INTERN for #define parallel_for_nest2 PARALLEL_NESTED_LOOP2 for namespace Grid { diff --git a/tests/debug/Test_cheby.cc b/tests/debug/Test_cheby.cc index 40544c56..72d07885 100644 --- a/tests/debug/Test_cheby.cc +++ b/tests/debug/Test_cheby.cc @@ -37,8 +37,15 @@ RealD InverseApproximation(RealD x){ RealD SqrtApproximation(RealD x){ return std::sqrt(x); } +RealD Approximation32(RealD x){ + return std::pow(x,-1.0/32.0); +} +RealD Approximation2(RealD x){ + return std::pow(x,-1.0/2.0); +} + RealD StepFunction(RealD x){ - if ( x<0.1 ) return 1.0; + if ( x<10.0 ) return 1.0; else return 0.0; } @@ -56,7 +63,6 @@ int main (int argc, char ** argv) Chebyshev ChebyInv(lo,hi,2000,InverseApproximation); - { std::ofstream of("chebyinv"); ChebyInv.csv(of); @@ -78,7 +84,6 @@ int main (int argc, char ** argv) ChebyStep.JacksonSmooth(); - { std::ofstream of("chebystepjack"); ChebyStep.csv(of); @@ -100,5 +105,30 @@ int main (int argc, char ** argv) ChebyNE.csv(of); } + lo=0.0; + hi=4.0; + Chebyshev Cheby32(lo,hi,2000,Approximation32); + { + std::ofstream of("cheby32"); + Cheby32.csv(of); + } + Cheby32.JacksonSmooth(); + { + std::ofstream of("cheby32jack"); + Cheby32.csv(of); + } + + Chebyshev ChebySqrt(lo,hi,2000,Approximation2); + { + std::ofstream of("chebysqrt"); + ChebySqrt.csv(of); + } + ChebySqrt.JacksonSmooth(); + { + std::ofstream of("chebysqrtjack"); + ChebySqrt.csv(of); + } + + Grid_finalize(); } diff --git a/tests/hmc/Test_remez.cc b/tests/hmc/Test_remez.cc index bc851173..5f4b0a25 100644 --- a/tests/hmc/Test_remez.cc +++ b/tests/hmc/Test_remez.cc @@ -38,11 +38,11 @@ int main (int argc, char ** argv) std::cout< Date: Fri, 13 Oct 2017 13:22:26 +0100 Subject: [PATCH 024/145] Final version prior to reunification --- .../BlockImplicitlyRestartedLanczos.h | 45 +++++++++---------- 1 file changed, 21 insertions(+), 24 deletions(-) diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h index 90d45193..de3f1790 100644 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h @@ -35,9 +35,6 @@ Author: Christoph Lehner //#include #include -#include -#include -#include namespace Grid { @@ -178,7 +175,7 @@ class BlockImplicitlyRestartedLanczos { //////////////////////////////// // Embedded objects //////////////////////////////// - SortEigen _sort; + // SortEigen _sort; LinearFunction &_HermOp; LinearFunction &_HermOpTest; ///////////////////////// @@ -212,11 +209,10 @@ public: return nn; } - void orthogonalize(Field& w, BasisFieldVector& evec,int k) + void orthogonalize(Field& w, std::vector& evec,int k) { OrthoTime-=usecond()/1e6; - //evec.orthogonalize(w,k); - basisOrthogonalize(evec._v,w,k); + basisOrthogonalize(evec,w,k); normalise(w); OrthoTime+=usecond()/1e6; } @@ -238,7 +234,7 @@ 
repeat →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM until convergence */ - void calc(std::vector& eval, BasisFieldVector& evec, const Field& src, int& Nconv, bool reverse, int SkipTest) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse, int SkipTest) { GridBase *grid = src._grid; assert(grid == evec[0]._grid); @@ -341,7 +337,8 @@ until convergence ////////////////////////////////// eval2_copy = eval2; - _sort.push(eval2,Nm); + // _sort.push(eval2,Nm); + std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end()); std::cout<0); - // evec.rotate(Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis - basisRotate(evec._v,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis std::cout< //memset +//#include +#include -namespace Grid { +namespace Grid { - enum IRLdiagonalisation { - IRLdiagonaliseWithDSTEGR, - IRLdiagonaliseWithQR, - IRLdiagonaliseWithEigen - }; - -//////////////////////////////////////////////////////////////////////////////// -// Helper class for sorting the evalues AND evectors by Field -// Use pointer swizzle on vectors -//////////////////////////////////////////////////////////////////////////////// template -class SortEigen { - private: - static bool less_lmd(RealD left,RealD right){ - return left > right; - } - static bool less_pair(std::pair& left, - std::pair& right){ - return left.first > (right.first); - } - - public: - void push(std::vector& lmd,std::vector& evec,int N) { - - //////////////////////////////////////////////////////////////////////// - // PAB: FIXME: VERY VERY VERY wasteful: takes a copy of the entire vector set. - // : The vector reorder should be done by pointer swizzle somehow - //////////////////////////////////////////////////////////////////////// - std::vector cpy(lmd.size(),evec[0]._grid); - for(int i=0;i > emod(lmd.size()); +void basisOrthogonalize(std::vector &basis,Field &w,int k) +{ + for(int j=0; j(lmd[i],&cpy[i]); - - partial_sort(emod.begin(),emod.begin()+N,emod.end(),less_pair); - - typename std::vector >::iterator it = emod.begin(); - for(int i=0;ifirst; - evec[i]=*(it->second); - ++it; +template +void basisRotate(std::vector &basis,Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) +{ + typedef typename Field::vector_object vobj; + GridBase* grid = basis[0]._grid; + + parallel_region + { + std::vector < vobj > B(Nm); // Thread private + + parallel_for_internal(int ss=0;ss < grid->oSites();ss++){ + for(int j=j0; j& lmd,int N) { - std::partial_sort(lmd.begin(),lmd.begin()+N,lmd.end(),less_lmd); +} + +template +void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, std::vector& idx) +{ + int vlen = idx.size(); + + assert(vlen>=1); + assert(vlen<=sort_vals.size()); + assert(vlen<=_v.size()); + + for (size_t i=0;i i); + ////////////////////////////////////// + // idx[i] is a table of desired sources giving a permutation. + // + // Swap v[i] with v[idx[i]]. 
+ // + // Find j>i for which _vnew[j] = _vold[i], + // track the move idx[j] => idx[i] + // track the move idx[i] => i + ////////////////////////////////////// + size_t j; + for (j=i;j fabs(thrs); +} + +inline std::vector basisSortGetIndex(std::vector& sort_vals) +{ + std::vector idx(sort_vals.size()); + std::iota(idx.begin(), idx.end(), 0); + + // sort indexes based on comparing values in v + std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) { + return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]); + }); + return idx; +} + +template +void basisSortInPlace(std::vector & _v,std::vector& sort_vals, bool reverse) +{ + std::vector idx = basisSortGetIndex(sort_vals); + if (reverse) + std::reverse(idx.begin(), idx.end()); + + basisReorderInPlace(_v,sort_vals,idx); +} + +// PAB: faster to compute the inner products first then fuse loops. +// If performance critical can improve. +template +void basisDeflate(const std::vector &_v,const std::vector& eval,const Field& src_orig,Field& result) { + result = zero; + assert(_v.size()==eval.size()); + int N = (int)_v.size(); + for (int i=0;i class ImplicitlyRestartedLanczos { - -private: - - int MaxIter; // Max iterations - int Nstop; // Number of evecs checked for convergence - int Nk; // Number of converged sought - int Nm; // Nm -- total number of vectors - RealD eresid; + private: + const RealD small = 1.0e-8; + int MaxIter; + int MinRestart; // Minimum number of restarts; only check for convergence after + int Nstop; // Number of evecs checked for convergence + int Nk; // Number of converged sought + // int Np; // Np -- Number of spare vecs in krylov space // == Nm - Nk + int Nm; // Nm -- total number of vectors IRLdiagonalisation diagonalisation; - //////////////////////////////////// + int orth_period; + + RealD OrthoTime; + RealD eresid, betastp; + //////////////////////////////// // Embedded objects - //////////////////////////////////// - SortEigen _sort; - LinearOperatorBase &_Linop; - OperatorFunction &_poly; - + //////////////////////////////// + LinearFunction &_HermOp; + LinearFunction &_HermOpTest; ///////////////////////// // Constructor ///////////////////////// public: - ImplicitlyRestartedLanczos(LinearOperatorBase &Linop, // op - OperatorFunction & poly, // polynomial - int _Nstop, // really sought vecs - int _Nk, // sought vecs - int _Nm, // total vecs - RealD _eresid, // resid in lmd deficit - int _MaxIter, // Max iterations - IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen ) : - _Linop(Linop), _poly(poly), - Nstop(_Nstop), Nk(_Nk), Nm(_Nm), - eresid(_eresid), MaxIter(_MaxIter), - diagonalisation(_diagonalisation) - { }; + ImplicitlyRestartedLanczos(LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + RealD _betastp, // if beta(k) < betastp: converged + int _MaxIter, // Max iterations + int _MinRestart, int _orth_period = 1, + IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : + _HermOp(HermOp), _HermOpTest(HermOpTest), + Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), + eresid(_eresid), betastp(_betastp), + MaxIter(_MaxIter) , MinRestart(_MinRestart), + orth_period(_orth_period), diagonalisation(_diagonalisation) { }; //////////////////////////////// // Helpers //////////////////////////////// - static RealD normalise(Field& v) + template static RealD normalise(T& v) { RealD nn = norm2(v); nn = sqrt(nn); v = v * (1.0/nn); return nn; } - - void orthogonalize(Field& w, std::vector& 
evec, int k) + + void orthogonalize(Field& w, std::vector& evec,int k) { - typedef typename Field::scalar_type MyComplex; - MyComplex ip; - - for(int j=0; j& eval, std::vector& evec, const Field& src, int& Nconv) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse, int SkipTest) { + GridBase *grid = src._grid; + assert(grid == evec[0]._grid); - GridBase *grid = evec[0]._grid; - assert(grid == src._grid); - - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout << GridLogMessage <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl; - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout << GridLogMessage <<" -- seek Nk = " << Nk <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- total Nm = " << Nm <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- size of eval = " << eval.size() << std::endl; - std::cout << GridLogMessage <<" -- size of evec = " << evec.size() << std::endl; + GridLogIRL.TimingMode(1); + std::cout << GridLogIRL <<"**************************************************************************"<< std::endl; + std::cout << GridLogIRL <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl; + std::cout << GridLogIRL <<"**************************************************************************"<< std::endl; + std::cout << GridLogIRL <<" -- seek Nk = " << Nk <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- total Nm = " << Nm <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- size of eval = " << eval.size() << std::endl; + std::cout << GridLogIRL <<" -- size of evec = " << evec.size() << std::endl; if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) { - std::cout << GridLogMessage << "Diagonalisation is DSTEGR "< lme(Nm); std::vector lme2(Nm); std::vector eval2(Nm); + std::vector eval2_copy(Nm); + Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); - Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); - - std::vector Iconv(Nm); - std::vector B(Nm,grid); // waste of space replicating - Field f(grid); Field v(grid); - int k1 = 1; int k2 = Nk; - - Nconv = 0; - RealD beta_k; + + Nconv = 0; // Set initial vector evec[0] = src; - std::cout << GridLogMessage <<"norm2(src)= " << norm2(src)<0); + basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + + std::cout<= MinRestart) { + std::cout << GridLogIRL << "Rotation to test convergence " << std::endl; - _Linop.HermOp(B[i],v); + Field ev0_orig(grid); + ev0_orig = evec[0]; - RealD vnum = real(innerProduct(B[i],v)); // HermOp. 
- RealD vden = norm2(B[i]); - eval2[i] = vnum/vden; - v -= eval2[i]*B[i]; - RealD vv = norm2(v); - - std::cout.precision(13); - std::cout << GridLogMessage << "[" << std::setw(3)<< std::setiosflags(std::ios_base::right) <=Nstop ){ - goto converged; - } - } // end of iter loop - - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout << GridLogError <<" ImplicitlyRestartedLanczos::calc() NOT converged."; - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; + { + std::cout << GridLogIRL << "Test convergence" << std::endl; + Field B(grid); + + for(int j = 0; j=Nstop || beta_k < betastp){ + goto converged; + } + + //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; + { + Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); // Restrict Qt to Nk x Nk + for (int k=0;k0) w -= lme[k-1] * evec[k-1]; - - ComplexD zalph = innerProduct(evec[k],w); // 4. αk:=(wk,vk) + + ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) RealD alph = real(zalph); - - w = w - alph * evec[k];// 5. wk:=wk−αkvk - + + w = w - alph * evec_k;// 5. wk:=wk−αkvk + RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop // 7. vk+1 := wk/βk+1 - + lmd[k] = alph; lme[k] = beta; - - if ( k > 0 ) orthogonalize(w,evec,k); // orthonormalise - if ( k < Nm-1) evec[k+1] = w; - - if ( beta < tiny ) std::cout << GridLogMessage << " beta is tiny "<0 && k % orth_period == 0) { + orthogonalize(w,evec,k); // orthonormalise + std::cout<& lmd, std::vector& lme, int Nk, int Nm, Eigen::MatrixXd & Qt, // Nm x Nm @@ -405,11 +565,12 @@ private: } } } + /////////////////////////////////////////////////////////////////////////// // File could end here if settle on Eigen ??? 
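[Illustrative note, not part of the patch] The convergence checks earlier in this hunk -- both the retired per-vector test and the new smoothed tester -- use the same Rayleigh-quotient criterion: the candidate eigenvalue is eval = <v, A v>/<v, v>, and the vector is accepted once the residual A v - eval*v is small relative to the requested tolerance. A minimal standalone sketch of that logic follows; the helper name is invented here, and it assumes only the innerProduct/norm2 field operations already used in this file.

    template<class Field, class Operator>
    bool RayleighQuotientTest(Operator &HermOp, const Field &v, RealD resid, RealD &eval)
    {
      Field Av(v._grid);
      HermOp(v, Av);                            // Av = A v
      RealD vnum = real(innerProduct(v, Av));   // <v, A v>, real for Hermitian A
      RealD vden = norm2(v);                    // <v, v>
      eval = vnum / vden;                       // Rayleigh quotient estimate
      Av   = Av - eval * v;                     // residual r = A v - eval v
      return norm2(Av) < resid * resid * vden;  // relative residual below tolerance?
    }

(The production code additionally normalises the residual by the square of the estimated largest eigenvalue, evalMaxApprox, before comparing against eresid*eresid.)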
/////////////////////////////////////////////////////////////////////////// - void qr_decomp(std::vector& lmd, // Nm + void QR_decomp(std::vector& lmd, // Nm std::vector& lme, // Nm int Nk, int Nm, // Nk, Nm Eigen::MatrixXd& Qt, // Nm x Nm matrix @@ -576,51 +737,50 @@ void diagonalize_lapack(std::vector& lmd, #endif } - void diagonalize_QR(std::vector& lmd, std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd & Qt, - GridBase *grid) - { - int Niter = 100*Nm; - int kmin = 1; - int kmax = Nk; - - // (this should be more sophisticated) - for(int iter=0; iter= kmin; --j){ - RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); - if(fabs(lme[j-1])+dds > dds){ - kmax = j+1; - goto continued; - } - } - Niter = iter; - return; - - continued: - for(int j=0; j dds){ - kmin = j+1; - break; - } +void diagonalize_QR(std::vector& lmd, std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd & Qt, + GridBase *grid) +{ + int QRiter = 100*Nm; + int kmin = 1; + int kmax = Nk; + + // (this should be more sophisticated) + for(int iter=0; iter= kmin; --j){ + RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); + if(fabs(lme[j-1])+dds > dds){ + kmax = j+1; + goto continued; + } + } + QRiter = iter; + return; + + continued: + for(int j=0; j dds){ + kmin = j+1; + break; } } - std::cout << GridLogError << "[QL method] Error - Too many iteration: "< Date: Fri, 13 Oct 2017 13:23:07 +0100 Subject: [PATCH 026/145] Logging improvement; reunified the Lanczos codes --- .../BlockImplicitlyRestartedLanczos.h | 789 ------------------ lib/log/Log.cc | 2 +- lib/log/Log.h | 30 +- lib/util/Init.cc | 2 +- tests/lanczos/Test_dwf_compressed_lanczos.cc | 17 +- 5 files changed, 36 insertions(+), 804 deletions(-) delete mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h deleted file mode 100644 index de3f1790..00000000 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockImplicitlyRestartedLanczos.h +++ /dev/null @@ -1,789 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/algorithms/iterative/ImplicitlyRestartedLanczos.h - - Copyright (C) 2015 - -Author: Peter Boyle -Author: paboyle -Author: Chulwoo Jung -Author: Christoph Lehner - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#ifndef GRID_BIRL_H -#define GRID_BIRL_H - -#include //memset -//#include -#include - - -namespace Grid { - -template -void basisOrthogonalize(std::vector &basis,Field &w,int k) -{ - for(int j=0; j -void basisRotate(std::vector &basis,Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) -{ - typedef typename Field::vector_object vobj; - GridBase* grid = basis[0]._grid; - - parallel_region - { - std::vector < vobj > B(Nm); // Thread private - - parallel_for_internal(int ss=0;ss < grid->oSites();ss++){ - for(int j=j0; j -void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, std::vector& idx) -{ - int vlen = idx.size(); - - assert(vlen>=1); - assert(vlen<=sort_vals.size()); - assert(vlen<=_v.size()); - - for (size_t i=0;i i); - ////////////////////////////////////// - // idx[i] is a table of desired sources giving a permutation. - // - // Swap v[i] with v[idx[i]]. - // - // Find j>i for which _vnew[j] = _vold[i], - // track the move idx[j] => idx[i] - // track the move idx[i] => i - ////////////////////////////////////// - size_t j; - for (j=i;j basisSortGetIndex(std::vector& sort_vals) -{ - std::vector idx(sort_vals.size()); - std::iota(idx.begin(), idx.end(), 0); - - // sort indexes based on comparing values in v - std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) { - return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]); - }); - return idx; -} - -template -void basisSortInPlace(std::vector & _v,std::vector& sort_vals, bool reverse) -{ - std::vector idx = basisSortGetIndex(sort_vals); - if (reverse) - std::reverse(idx.begin(), idx.end()); - - basisReorderInPlace(_v,sort_vals,idx); -} - -// PAB: faster to compute the inner products first then fuse loops. -// If performance critical can improve. 
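[Illustrative note, not part of the patch] The basisDeflate helper that follows in this listing (the same routine now lives in ImplicitlyRestartedLanczos.h after the reunification) builds a deflated guess from converged eigenpairs: x0 = sum_i v_i <v_i, b> / lambda_i, which is then handed to an outer solver as its starting vector. A rough sketch of that sum, using only field operations of the kind visible in this file (the function name is illustrative, and the complex-scalar-times-field multiply is assumed to behave like the real-scalar case used elsewhere):

    template<class Field>
    void DeflatedGuess(const std::vector<Field> &evec, const std::vector<RealD> &eval,
                       const Field &b, Field &x0)
    {
      assert(evec.size() == eval.size());
      x0 = zero;
      for (int i = 0; i < (int)evec.size(); i++) {
        ComplexD c = innerProduct(evec[i], b);  // <v_i, b>
        x0 = x0 + (c / eval[i]) * evec[i];      // accumulate v_i <v_i, b> / lambda_i
      }
    }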
-template -void basisDeflate(const std::vector &_v,const std::vector& eval,const Field& src_orig,Field& result) { - result = zero; - assert(_v.size()==eval.size()); - int N = (int)_v.size(); - for (int i=0;i -class BlockImplicitlyRestartedLanczos { - private: - const RealD small = 1.0e-8; - int MaxIter; - int MinRestart; // Minimum number of restarts; only check for convergence after - int Nstop; // Number of evecs checked for convergence - int Nk; // Number of converged sought - // int Np; // Np -- Number of spare vecs in krylov space // == Nm - Nk - int Nm; // Nm -- total number of vectors - IRLdiagonalisation diagonalisation; - int orth_period; - - RealD OrthoTime; - RealD eresid, betastp; - //////////////////////////////// - // Embedded objects - //////////////////////////////// - // SortEigen _sort; - LinearFunction &_HermOp; - LinearFunction &_HermOpTest; - ///////////////////////// - // Constructor - ///////////////////////// -public: - BlockImplicitlyRestartedLanczos(LinearFunction & HermOp, - LinearFunction & HermOpTest, - int _Nstop, // sought vecs - int _Nk, // sought vecs - int _Nm, // spare vecs - RealD _eresid, // resid in lmdue deficit - RealD _betastp, // if beta(k) < betastp: converged - int _MaxIter, // Max iterations - int _MinRestart, int _orth_period = 1, - IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : - _HermOp(HermOp), _HermOpTest(HermOpTest), - Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), - eresid(_eresid), betastp(_betastp), - MaxIter(_MaxIter) , MinRestart(_MinRestart), - orth_period(_orth_period), diagonalisation(_diagonalisation) { }; - - //////////////////////////////// - // Helpers - //////////////////////////////// - template static RealD normalise(T& v) - { - RealD nn = norm2(v); - nn = sqrt(nn); - v = v * (1.0/nn); - return nn; - } - - void orthogonalize(Field& w, std::vector& evec,int k) - { - OrthoTime-=usecond()/1e6; - basisOrthogonalize(evec,w,k); - normalise(w); - OrthoTime+=usecond()/1e6; - } - -/* Rudy Arthur's thesis pp.137 ------------------------- -Require: M > K P = M − K † -Compute the factorization AVM = VM HM + fM eM -repeat - Q=I - for i = 1,...,P do - QiRi =HM −θiI Q = QQi - H M = Q †i H M Q i - end for - βK =HM(K+1,K) σK =Q(M,K) - r=vK+1βK +rσK - VK =VM(1:M)Q(1:M,1:K) - HK =HM(1:K,1:K) - →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM -until convergence -*/ - void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse, int SkipTest) - { - GridBase *grid = src._grid; - assert(grid == evec[0]._grid); - - GridLogIRL.TimingMode(1); - std::cout << GridLogIRL <<"**************************************************************************"<< std::endl; - std::cout << GridLogIRL <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl; - std::cout << GridLogIRL <<"**************************************************************************"<< std::endl; - std::cout << GridLogIRL <<" -- seek Nk = " << Nk <<" vectors"<< std::endl; - std::cout << GridLogIRL <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl; - std::cout << GridLogIRL <<" -- total Nm = " << Nm <<" vectors"<< std::endl; - std::cout << GridLogIRL <<" -- size of eval = " << eval.size() << std::endl; - std::cout << GridLogIRL <<" -- size of evec = " << evec.size() << std::endl; - if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) { - std::cout << GridLogIRL << "Diagonalisation is DSTEGR "< lme(Nm); - std::vector lme2(Nm); - std::vector eval2(Nm); - std::vector eval2_copy(Nm); - 
Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); - - Field f(grid); - Field v(grid); - int k1 = 1; - int k2 = Nk; - RealD beta_k; - - Nconv = 0; - - // Set initial vector - evec[0] = src; - normalise(evec[0]); - - // Initial Nk steps - OrthoTime=0.; - for(int k=0; k0); - basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis - - std::cout<= MinRestart) { - std::cout << GridLogIRL << "Rotation to test convergence " << std::endl; - - Field ev0_orig(grid); - ev0_orig = evec[0]; - - basisRotate(evec,Qt,0,Nk,0,Nk,Nm); - - { - std::cout << GridLogIRL << "Test convergence" << std::endl; - Field B(grid); - - for(int j = 0; j=Nstop || beta_k < betastp){ - goto converged; - } - - //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; - { - Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); // Restrict Qt to Nk x Nk - for (int k=0;k& lmd, - std::vector& lme, - std::vector& evec, - Field& w,int Nm,int k) - { - const RealD tiny = 1.0e-20; - assert( k< Nm ); - - GridStopWatch gsw_op,gsw_o; - - Field& evec_k = evec[k]; - - _HermOp(evec_k,w); - std::cout<0) w -= lme[k-1] * evec[k-1]; - - ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) - RealD alph = real(zalph); - - w = w - alph * evec_k;// 5. wk:=wk−αkvk - - RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop - // 7. vk+1 := wk/βk+1 - - lmd[k] = alph; - lme[k] = beta; - - std::cout<0 && k % orth_period == 0) { - orthogonalize(w,evec,k); // orthonormalise - std::cout<& lmd, std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd & Qt, // Nm x Nm - GridBase *grid) - { - Eigen::MatrixXd TriDiag = Eigen::MatrixXd::Zero(Nk,Nk); - - for(int i=0;i eigensolver(TriDiag); - - for (int i = 0; i < Nk; i++) { - lmd[Nk-1-i] = eigensolver.eigenvalues()(i); - } - for (int i = 0; i < Nk; i++) { - for (int j = 0; j < Nk; j++) { - Qt(Nk-1-i,j) = eigensolver.eigenvectors()(j,i); - } - } - } - - /////////////////////////////////////////////////////////////////////////// - // File could end here if settle on Eigen ??? 
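[Illustrative note, not part of the patch] The diagonalize_Eigen routine above (this deleted copy is identical to the one kept in ImplicitlyRestartedLanczos.h) only has to diagonalise the small Nk x Nk tridiagonal matrix built from the Lanczos coefficients lmd (diagonal) and lme (sub-diagonal). Stripped of the Grid types, that step is plain Eigen and can be sketched as:

    #include <Eigen/Dense>
    #include <vector>

    // Sketch only: diagonalise the tridiagonal Lanczos matrix T with Eigen.
    void tridiagonalEigen(const std::vector<double> &lmd, const std::vector<double> &lme,
                          int Nk, std::vector<double> &evals, Eigen::MatrixXd &Qt)
    {
      Eigen::MatrixXd T = Eigen::MatrixXd::Zero(Nk, Nk);
      for (int i = 0; i < Nk; i++) {
        T(i, i) = lmd[i];
        if (i > 0) { T(i, i - 1) = lme[i - 1]; T(i - 1, i) = lme[i - 1]; }
      }
      Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(T);
      evals.resize(Nk);
      Qt = Eigen::MatrixXd::Zero(Nk, Nk);
      for (int i = 0; i < Nk; i++) {
        evals[i] = es.eigenvalues()(i);          // ascending order
        for (int j = 0; j < Nk; j++)
          Qt(i, j) = es.eigenvectors()(j, i);    // row i holds the i-th eigenvector
      }
      // The routine above additionally stores eigenvalues and rows of Qt in
      // reversed (descending) order before returning.
    }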
- /////////////////////////////////////////////////////////////////////////// - - void QR_decomp(std::vector& lmd, // Nm - std::vector& lme, // Nm - int Nk, int Nm, // Nk, Nm - Eigen::MatrixXd& Qt, // Nm x Nm matrix - RealD Dsh, int kmin, int kmax) - { - int k = kmin-1; - RealD x; - - RealD Fden = 1.0/hypot(lmd[k]-Dsh,lme[k]); - RealD c = ( lmd[k] -Dsh) *Fden; - RealD s = -lme[k] *Fden; - - RealD tmpa1 = lmd[k]; - RealD tmpa2 = lmd[k+1]; - RealD tmpb = lme[k]; - - lmd[k] = c*c*tmpa1 +s*s*tmpa2 -2.0*c*s*tmpb; - lmd[k+1] = s*s*tmpa1 +c*c*tmpa2 +2.0*c*s*tmpb; - lme[k] = c*s*(tmpa1-tmpa2) +(c*c-s*s)*tmpb; - x =-s*lme[k+1]; - lme[k+1] = c*lme[k+1]; - - for(int i=0; i& lmd, std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd & Qt, - GridBase *grid) - { - Qt = Eigen::MatrixXd::Identity(Nm,Nm); - if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) { - diagonalize_lapack(lmd,lme,Nk,Nm,Qt,grid); - } else if ( diagonalisation == IRLdiagonaliseWithQR ) { - diagonalize_QR(lmd,lme,Nk,Nm,Qt,grid); - } else if ( diagonalisation == IRLdiagonaliseWithEigen ) { - diagonalize_Eigen(lmd,lme,Nk,Nm,Qt,grid); - } else { - assert(0); - } - } - -#ifdef USE_LAPACK -void LAPACK_dstegr(char *jobz, char *range, int *n, double *d, double *e, - double *vl, double *vu, int *il, int *iu, double *abstol, - int *m, double *w, double *z, int *ldz, int *isuppz, - double *work, int *lwork, int *iwork, int *liwork, - int *info); -#endif - -void diagonalize_lapack(std::vector& lmd, - std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd& Qt, - GridBase *grid) -{ -#ifdef USE_LAPACK - const int size = Nm; - int NN = Nk; - double evals_tmp[NN]; - double evec_tmp[NN][NN]; - memset(evec_tmp[0],0,sizeof(double)*NN*NN); - double DD[NN]; - double EE[NN]; - for (int i = 0; i< NN; i++) { - for (int j = i - 1; j <= i + 1; j++) { - if ( j < NN && j >= 0 ) { - if (i==j) DD[i] = lmd[i]; - if (i==j) evals_tmp[i] = lmd[i]; - if (j==(i-1)) EE[j] = lme[j]; - } - } - } - int evals_found; - int lwork = ( (18*NN) > (1+4*NN+NN*NN)? (18*NN):(1+4*NN+NN*NN)) ; - int liwork = 3+NN*10 ; - int iwork[liwork]; - double work[lwork]; - int isuppz[2*NN]; - char jobz = 'V'; // calculate evals & evecs - char range = 'I'; // calculate all evals - // char range = 'A'; // calculate all evals - char uplo = 'U'; // refer to upper half of original matrix - char compz = 'I'; // Compute eigenvectors of tridiagonal matrix - int ifail[NN]; - int info; - int total = grid->_Nprocessors; - int node = grid->_processor; - int interval = (NN/total)+1; - double vl = 0.0, vu = 0.0; - int il = interval*node+1 , iu = interval*(node+1); - if (iu > NN) iu=NN; - double tol = 0.0; - if (1) { - memset(evals_tmp,0,sizeof(double)*NN); - if ( il <= NN){ - LAPACK_dstegr(&jobz, &range, &NN, - (double*)DD, (double*)EE, - &vl, &vu, &il, &iu, // these four are ignored if second parameteris 'A' - &tol, // tolerance - &evals_found, evals_tmp, (double*)evec_tmp, &NN, - isuppz, - work, &lwork, iwork, &liwork, - &info); - for (int i = iu-1; i>= il-1; i--){ - evals_tmp[i] = evals_tmp[i - (il-1)]; - if (il>1) evals_tmp[i-(il-1)]=0.; - for (int j = 0; j< NN; j++){ - evec_tmp[i][j] = evec_tmp[i - (il-1)][j]; - if (il>1) evec_tmp[i-(il-1)][j]=0.; - } - } - } - { - grid->GlobalSumVector(evals_tmp,NN); - grid->GlobalSumVector((double*)evec_tmp,NN*NN); - } - } - // Safer to sort instead of just reversing it, - // but the document of the routine says evals are sorted in increasing order. - // qr gives evals in decreasing order. 
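[Illustrative note, not part of the patch] The QR_decomp kernel removed above (kept under the same name in ImplicitlyRestartedLanczos.h) performs one implicitly shifted QR sweep on the tridiagonal matrix. With shift sigma = Dsh and r = hypot(d_k - sigma, e_k), the first Givens rotation uses c = (d_k - sigma)/r and s = -e_k/r, and the 2x2 update it applies is exactly the one coded above:

    d_k'     = c^2 d_k + s^2 d_{k+1} - 2 c s e_k
    d_{k+1}' = s^2 d_k + c^2 d_{k+1} + 2 c s e_k
    e_k'     = c s (d_k - d_{k+1}) + (c^2 - s^2) e_k

Subsequent rotations chase the bulge (the temporary x = -s e_{k+1}) down the band, and every rotation is accumulated into Qt so the same transformation can later be applied to the Lanczos basis via basisRotate.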
- for(int i=0;i& lmd, std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd & Qt, - GridBase *grid) - { - int QRiter = 100*Nm; - int kmin = 1; - int kmax = Nk; - - // (this should be more sophisticated) - for(int iter=0; iter= kmin; --j){ - RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); - if(fabs(lme[j-1])+dds > dds){ - kmax = j+1; - goto continued; - } - } - QRiter = iter; - return; - - continued: - for(int j=0; j dds){ - kmin = j+1; - break; - } - } - } - std::cout << GridLogError << "[QL method] Error - Too many iteration: "<Reset(); + StopWatch->Start(); + } + void TimingMode(int on) { + timing_mode = on; + if(on) { + StopWatch = &LocalStopWatch; + Reset(); + } + } friend std::ostream& operator<< (std::ostream& stream, Logger& log){ @@ -117,10 +131,10 @@ public: stream << log.background()<< std::left << log.topName << log.background()<< " : "; stream << log.colour() << std::left << log.name << log.background() << " : "; if ( log.timestamp ) { - StopWatch.Stop(); - GridTime now = StopWatch.Elapsed(); - if ( log.timing_mode==1 ) StopWatch.Reset(); - StopWatch.Start(); + log.StopWatch->Stop(); + GridTime now = log.StopWatch->Elapsed(); + if ( log.timing_mode==1 ) log.StopWatch->Reset(); + log.StopWatch->Start(); stream << log.evidence()<< now << log.background() << " : " ; } stream << log.colour(); diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 1266d34d..031f8f5a 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -208,7 +208,7 @@ static int Grid_is_initialised = 0; void Grid_init(int *argc,char ***argv) { - GridLogger::StopWatch.Start(); + GridLogger::GlobalStopWatch.Start(); std::string arg; diff --git a/tests/lanczos/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc index 7fe37387..544d0358 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -21,7 +21,14 @@ (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough) */ #include -#include +#include +///////////////////////////////////////////////////////////////////////////// +// The following are now decoupled from the Lanczos and deal with grids. 
+// Safe to replace functionality +///////////////////////////////////////////////////////////////////////////// +#include +#include +#include #include "FieldVectorIO.h" #include "Params.h" @@ -319,7 +326,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo Op2 = &Op2plain; } ProjectedHermOp,LatticeFermion> Op2nopoly(pr,HermOp); - BlockImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2); + ImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2); src_coarse = 1.0; @@ -350,7 +357,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo ) { - IRL2.calc(eval2,coef,src_coarse,Nconv,true,SkipTest2); + IRL2.calc(eval2,coef._v,src_coarse,Nconv,true,SkipTest2); coef.resize(Nstop2); eval2.resize(Nstop2); @@ -641,7 +648,7 @@ int main (int argc, char ** argv) { } // First round of Lanczos to get low mode basis - BlockImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1); + ImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1); int Nconv; char tag[1024]; @@ -650,7 +657,7 @@ int main (int argc, char ** argv) { if (simple_krylov_basis) { quick_krylov_basis(evec,src,Op1,Nstop1); } else { - IRL1.calc(eval1,evec,src,Nconv,false,1); + IRL1.calc(eval1,evec._v,src,Nconv,false,1); } evec.resize(Nstop1); // and throw away superfluous eval1.resize(Nstop1); From e325929851aa0e26055875a22b39aee39ed186cd Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 13 Oct 2017 14:02:43 +0100 Subject: [PATCH 027/145] ALl codes compile against the new Lanczos call signature --- lib/algorithms/LinearOperator.h | 59 +++++++++++++++++++ lib/algorithms/approx/Chebyshev.h | 35 ----------- .../iterative/ImplicitlyRestartedLanczos.h | 6 +- tests/lanczos/Test_dwf_compressed_lanczos.cc | 30 +--------- tests/lanczos/Test_dwf_lanczos.cc | 11 ++-- tests/lanczos/Test_synthetic_lanczos.cc | 10 ++-- tests/lanczos/Test_wilson_lanczos.cc | 9 ++- 7 files changed, 82 insertions(+), 78 deletions(-) diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index f1b8820e..0d32cc15 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -346,6 +346,7 @@ namespace Grid { virtual void operator() (const Field &in, Field &out) = 0; }; + ///////////////////////////////////////////////////////////// // Base classes for Multishift solvers for operators ///////////////////////////////////////////////////////////// @@ -368,6 +369,64 @@ namespace Grid { }; */ + //////////////////////////////////////////////////////////////////////////////////////////// + // Hermitian operator Linear function and operator function + //////////////////////////////////////////////////////////////////////////////////////////// + template + class HermOpOperatorFunction : public OperatorFunction { + void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { + Linop.HermOp(in,out); + }; + }; + + template + class PlainHermOp : public LinearFunction { + public: + LinearOperatorBase &_Linop; + + PlainHermOp(LinearOperatorBase& linop) : _Linop(linop) + {} + + void operator()(const Field& in, Field& out) { + _Linop.HermOp(in,out); + } + }; + + template + class FunctionHermOp : public LinearFunction { + public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + + FunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop) + : _poly(poly), _Linop(linop) {}; + + void operator()(const Field& in, Field& out) { + 
_poly(_Linop,in,out); + } + }; + + template + class Polynomial : public OperatorFunction { + private: + std::vector Coeffs; + public: + Polynomial(std::vector &_Coeffs) : Coeffs(_Coeffs) { }; + + // Implement the required interface + void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { + + Field AtoN(in._grid); + Field Mtmp(in._grid); + AtoN = in; + out = AtoN*Coeffs[0]; + for(int n=1;n namespace Grid { - //////////////////////////////////////////////////////////////////////////////////////////// - // Simple general polynomial with user supplied coefficients - //////////////////////////////////////////////////////////////////////////////////////////// - template - class HermOpOperatorFunction : public OperatorFunction { - void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { - Linop.HermOp(in,out); - }; - }; - - template - class Polynomial : public OperatorFunction { - private: - std::vector Coeffs; - public: - Polynomial(std::vector &_Coeffs) : Coeffs(_Coeffs) { }; - - // Implement the required interface - void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { - - Field AtoN(in._grid); - Field Mtmp(in._grid); - AtoN = in; - out = AtoN*Coeffs[0]; -// std::cout <<"Poly in " <& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse, int SkipTest) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=true, int SkipTest=0) { GridBase *grid = src._grid; assert(grid == evec[0]._grid); diff --git a/tests/lanczos/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc index 544d0358..10d6c3ae 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -100,19 +100,6 @@ void write_history(char* fn, std::vector& hist) { fclose(f); } -template -class FunctionHermOp : public LinearFunction { -public: - OperatorFunction & _poly; - LinearOperatorBase &_Linop; - - FunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop) { - } - - void operator()(const Field& in, Field& out) { - _poly(_Linop,in,out); - } -}; template class CheckpointedLinearFunction : public LinearFunction { @@ -268,19 +255,6 @@ public: } }; -template -class PlainHermOp : public LinearFunction { -public: - LinearOperatorBase &_Linop; - - PlainHermOp(LinearOperatorBase& linop) : _Linop(linop) { - } - - void operator()(const Field& in, Field& out) { - _Linop.HermOp(in,out); - } -}; - template using CoarseSiteFieldGeneral = iScalar< iVector >; template using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >; template using CoarseSiteFieldF = CoarseSiteFieldGeneral< vComplexF, N >; @@ -326,7 +300,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo Op2 = &Op2plain; } ProjectedHermOp,LatticeFermion> Op2nopoly(pr,HermOp); - ImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,betastp2,MaxIt,MinRes2); + ImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,MaxIt,betastp2,MinRes2); src_coarse = 1.0; @@ -648,7 +622,7 @@ int main (int argc, char ** argv) { } // First round of Lanczos to get low mode basis - ImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,betastp1,MaxIt,MinRes1); + ImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,MaxIt,betastp1,MinRes1); int Nconv; char tag[1024]; diff --git a/tests/lanczos/Test_dwf_lanczos.cc b/tests/lanczos/Test_dwf_lanczos.cc index 1dd5dae3..b1e205cf 100644 --- a/tests/lanczos/Test_dwf_lanczos.cc 
+++ b/tests/lanczos/Test_dwf_lanczos.cc @@ -84,11 +84,12 @@ int main (int argc, char ** argv) std::vector Coeffs { 0.,-1.}; Polynomial PolyX(Coeffs); - Chebyshev Cheb(0.2,5.,11); -// ChebyshevLanczos Cheb(9.,1.,0.,20); -// Cheb.csv(std::cout); -// exit(-24); - ImplicitlyRestartedLanczos IRL(HermOp,Cheb,Nstop,Nk,Nm,resid,MaxIt); + Chebyshev Cheby(0.2,5.,11); + + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op (HermOp); + + ImplicitlyRestartedLanczos IRL(OpCheby,Op,Nstop,Nk,Nm,resid,MaxIt); std::vector eval(Nm); diff --git a/tests/lanczos/Test_synthetic_lanczos.cc b/tests/lanczos/Test_synthetic_lanczos.cc index 32fd6f32..4be9ca31 100644 --- a/tests/lanczos/Test_synthetic_lanczos.cc +++ b/tests/lanczos/Test_synthetic_lanczos.cc @@ -119,12 +119,13 @@ int main (int argc, char ** argv) RealD beta = 0.1; RealD mu = 0.0; int order = 11; - ChebyshevLanczos Cheby(alpha,beta,mu,order); + Chebyshev Cheby(alpha,beta,order); std::ofstream file("cheby.dat"); Cheby.csv(file); - HermOpOperatorFunction X; DumbOperator HermOp(grid); + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op(HermOp); const int Nk = 40; const int Nm = 80; @@ -133,8 +134,9 @@ int main (int argc, char ** argv) int Nconv; RealD eresid = 1.0e-6; - ImplicitlyRestartedLanczos IRL(HermOp,X,Nk,Nk,Nm,eresid,Nit); - ImplicitlyRestartedLanczos ChebyIRL(HermOp,Cheby,Nk,Nk,Nm,eresid,Nit); + + ImplicitlyRestartedLanczos IRL(Op,Op,Nk,Nk,Nm,eresid,Nit); + ImplicitlyRestartedLanczos ChebyIRL(OpCheby,Op,Nk,Nk,Nm,eresid,Nit); LatticeComplex src(grid); gaussian(RNG,src); { diff --git a/tests/lanczos/Test_wilson_lanczos.cc b/tests/lanczos/Test_wilson_lanczos.cc index e8549234..eabc86d7 100644 --- a/tests/lanczos/Test_wilson_lanczos.cc +++ b/tests/lanczos/Test_wilson_lanczos.cc @@ -86,9 +86,12 @@ int main(int argc, char** argv) { std::vector Coeffs{0, 1.}; Polynomial PolyX(Coeffs); - Chebyshev Cheb(0.0, 10., 12); - ImplicitlyRestartedLanczos IRL(HermOp, PolyX, Nstop, Nk, Nm, - resid, MaxIt); + Chebyshev Cheby(0.0, 10., 12); + + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op (HermOp); + + ImplicitlyRestartedLanczos IRL(OpCheby, Op, Nstop, Nk, Nm, resid, MaxIt); std::vector eval(Nm); FermionField src(FGrid); From 27936900e63c51aa6d52d2cb2cfdfa2936adbbd3 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Wed, 18 Oct 2017 13:08:09 +0100 Subject: [PATCH 028/145] Putting the FG verbosity in the Integrator level --- lib/qcd/hmc/integrators/Integrator_algorithm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/qcd/hmc/integrators/Integrator_algorithm.h b/lib/qcd/hmc/integrators/Integrator_algorithm.h index ecc125ef..13a37aeb 100644 --- a/lib/qcd/hmc/integrators/Integrator_algorithm.h +++ b/lib/qcd/hmc/integrators/Integrator_algorithm.h @@ -231,7 +231,7 @@ class ForceGradient : public Integrator Date: Wed, 25 Oct 2017 10:24:14 +0100 Subject: [PATCH 029/145] Solving again the MPI comm bug with FFTs --- lib/cartesian/Cartesian_base.h | 2 ++ lib/cartesian/Cartesian_full.h | 2 ++ lib/cartesian/Cartesian_red_black.h | 2 ++ lib/communicator/Communicator_base.h | 1 + lib/communicator/Communicator_mpi.cc | 6 ++++++ lib/communicator/Communicator_mpit.cc | 6 ++++++ lib/communicator/Communicator_none.cc | 2 ++ 7 files changed, 21 insertions(+) diff --git a/lib/cartesian/Cartesian_base.h b/lib/cartesian/Cartesian_base.h index 324772c5..6aa0e3c7 100644 --- a/lib/cartesian/Cartesian_base.h +++ b/lib/cartesian/Cartesian_base.h @@ -52,6 +52,8 @@ public: GridBase(const std::vector & processor_grid, const CartesianCommunicator &parent) : 
CartesianCommunicator(processor_grid,parent) {}; + virtual ~GridBase() = default; + // Physics Grid information. std::vector _simd_layout;// Which dimensions get relayed out over simd lanes. std::vector _fdimensions;// (full) Global dimensions of array prior to cb removal diff --git a/lib/cartesian/Cartesian_full.h b/lib/cartesian/Cartesian_full.h index a6a85ab7..c7ea68c9 100644 --- a/lib/cartesian/Cartesian_full.h +++ b/lib/cartesian/Cartesian_full.h @@ -81,6 +81,8 @@ public: Init(dimensions,simd_layout,processor_grid); } + virtual ~GridCartesian() = default; + void Init(const std::vector &dimensions, const std::vector &simd_layout, const std::vector &processor_grid) diff --git a/lib/cartesian/Cartesian_red_black.h b/lib/cartesian/Cartesian_red_black.h index f89cacc5..166c8491 100644 --- a/lib/cartesian/Cartesian_red_black.h +++ b/lib/cartesian/Cartesian_red_black.h @@ -133,6 +133,8 @@ public: { Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim) ; } + + virtual ~GridRedBlackCartesian() = default; #if 0 //////////////////////////////////////////////////////////// // Create redblack grid ;; deprecate these. Should not diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 8ff22dbd..22c9e4d0 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -155,6 +155,7 @@ class CartesianCommunicator { //////////////////////////////////////////////// CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent); CartesianCommunicator(const std::vector &pdimensions_in); + virtual ~CartesianCommunicator(); private: #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 678e4517..f1dad1e9 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -52,6 +52,12 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); ShmInitGeneric(); } + +CartesianCommunicator::~CartesianCommunicator(){ +  if (communicator && !MPI::Is_finalized()) +  MPI_Comm_free(&communicator); +} + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index 5137c27b..2d257a44 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -53,6 +53,12 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } +CartesianCommunicator::~CartesianCommunicator(){ +  if (communicator && !MPI::Is_finalized()) +  MPI_Comm_free(&communicator); +} + + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index e9d71a15..629a3e4a 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -56,6 +56,8 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } +CartesianCommunicator::~CartesianCommunicator(){} + void CartesianCommunicator::GlobalSum(float &){} void CartesianCommunicator::GlobalSumVector(float *,int N){} void CartesianCommunicator::GlobalSum(double &){} From 8a3aae98f6ffba03dcb85e1be23cd387a510e35d Mon Sep 17 00:00:00 2001 
From: Guido Cossu Date: Wed, 25 Oct 2017 10:34:49 +0100 Subject: [PATCH 030/145] Solving minor bug in compilation --- lib/communicator/Communicator_mpi.cc | 7 ++++--- lib/communicator/Communicator_mpit.cc | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index f1dad1e9..5a2dc4d0 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -53,9 +53,10 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } -CartesianCommunicator::~CartesianCommunicator(){ -  if (communicator && !MPI::Is_finalized()) -  MPI_Comm_free(&communicator); +CartesianCommunicator::~CartesianCommunicator() +{ + if (communicator && !MPI::Is_finalized()) + MPI_Comm_free(&communicator); } void CartesianCommunicator::GlobalSum(uint32_t &u){ diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index 2d257a44..15ee13fd 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -53,9 +53,10 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } -CartesianCommunicator::~CartesianCommunicator(){ -  if (communicator && !MPI::Is_finalized()) -  MPI_Comm_free(&communicator); +CartesianCommunicator::~CartesianCommunicator() +{ + if (communicator && !MPI::Is_finalized()) + MPI_Comm_free(&communicator); } From 28ba8a0f481f0451b5dc22691fe0ad35963af55a Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:45:57 +0100 Subject: [PATCH 031/145] Force spacing more nicely --- lib/log/Log.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/log/Log.h b/lib/log/Log.h index 1b4732ab..ddff4c1d 100644 --- a/lib/log/Log.h +++ b/lib/log/Log.h @@ -135,7 +135,7 @@ public: GridTime now = log.StopWatch->Elapsed(); if ( log.timing_mode==1 ) log.StopWatch->Reset(); log.StopWatch->Start(); - stream << log.evidence()<< now << log.background() << " : " ; + stream << log.evidence()<< std::setw(6)< Date: Wed, 25 Oct 2017 23:46:33 +0100 Subject: [PATCH 032/145] Improvements for coarse grid compressed lanczos --- lib/algorithms/CoarsenedMatrix.h | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lib/algorithms/CoarsenedMatrix.h b/lib/algorithms/CoarsenedMatrix.h index c2910151..8af8d7ac 100644 --- a/lib/algorithms/CoarsenedMatrix.h +++ b/lib/algorithms/CoarsenedMatrix.h @@ -103,29 +103,32 @@ namespace Grid { GridBase *CoarseGrid; GridBase *FineGrid; std::vector > subspace; + int checkerboard; - Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid) : - CoarseGrid(_CoarseGrid), + Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) : + CoarseGrid(_CoarseGrid), FineGrid(_FineGrid), - subspace(nbasis,_FineGrid) + subspace(nbasis,_FineGrid), + checkerboard(_checkerboard) { }; void Orthogonalise(void){ CoarseScalar InnerProd(CoarseGrid); + std::cout << GridLogMessage <<" Gramm-Schmidt pass 1"< pokey(CoarseGrid); - - for(int i=0;ioSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ eProj._odata[ss](i)=CComplex(1.0); } eProj=eProj - iProj; @@ -137,6 +140,7 @@ namespace Grid { blockProject(CoarseVec,FineVec,subspace); } void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ + FineVec.checkerboard = subspace[0].checkerboard; blockPromote(CoarseVec,FineVec,subspace); } void CreateSubspaceRandom(GridParallelRNG &RNG){ @@ -147,6 +151,7 @@ namespace Grid { Orthogonalise(); } + /* virtual void 
CreateSubspaceLanczos(GridParallelRNG &RNG,LinearOperatorBase &hermop,int nn=nbasis) { // Run a Lanczos with sloppy convergence @@ -195,7 +200,7 @@ namespace Grid { std::cout << GridLogMessage <<"subspace["< &hermop,int nn=nbasis) { RealD scale; From d83868fdbbc6a3e9f67c966a190d517a2fb7f9f7 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:47:10 +0100 Subject: [PATCH 033/145] Identity linear op added -- useful in circumstances where a linear op may or may not be needed. Supply a trivial one if not needed --- lib/algorithms/LinearOperator.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 0d32cc15..2a68a7b9 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -346,6 +346,13 @@ namespace Grid { virtual void operator() (const Field &in, Field &out) = 0; }; + template class IdentityLinearFunction : public LinearFunction { + public: + void operator() (const Field &in, Field &out){ + out = in; + }; + }; + ///////////////////////////////////////////////////////////// // Base classes for Multishift solvers for operators From f6c3f6bf2d6ff210e25844b64f0d09fe5d074212 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:47:59 +0100 Subject: [PATCH 034/145] XML serialisation of parms and initialise from parms object --- lib/algorithms/approx/Chebyshev.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/algorithms/approx/Chebyshev.h b/lib/algorithms/approx/Chebyshev.h index 7a6e9a9b..b34fac7f 100644 --- a/lib/algorithms/approx/Chebyshev.h +++ b/lib/algorithms/approx/Chebyshev.h @@ -34,6 +34,12 @@ Author: Christoph Lehner namespace Grid { +struct ChebyParams : Serializable { + GRID_SERIALIZABLE_CLASS_MEMBERS(ChebyParams, + RealD, alpha, + RealD, beta, + int, Npoly); +}; //////////////////////////////////////////////////////////////////////////////////////////// // Generic Chebyshev approximations @@ -67,6 +73,7 @@ namespace Grid { }; Chebyshev(){}; + Chebyshev(ChebyParams p){ Init(p.alpha,p.beta,p.Npoly);}; Chebyshev(RealD _lo,RealD _hi,int _order, RealD (* func)(RealD) ) {Init(_lo,_hi,_order,func);}; Chebyshev(RealD _lo,RealD _hi,int _order) {Init(_lo,_hi,_order);}; From a479325349d5eed9351abe5adf267311d8b6d34c Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:48:47 +0100 Subject: [PATCH 035/145] Rewrite of local coherence lanczos --- .../Test_dwf_compressed_lanczos_reorg.cc | 518 ++++++++++++++++++ 1 file changed, 518 insertions(+) create mode 100644 tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc new file mode 100644 index 00000000..a0691116 --- /dev/null +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc @@ -0,0 +1,518 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_dwf_compressed_lanczos_reorg.cc + + Copyright (C) 2017 + +Author: Leans heavily on Christoph Lehner's code +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +/* + * Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features + * in Grid that were intended to be used to support blocked Aggregates, from + */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +struct LanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams, + ChebyParams, Cheby,/*Chebyshev*/ + int, Nstop, /*Vecs in Lanczos must converge Nstop < Nk < Nm*/ + int, Nk, /*Vecs in Lanczos seek converge*/ + int, Nm, /*Total vecs in Lanczos include restart*/ + RealD, resid, /*residual*/ + int, MaxIt, + RealD, betastp, /* ? */ + int, MinRes); // Must restart +}; + +struct CompressedLanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(CompressedLanczosParams, + LanczosParams, FineParams, + LanczosParams, CoarseParams, + ChebyParams, Smoother, + std::vector, blockSize, + std::string, config, + std::vector < std::complex >, omega, + RealD, mass, + RealD, M5 + ); +}; + +// Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function +template +class ProjectedHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedHermOp(LinearOperatorBase& linop, Aggregation &aggregate) : + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + FineField fin(FineGrid); + FineField fout(FineGrid); + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ProjectedFunctionHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, + Aggregation &aggregate) : + _poly(poly), + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + + FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; + FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanczosTester > > +{ + public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearFunction & _Poly; + OperatorFunction & _smoother; + LinearOperatorBase 
&_Linop; + Aggregation &_Aggregate; + + ImplicitlyRestartedLanczosSmoothedTester(LinearFunction &Poly, + OperatorFunction &smoother, + LinearOperatorBase &Linop, + Aggregation &Aggregate) + : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly) { }; + + int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) + { + CoarseField v(B); + RealD eval_poly = eval; + // Apply operator + _Poly(B,v); + + RealD vnum = real(innerProduct(B,v)); // HermOp. + RealD vden = norm2(B); + RealD vv0 = norm2(v); + eval = vnum/vden; + v -= eval*B; + + RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); + + std::cout.precision(13); + std::cout< +class CoarseFineIRL +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice CoarseField; + typedef Lattice FineField; + +private: + GridBase *_CoarseGrid; + GridBase *_FineGrid; + int _checkerboard; + LinearOperatorBase & _FineOp; + + // FIXME replace Aggregation with vector of fine; the code reuse is too small for + // the hassle and complexity of cross coupling. + Aggregation _Aggregate; + std::vector evals_fine; + std::vector evals_coarse; + std::vector evec_coarse; +public: + CoarseFineIRL(GridBase *FineGrid, + GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) : + _CoarseGrid(CoarseGrid), + _FineGrid(FineGrid), + _Aggregate(CoarseGrid,FineGrid,checkerboard), + _FineOp(FineOp), + _checkerboard(checkerboard) + { + evals_fine.resize(0); + evals_coarse.resize(0); + }; + void Orthogonalise(void ) { _Aggregate.Orthogonalise(); } + + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = ::sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void testFine(void) + { + int Nk = nbasis; + _Aggregate.subspace.resize(Nk,_FineGrid); + _Aggregate.subspace[0]=1.0; + _Aggregate.subspace[0].checkerboard=_checkerboard; + normalise(_Aggregate.subspace[0]); + PlainHermOp Op(_FineOp); + for(int k=1;k Cheby(cheby_parms); + FunctionHermOp ChebyOp(Cheby,_FineOp); + PlainHermOp Op(_FineOp); + + evals_fine.resize(Nm); + _Aggregate.subspace.resize(Nm,_FineGrid); + + ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + + FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; + + int Nconv; + IRL.calc(evals_fine,_Aggregate.subspace,src,Nconv,false,0); + + // Shrink down to number saved + assert(Nstop>=nbasis); + assert(Nconv>=nbasis); + evals_fine.resize(nbasis); + _Aggregate.subspace.resize(nbasis,_FineGrid); + } + void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth, + int Nstop, int Nk, int Nm,RealD resid, + RealD MaxIt, RealD betastp, int MinRes) + { + Chebyshev Cheby(cheby_op); + ProjectedHermOp Op(_FineOp,_Aggregate); + ProjectedFunctionHermOp ChebyOp (Cheby,_FineOp,_Aggregate); + ////////////////////////////////////////////////////////////////////////////////////////////////// + // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL + ////////////////////////////////////////////////////////////////////////////////////////////////// + + Chebyshev ChebySmooth(cheby_smooth); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate); + + + evals_coarse.resize(Nm); + evec_coarse.resize(Nm,_CoarseGrid); + + CoarseField src(_CoarseGrid); src=1.0; + + ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + int Nconv=0; + 
IRL.calc(evals_coarse,evec_coarse,src,Nconv,false,1); + assert(Nconv>=Nstop); + + for (int i=0;i blockSize = Params.blockSize; + + // Grids + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector fineLatt = GridDefaultLatt(); + int dims=fineLatt.size(); + assert(blockSize.size()==dims+1); + std::vector coarseLatt(dims); + std::vector coarseLatt5d ; + + for (int d=0;d HermOp(Ddwf); + + // Eigenvector storage + LanczosParams fine =Params.FineParams; + LanczosParams coarse=Params.CoarseParams; + + const int Ns1 = fine.Nstop; const int Ns2 = coarse.Nstop; + const int Nk1 = fine.Nk; const int Nk2 = coarse.Nk; + const int Nm1 = fine.Nm; const int Nm2 = coarse.Nm; + + std::cout << GridLogMessage << "Keep " << fine.Nstop << " fine vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << coarse.Nstop << " coarse vectors" << std::endl; + assert(Nm2 >= Nm1); + + const int nbasis= 60; + assert(nbasis==Ns1); + CoarseFineIRL IRL(FrbGrid,CoarseGrid5rb,HermOp,Odd); + std::cout << GridLogMessage << "Constructed CoarseFine IRL" << std::endl; + + int do_fine = 1; + int do_coarse = 0; + int do_smooth = 0; + if ( do_fine ) { + std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "< Date: Wed, 25 Oct 2017 23:49:23 +0100 Subject: [PATCH 036/145] 64 bit safe offsets --- lib/parallelIO/BinaryIO.h | 94 +++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 43 deletions(-) diff --git a/lib/parallelIO/BinaryIO.h b/lib/parallelIO/BinaryIO.h index d14f3fe2..a2abc9be 100644 --- a/lib/parallelIO/BinaryIO.h +++ b/lib/parallelIO/BinaryIO.h @@ -261,7 +261,7 @@ class BinaryIO { GridBase *grid, std::vector &iodata, std::string file, - int offset, + Integer offset, const std::string &format, int control, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -367,7 +367,7 @@ class BinaryIO { assert(0); #endif } else { - std::cout << GridLogMessage << "C++ read I/O " << file << " : " + std::cout << GridLogMessage << "C++ read I/O " << file << " : " << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ifstream fin; fin.open(file, std::ios::binary | std::ios::in); @@ -444,48 +444,56 @@ class BinaryIO { assert(0); #endif } else { + + std::cout << GridLogMessage << "C++ write I/O " << file << " : " + << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ofstream fout; - fout.exceptions ( std::fstream::failbit | std::fstream::badbit ); - try { - fout.open(file,std::ios::binary|std::ios::out|std::ios::in); - } catch (const std::fstream::failure& exc) { - std::cout << GridLogError << "Error in opening the file " << file << " for output" < &Umu, std::string file, munger munge, - int offset, + Integer offset, const std::string &format, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -552,7 +560,7 @@ class BinaryIO { static inline void writeLatticeObject(Lattice &Umu, std::string file, munger munge, - int offset, + Integer offset, const std::string &format, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -589,7 +597,7 @@ class BinaryIO { static inline void readRNG(GridSerialRNG &serial, GridParallelRNG ¶llel, std::string file, - int offset, + Integer offset, uint32_t &nersc_csum, uint32_t &scidac_csuma, uint32_t &scidac_csumb) @@ -651,7 +659,7 
@@ class BinaryIO { static inline void writeRNG(GridSerialRNG &serial, GridParallelRNG ¶llel, std::string file, - int offset, + Integer offset, uint32_t &nersc_csum, uint32_t &scidac_csuma, uint32_t &scidac_csumb) From 66295b99aada692f68c6547ce7d435e8d7df9e66 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:50:05 +0100 Subject: [PATCH 037/145] Bit less verbose SciDAC IO --- lib/parallelIO/IldgIO.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/parallelIO/IldgIO.h b/lib/parallelIO/IldgIO.h index ba71153d..1f2b7c90 100644 --- a/lib/parallelIO/IldgIO.h +++ b/lib/parallelIO/IldgIO.h @@ -147,7 +147,7 @@ namespace QCD { _scidacRecord = sr; - std::cout << GridLogMessage << "Build SciDAC datatype " <_gsites; createLimeRecordHeader(record_name, 0, 0, PayloadSize); - // std::cout << "W sizeof(sobj)" <_gsites< xmlc(nbytes+1,'\0'); limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR); - std::cout << GridLogMessage<< "Non binary record :" < Date: Wed, 25 Oct 2017 23:50:37 +0100 Subject: [PATCH 038/145] Better error messaging --- lib/serialisation/XmlIO.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/serialisation/XmlIO.cc b/lib/serialisation/XmlIO.cc index a132a2f0..c0c45adc 100644 --- a/lib/serialisation/XmlIO.cc +++ b/lib/serialisation/XmlIO.cc @@ -68,10 +68,10 @@ std::string XmlWriter::XmlString(void) XmlReader::XmlReader(const char *xmlstring,string toplev) : fileName_("") { pugi::xml_parse_result result; - result = doc_.load_string(xmlstring); + result = doc_.load_file(xmlstring); if ( !result ) { - cerr << "XML error description: " << result.description() << "\n"; - cerr << "XML error offset : " << result.offset << "\n"; + cerr << "XML error description: char * " << result.description() << " "<< xmlstring << "\n"; + cerr << "XML error offset : char * " << result.offset << " "< Date: Wed, 25 Oct 2017 23:51:18 +0100 Subject: [PATCH 039/145] Red black friendly coarsening --- lib/lattice/Lattice_transfer.h | 54 ++++++++++++++++++++-------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index 713a8788..48688e43 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -109,8 +109,8 @@ inline void blockProject(Lattice > &coarseData, coarseData=zero; - // Loop with a cache friendly loop ordering - for(int sf=0;sfoSites();sf++){ + // Loop over coars parallel, and then loop over fine associated with coarse. + parallel_for(int sf=0;sfoSites();sf++){ int sc; std::vector coor_c(_ndimension); @@ -119,8 +119,9 @@ inline void blockProject(Lattice > &coarseData, for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); +PARALLEL_CRITICAL for(int i=0;i &fineZ, GridBase * coarse= coarseA._grid; fineZ.checkerboard=fineX.checkerboard; + assert(fineX.checkerboard==fineY.checkerboard); subdivides(coarse,fine); // require they map conformable(fineX,fineY); conformable(fineX,fineZ); @@ -180,9 +182,10 @@ template GridBase *coarse(CoarseInner._grid); GridBase *fine (fineX._grid); - Lattice fine_inner(fine); + Lattice fine_inner(fine); fine_inner.checkerboard = fineX.checkerboard; Lattice coarse_inner(coarse); + // Precision promotion? 
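+ // (One option: accumulate the per-coarse-site sums in a higher-precision field
+ //  before blockSum, since many fine sites contribute to each coarse site; left
+ //  as an open question here.)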
fine_inner = localInnerProduct(fineX,fineY); blockSum(coarse_inner,fine_inner); parallel_for(int ss=0;ssoSites();ss++){ @@ -193,7 +196,7 @@ template inline void blockNormalise(Lattice &ip,Lattice &fineX) { GridBase *coarse = ip._grid; - Lattice zz(fineX._grid); zz=zero; + Lattice zz(fineX._grid); zz=zero; zz.checkerboard=fineX.checkerboard; blockInnerProduct(ip,fineX,fineX); ip = pow(ip,-0.5); blockZAXPY(fineX,ip,fineX,zz); @@ -216,19 +219,25 @@ inline void blockSum(Lattice &coarseData,const Lattice &fineData) block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d]; } + // Turn this around to loop threaded over sc and interior loop + // over sf would thread better coarseData=zero; - for(int sf=0;sfoSites();sf++){ - + parallel_region { + int sc; std::vector coor_c(_ndimension); std::vector coor_f(_ndimension); - Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); - for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; - Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); - - coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf]; + parallel_for_internal(int sf=0;sfoSites();sf++){ + + Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); + for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; + Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); + +PARALLEL_CRITICAL + coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf]; + } } return; } @@ -238,7 +247,7 @@ inline void blockPick(GridBase *coarse,const Lattice &unpicked,Lattice zz(fine); + Lattice zz(fine); zz.checkerboard = unpicked.checkerboard; Lattice > fcoor(fine); zz = zero; @@ -303,20 +312,21 @@ inline void blockPromote(const Lattice > &coarseData, } // Loop with a cache friendly loop ordering - for(int sf=0;sfoSites();sf++){ - + parallel_region { int sc; std::vector coor_c(_ndimension); std::vector coor_f(_ndimension); - Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); - for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; - Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); - - for(int i=0;ioSites();sf++){ + Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); + for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; + Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); + + for(int i=0;i Date: Wed, 25 Oct 2017 23:52:47 +0100 Subject: [PATCH 040/145] Use existing functionality where possible --- tests/lanczos/FieldBasisVector.h | 81 ++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 tests/lanczos/FieldBasisVector.h diff --git a/tests/lanczos/FieldBasisVector.h b/tests/lanczos/FieldBasisVector.h new file mode 100644 index 00000000..9a21aa46 --- /dev/null +++ b/tests/lanczos/FieldBasisVector.h @@ -0,0 +1,81 @@ +namespace Grid { + +template +class BasisFieldVector { + public: + int _Nm; + + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + + std::vector _v; // _Nfull vectors + + void report(int n,GridBase* value) { + + std::cout << GridLogMessage << "BasisFieldVector allocated:\n"; + std::cout << GridLogMessage << " Delta N = " << n << "\n"; + std::cout << GridLogMessage << " Size of full vectors (size) = " << + ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) 
<< " GB\n"; + std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl; + + value->Barrier(); + +#ifdef __linux + if (value->IsBoss()) { + system("cat /proc/meminfo"); + } +#endif + + value->Barrier(); + + } + + BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) { + report(Nm,value); + } + + ~BasisFieldVector() { + } + + Field& operator[](int i) { + return _v[i]; + } + + void orthogonalize(Field& w, int k) { + basisOrthogonalize(_v,w,k); + } + + void rotate(Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) { + basisRotate(_v,Qt,j0,j1,k0,k1,Nm); + } + + size_t size() const { + return _Nm; + } + + void resize(int n) { + if (n > _Nm) + _v.reserve(n); + + _v.resize(n,_v[0]._grid); + + if (n < _Nm) + _v.shrink_to_fit(); + + report(n - _Nm,_v[0]._grid); + + _Nm = n; + } + + void sortInPlace(std::vector& sort_vals, bool reverse) { + basisSortInPlace(_v,sort_vals,reverse); + } + + void deflate(const std::vector& eval,const Field& src_orig,Field& result) { + basisDeflate(_v,eval,src_orig,result); + } + + }; +} From e4d461cb03ee3b039345c3c4ec29704dec5c8d94 Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 23:53:19 +0100 Subject: [PATCH 041/145] Messagign --- tests/lanczos/Test_dwf_compressed_lanczos.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/lanczos/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc index 10d6c3ae..a6eb95e9 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -26,9 +26,9 @@ // The following are now decoupled from the Lanczos and deal with grids. // Safe to replace functionality ///////////////////////////////////////////////////////////////////////////// -#include -#include -#include +#include "BlockedGrid.h" +#include "FieldBasisVector.h" +#include "BlockProjector.h" #include "FieldVectorIO.h" #include "Params.h" @@ -431,6 +431,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo auto result = src_orig; // undeflated solve + std::cout << GridLogMessage << " Undeflated solve "<IsBoss()) @@ -438,6 +439,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo // CG.ResHistory.clear(); // deflated solve with all eigenvectors + std::cout << GridLogMessage << " Deflated solve with all evectors"<& pr,RealD alpha2,RealD beta,int Npo // CG.ResHistory.clear(); // deflated solve with non-blocked eigenvectors + std::cout << GridLogMessage << " Deflated solve with non-blocked evectors"<& pr,RealD alpha2,RealD beta,int Npo // CG.ResHistory.clear(); // deflated solve with all eigenvectors and original eigenvalues from proj + std::cout << GridLogMessage << " Deflated solve with all eigenvectors and original eigenvalues from proj"< Date: Wed, 25 Oct 2017 23:53:44 +0100 Subject: [PATCH 042/145] Faster converge time --- tests/solver/Test_dwf_mrhs_cg.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/solver/Test_dwf_mrhs_cg.cc b/tests/solver/Test_dwf_mrhs_cg.cc index d9215db2..079fa85a 100644 --- a/tests/solver/Test_dwf_mrhs_cg.cc +++ b/tests/solver/Test_dwf_mrhs_cg.cc @@ -190,7 +190,7 @@ int main (int argc, char ** argv) MdagMLinearOperator HermOp(Ddwf); MdagMLinearOperator HermOpCk(Dchk); - ConjugateGradient CG((1.0e-8/(me+1)),10000); + ConjugateGradient CG((1.0e-5/(me+1)),10000); s_res = zero; CG(HermOp,s_src,s_res); From d577211cc376303d88355df5bb101ff8aaf6f9ab Mon Sep 17 00:00:00 2001 From: paboyle Date: Wed, 25 Oct 2017 
23:57:54 +0100 Subject: [PATCH 043/145] Relax stoppign condition --- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index 90969b85..fbc6dd32 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -113,7 +113,7 @@ int main (int argc, char ** argv) MdagMLinearOperator HermOp(Ddwf); MdagMLinearOperator HermOpCk(Dchk); - ConjugateGradient CG((1.0e-8/(me+1)),10000); + ConjugateGradient CG((1.0e-5/(me+1)),10000); s_res = zero; CG(HermOp,s_src,s_res); From e9be293444039051630aca103ae861b51cf242a5 Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 26 Oct 2017 01:59:30 +0100 Subject: [PATCH 044/145] Better messaging --- lib/parallelIO/BinaryIO.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/parallelIO/BinaryIO.h b/lib/parallelIO/BinaryIO.h index a2abc9be..b40a75af 100644 --- a/lib/parallelIO/BinaryIO.h +++ b/lib/parallelIO/BinaryIO.h @@ -356,7 +356,7 @@ class BinaryIO { if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) { #ifdef USE_MPI_IO - std::cout<< GridLogMessage<< "MPI read I/O "<< file<< std::endl; + std::cout<< GridLogMessage<<"IOobject: MPI read I/O "<< file<< std::endl; ierr=MPI_File_open(grid->communicator,(char *) file.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh); assert(ierr==0); ierr=MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL); assert(ierr==0); ierr=MPI_File_read_all(fh, &iodata[0], 1, localArray, &status); assert(ierr==0); @@ -367,7 +367,7 @@ class BinaryIO { assert(0); #endif } else { - std::cout << GridLogMessage << "C++ read I/O " << file << " : " + std::cout << GridLogMessage <<"IOobject: C++ read I/O " << file << " : " << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ifstream fin; fin.open(file, std::ios::binary | std::ios::in); @@ -413,9 +413,9 @@ class BinaryIO { timer.Start(); if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) { #ifdef USE_MPI_IO - std::cout << GridLogMessage << "MPI write I/O " << file << std::endl; + std::cout << GridLogMessage <<"IOobject: MPI write I/O " << file << std::endl; ierr = MPI_File_open(grid->communicator, (char *)file.c_str(), MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh); - std::cout << GridLogMessage << "Checking for errors" << std::endl; + // std::cout << GridLogMessage << "Checking for errors" << std::endl; if (ierr != MPI_SUCCESS) { char error_string[BUFSIZ]; @@ -445,7 +445,7 @@ class BinaryIO { #endif } else { - std::cout << GridLogMessage << "C++ write I/O " << file << " : " + std::cout << GridLogMessage << "IOobject: C++ write I/O " << file << " : " << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ofstream fout; From ccd20df8276fa1951f7d6489bce95c3a65de57eb Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 26 Oct 2017 01:59:59 +0100 Subject: [PATCH 045/145] Better IRL interface --- tests/lanczos/BlockProjector.h | 143 +++++++ tests/lanczos/BlockedGrid.h | 401 ++++++++++++++++++ tests/lanczos/Test_dwf_compressed_lanczos.cc | 4 +- .../Test_dwf_compressed_lanczos_reorg.cc | 21 +- 4 files changed, 557 insertions(+), 12 deletions(-) create mode 100644 tests/lanczos/BlockProjector.h create mode 100644 tests/lanczos/BlockedGrid.h diff --git a/tests/lanczos/BlockProjector.h b/tests/lanczos/BlockProjector.h new file mode 100644 index 00000000..6becaa66 --- /dev/null +++ b/tests/lanczos/BlockProjector.h @@ -0,0 +1,143 @@ +namespace Grid { + +/* + BlockProjector + + 
If _HP_BLOCK_PROJECTORS_ is defined, we assume that _evec is a basis that is not + fully orthonormalized (to the precision of the coarse field) and we allow for higher-precision + coarse field than basis field. + +*/ +//#define _HP_BLOCK_PROJECTORS_ + +template +class BlockProjector { +public: + + BasisFieldVector& _evec; + BlockedGrid& _bgrid; + + BlockProjector(BasisFieldVector& evec, BlockedGrid& bgrid) : _evec(evec), _bgrid(bgrid) { + } + + void createOrthonormalBasis(RealD thres = 0.0) { + + GridStopWatch sw; + sw.Start(); + + int cnt = 0; + +#pragma omp parallel shared(cnt) + { + int lcnt = 0; + +#pragma omp for + for (int b=0;b<_bgrid._o_blocks;b++) { + + for (int i=0;i<_evec._Nm;i++) { + + auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]); + + // |i> -= |j> + for (int j=0;j + void coarseToFine(const CoarseField& in, Field& out) { + + out = zero; + out.checkerboard = _evec._v[0].checkerboard; + + int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + _bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out); + } + } + + } + + template + void fineToCoarse(const Field& in, CoarseField& out) { + + out = zero; + + int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + + + Field tmp(_bgrid._grid); + tmp = in; + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + // |rhs> -= |j> + auto c = _bgrid.block_sp(b,_evec._v[j],tmp); + _bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable + out._odata[b]._internal._internal[j] = c; + } + } + + } + + template + void deflateFine(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + result = zero; + for (int i=0;i + void deflateCoarse(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + CoarseField src_coarse(_coef._v[0]._grid); + CoarseField result_coarse = src_coarse; + result_coarse = zero; + fineToCoarse(src_orig,src_coarse); + for (int i=0;i + void deflate(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + // Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now. 
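+ // (The coarse path costs one fineToCoarse and one coarseToFine sweep of the fine
+ //  grid, with the per-eigenvector axpys acting on Nbasis coefficients per block;
+ //  the fine path instead needs at least one full fine-grid operation per eigenvector.)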
+ deflateCoarse(_coef,eval,N,src_orig,result); + } + +}; +} diff --git a/tests/lanczos/BlockedGrid.h b/tests/lanczos/BlockedGrid.h new file mode 100644 index 00000000..821272de --- /dev/null +++ b/tests/lanczos/BlockedGrid.h @@ -0,0 +1,401 @@ +namespace Grid { + +template +class BlockedGrid { +public: + GridBase* _grid; + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + + std::vector _bs; // block size + std::vector _nb; // number of blocks + std::vector _l; // local dimensions irrespective of cb + std::vector _l_cb; // local dimensions of checkerboarded vector + std::vector _l_cb_o; // local dimensions of inner checkerboarded vector + std::vector _bs_cb; // block size in checkerboarded vector + std::vector _nb_o; // number of blocks of simd o-sites + + int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites; + + BlockedGrid(GridBase* grid, const std::vector& block_size) : + _grid(grid), _bs(block_size), _nd((int)_bs.size()), + _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size), + _l_cb_o(block_size), _bs_cb(block_size) { + + _blocks = 1; + _o_blocks = 1; + _l = grid->FullDimensions(); + _l_cb = grid->LocalDimensions(); + _l_cb_o = grid->_rdimensions; + + _cf_size = 1; + _block_sites = 1; + for (int i=0;i<_nd;i++) { + _l[i] /= grid->_processors[i]; + + assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize + + int r = _l[i] / _l_cb[i]; + assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize + _bs_cb[i] = _bs[i] / r; + _block_sites *= _bs_cb[i]; + _nb[i] = _l[i] / _bs[i]; + _nb_o[i] = _nb[i] / _grid->_simd_layout[i]; + if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize + std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl; + assert(0); + } + _blocks *= _nb[i]; + _o_blocks *= _nb_o[i]; + _cf_size *= _l[i]; + } + + _cf_size *= 12 / 2; + _cf_block_size = _cf_size / _blocks; + _cf_o_block_size = _cf_size / _o_blocks; + + std::cout << GridLogMessage << "BlockedGrid:" << std::endl; + std::cout << GridLogMessage << " _l = " << _l << std::endl; + std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl; + std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl; + std::cout << GridLogMessage << " _bs = " << _bs << std::endl; + std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl; + + std::cout << GridLogMessage << " _nb = " << _nb << std::endl; + std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl; + std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl; + std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl; + std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl; + std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl; + std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl; + std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl; + std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl; + + // _grid->Barrier(); + //abort(); + } + + void block_to_coor(int b, std::vector& x0) { + + std::vector bcoor; + bcoor.resize(_nd); + x0.resize(_nd); + assert(b < _o_blocks); + Lexicographic::CoorFromIndex(bcoor,b,_nb_o); + int i; + + for (i=0;i<_nd;i++) { + x0[i] = bcoor[i]*_bs_cb[i]; + } + + //std::cout << GridLogMessage << 
"Map block b -> " << x0 << std::endl; + + } + + void block_site_to_o_coor(const std::vector& x0, std::vector& coor, int i) { + Lexicographic::CoorFromIndex(coor,i,_bs_cb); + for (int j=0;j<_nd;j++) + coor[j] += x0[j]; + } + + int block_site_to_o_site(const std::vector& x0, int i) { + std::vector coor; coor.resize(_nd); + block_site_to_o_coor(x0,coor,i); + Lexicographic::IndexFromCoor(coor,i,_l_cb_o); + return i; + } + + vCoeff_t block_sp(int b, const Field& x, const Field& y) { + + std::vector x0; + block_to_coor(b,x0); + + vCoeff_t ret = 0.0; + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss])); + } + + return ret; + + } + + vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) { + + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + std::vector< ComplexD > ret(nsimd); + for (int i=0;i + void vcaxpy(iScalar& r,const vCoeff_t& a,const iScalar& x,const iScalar& y) { + vcaxpy(r._internal,a,x._internal,y._internal); + } + + template + void vcaxpy(iVector& r,const vCoeff_t& a,const iVector& x,const iVector& y) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]); + } + + } + + void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) { + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + int n = lsize / nsimd; + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l + void vcscale(iScalar& r,const vCoeff_t& a,const iScalar& x) { + vcscale(r._internal,a,x._internal); + } + + template + void vcscale(iVector& r,const vCoeff_t& a,const iVector& x) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcscale(ret._odata[ss],a,ret._odata[ss]); + } + } + + void getCanonicalBlockOffset(int cb, std::vector& x0) { + const int ndim = 5; + assert(_nb.size() == ndim); + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + x0.resize(ndim); + + assert(cb >= 0); + assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]); + + Lexicographic::CoorFromIndex(x0,cb,_nbc); + int i; + + for (i=0;i& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma 
omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]); + } + } + } + } + + void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + + buf.resize(_cf_block_size * 2); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + buf[2*ti+0] = ld.real(); + buf[2*ti+1] = ld.imag(); + } + } + } + } + + int globalToLocalCanonicalBlock(int slot,const std::vector& src_nodes,int nb) { + // processor coordinate + int _nd = (int)src_nodes.size(); + std::vector _src_nodes = src_nodes; + std::vector pco(_nd); + Lexicographic::CoorFromIndex(pco,slot,_src_nodes); + std::vector cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] }; + + // get local block + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + assert(_nd == 5); + std::vector c_src_local_blocks(_nd); + for (int i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0); + c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i]; + } + std::vector cbcoor(_nd); // coordinate of block in slot in canonical form + Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks); + + // cpco, cbcoor + std::vector clbcoor(_nd); + for (int i=0;i<_nd;i++) { + int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate + int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid + int tpcoor = _grid->_processor_coor[(i+1)%5]; + if (pcoor != tpcoor) + return -1; + clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i + } + + int lnb; + Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc); + //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl; + return lnb; + } + + + }; + +} diff --git a/tests/lanczos/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc index a6eb95e9..45690f05 100644 --- 
a/tests/lanczos/Test_dwf_compressed_lanczos.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -331,7 +331,7 @@ void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npo ) { - IRL2.calc(eval2,coef._v,src_coarse,Nconv,true,SkipTest2); + IRL2.calc(eval2,coef._v,src_coarse,Nconv,true); coef.resize(Nstop2); eval2.resize(Nstop2); @@ -635,7 +635,7 @@ int main (int argc, char ** argv) { if (simple_krylov_basis) { quick_krylov_basis(evec,src,Op1,Nstop1); } else { - IRL1.calc(eval1,evec._v,src,Nconv,false,1); + IRL1.calc(eval1,evec._v,src,Nconv,false); } evec.resize(Nstop1); // and throw away superfluous eval1.resize(Nstop1); diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc index a0691116..8fbbacbc 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc @@ -56,6 +56,7 @@ struct CompressedLanczosParams : Serializable { LanczosParams, FineParams, LanczosParams, CoarseParams, ChebyParams, Smoother, + RealD , coarse_relax_tol, std::vector, blockSize, std::string, config, std::vector < std::complex >, omega, @@ -137,12 +138,13 @@ class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanc OperatorFunction & _smoother; LinearOperatorBase &_Linop; Aggregation &_Aggregate; - + RealD _coarse_relax_tol; ImplicitlyRestartedLanczosSmoothedTester(LinearFunction &Poly, OperatorFunction &smoother, LinearOperatorBase &Linop, - Aggregation &Aggregate) - : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly) { }; + Aggregation &Aggregate, + RealD coarse_relax_tol=5.0e3) + : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly), _coarse_relax_tol(coarse_relax_tol) { }; int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) { @@ -196,7 +198,7 @@ class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanc <<"eval = "< nbasis ) eresid = eresid*_coarse_relax_tol; if( (vv=nbasis); @@ -345,7 +347,7 @@ public: evals_fine.resize(nbasis); _Aggregate.subspace.resize(nbasis,_FineGrid); } - void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth, + void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax, int Nstop, int Nk, int Nm,RealD resid, RealD MaxIt, RealD betastp, int MinRes) { @@ -357,8 +359,7 @@ public: ////////////////////////////////////////////////////////////////////////////////////////////////// Chebyshev ChebySmooth(cheby_smooth); - ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate); - + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); evals_coarse.resize(Nm); evec_coarse.resize(Nm,_CoarseGrid); @@ -367,7 +368,7 @@ public: ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); int Nconv=0; - IRL.calc(evals_coarse,evec_coarse,src,Nconv,false,1); + IRL.calc(evals_coarse,evec_coarse,src,Nconv,false); assert(Nconv>=Nstop); for (int i=0;i Date: Thu, 26 Oct 2017 07:45:56 +0100 Subject: [PATCH 046/145] Update to IRL; getting close to the structure I would like. 
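Convergence checking is delegated to a pluggable ImplicitlyRestartedLanczosTester<Field>
with two hooks: TestConvergence, used for the cheap check inside the restart loop, and
ReconstructEval, applied to the rotated vectors once the scan terminates. The default
ImplicitlyRestartedLanczosHermOpTester reproduces the plain HermOp test, and calc() no
longer needs the SkipTest flag. A minimal sketch of a user-supplied tester against this
interface (the class name and the relax factor are illustrative only, not part of the
patch):

    template<class Field>
    class RelaxedHermOpTester : public ImplicitlyRestartedLanczosTester<Field> {
    public:
      LinearFunction<Field> &_HermOpTest;
      RealD _relax;   // loosen the tolerance, e.g. for vectors that will be smoothed later
      RelaxedHermOpTester(LinearFunction<Field> &HermOpTest, RealD relax)
        : _HermOpTest(HermOpTest), _relax(relax) {};

      int TestConvergence(int j, RealD eresid, Field &B, RealD &eval, RealD evalMaxApprox)
      {
        Field v(B);
        _HermOpTest(B, v);                                // apply the operator to the candidate
        RealD vnum = real(innerProduct(B, v));
        RealD vden = norm2(B);
        eval = vnum / vden;                               // Rayleigh quotient estimate
        v -= eval * B;
        RealD vv = norm2(v) / ::pow(evalMaxApprox, 2.0);  // residual, relative to spectral scale
        return (vv < eresid * eresid * _relax);
      }
      int ReconstructEval(int j, RealD eresid, Field &B, RealD &eval, RealD evalMaxApprox)
      {
        return TestConvergence(j, eresid, B, eval, evalMaxApprox);
      }
    };

    // Plugged in through the new constructor and the trimmed calc() signature:
    //   RelaxedHermOpTester<FermionField> tester(HermOpTest, 10.0);
    //   ImplicitlyRestartedLanczos<FermionField> IRL(HermOp, HermOpTest, tester,
    //                                                Nstop, Nk, Nm, resid, MaxIt);
    //   IRL.calc(eval, evec, src, Nconv);
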
--- .../iterative/ImplicitlyRestartedLanczos.h | 234 +++++++++++------- 1 file changed, 142 insertions(+), 92 deletions(-) diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index 6d3e0755..4be2715a 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -71,6 +71,23 @@ void basisRotate(std::vector &basis,Eigen::MatrixXd& Qt,int j0, int j1, i } } +// Extract a single rotated vector +template +void basisRotateJ(Field &result,std::vector &basis,Eigen::MatrixXd& Qt,int j, int k0,int k1,int Nm) +{ + typedef typename Field::vector_object vobj; + GridBase* grid = basis[0]._grid; + + result.checkerboard = basis[0].checkerboard; + parallel_for(int ss=0;ss < grid->oSites();ss++){ + vobj B = zero; + for(int k=k0; k void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, std::vector& idx) { @@ -87,9 +104,7 @@ void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, s assert(idx[i] > i); ////////////////////////////////////// // idx[i] is a table of desired sources giving a permutation. - // // Swap v[i] with v[idx[i]]. - // // Find j>i for which _vnew[j] = _vold[i], // track the move idx[j] => idx[i] // track the move idx[i] => i @@ -155,6 +170,49 @@ enum IRLdiagonalisation { ///////////////////////////////////////////////////////////// // Implicitly restarted lanczos ///////////////////////////////////////////////////////////// +template class ImplicitlyRestartedLanczosTester +{ + public: + virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox); + virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox); +}; + +template class ImplicitlyRestartedLanczosHermOpTester : public ImplicitlyRestartedLanczosTester +{ + public: + LinearFunction &_HermOpTest; + ImplicitlyRestartedLanczosHermOpTester(LinearFunction &HermOpTest) : _HermOpTest(HermOpTest) { }; + int ReconstructEval(int j,RealD resid,Field &B, RealD &eval,RealD evalMaxApprox) + { + return TestConvergence(j,resid,B,eval,evalMaxApprox); + } + int TestConvergence(int j,RealD eresid,Field &B, RealD &eval,RealD evalMaxApprox) + { + Field v(B); + RealD eval_poly = eval; + // Apply operator + _HermOpTest(B,v); + + RealD vnum = real(innerProduct(B,v)); // HermOp. 
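+ // (eval = vnum/vden below is the Rayleigh quotient <B,H B>/<B,B>; the test then
+ //  compares ||H B - eval*B||^2, scaled by 1/evalMaxApprox^2, against eresid^2,
+ //  so eresid acts as a tolerance relative to the spectral radius estimate.)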
+ RealD vden = norm2(B); + RealD vv0 = norm2(v); + eval = vnum/vden; + v -= eval*B; + + RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); + + std::cout.precision(13); + std::cout< class ImplicitlyRestartedLanczos { @@ -174,14 +232,19 @@ class ImplicitlyRestartedLanczos { //////////////////////////////// // Embedded objects //////////////////////////////// - LinearFunction &_HermOp; - LinearFunction &_HermOpTest; + LinearFunction &_HermOp; + LinearFunction &_HermOpTest; + ImplicitlyRestartedLanczosTester &_Tester; + // Default tester provided (we need a ref to something in default case) + ImplicitlyRestartedLanczosHermOpTester SimpleTester; ///////////////////////// // Constructor ///////////////////////// + public: ImplicitlyRestartedLanczos(LinearFunction & HermOp, LinearFunction & HermOpTest, + ImplicitlyRestartedLanczosTester & Tester, int _Nstop, // sought vecs int _Nk, // sought vecs int _Nm, // spare vecs @@ -190,7 +253,23 @@ public: RealD _betastp=0.0, // if beta(k) < betastp: converged int _MinRestart=1, int _orth_period = 1, IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : - _HermOp(HermOp), _HermOpTest(HermOpTest), + SimpleTester(HermOpTest), _HermOp(HermOp), _HermOpTest(HermOpTest), _Tester(Tester), + Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), + eresid(_eresid), betastp(_betastp), + MaxIter(_MaxIter) , MinRestart(_MinRestart), + orth_period(_orth_period), diagonalisation(_diagonalisation) { }; + + ImplicitlyRestartedLanczos(LinearFunction & HermOp, + LinearFunction & HermOpTest, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + int _MaxIter, // Max iterations + RealD _betastp=0.0, // if beta(k) < betastp: converged + int _MinRestart=1, int _orth_period = 1, + IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : + SimpleTester(HermOpTest), _HermOp(HermOp), _HermOpTest(HermOpTest), _Tester(SimpleTester), Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), eresid(_eresid), betastp(_betastp), MaxIter(_MaxIter) , MinRestart(_MinRestart), @@ -232,7 +311,7 @@ repeat →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM until convergence */ - void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=true, int SkipTest=0) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=true) { GridBase *grid = src._grid; assert(grid == evec[0]._grid); @@ -335,11 +414,18 @@ until convergence ////////////////////////////////// eval2_copy = eval2; - // _sort.push(eval2,Nm); - std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end()); + std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end(),std::greater()); std::cout<0); - basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + assert(k20); + basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis std::cout<= MinRestart) { - std::cout << GridLogIRL << "Rotation to test convergence " << std::endl; - - Field ev0_orig(grid); - ev0_orig = evec[0]; - - basisRotate(evec,Qt,0,Nk,0,Nk,Nm); - { - std::cout << GridLogIRL << "Test convergence" << std::endl; - Field B(grid); - - for(int j = 0; j Nconv ) { + Nconv=j+1; + jj=Nstop; // Terminate the scan } } - - // test if we converged, if so, terminate - std::cout<=Nstop || beta_k < betastp){ - goto converged; - } - - //B[j] +=Qt[k+_Nm*j] * _v[k]._odata[ss]; - { - Eigen::MatrixXd qm = Eigen::MatrixXd::Zero(Nk,Nk); // Restrict Qt to Nk x Nk - for (int k=0;k= "<=Nstop || beta_k < betastp){ + if( 
Nconv>=Nstop){ + goto converged; + } + } else { std::cout << GridLogIRL << "iter < MinRestart: do not yet test for convergence\n"; } // end of iter loop @@ -461,24 +510,28 @@ until convergence converged: - if (SkipTest == 1) { - eval = eval2; - } else { - ////////////////////////////////////////////// - // test quickly - // PAB -- what precisely does this test? Don't like this eval2, eval2_copy etc... - ////////////////////////////////////////////// - for (int j=0;j0) w -= lme[k-1] * evec[k-1]; @@ -529,8 +581,6 @@ until convergence lmd[k] = alph; lme[k] = beta; - std::cout<0 && k % orth_period == 0) { orthogonalize(w,evec,k); // orthonormalise std::cout< Date: Thu, 26 Oct 2017 07:47:42 +0100 Subject: [PATCH 047/145] Moving these out of algorithms --- .../BlockProjector.h | 143 ------- .../BlockedGrid.h | 401 ------------------ .../FieldBasisVector.h | 162 ------- 3 files changed, 706 deletions(-) delete mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h delete mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h delete mode 100644 lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h deleted file mode 100644 index 6becaa66..00000000 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockProjector.h +++ /dev/null @@ -1,143 +0,0 @@ -namespace Grid { - -/* - BlockProjector - - If _HP_BLOCK_PROJECTORS_ is defined, we assume that _evec is a basis that is not - fully orthonormalized (to the precision of the coarse field) and we allow for higher-precision - coarse field than basis field. - -*/ -//#define _HP_BLOCK_PROJECTORS_ - -template -class BlockProjector { -public: - - BasisFieldVector& _evec; - BlockedGrid& _bgrid; - - BlockProjector(BasisFieldVector& evec, BlockedGrid& bgrid) : _evec(evec), _bgrid(bgrid) { - } - - void createOrthonormalBasis(RealD thres = 0.0) { - - GridStopWatch sw; - sw.Start(); - - int cnt = 0; - -#pragma omp parallel shared(cnt) - { - int lcnt = 0; - -#pragma omp for - for (int b=0;b<_bgrid._o_blocks;b++) { - - for (int i=0;i<_evec._Nm;i++) { - - auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]); - - // |i> -= |j> - for (int j=0;j - void coarseToFine(const CoarseField& in, Field& out) { - - out = zero; - out.checkerboard = _evec._v[0].checkerboard; - - int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]); - assert(Nbasis == _evec._Nm); - -#pragma omp parallel for - for (int b=0;b<_bgrid._o_blocks;b++) { - for (int j=0;j<_evec._Nm;j++) { - _bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out); - } - } - - } - - template - void fineToCoarse(const Field& in, CoarseField& out) { - - out = zero; - - int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]); - assert(Nbasis == _evec._Nm); - - - Field tmp(_bgrid._grid); - tmp = in; - -#pragma omp parallel for - for (int b=0;b<_bgrid._o_blocks;b++) { - for (int j=0;j<_evec._Nm;j++) { - // |rhs> -= |j> - auto c = _bgrid.block_sp(b,_evec._v[j],tmp); - _bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable - out._odata[b]._internal._internal[j] = c; - } - } - - } - - template - void deflateFine(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { - result = zero; - for (int i=0;i - 
void deflateCoarse(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { - CoarseField src_coarse(_coef._v[0]._grid); - CoarseField result_coarse = src_coarse; - result_coarse = zero; - fineToCoarse(src_orig,src_coarse); - for (int i=0;i - void deflate(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { - // Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now. - deflateCoarse(_coef,eval,N,src_orig,result); - } - -}; -} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h deleted file mode 100644 index 821272de..00000000 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/BlockedGrid.h +++ /dev/null @@ -1,401 +0,0 @@ -namespace Grid { - -template -class BlockedGrid { -public: - GridBase* _grid; - typedef typename Field::scalar_type Coeff_t; - typedef typename Field::vector_type vCoeff_t; - - std::vector _bs; // block size - std::vector _nb; // number of blocks - std::vector _l; // local dimensions irrespective of cb - std::vector _l_cb; // local dimensions of checkerboarded vector - std::vector _l_cb_o; // local dimensions of inner checkerboarded vector - std::vector _bs_cb; // block size in checkerboarded vector - std::vector _nb_o; // number of blocks of simd o-sites - - int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites; - - BlockedGrid(GridBase* grid, const std::vector& block_size) : - _grid(grid), _bs(block_size), _nd((int)_bs.size()), - _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size), - _l_cb_o(block_size), _bs_cb(block_size) { - - _blocks = 1; - _o_blocks = 1; - _l = grid->FullDimensions(); - _l_cb = grid->LocalDimensions(); - _l_cb_o = grid->_rdimensions; - - _cf_size = 1; - _block_sites = 1; - for (int i=0;i<_nd;i++) { - _l[i] /= grid->_processors[i]; - - assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize - - int r = _l[i] / _l_cb[i]; - assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize - _bs_cb[i] = _bs[i] / r; - _block_sites *= _bs_cb[i]; - _nb[i] = _l[i] / _bs[i]; - _nb_o[i] = _nb[i] / _grid->_simd_layout[i]; - if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize - std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl; - assert(0); - } - _blocks *= _nb[i]; - _o_blocks *= _nb_o[i]; - _cf_size *= _l[i]; - } - - _cf_size *= 12 / 2; - _cf_block_size = _cf_size / _blocks; - _cf_o_block_size = _cf_size / _o_blocks; - - std::cout << GridLogMessage << "BlockedGrid:" << std::endl; - std::cout << GridLogMessage << " _l = " << _l << std::endl; - std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl; - std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl; - std::cout << GridLogMessage << " _bs = " << _bs << std::endl; - std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl; - - std::cout << GridLogMessage << " _nb = " << _nb << std::endl; - std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl; - std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl; - std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl; - std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl; - std::cout << 
GridLogMessage << " _cf_size = " << _cf_size << std::endl; - std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl; - std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl; - std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl; - - // _grid->Barrier(); - //abort(); - } - - void block_to_coor(int b, std::vector& x0) { - - std::vector bcoor; - bcoor.resize(_nd); - x0.resize(_nd); - assert(b < _o_blocks); - Lexicographic::CoorFromIndex(bcoor,b,_nb_o); - int i; - - for (i=0;i<_nd;i++) { - x0[i] = bcoor[i]*_bs_cb[i]; - } - - //std::cout << GridLogMessage << "Map block b -> " << x0 << std::endl; - - } - - void block_site_to_o_coor(const std::vector& x0, std::vector& coor, int i) { - Lexicographic::CoorFromIndex(coor,i,_bs_cb); - for (int j=0;j<_nd;j++) - coor[j] += x0[j]; - } - - int block_site_to_o_site(const std::vector& x0, int i) { - std::vector coor; coor.resize(_nd); - block_site_to_o_coor(x0,coor,i); - Lexicographic::IndexFromCoor(coor,i,_l_cb_o); - return i; - } - - vCoeff_t block_sp(int b, const Field& x, const Field& y) { - - std::vector x0; - block_to_coor(b,x0); - - vCoeff_t ret = 0.0; - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss])); - } - - return ret; - - } - - vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) { - - std::vector x0; - block_to_coor(b,x0); - - constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); - int lsize = _cf_o_block_size / _block_sites; - - std::vector< ComplexD > ret(nsimd); - for (int i=0;i - void vcaxpy(iScalar& r,const vCoeff_t& a,const iScalar& x,const iScalar& y) { - vcaxpy(r._internal,a,x._internal,y._internal); - } - - template - void vcaxpy(iVector& r,const vCoeff_t& a,const iVector& x,const iVector& y) { - for (int i=0;i x0; - block_to_coor(b,x0); - - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]); - } - - } - - void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) { - std::vector x0; - block_to_coor(b,x0); - - constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); - int lsize = _cf_o_block_size / _block_sites; - - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - - int n = lsize / nsimd; - for (int l=0;l& x) { - std::vector x0; - block_to_coor(b,x0); - - int lsize = _cf_o_block_size / _block_sites; - - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - - for (int l=0;l& x) { - std::vector x0; - block_to_coor(b,x0); - - int lsize = _cf_o_block_size / _block_sites; - - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - - for (int l=0;l - void vcscale(iScalar& r,const vCoeff_t& a,const iScalar& x) { - vcscale(r._internal,a,x._internal); - } - - template - void vcscale(iVector& r,const vCoeff_t& a,const iVector& x) { - for (int i=0;i x0; - block_to_coor(b,x0); - - for (int i=0;i<_block_sites;i++) { // only odd sites - int ss = block_site_to_o_site(x0,i); - vcscale(ret._odata[ss],a,ret._odata[ss]); - } - } - - void getCanonicalBlockOffset(int cb, std::vector& x0) { - const int ndim = 5; - assert(_nb.size() == ndim); - std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; - std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] 
}; - x0.resize(ndim); - - assert(cb >= 0); - assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]); - - Lexicographic::CoorFromIndex(x0,cb,_nbc); - int i; - - for (i=0;i& buf) { - std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; - std::vector ldim = v._grid->LocalDimensions(); - std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; - const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; - // take canonical block cb of v and put it in canonical ordering in buf - std::vector cx0; - getCanonicalBlockOffset(cb,cx0); - -#pragma omp parallel - { - std::vector co0,cl0; - co0=cx0; cl0=cx0; - -#pragma omp for - for (int i=0;i<_nbsc;i++) { - Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo - for (int j=0;j<(int)_bsc.size();j++) - cl0[j] = cx0[j] + co0[j]; - - std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; - int oi = v._grid->oIndex(l0); - int ii = v._grid->iIndex(l0); - int lti = i; - - //if (cb < 2 && i<2) - // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; - - for (int s=0;s<4;s++) - for (int c=0;c<3;c++) { - Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; - int ti = 12*lti + 3*s + c; - ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]); - } - } - } - } - - void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector& buf) { - std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; - std::vector ldim = v._grid->LocalDimensions(); - std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; - const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; - // take canonical block cb of v and put it in canonical ordering in buf - std::vector cx0; - getCanonicalBlockOffset(cb,cx0); - - buf.resize(_cf_block_size * 2); - -#pragma omp parallel - { - std::vector co0,cl0; - co0=cx0; cl0=cx0; - -#pragma omp for - for (int i=0;i<_nbsc;i++) { - Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo - for (int j=0;j<(int)_bsc.size();j++) - cl0[j] = cx0[j] + co0[j]; - - std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; - int oi = v._grid->oIndex(l0); - int ii = v._grid->iIndex(l0); - int lti = i; - - //if (cb < 2 && i<2) - // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; - - for (int s=0;s<4;s++) - for (int c=0;c<3;c++) { - Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; - int ti = 12*lti + 3*s + c; - buf[2*ti+0] = ld.real(); - buf[2*ti+1] = ld.imag(); - } - } - } - } - - int globalToLocalCanonicalBlock(int slot,const std::vector& src_nodes,int nb) { - // processor coordinate - int _nd = (int)src_nodes.size(); - std::vector _src_nodes = src_nodes; - std::vector pco(_nd); - Lexicographic::CoorFromIndex(pco,slot,_src_nodes); - std::vector cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] }; - - // get local block - std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; - assert(_nd == 5); - std::vector c_src_local_blocks(_nd); - for (int i=0;i<_nd;i++) { - assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0); - c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i]; - } - std::vector cbcoor(_nd); // coordinate of block in slot in canonical form - Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks); - - // cpco, cbcoor - std::vector clbcoor(_nd); - for (int i=0;i<_nd;i++) { - int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // 
global block coordinate - int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid - int tpcoor = _grid->_processor_coor[(i+1)%5]; - if (pcoor != tpcoor) - return -1; - clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i - } - - int lnb; - Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc); - //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl; - return lnb; - } - - - }; - -} diff --git a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h b/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h deleted file mode 100644 index 3ad516ef..00000000 --- a/lib/algorithms/iterative/BlockImplicitlyRestartedLanczos/FieldBasisVector.h +++ /dev/null @@ -1,162 +0,0 @@ -namespace Grid { - -template -class BasisFieldVector { - public: - int _Nm; - - typedef typename Field::scalar_type Coeff_t; - typedef typename Field::vector_type vCoeff_t; - typedef typename Field::vector_object vobj; - typedef typename vobj::scalar_object sobj; - - std::vector _v; // _Nfull vectors - - void report(int n,GridBase* value) { - - std::cout << GridLogMessage << "BasisFieldVector allocated:\n"; - std::cout << GridLogMessage << " Delta N = " << n << "\n"; - std::cout << GridLogMessage << " Size of full vectors (size) = " << - ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) << " GB\n"; - std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl; - - value->Barrier(); - - if (value->IsBoss()) { - system("cat /proc/meminfo"); - } - - value->Barrier(); - - } - - BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) { - report(Nm,value); - } - - ~BasisFieldVector() { - } - - Field& operator[](int i) { - return _v[i]; - } - - void orthogonalize(Field& w, int k) { - for(int j=0; j B(Nm); - -#pragma omp for - for(int ss=0;ss < grid->oSites();ss++){ - for(int j=j0; j _Nm) - _v.reserve(n); - - _v.resize(n,_v[0]._grid); - - if (n < _Nm) - _v.shrink_to_fit(); - - report(n - _Nm,_v[0]._grid); - - _Nm = n; - } - - std::vector getIndex(std::vector& sort_vals) { - - std::vector idx(sort_vals.size()); - iota(idx.begin(), idx.end(), 0); - - // sort indexes based on comparing values in v - sort(idx.begin(), idx.end(), - [&sort_vals](int i1, int i2) {return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]);}); - - return idx; - } - - void reorderInPlace(std::vector& sort_vals, std::vector& idx) { - GridStopWatch gsw; - gsw.Start(); - - int nswaps = 0; - for (size_t i=0;i& sort_vals, bool reverse) { - - std::vector idx = getIndex(sort_vals); - if (reverse) - std::reverse(idx.begin(), idx.end()); - - reorderInPlace(sort_vals,idx); - - } - - void deflate(const std::vector& eval,const Field& src_orig,Field& result) { - result = zero; - int N = (int)_v.size(); - for (int i=0;i Date: Thu, 26 Oct 2017 07:48:03 +0100 Subject: [PATCH 048/145] Test for split/unsplit in isolation --- tests/solver/Test_split_grid.cc | 144 ++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 tests/solver/Test_split_grid.cc diff --git a/tests/solver/Test_split_grid.cc b/tests/solver/Test_split_grid.cc new file mode 100644 index 00000000..90969b85 --- /dev/null +++ b/tests/solver/Test_split_grid.cc @@ -0,0 +1,144 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_dwf_mrhs_cg.cc + + Copyright (C) 2015 + +Author: Peter Boyle + 
+ This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Set up N-solvers as trivially parallel + /////////////////////////////////////////////////////////////// + RealD mass=0.01; + RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); + DomainWallFermionR 
Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< Date: Thu, 26 Oct 2017 16:25:01 +0100 Subject: [PATCH 049/145] Final? candidate for push back on the lanczos reorg feature --- .../Test_dwf_compressed_lanczos_reorg.cc | 33 ++----------------- 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc index 8fbbacbc..ad1aaa47 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc @@ -374,20 +374,6 @@ public: for (int i=0;i Date: Thu, 26 Oct 2017 20:58:46 +0100 Subject: [PATCH 050/145] Staggered updates : Schur fixed and added a unit test for Test_staggered_cg_schur.cc giving stronger check --- lib/algorithms/LinearOperator.h | 2 +- lib/algorithms/iterative/SchurRedBlack.h | 15 ++++- lib/communicator/Communicator_mpi3.cc | 7 ++- lib/communicator/Communicator_mpit.cc | 18 +++--- tests/solver/Test_staggered_cg_prec.cc | 1 - tests/solver/Test_staggered_cg_schur.cc | 76 ++++++++++++++++++++++++ 6 files changed, 103 insertions(+), 16 deletions(-) create mode 100644 tests/solver/Test_staggered_cg_schur.cc diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index f1b8820e..2a757352 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -319,7 +319,7 @@ namespace Grid { Field tmp(in._grid); _Mat.Meooe(in,tmp); _Mat.MooeeInv(tmp,out); - _Mat.MeooeDag(out,tmp); + _Mat.Meooe(out,tmp); _Mat.Mooee(in,out); return axpy_norm(out,-1.0,tmp,out); } diff --git a/lib/algorithms/iterative/SchurRedBlack.h b/lib/algorithms/iterative/SchurRedBlack.h index a309386b..a0fd86a6 100644 --- a/lib/algorithms/iterative/SchurRedBlack.h +++ b/lib/algorithms/iterative/SchurRedBlack.h @@ -55,7 +55,15 @@ Author: Peter Boyle *Odd * i) D_oo psi_o = L^{-1} eta_o * eta_o' = (D_oo)^dag (eta_o - Moe Mee^{-1} eta_e) + * + * Wilson: * (D_oo)^{\dag} D_oo psi_o = (D_oo)^dag L^{-1} eta_o + * Stag: + * D_oo psi_o = L^{-1} eta = (eta_o - Moe Mee^{-1} eta_e) + * + * L^-1 eta_o= (1 0 ) (e + * (-MoeMee^{-1} 1 ) + * *Even * ii) Mee psi_e + Meo psi_o = src_e * @@ -122,18 +130,19 @@ namespace Grid { pickCheckerboard(Odd ,sol_o,out); ///////////////////////////////////////////////////// - // src_o = Mdag * (source_o - Moe MeeInv source_e) + // src_o = (source_o - Moe MeeInv source_e) ///////////////////////////////////////////////////// _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); - _Matrix.Mooee(tmp,src_o); assert(src_o.checkerboard ==Odd); + src_o = tmp; assert(src_o.checkerboard ==Odd); + // _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source ////////////////////////////////////////////////////////////// // Call the red-black solver ////////////////////////////////////////////////////////////// - std::cout< + + This 
program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +struct scal { + d internal; +}; + + Gamma::Algebra Gmu [] = { + Gamma::Algebra::GammaX, + Gamma::Algebra::GammaY, + Gamma::Algebra::GammaZ, + Gamma::Algebra::GammaT + }; + +int main (int argc, char ** argv) +{ + typedef typename ImprovedStaggeredFermionR::FermionField FermionField; + typename ImprovedStaggeredFermionR::ImplParams params; + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + GridCartesian Grid(latt_size,simd_layout,mpi_layout); + GridRedBlackCartesian RBGrid(&Grid); + + std::vector seeds({1,2,3,4}); + GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds); + + LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu); + + FermionField src(&Grid); random(pRNG,src); + FermionField result(&Grid); result=zero; + FermionField resid(&Grid); + + RealD mass=0.1; + ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass); + + ConjugateGradient CG(1.0e-8,10000); + SchurRedBlackStaggeredSolve SchurSolver(CG); + + SchurSolver(Ds,src,result); + + Grid_finalize(); +} From 0f3e9ae57d4a0cc6f7f8ec1d0fa8e922335aab72 Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 26 Oct 2017 23:29:59 +0100 Subject: [PATCH 051/145] Gsites error. Only appeared (so far) in I/O code for even odd fields --- lib/cartesian/Cartesian_red_black.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/cartesian/Cartesian_red_black.h b/lib/cartesian/Cartesian_red_black.h index f89cacc5..5c50f062 100644 --- a/lib/cartesian/Cartesian_red_black.h +++ b/lib/cartesian/Cartesian_red_black.h @@ -205,6 +205,7 @@ public: { assert((_gdimensions[d] & 0x1) == 0); _gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard + _gsites /= 2; } _ldimensions[d] = _gdimensions[d] / _processors[d]; assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); From 00ebc150ad6a6db27000829c6830ea8b855bacfe Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 26 Oct 2017 23:30:37 +0100 Subject: [PATCH 052/145] Mistake in string parse; interface is ambiguous and must fix. Is char * a file, or a XML buffer ? 
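For reference, the two pugixml entry points the fix below chooses between: a const char * can either name a file on disk or hold the XML text itself, and each meaning has its own call. The sketch is illustrative only; loadFromFile and loadFromBuffer are hypothetical helper names, not Grid code.

    #include <pugixml.hpp>
    #include <iostream>
    #include <string>

    // Interpret the argument as a path on disk.
    void loadFromFile(pugi::xml_document &doc, const std::string &path)
    {
      pugi::xml_parse_result r = doc.load_file(path.c_str());
      if (!r) std::cerr << "XML error (file " << path << "): " << r.description() << "\n";
    }

    // Interpret the argument as an in-memory XML buffer.
    void loadFromBuffer(pugi::xml_document &doc, const char *xmlstring)
    {
      pugi::xml_parse_result r = doc.load_string(xmlstring);
      if (!r) std::cerr << "XML error (buffer): " << r.description()
                        << " at offset " << r.offset << "\n";
    }

The hunk below switches the char * constructor of XmlReader to the buffer form.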
--- lib/serialisation/XmlIO.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/serialisation/XmlIO.cc b/lib/serialisation/XmlIO.cc index c0c45adc..260611a5 100644 --- a/lib/serialisation/XmlIO.cc +++ b/lib/serialisation/XmlIO.cc @@ -68,10 +68,10 @@ std::string XmlWriter::XmlString(void) XmlReader::XmlReader(const char *xmlstring,string toplev) : fileName_("") { pugi::xml_parse_result result; - result = doc_.load_file(xmlstring); + result = doc_.load_string(xmlstring); if ( !result ) { - cerr << "XML error description: char * " << result.description() << " "<< xmlstring << "\n"; - cerr << "XML error offset : char * " << result.offset << " "< Date: Thu, 26 Oct 2017 23:31:46 +0100 Subject: [PATCH 053/145] Cleaning up --- .../iterative/ImplicitlyRestartedLanczos.h | 48 +++++++++++-------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index 4be2715a..089e7ff3 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -37,6 +37,9 @@ Author: Christoph Lehner namespace Grid { + //////////////////////////////////////////////////////// + // Move following 100 LOC to lattice/Lattice_basis.h + //////////////////////////////////////////////////////// template void basisOrthogonalize(std::vector &basis,Field &w,int k) { @@ -101,7 +104,6 @@ void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, s if (idx[i] != i) { - assert(idx[i] > i); ////////////////////////////////////// // idx[i] is a table of desired sources giving a permutation. // Swap v[i] with v[idx[i]]. @@ -114,8 +116,7 @@ void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, s if (idx[j]==i) break; - assert(j!=idx.size()); - assert(idx[j]==i); + assert(idx[i] > i); assert(j!=idx.size()); assert(idx[j]==i); std::swap(_v[i]._odata,_v[idx[i]]._odata); // should use vector move constructor, no data copy std::swap(sort_vals[i],sort_vals[idx[i]]); @@ -161,12 +162,6 @@ void basisDeflate(const std::vector &_v,const std::vector& eval,co } } -enum IRLdiagonalisation { - IRLdiagonaliseWithDSTEGR, - IRLdiagonaliseWithQR, - IRLdiagonaliseWithEigen -}; - ///////////////////////////////////////////////////////////// // Implicitly restarted lanczos ///////////////////////////////////////////////////////////// @@ -177,6 +172,12 @@ template class ImplicitlyRestartedLanczosTester virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox); }; +enum IRLdiagonalisation { + IRLdiagonaliseWithDSTEGR, + IRLdiagonaliseWithQR, + IRLdiagonaliseWithEigen +}; + template class ImplicitlyRestartedLanczosHermOpTester : public ImplicitlyRestartedLanczosTester { public: @@ -242,6 +243,17 @@ class ImplicitlyRestartedLanczos { ///////////////////////// public: + ////////////////////////////////////////////////////////////////// + // PAB: + ////////////////////////////////////////////////////////////////// + // Too many options & knobs. Do we really need orth_period + // What is the theoretical basis & guarantees of betastp ? + // Nstop=Nk viable? + // MinRestart avoidable with new convergence test? + // Could cut to HermOp, HermOpTest, Tester, Nk, Nm, resid, maxiter (+diagonalisation) + // HermOpTest could be eliminated if we dropped the Power method for max eval. 
+ // -- also: The eval, eval2, eval2_copy stuff is still unnecessarily unclear + ////////////////////////////////////////////////////////////////// ImplicitlyRestartedLanczos(LinearFunction & HermOp, LinearFunction & HermOpTest, ImplicitlyRestartedLanczosTester & Tester, @@ -413,16 +425,14 @@ until convergence // sorting ////////////////////////////////// eval2_copy = eval2; - std::partial_sort(eval2.begin(),eval2.begin()+Nm,eval2.end(),std::greater()); - std::cout<0); basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis - std::cout<& lmd, // Nm std::vector& lme, // Nm int Nk, int Nm, // Nk, Nm From 9ec9850bdb49548238b1cb253c82bfeee3823683 Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 26 Oct 2017 23:34:31 +0100 Subject: [PATCH 054/145] 64bit ftello update --- lib/parallelIO/IldgIO.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/lib/parallelIO/IldgIO.h b/lib/parallelIO/IldgIO.h index 1f2b7c90..36ecbd1b 100644 --- a/lib/parallelIO/IldgIO.h +++ b/lib/parallelIO/IldgIO.h @@ -224,7 +224,7 @@ class GridLimeReader : public BinaryIO { assert(PayloadSize == file_bytes);// Must match or user error - off_t offset= ftell(File); + uint64_t offset= ftello(File); // std::cout << " ReadLatticeObject from offset "< munge; BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); @@ -253,16 +253,13 @@ class GridLimeReader : public BinaryIO { while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { // std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" < xmlc(nbytes+1,'\0'); limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR); - // std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <=0); err=limeWriterCloseRecord(LimeW); assert(err>=0); limeDestroyHeader(h); - // std::cout << " File offset is now"<(); BinarySimpleMunger munge; BinaryIO::writeLatticeObject(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); + // fseek(File,0,SEEK_END); offset = ftello(File);std::cout << " offset now "<=0); + //////////////////////////////////////// // Write checksum element, propagaing forward from the BinaryIO // Always pair a checksum with a binary object, and close message @@ -703,8 +702,7 @@ class IldgReader : public GridLimeReader { // Binary data ///////////////////////////////// std::cout << GridLogMessage << "ILDG Binary record found : " ILDG_BINARY_DATA << std::endl; - off_t offset= ftell(File); - + uint64_t offset= ftello(File); if ( format == std::string("IEEE64BIG") ) { GaugeSimpleMunger munge; BinaryIO::readLatticeObject< vobj, dobj >(Umu, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); From 7fab183c0eebfd82e006eca2130d809131a36074 Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 27 Oct 2017 08:17:49 +0100 Subject: [PATCH 055/145] Better read test --- lib/parallelIO/IldgIO.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/parallelIO/IldgIO.h b/lib/parallelIO/IldgIO.h index 36ecbd1b..b86e250f 100644 --- a/lib/parallelIO/IldgIO.h +++ b/lib/parallelIO/IldgIO.h @@ -159,7 +159,7 @@ namespace QCD { uint32_t scidac_checksumb = stoull(scidacChecksum_.sumb,0,16); if ( scidac_csuma !=scidac_checksuma) return 0; if ( scidac_csumb !=scidac_checksumb) return 0; - return 1; + return 1; } //////////////////////////////////////////////////////////////////////////////////// @@ -237,7 +237,7 @@ class GridLimeReader : public BinaryIO { 
///////////////////////////////////////////// // Verify checksums ///////////////////////////////////////////// - scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb); + assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1); return; } } From fa04b6d3c233d6057fb5133c8e5627bc2d941aba Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 27 Oct 2017 08:18:29 +0100 Subject: [PATCH 056/145] Finished ? Verifying coarse evec restore --- .../Test_dwf_compressed_lanczos_reorg.cc | 145 +++++++++++++----- 1 file changed, 109 insertions(+), 36 deletions(-) diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc index ad1aaa47..42814e2f 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc @@ -50,9 +50,13 @@ struct LanczosParams : Serializable { int, MinRes); // Must restart }; -struct CompressedLanczosParams : Serializable { +struct LocalCoherenceLanczosParams : Serializable { public: - GRID_SERIALIZABLE_CLASS_MEMBERS(CompressedLanczosParams, + GRID_SERIALIZABLE_CLASS_MEMBERS(bool, doFine, + bool, doFineRead, + bool, doCoarse, + bool, doCoarseRead, + LocalCoherenceLanczosParams, LanczosParams, FineParams, LanczosParams, CoarseParams, ChebyParams, Smoother, @@ -61,8 +65,7 @@ struct CompressedLanczosParams : Serializable { std::string, config, std::vector < std::complex >, omega, RealD, mass, - RealD, M5 - ); + RealD, M5); }; // Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function @@ -209,7 +212,7 @@ class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanc // Make serializable Lanczos params //////////////////////////////////////////// template -class CoarseFineIRL +class LocalCoherenceLanczos { public: typedef iVector CoarseSiteVector; @@ -230,7 +233,7 @@ private: std::vector evals_coarse; std::vector evec_coarse; public: - CoarseFineIRL(GridBase *FineGrid, + LocalCoherenceLanczos(GridBase *FineGrid, GridBase *CoarseGrid, LinearOperatorBase &FineOp, int checkerboard) : @@ -253,7 +256,7 @@ public: return nn; } - void testFine(void) + void fakeFine(void) { int Nk = nbasis; _Aggregate.subspace.resize(Nk,_FineGrid); @@ -286,6 +289,42 @@ public: write(WR,"evals",evals_fine); } } + + void checkpointFineRestore(std::string evecs_file,std::string evals_file) + { + evals_fine.resize(nbasis); + _Aggregate.subspace.resize(nbasis,_FineGrid); + { + std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "< Op(_FineOp); + ImplicitlyRestartedLanczosHermOpTester SimpleTester(Op); + for(int k=0;k ChebySmooth(cheby_smooth); + ProjectedFunctionHermOp ChebyOp (ChebySmooth,_FineOp,_Aggregate); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); + + for(int k=0;k=Nstop); - + evals_coarse.resize(Nstop); + evec_coarse.resize (Nstop,_CoarseGrid); for (int i=0;i IRL(FrbGrid,CoarseGrid5rb,HermOp,Odd); - std::cout << GridLogMessage << "Constructed CoarseFine IRL" << std::endl; + LocalCoherenceLanczos _LocalCoherenceLanczos(FrbGrid,CoarseGrid5rb,HermOp,Odd); + std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl; - int do_fine = 1; - int do_coarse = 0; - int do_smooth = 0; - if ( do_fine ) { + if ( Params.doCoarse ) { + assert( (Params.doFine)||(Params.doFineRead)); + } + + if ( Params.doFine ) { std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "< Date: Fri, 27 Oct 2017 09:04:31 +0100 Subject: 
[PATCH 057/145] Move the local coherence lanczos into algorithms. Keep the I/O in the tester. Other people can copy this method to write other I/O formats. --- .../iterative/LocalCoherenceLanczos.h | 348 ++++++++++++++ .../Test_dwf_compressed_lanczos_reorg.cc | 436 +++--------------- 2 files changed, 410 insertions(+), 374 deletions(-) create mode 100644 lib/algorithms/iterative/LocalCoherenceLanczos.h diff --git a/lib/algorithms/iterative/LocalCoherenceLanczos.h b/lib/algorithms/iterative/LocalCoherenceLanczos.h new file mode 100644 index 00000000..6b8fe62c --- /dev/null +++ b/lib/algorithms/iterative/LocalCoherenceLanczos.h @@ -0,0 +1,348 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/algorithms/iterative/LocalCoherenceLanczos.h + + Copyright (C) 2015 + +Author: Christoph Lehner +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_LOCAL_COHERENCE_IRL_H +#define GRID_LOCAL_COHERENCE_IRL_H +namespace Grid { +struct LanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams, + ChebyParams, Cheby,/*Chebyshev*/ + int, Nstop, /*Vecs in Lanczos must converge Nstop < Nk < Nm*/ + int, Nk, /*Vecs in Lanczos seek converge*/ + int, Nm, /*Total vecs in Lanczos include restart*/ + RealD, resid, /*residual*/ + int, MaxIt, + RealD, betastp, /* ? 
*/ + int, MinRes); // Must restart +}; + +struct LocalCoherenceLanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosParams, + bool, doFine, + bool, doFineRead, + bool, doCoarse, + bool, doCoarseRead, + LanczosParams, FineParams, + LanczosParams, CoarseParams, + ChebyParams, Smoother, + RealD , coarse_relax_tol, + std::vector, blockSize, + std::string, config, + std::vector < std::complex >, omega, + RealD, mass, + RealD, M5); +}; + +// Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function +template +class ProjectedHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedHermOp(LinearOperatorBase& linop, Aggregation &aggregate) : + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + FineField fin(FineGrid); + FineField fout(FineGrid); + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ProjectedFunctionHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, + Aggregation &aggregate) : + _poly(poly), + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + + FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; + FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanczosTester > > +{ + public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearFunction & _Poly; + OperatorFunction & _smoother; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + RealD _coarse_relax_tol; + ImplicitlyRestartedLanczosSmoothedTester(LinearFunction &Poly, + OperatorFunction &smoother, + LinearOperatorBase &Linop, + Aggregation &Aggregate, + RealD coarse_relax_tol=5.0e3) + : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly), _coarse_relax_tol(coarse_relax_tol) { }; + + int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) + { + CoarseField v(B); + RealD eval_poly = eval; + // Apply operator + _Poly(B,v); + + RealD vnum = real(innerProduct(B,v)); // HermOp. 
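+      // vnum/vden is the Rayleigh quotient <B, Poly(Op) B> / <B, B>; the residual
+      // v - eval*B, measured against the approximate largest eigenvalue squared,
+      // is what the convergence test below checks.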
+ RealD vden = norm2(B); + RealD vv0 = norm2(v); + eval = vnum/vden; + v -= eval*B; + + RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); + + std::cout.precision(13); + std::cout< nbasis ) eresid = eresid*_coarse_relax_tol; + if( (vv +class LocalCoherenceLanczos +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice CoarseField; + typedef Lattice FineField; + +protected: + GridBase *_CoarseGrid; + GridBase *_FineGrid; + int _checkerboard; + LinearOperatorBase & _FineOp; + + // FIXME replace Aggregation with vector of fine; the code reuse is too small for + // the hassle and complexity of cross coupling. + Aggregation _Aggregate; + std::vector evals_fine; + std::vector evals_coarse; + std::vector evec_coarse; +public: + LocalCoherenceLanczos(GridBase *FineGrid, + GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) : + _CoarseGrid(CoarseGrid), + _FineGrid(FineGrid), + _Aggregate(CoarseGrid,FineGrid,checkerboard), + _FineOp(FineOp), + _checkerboard(checkerboard) + { + evals_fine.resize(0); + evals_coarse.resize(0); + }; + void Orthogonalise(void ) { _Aggregate.Orthogonalise(); } + + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = ::sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void fakeFine(void) + { + int Nk = nbasis; + _Aggregate.subspace.resize(Nk,_FineGrid); + _Aggregate.subspace[0]=1.0; + _Aggregate.subspace[0].checkerboard=_checkerboard; + normalise(_Aggregate.subspace[0]); + PlainHermOp Op(_FineOp); + for(int k=1;k Op(_FineOp); + ImplicitlyRestartedLanczosHermOpTester SimpleTester(Op); + for(int k=0;k ChebySmooth(cheby_smooth); + ProjectedFunctionHermOp ChebyOp (ChebySmooth,_FineOp,_Aggregate); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); + + for(int k=0;k Cheby(cheby_parms); + FunctionHermOp ChebyOp(Cheby,_FineOp); + PlainHermOp Op(_FineOp); + + evals_fine.resize(Nm); + _Aggregate.subspace.resize(Nm,_FineGrid); + + ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + + FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; + + int Nconv; + IRL.calc(evals_fine,_Aggregate.subspace,src,Nconv,false); + + // Shrink down to number saved + assert(Nstop>=nbasis); + assert(Nconv>=nbasis); + evals_fine.resize(nbasis); + _Aggregate.subspace.resize(nbasis,_FineGrid); + } + void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax, + int Nstop, int Nk, int Nm,RealD resid, + RealD MaxIt, RealD betastp, int MinRes) + { + Chebyshev Cheby(cheby_op); + ProjectedHermOp Op(_FineOp,_Aggregate); + ProjectedFunctionHermOp ChebyOp (Cheby,_FineOp,_Aggregate); + ////////////////////////////////////////////////////////////////////////////////////////////////// + // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL + ////////////////////////////////////////////////////////////////////////////////////////////////// + + Chebyshev ChebySmooth(cheby_smooth); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); + + evals_coarse.resize(Nm); + evec_coarse.resize(Nm,_CoarseGrid); + + CoarseField src(_CoarseGrid); src=1.0; + + ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + int Nconv=0; + IRL.calc(evals_coarse,evec_coarse,src,Nconv,false); + assert(Nconv>=Nstop); + evals_coarse.resize(Nstop); + evec_coarse.resize 
(Nstop,_CoarseGrid); + for (int i=0;i -class ProjectedFunctionHermOp : public LinearFunction > > { -public: - typedef iVector CoarseSiteVector; - typedef Lattice CoarseField; - typedef Lattice CoarseScalar; // used for inner products on fine field - typedef Lattice FineField; - - - OperatorFunction & _poly; - LinearOperatorBase &_Linop; - Aggregation &_Aggregate; - - ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, - Aggregation &aggregate) : - _poly(poly), - _Linop(linop), - _Aggregate(aggregate) { }; - - void operator()(const CoarseField& in, CoarseField& out) { - - GridBase *FineGrid = _Aggregate.FineGrid; - - FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; - FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; - - _Aggregate.PromoteFromSubspace(in,fin); std::cout< -class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanczosTester > > -{ - public: - typedef iVector CoarseSiteVector; - typedef Lattice CoarseField; - typedef Lattice CoarseScalar; // used for inner products on fine field - typedef Lattice FineField; - - LinearFunction & _Poly; - OperatorFunction & _smoother; - LinearOperatorBase &_Linop; - Aggregation &_Aggregate; - RealD _coarse_relax_tol; - ImplicitlyRestartedLanczosSmoothedTester(LinearFunction &Poly, - OperatorFunction &smoother, - LinearOperatorBase &Linop, - Aggregation &Aggregate, - RealD coarse_relax_tol=5.0e3) - : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly), _coarse_relax_tol(coarse_relax_tol) { }; - - int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) - { - CoarseField v(B); - RealD eval_poly = eval; - // Apply operator - _Poly(B,v); - - RealD vnum = real(innerProduct(B,v)); // HermOp. - RealD vden = norm2(B); - RealD vv0 = norm2(v); - eval = vnum/vden; - v -= eval*B; - - RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); - - std::cout.precision(13); - std::cout< nbasis ) eresid = eresid*_coarse_relax_tol; - if( (vv -class LocalCoherenceLanczos -{ -public: - typedef iVector CoarseSiteVector; - typedef Lattice CoarseScalar; // used for inner products on fine field - typedef Lattice CoarseField; - typedef Lattice FineField; - -private: - GridBase *_CoarseGrid; - GridBase *_FineGrid; - int _checkerboard; - LinearOperatorBase & _FineOp; - - // FIXME replace Aggregation with vector of fine; the code reuse is too small for - // the hassle and complexity of cross coupling. 
- Aggregation _Aggregate; - std::vector evals_fine; - std::vector evals_coarse; - std::vector evec_coarse; -public: - LocalCoherenceLanczos(GridBase *FineGrid, - GridBase *CoarseGrid, - LinearOperatorBase &FineOp, - int checkerboard) : - _CoarseGrid(CoarseGrid), - _FineGrid(FineGrid), - _Aggregate(CoarseGrid,FineGrid,checkerboard), - _FineOp(FineOp), - _checkerboard(checkerboard) - { - evals_fine.resize(0); - evals_coarse.resize(0); - }; - void Orthogonalise(void ) { _Aggregate.Orthogonalise(); } - - template static RealD normalise(T& v) - { - RealD nn = norm2(v); - nn = ::sqrt(nn); - v = v * (1.0/nn); - return nn; - } - - void fakeFine(void) - { - int Nk = nbasis; - _Aggregate.subspace.resize(Nk,_FineGrid); - _Aggregate.subspace[0]=1.0; - _Aggregate.subspace[0].checkerboard=_checkerboard; - normalise(_Aggregate.subspace[0]); - PlainHermOp Op(_FineOp); - for(int k=1;k &FineOp, + int checkerboard) + // Base constructor + : LocalCoherenceLanczos(FineGrid,CoarseGrid,FineOp,checkerboard) + {}; void checkpointFine(std::string evecs_file,std::string evals_file) { - assert(_Aggregate.subspace.size()==nbasis); + assert(this->_Aggregate.subspace.size()==nbasis); emptyUserRecord record; - { - ScidacWriter WR; - WR.open(evecs_file); - for(int k=0;k_Aggregate.subspace[k],record); } + WR.close(); + + XmlWriter WRx(evals_file); + write(WRx,"evals",this->evals_fine); } void checkpointFineRestore(std::string evecs_file,std::string evals_file) { - evals_fine.resize(nbasis); - _Aggregate.subspace.resize(nbasis,_FineGrid); - { - std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "<evals_fine.resize(nbasis); + this->_Aggregate.subspace.resize(nbasis,this->_FineGrid); + + std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "<evals_fine); + + assert(this->evals_fine.size()==nbasis); + + std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "< Op(_FineOp); - ImplicitlyRestartedLanczosHermOpTester SimpleTester(Op); - for(int k=0;k_Aggregate.subspace[k].checkerboard=this->_checkerboard; + RD.readScidacFieldRecord(this->_Aggregate.subspace[k],record); + } + RD.close(); } void checkpointCoarse(std::string evecs_file,std::string evals_file) { - int n = evec_coarse.size(); + int n = this->evec_coarse.size(); emptyUserRecord record; - { - ScidacWriter WR; - WR.open(evecs_file); - for(int k=0;kevec_coarse[k],record); } + WR.close(); + + XmlWriter WRx(evals_file); + write(WRx,"evals",this->evals_coarse); } + void checkpointCoarseRestore(std::string evecs_file,std::string evals_file,int nvec) { std::cout << " resizing to " << nvec<< std::endl; - evals_coarse.resize(nvec); - evec_coarse.resize(nvec,_CoarseGrid); - { - std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<evals_coarse.resize(nvec); + this->evec_coarse.resize(nvec,this->_CoarseGrid); + std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<evals_coarse); + assert(this->evals_coarse.size()==nvec); emptyUserRecord record; - { - std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "< ChebySmooth(cheby_smooth); - ProjectedFunctionHermOp ChebyOp (ChebySmooth,_FineOp,_Aggregate); - ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); - - for(int k=0;k Cheby(cheby_parms); - FunctionHermOp ChebyOp(Cheby,_FineOp); - PlainHermOp Op(_FineOp); - - evals_fine.resize(Nm); - _Aggregate.subspace.resize(Nm,_FineGrid); - - ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); - - FineField 
src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; - - int Nconv; - IRL.calc(evals_fine,_Aggregate.subspace,src,Nconv,false); - - // Shrink down to number saved - assert(Nstop>=nbasis); - assert(Nconv>=nbasis); - evals_fine.resize(nbasis); - _Aggregate.subspace.resize(nbasis,_FineGrid); - } - void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax, - int Nstop, int Nk, int Nm,RealD resid, - RealD MaxIt, RealD betastp, int MinRes) - { - Chebyshev Cheby(cheby_op); - ProjectedHermOp Op(_FineOp,_Aggregate); - ProjectedFunctionHermOp ChebyOp (Cheby,_FineOp,_Aggregate); - ////////////////////////////////////////////////////////////////////////////////////////////////// - // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL - ////////////////////////////////////////////////////////////////////////////////////////////////// - - Chebyshev ChebySmooth(cheby_smooth); - ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); - - evals_coarse.resize(Nm); - evec_coarse.resize(Nm,_CoarseGrid); - - CoarseField src(_CoarseGrid); src=1.0; - - ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); - int Nconv=0; - IRL.calc(evals_coarse,evec_coarse,src,Nconv,false); - assert(Nconv>=Nstop); - evals_coarse.resize(Nstop); - evec_coarse.resize (Nstop,_CoarseGrid); - for (int i=0;ievec_coarse[k],record); } + RD.close(); } }; - int main (int argc, char ** argv) { Grid_init(&argc,&argv); @@ -465,7 +153,9 @@ int main (int argc, char ** argv) { std::vector blockSize = Params.blockSize; // Grids - GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); @@ -516,12 +206,10 @@ int main (int argc, char ** argv) { const int nbasis= 60; assert(nbasis==Ns1); - LocalCoherenceLanczos _LocalCoherenceLanczos(FrbGrid,CoarseGrid5rb,HermOp,Odd); + LocalCoherenceLanczosScidac _LocalCoherenceLanczos(FrbGrid,CoarseGrid5rb,HermOp,Odd); std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl; - if ( Params.doCoarse ) { - assert( (Params.doFine)||(Params.doFineRead)); - } + assert( (Params.doFine)||(Params.doFineRead)); if ( Params.doFine ) { std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "< Date: Fri, 27 Oct 2017 09:43:22 +0100 Subject: [PATCH 058/145] Passes reload of coarse basis --- lib/algorithms/iterative/LocalCoherenceLanczos.h | 6 +++++- tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/algorithms/iterative/LocalCoherenceLanczos.h b/lib/algorithms/iterative/LocalCoherenceLanczos.h index 6b8fe62c..d5d1bbc2 100644 --- a/lib/algorithms/iterative/LocalCoherenceLanczos.h +++ b/lib/algorithms/iterative/LocalCoherenceLanczos.h @@ -285,7 +285,11 @@ public: ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); for(int k=0;k Date: Fri, 27 Oct 2017 10:29:34 +0100 Subject: [PATCH 059/145] Bug fix in the coarse restore... 
Think this is nearly there --- tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc index 0824cfa4..4c702a33 100644 --- a/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg.cc @@ -109,7 +109,7 @@ public: void checkpointCoarseRestore(std::string evecs_file,std::string evals_file,int nvec) { - std::cout << " resizing to " << nvec<< std::endl; + std::cout << "resizing coarse vecs to " << nvec<< std::endl; this->evals_coarse.resize(nvec); this->evec_coarse.resize(nvec,this->_CoarseGrid); std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<evec_coarse[k],record); } RD.close(); From 1ef424b1392038df12130b1ce2f855c8b1cc1dbd Mon Sep 17 00:00:00 2001 From: paboyle Date: Fri, 27 Oct 2017 14:20:35 +0100 Subject: [PATCH 060/145] Split grid Y2K bug fix attempt --- lib/communicator/Communicator_base.h | 14 +++++++++----- lib/communicator/Communicator_mpi.cc | 26 +++++++++++++++++++++----- lib/communicator/Communicator_none.cc | 8 ++++++-- lib/lattice/Lattice_transfer.h | 8 ++++---- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 22c9e4d0..ff054497 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -274,12 +274,16 @@ class CartesianCommunicator { // std::cerr << " AllToAll in.size() "< void Broadcast(int root,obj &data) { diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 5a2dc4d0..ef612f98 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -55,7 +55,9 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { CartesianCommunicator::~CartesianCommunicator() { - if (communicator && !MPI::Is_finalized()) + int MPI_is_finalised; + MPI_Finalized(&MPI_is_finalised); + if (communicator && MPI_is_finalised) MPI_Comm_free(&communicator); } @@ -195,7 +197,7 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) communicator); assert(ierr==0); } -void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes) { std::vector row(_ndimension,1); assert(dim>=0 && dim<_ndimension); @@ -204,11 +206,25 @@ void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) row[dim] = _processors[dim]; CartesianCommunicator Comm(row,*this); - Comm.AllToAll(in,out,bytes); + Comm.AllToAll(in,out,words,bytes); } -void CartesianCommunicator::AllToAll(void *in,void *out,int bytes) +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) { - MPI_Alltoall(in ,bytes,MPI_BYTE,out,bytes,MPI_BYTE,communicator); + // MPI is a pain and uses "int" arguments + // 64*64*64*128*16 == 500Million elements of data. + // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. + // (Turns up on 32^3 x 64 Gparity too) + MPI_Datatype object; + int iwords; + int ibytes; + iwords = words; + ibytes = bytes; + assert(words == iwords); // safe to cast to int ? + assert(bytes == ibytes); // safe to cast to int ? 
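+  // The derived datatype keeps both MPI_Alltoall count arguments inside int range:
+  // each element of 'object' carries 'bytes' of MPI_BYTE and only 'words' of them
+  // are exchanged, so the product words*bytes never has to fit in an int.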
+ MPI_Type_contiguous(ibytes,MPI_BYTE,&object); + MPI_Type_commit(&object); + MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); + MPI_Type_free(&object); } /////////////////////////////////////////////////////// // Should only be used prior to Grid Init finished. diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 629a3e4a..a862d52a 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -100,9 +100,13 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector & { assert(0); } -void CartesianCommunicator::AllToAll(int dim,void *in,void *out,int bytes) +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes) { - bcopy(in,out,bytes); + bcopy(in,out,bytes*words); +} +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) +{ + bcopy(in,out,bytes*words); } int CartesianCommunicator::RankWorld(void){return 0;} diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index 713a8788..bc59e9eb 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -790,8 +790,8 @@ void Grid_split(std::vector > & full,Lattice & split) ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d]; } - int lsites = full_grid->lSites(); - Integer sz = lsites * nvector; + uint64_t lsites = full_grid->lSites(); + uint64_t sz = lsites * nvector; std::vector tmpdata(sz); std::vector alldata(sz); std::vector scalardata(lsites); @@ -908,8 +908,8 @@ void Grid_unsplit(std::vector > & full,Lattice & split) ratio[d] = full_grid->_processors[d]/ split_grid->_processors[d]; } - int lsites = full_grid->lSites(); - Integer sz = lsites * nvector; + uint64_t lsites = full_grid->lSites(); + uint64_t sz = lsites * nvector; std::vector tmpdata(sz); std::vector alldata(sz); std::vector scalardata(lsites); From 689323f4eec85b159d82fe4b2b7097ff4312c70c Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:03:15 +0000 Subject: [PATCH 061/145] Reverse dim ordering lexico support --- lib/util/Lexicographic.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lib/util/Lexicographic.h b/lib/util/Lexicographic.h index b922dba5..f5c55b74 100644 --- a/lib/util/Lexicographic.h +++ b/lib/util/Lexicographic.h @@ -26,6 +26,25 @@ namespace Grid{ } } + static inline void IndexFromCoorReversed (const std::vector& coor,int &index,const std::vector &dims){ + int nd=dims.size(); + int stride=1; + index=0; + for(int d=nd-1;d>=0;d--){ + index = index+stride*coor[d]; + stride=stride*dims[d]; + } + } + static inline void CoorFromIndexReversed (std::vector& coor,int index,const std::vector &dims){ + int nd= dims.size(); + coor.resize(nd); + for(int d=nd-1;d>=0;d--){ + coor[d] = index % dims[d]; + index = index / dims[d]; + } + } + + }; } From 4a699b4da340280d0502fcaab6d31b598e924f93 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:04:14 +0000 Subject: [PATCH 062/145] New rank can be found out --- lib/cartesian/Cartesian_base.h | 9 +++++++-- lib/cartesian/Cartesian_full.h | 11 +++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/lib/cartesian/Cartesian_base.h b/lib/cartesian/Cartesian_base.h index 6aa0e3c7..acc870de 100644 --- a/lib/cartesian/Cartesian_base.h +++ b/lib/cartesian/Cartesian_base.h @@ -44,13 +44,18 @@ namespace Grid{ class GridBase : public CartesianCommunicator , public GridThread { public: - + int dummy; // Give Lattice access template friend class 
Lattice; GridBase(const std::vector & processor_grid) : CartesianCommunicator(processor_grid) {}; GridBase(const std::vector & processor_grid, - const CartesianCommunicator &parent) : CartesianCommunicator(processor_grid,parent) {}; + const CartesianCommunicator &parent, + int &split_rank) + : CartesianCommunicator(processor_grid,parent,split_rank) {}; + GridBase(const std::vector & processor_grid, + const CartesianCommunicator &parent) + : CartesianCommunicator(processor_grid,parent,dummy) {}; virtual ~GridBase() = default; diff --git a/lib/cartesian/Cartesian_full.h b/lib/cartesian/Cartesian_full.h index c7ea68c9..9273abf3 100644 --- a/lib/cartesian/Cartesian_full.h +++ b/lib/cartesian/Cartesian_full.h @@ -38,7 +38,7 @@ namespace Grid{ class GridCartesian: public GridBase { public: - + int dummy; virtual int CheckerBoardFromOindexTable (int Oindex) { return 0; } @@ -67,7 +67,14 @@ public: GridCartesian(const std::vector &dimensions, const std::vector &simd_layout, const std::vector &processor_grid, - const GridCartesian &parent) : GridBase(processor_grid,parent) + const GridCartesian &parent) : GridBase(processor_grid,parent,dummy) + { + Init(dimensions,simd_layout,processor_grid); + } + GridCartesian(const std::vector &dimensions, + const std::vector &simd_layout, + const std::vector &processor_grid, + const GridCartesian &parent,int &split_rank) : GridBase(processor_grid,parent,split_rank) { Init(dimensions,simd_layout,processor_grid); } From fe4d9b003ca9c38ff6ec15e7445c22b0f4a72ade Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:04:47 +0000 Subject: [PATCH 063/145] More digits --- lib/algorithms/iterative/ConjugateGradient.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/algorithms/iterative/ConjugateGradient.h b/lib/algorithms/iterative/ConjugateGradient.h index 5c968e04..0d4e51c7 100644 --- a/lib/algorithms/iterative/ConjugateGradient.h +++ b/lib/algorithms/iterative/ConjugateGradient.h @@ -78,12 +78,12 @@ class ConjugateGradient : public OperatorFunction { cp = a; ssq = norm2(src); - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: guess " << guess << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: src " << ssq << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: mp " << d << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: mmp " << b << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: cp,r " << cp << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: p " << a << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: guess " << guess << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: src " << ssq << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mp " << d << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mmp " << b << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: cp,r " << cp << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: p " << a << std::endl; RealD rsq = Tolerance * Tolerance * ssq; @@ -92,7 +92,7 @@ class ConjugateGradient : public OperatorFunction { return; } - std::cout << GridLogIterative << std::setprecision(4) + std::cout << GridLogIterative << 
std::setprecision(8) << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl; GridStopWatch LinalgTimer; From 5bf42e1e150cb0e9116e427653955cb4398b1326 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:05:21 +0000 Subject: [PATCH 064/145] Update --- tests/solver/Test_dwf_hdcr.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/solver/Test_dwf_hdcr.cc b/tests/solver/Test_dwf_hdcr.cc index c553ba0a..b3373238 100644 --- a/tests/solver/Test_dwf_hdcr.cc +++ b/tests/solver/Test_dwf_hdcr.cc @@ -555,13 +555,13 @@ int main (int argc, char ** argv) std::cout< HermDefOp(Ddwf); - Subspace Aggregates(Coarse5d,FGrid); + Subspace Aggregates(Coarse5d,FGrid,0); // Aggregates.CreateSubspace(RNG5,HermDefOp,nbasis); assert ( (nbasis & 0x1)==0); int nb=nbasis/2; std::cout< Date: Mon, 30 Oct 2017 00:16:12 +0000 Subject: [PATCH 065/145] Communicator updates for split grid --- lib/communicator/Communicator_base.cc | 63 +++++++++++++++++++-------- lib/communicator/Communicator_base.h | 2 +- lib/communicator/Communicator_mpi.cc | 3 +- 3 files changed, 47 insertions(+), 21 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index ce9a3cf0..a72c75fe 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -97,9 +97,9 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N) } -#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) +#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) { _ndimension = processors.size(); assert(_ndimension = parent._ndimension); @@ -124,33 +124,51 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors, for(int d=0;d<_ndimension;d++){ ccoor[d] = parent._processor_coor[d] % processors[d]; scoor[d] = parent._processor_coor[d] / processors[d]; - ssize[d] = parent._processors[d]/ processors[d]; + ssize[d] = parent._processors[d] / processors[d]; } - int crank,srank; // rank within subcomm ; rank of subcomm within blocks of subcomms - Lexicographic::IndexFromCoor(ccoor,crank,processors); - Lexicographic::IndexFromCoor(scoor,srank,ssize); + int crank; // rank within subcomm ; srank is rank of subcomm within blocks of subcomms + // Mpi uses the reverse Lexico convention to us + Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); + Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); MPI_Comm comm_split; if ( Nchild > 1 ) { - // std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors, ////////////////////////////////////////////////////////////////////////////////////////////////////// void CartesianCommunicator::InitFromMPICommunicator(const std::vector &processors, MPI_Comm communicator_base) { - // if ( communicator_base != communicator_world ) { - // std::cout << "Cartesian communicator created with a non-world communicator"< &proc } std::vector periodic(_ndimension,1); - MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator); + MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator); MPI_Comm_rank(communicator,&_processor); 
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); + if ( communicator_base != communicator_world ) { + std::cout << "Cartesian communicator created with a non-world communicator"< &processors,const CartesianCommunicator &parent); + CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank); CartesianCommunicator(const std::vector &pdimensions_in); virtual ~CartesianCommunicator(); diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index ef612f98..5593aa8b 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -205,7 +205,8 @@ void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words, // Split the communicator row[dim] = _processors[dim]; - CartesianCommunicator Comm(row,*this); + int me; + CartesianCommunicator Comm(row,*this,me); Comm.AllToAll(in,out,words,bytes); } void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) From a7f72eb9946d782e48fe315be066ca95b5c097b6 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:22:06 +0000 Subject: [PATCH 066/145] SHaking out --- lib/lattice/Lattice_transfer.h | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index 962cdeb1..1b09217b 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -757,6 +757,7 @@ void precisionChange(Lattice &out, const Lattice &in){ // NB: Easiest to programme if keep in lex order. // ///////////////////////////////////////////////////////// + template void Grid_split(std::vector > & full,Lattice & split) { @@ -805,6 +806,7 @@ void Grid_split(std::vector > & full,Lattice & split) std::vector tmpdata(sz); std::vector alldata(sz); std::vector scalardata(lsites); + for(int v=0;v > & full,Lattice & split) std::vector ldims = full_grid->_ldimensions; std::vector lcoor(ndim); - for(int d=0;d=0;d--){ if ( ratio[d] != 1 ) { full_grid ->AllToAll(d,alldata,tmpdata); - + // std::cout << GridLogMessage << "Grid_split: dim " <_processors[d]< > & full,Lattice & split) int rmul=nvec*lsites; int vmul= lsites; alldata[rsite] = tmpdata[lsite+r*rmul+v*vmul]; - + // if ( lsite==0 ) { + // std::cout << "Grid_split: grow alldata["< > & full,Lattice & split) } } } - vectorizeFromLexOrdArray(alldata,split); } @@ -936,10 +944,12 @@ void Grid_unsplit(std::vector > & full,Lattice & split) lsites = split_grid->lSites(); std::vector ldims = split_grid->_ldimensions; - for(int d=ndim-1;d>=0;d--){ + // for(int d=ndim-1;d>=0;d--){ + for(int d=0;d_processors[d] > 1 ) { tmpdata = alldata; split_grid->AllToAll(d,tmpdata,alldata); @@ -985,13 +995,11 @@ void Grid_unsplit(std::vector > & full,Lattice & split) lsites = full_grid->lSites(); for(int v=0;v Date: Mon, 30 Oct 2017 00:22:52 +0000 Subject: [PATCH 067/145] : --- tests/solver/Test_dwf_mrhs_cg.cc | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/solver/Test_dwf_mrhs_cg.cc b/tests/solver/Test_dwf_mrhs_cg.cc index 079fa85a..207e1331 100644 --- a/tests/solver/Test_dwf_mrhs_cg.cc +++ b/tests/solver/Test_dwf_mrhs_cg.cc @@ -52,15 +52,28 @@ int main (int argc, char ** argv) GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); - int nrhs = UGrid->RankCount() ; - ///////////////////////////////////////////// // Split 
into 1^4 mpi communicators ///////////////////////////////////////////// + for(int i=0;i> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + int me; + for(int i=0;i seeds({1,2,3,4}); - GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); std::vector src(nrhs,FGrid); @@ -93,7 +105,7 @@ int main (int argc, char ** argv) emptyUserRecord record; std::string file("./scratch.scidac"); std::string filef("./scratch.scidac.ferm"); - int me = UGrid->ThisRank(); + LatticeGaugeField s_Umu(SGrid); FermionField s_src(SFGrid); FermionField s_src_split(SFGrid); @@ -169,7 +181,7 @@ int main (int argc, char ** argv) for(int n=0;nBarrier(); if ( n==me ) { - std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<Barrier(); } @@ -218,7 +230,6 @@ int main (int argc, char ** argv) std::cout << " diff " < Date: Mon, 30 Oct 2017 00:23:34 +0000 Subject: [PATCH 068/145] Extended sub comm supported --- tests/solver/Test_split_grid.cc | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tests/solver/Test_split_grid.cc b/tests/solver/Test_split_grid.cc index 90969b85..2b6a4bf7 100644 --- a/tests/solver/Test_split_grid.cc +++ b/tests/solver/Test_split_grid.cc @@ -52,11 +52,24 @@ int main (int argc, char ** argv) GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); - int nrhs = UGrid->RankCount() ; - ///////////////////////////////////////////// // Split into 1^4 mpi communicators ///////////////////////////////////////////// + + for(int i=0;i> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + for(int i=0;i Date: Mon, 30 Oct 2017 00:24:11 +0000 Subject: [PATCH 069/145] Get subrank info from communicator constructor --- tests/solver/Test_dwf_mrhs_cg_mpieo.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpieo.cc b/tests/solver/Test_dwf_mrhs_cg_mpieo.cc index 14115b59..a6dfcd57 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpieo.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpieo.cc @@ -47,7 +47,9 @@ int main (int argc, char ** argv) std::vector mpi_layout = GridDefaultMpi(); std::vector mpi_split (mpi_layout.size(),1); - GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); @@ -57,10 +59,11 @@ int main (int argc, char ** argv) ///////////////////////////////////////////// // Split into 1^4 mpi communicators ///////////////////////////////////////////// + int me; GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), mpi_split, - *UGrid); + *UGrid,me); GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); @@ -89,8 +92,6 @@ int main (int argc, char ** argv) ///////////////// // MPI only sends ///////////////// - int me = UGrid->ThisRank(); - LatticeGaugeField s_Umu(SGrid); FermionField s_src(SFGrid); FermionField 
s_src_e(SFrbGrid); From 78e8704eacb41fae706e50c24ae0baa6b17b9481 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 30 Oct 2017 00:25:31 +0000 Subject: [PATCH 070/145] Shaking out --- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 99 +++++++++++++++++++++++++--- 1 file changed, 89 insertions(+), 10 deletions(-) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index fbc6dd32..f640edff 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -1,4 +1,4 @@ - /************************************************************************************* + /************************************************************************************* Grid physics library, www.github.com/paboyle/Grid @@ -47,20 +47,36 @@ int main (int argc, char ** argv) std::vector mpi_layout = GridDefaultMpi(); std::vector mpi_split (mpi_layout.size(),1); - GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); - int nrhs = UGrid->RankCount() ; - ///////////////////////////////////////////// // Split into 1^4 mpi communicators ///////////////////////////////////////////// + + for(int i=0;i> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + int me; + for(int i=0;i result(nrhs,FGrid); FermionField tmp(FGrid); - for(int s=0;sThisRank(); - LatticeGaugeField s_Umu(SGrid); FermionField s_src(SFGrid); FermionField s_tmp(SFGrid); @@ -98,6 +144,36 @@ int main (int argc, char ** argv) /////////////////////////////////////////////////////////////// Grid_split (Umu,s_Umu); Grid_split (src,s_src); + std::cout << " split rank " < HermOp(Ddwf); MdagMLinearOperator HermOpCk(Dchk); - ConjugateGradient CG((1.0e-5/(me+1)),10000); + ConjugateGradient CG((1.0e-5),10000); s_res = zero; CG(HermOp,s_src,s_res); + std::cout << " s_res norm "< Date: Mon, 30 Oct 2017 01:14:11 +0000 Subject: [PATCH 071/145] No compile on comms == none fix --- lib/communicator/Communicator_none.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index a862d52a..26b330a7 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -38,8 +38,8 @@ void CartesianCommunicator::Init(int *argc, char *** arv) ShmInitGeneric(); } -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) - : CartesianCommunicator(processors) {} +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) + : CartesianCommunicator(processors) { srank=0;} CartesianCommunicator::CartesianCommunicator(const std::vector &processors) { From 360efd0088847727e2fabe034dc5f18a09430cff Mon Sep 17 00:00:00 2001 From: paboyle Date: Thu, 2 Nov 2017 22:05:31 +0000 Subject: [PATCH 072/145] Improved treatment of reverse asked for by chris. Truncate the basis. 
Power method renormalises --- .../iterative/ImplicitlyRestartedLanczos.h | 44 ++++++++++++------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index 089e7ff3..7a0760c9 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -181,8 +181,8 @@ enum IRLdiagonalisation { template class ImplicitlyRestartedLanczosHermOpTester : public ImplicitlyRestartedLanczosTester { public: - LinearFunction &_HermOpTest; - ImplicitlyRestartedLanczosHermOpTester(LinearFunction &HermOpTest) : _HermOpTest(HermOpTest) { }; + LinearFunction &_HermOp; + ImplicitlyRestartedLanczosHermOpTester(LinearFunction &HermOp) : _HermOp(HermOp) { }; int ReconstructEval(int j,RealD resid,Field &B, RealD &eval,RealD evalMaxApprox) { return TestConvergence(j,resid,B,eval,evalMaxApprox); @@ -192,7 +192,7 @@ template class ImplicitlyRestartedLanczosHermOpTester : public Imp Field v(B); RealD eval_poly = eval; // Apply operator - _HermOpTest(B,v); + _HermOp(B,v); RealD vnum = real(innerProduct(B,v)); // HermOp. RealD vden = norm2(B); @@ -233,8 +233,8 @@ class ImplicitlyRestartedLanczos { //////////////////////////////// // Embedded objects //////////////////////////////// + LinearFunction &_PolyOp; LinearFunction &_HermOp; - LinearFunction &_HermOpTest; ImplicitlyRestartedLanczosTester &_Tester; // Default tester provided (we need a ref to something in default case) ImplicitlyRestartedLanczosHermOpTester SimpleTester; @@ -246,16 +246,22 @@ public: ////////////////////////////////////////////////////////////////// // PAB: ////////////////////////////////////////////////////////////////// - // Too many options & knobs. Do we really need orth_period + // Too many options & knobs. + // Eliminate: + // orth_period + // betastp + // MinRestart + // + // Do we really need orth_period // What is the theoretical basis & guarantees of betastp ? // Nstop=Nk viable? // MinRestart avoidable with new convergence test? - // Could cut to HermOp, HermOpTest, Tester, Nk, Nm, resid, maxiter (+diagonalisation) - // HermOpTest could be eliminated if we dropped the Power method for max eval. + // Could cut to PolyOp, HermOp, Tester, Nk, Nm, resid, maxiter (+diagonalisation) + // HermOp could be eliminated if we dropped the Power method for max eval. 
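 // (For reference, the "Power method" mentioned here is the loop at the start of calc()
 //  below: normalise src_n, apply _HermOp, and form the Rayleigh quotient
 //  na = Re<src_n, H src_n> / <src_n, src_n>; the stabilised value is used as evalMaxApprox.)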
// -- also: The eval, eval2, eval2_copy stuff is still unnecessarily unclear ////////////////////////////////////////////////////////////////// - ImplicitlyRestartedLanczos(LinearFunction & HermOp, - LinearFunction & HermOpTest, + ImplicitlyRestartedLanczos(LinearFunction & PolyOp, + LinearFunction & HermOp, ImplicitlyRestartedLanczosTester & Tester, int _Nstop, // sought vecs int _Nk, // sought vecs @@ -265,14 +271,14 @@ public: RealD _betastp=0.0, // if beta(k) < betastp: converged int _MinRestart=1, int _orth_period = 1, IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : - SimpleTester(HermOpTest), _HermOp(HermOp), _HermOpTest(HermOpTest), _Tester(Tester), + SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(Tester), Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), eresid(_eresid), betastp(_betastp), MaxIter(_MaxIter) , MinRestart(_MinRestart), orth_period(_orth_period), diagonalisation(_diagonalisation) { }; - ImplicitlyRestartedLanczos(LinearFunction & HermOp, - LinearFunction & HermOpTest, + ImplicitlyRestartedLanczos(LinearFunction & PolyOp, + LinearFunction & HermOp, int _Nstop, // sought vecs int _Nk, // sought vecs int _Nm, // spare vecs @@ -281,7 +287,7 @@ public: RealD _betastp=0.0, // if beta(k) < betastp: converged int _MinRestart=1, int _orth_period = 1, IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : - SimpleTester(HermOpTest), _HermOp(HermOp), _HermOpTest(HermOpTest), _Tester(SimpleTester), + SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(SimpleTester), Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), eresid(_eresid), betastp(_betastp), MaxIter(_MaxIter) , MinRestart(_MinRestart), @@ -323,7 +329,7 @@ repeat →AVK =VKHK +fKe†K † Extend to an M = K + P step factorization AVM = VMHM + fMeM until convergence */ - void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=true) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=false) { GridBase *grid = src._grid; assert(grid == evec[0]._grid); @@ -355,7 +361,8 @@ until convergence auto tmp = src; const int _MAX_ITER_IRL_MEVAPP_ = 50; for (int i=0;i<_MAX_ITER_IRL_MEVAPP_;i++) { - _HermOpTest(src_n,tmp); + normalise(src_n); + _HermOp(src_n,tmp); RealD vnum = real(innerProduct(src_n,tmp)); // HermOp. RealD vden = norm2(src_n); RealD na = vnum/vden; @@ -536,7 +543,10 @@ until convergence std::cout << GridLogIRL << "Nconv ("<0) w -= lme[k-1] * evec[k-1]; From 69929f20bbea5bcd125d9d296fd395e04e0e4dd2 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 6 Nov 2017 23:45:00 +0000 Subject: [PATCH 073/145] Destructor fix. Split Grid and MPI3 will not yet work without more effort from me. 
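For context, the destructor pattern this patch introduces (and that the following patches refine) frees the duplicated communicators only while MPI is still live, since MPI_Comm_free after MPI_Finalize is illegal. A minimal stand-alone sketch of the guard, in plain MPI, with member names taken from the diff and everything else illustrative:

    #include <mpi.h>
    #include <vector>

    struct CommHolder {
      MPI_Comm communicator = MPI_COMM_NULL;     // duplicated at construction
      std::vector<MPI_Comm> communicator_halo;   // per-direction duplicates
      ~CommHolder() {
        int finalised = 0;
        MPI_Finalized(&finalised);
        // Only free while MPI has not been finalised (the form the later fixes settle on).
        if (!finalised && communicator != MPI_COMM_NULL) {
          for (auto &c : communicator_halo) MPI_Comm_free(&c);
          MPI_Comm_free(&communicator);
        }
      }
    };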
--- lib/communicator/Communicator_base.cc | 41 +++++++++++++++++++++++++-- lib/communicator/Communicator_base.h | 2 +- lib/communicator/Communicator_mpi.cc | 30 -------------------- lib/communicator/Communicator_mpi3.cc | 17 ++++++++++- lib/communicator/Communicator_mpit.cc | 8 +++++- 5 files changed, 62 insertions(+), 36 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index a72c75fe..531dd358 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -98,7 +98,39 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N) #if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes) +{ + std::vector row(_ndimension,1); + assert(dim>=0 && dim<_ndimension); + // Split the communicator + row[dim] = _processors[dim]; + + int me; + CartesianCommunicator Comm(row,*this,me); + Comm.AllToAll(in,out,words,bytes); +} +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) +{ + // MPI is a pain and uses "int" arguments + // 64*64*64*128*16 == 500Million elements of data. + // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. + // (Turns up on 32^3 x 64 Gparity too) + MPI_Datatype object; + int iwords; + int ibytes; + iwords = words; + ibytes = bytes; + assert(words == iwords); // safe to cast to int ? + assert(bytes == ibytes); // safe to cast to int ? + MPI_Type_contiguous(ibytes,MPI_BYTE,&object); + MPI_Type_commit(&object); + MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); + MPI_Type_free(&object); +} +#endif + +#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) { _ndimension = processors.size(); @@ -176,6 +208,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors, ////////////////////////////////////////////////////////////////////////////////////////////////////// InitFromMPICommunicator(processors,comm_split); } + ////////////////////////////////////////////////////////////////////////////////////////////////////// // Take an MPI_Comm and self assemble ////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -199,7 +232,7 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector &proc MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); if ( communicator_base != communicator_world ) { - std::cout << "Cartesian communicator created with a non-world communicator"< &proc int Size; MPI_Comm_size(communicator,&Size); -#ifdef GRID_COMMS_MPIT +#if defined(GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) communicator_halo.resize (2*_ndimension); for(int i=0;i<_ndimension*2;i++){ MPI_Comm_dup(communicator,&communicator_halo[i]); @@ -220,7 +253,9 @@ void CartesianCommunicator::InitFromMPICommunicator(const std::vector &proc assert(Size==_Nprocessors); } +#endif +#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) CartesianCommunicator::CartesianCommunicator(const std::vector &processors) { InitFromMPICommunicator(processors,communicator_world); @@ -229,10 +264,10 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) #endif #if !defined( GRID_COMMS_MPI3) - int CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; int 
CartesianCommunicator::RankCount(void) { return ProcessorCount();}; #endif + #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT) double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, int xmit_to_rank, diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 4374ac93..73ea6165 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -158,7 +158,7 @@ class CartesianCommunicator { virtual ~CartesianCommunicator(); private: -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) +#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) //////////////////////////////////////////////// // Private initialise from an MPI communicator // Can use after an MPI_Comm_split, but hidden from user so private diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 5593aa8b..f7b2a460 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -196,36 +196,6 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes) root, communicator); assert(ierr==0); -} -void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes) -{ - std::vector row(_ndimension,1); - assert(dim>=0 && dim<_ndimension); - - // Split the communicator - row[dim] = _processors[dim]; - - int me; - CartesianCommunicator Comm(row,*this,me); - Comm.AllToAll(in,out,words,bytes); -} -void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) -{ - // MPI is a pain and uses "int" arguments - // 64*64*64*128*16 == 500Million elements of data. - // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. - // (Turns up on 32^3 x 64 Gparity too) - MPI_Datatype object; - int iwords; - int ibytes; - iwords = words; - ibytes = bytes; - assert(words == iwords); // safe to cast to int ? - assert(bytes == ibytes); // safe to cast to int ? - MPI_Type_contiguous(ibytes,MPI_BYTE,&object); - MPI_Type_commit(&object); - MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); - MPI_Type_free(&object); } /////////////////////////////////////////////////////// // Should only be used prior to Grid Init finished. 
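As an aside on the AllToAll code hoisted into Communicator_base above: the MPI_Type_contiguous wrapper is there because MPI_Alltoall takes 32-bit counts, so passing raw byte counts overflows once a chunk exceeds 2^31, as the comment about the "Y2K bug" notes. A stand-alone sketch of the same pattern (plain MPI; only the MPI calls follow the patch, the function name is illustrative):

    #include <mpi.h>
    #include <cassert>
    #include <cstdint>

    // Exchange 'words' objects of 'bytes' bytes with each rank of 'comm'.
    // Packing each object into a contiguous datatype keeps the int count at
    // 'words' rather than words*bytes.
    void all_to_all_large(void *in, void *out, uint64_t words, uint64_t bytes, MPI_Comm comm)
    {
      int iwords = static_cast<int>(words);
      int ibytes = static_cast<int>(bytes);
      assert(uint64_t(iwords) == words && uint64_t(ibytes) == bytes); // must still fit in int
      MPI_Datatype object;
      MPI_Type_contiguous(ibytes, MPI_BYTE, &object);
      MPI_Type_commit(&object);
      MPI_Alltoall(in, iwords, object, out, iwords, object, comm);
      MPI_Type_free(&object);
    }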
diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 3cac726c..9e023fef 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -454,11 +454,15 @@ void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &c ////////////////////////////////// // Try to subdivide communicator ////////////////////////////////// -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) +/* + * Use default in MPI compile + */ +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) : CartesianCommunicator(processors) { std::cout << "Attempts to split MPI3 communicators will fail until implemented" < &processors) { int ierr; @@ -596,6 +600,17 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } }; +CartesianCommunicator::~CartesianCommunicator() +{ + int MPI_is_finalised; + MPI_Finalized(&MPI_is_finalised); + if (communicator && MPI_is_finalised) { + MPI_Comm_free(&communicator); + for(int i=0;i< communicator_halo.size();i++){ + MPI_Comm_free(&communicator_halo[i]); + } + } +} void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index 56f96c20..31f786ac 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -55,8 +55,14 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { CartesianCommunicator::~CartesianCommunicator() { - if (communicator && !MPI::Is_finalized()) + int MPI_is_finalised; + MPI_Finalized(&MPI_is_finalised); + if (communicator && MPI_is_finalised) { MPI_Comm_free(&communicator); + for(int i=0;i< communicator_halo.size();i++){ + MPI_Comm_free(&communicator_halo[i]); + } + } } From c519aab19dcfb559ab21585d9f9221b1fc193a60 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Tue, 7 Nov 2017 13:55:37 +0000 Subject: [PATCH 074/145] Fixing the MPI memory leak in the communicators --- lib/communicator/Communicator_mpi.cc | 2 +- lib/communicator/Communicator_mpi3.cc | 8 ++++++++ lib/communicator/Communicator_mpit.cc | 5 +++-- lib/communicator/Communicator_shmem.cc | 2 ++ 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc index 5593aa8b..1c1ae6c5 100644 --- a/lib/communicator/Communicator_mpi.cc +++ b/lib/communicator/Communicator_mpi.cc @@ -57,7 +57,7 @@ CartesianCommunicator::~CartesianCommunicator() { int MPI_is_finalised; MPI_Finalized(&MPI_is_finalised); - if (communicator && MPI_is_finalised) + if (communicator && !MPI_is_finalised) MPI_Comm_free(&communicator); } diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index 3cac726c..52f65c34 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -596,6 +596,14 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) } } }; +CartesianCommunicator::~CartesianCommunicator() +{ + int MPI_is_finalised; + MPI_Finalized(&MPI_is_finalised); + if (communicator && !MPI_is_finalised) + MPI_Comm_free(&communicator); +} + void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git 
a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index 56f96c20..1c24433b 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -55,11 +55,12 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { CartesianCommunicator::~CartesianCommunicator() { - if (communicator && !MPI::Is_finalized()) + int MPI_is_finalised; + MPI_Finalized(&MPI_is_finalised); + if (communicator && !MPI_is_finalised) MPI_Comm_free(&communicator); } - void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc index ed49285d..03e3173e 100644 --- a/lib/communicator/Communicator_shmem.cc +++ b/lib/communicator/Communicator_shmem.cc @@ -75,6 +75,8 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { ShmInitGeneric(); } +CartesianCommunicator::~CartesianCommunicator(){} + CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) : CartesianCommunicator(processors) { From 9b8d1cc3da4769f250665cc8e05fb305c794bc5d Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Tue, 7 Nov 2017 14:48:45 +0000 Subject: [PATCH 075/145] Staggered Schur decomposed matrix norm changed to not be the Schur anymore :( Carleton wanted this for multimass / multishift --- lib/algorithms/LinearOperator.h | 12 ++++++++++++ lib/algorithms/iterative/SchurRedBlack.h | 6 +++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 9b0e4942..0fa039c8 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -317,11 +317,23 @@ namespace Grid { } virtual RealD Mpc (const Field &in, Field &out) { Field tmp(in._grid); + Field tmp2(in._grid); + + _Mat.Mooee(in,out); + _Mat.Mooee(out,tmp); + + _Mat.Meooe(in,out); + _Mat.Meooe(out,tmp2); + + return axpy_norm(out,-1.0,tmp2,tmp); +#if 0 + //... much prefer conventional Schur norm _Mat.Meooe(in,tmp); _Mat.MooeeInv(tmp,out); _Mat.Meooe(out,tmp); _Mat.Mooee(in,out); return axpy_norm(out,-1.0,tmp,out); +#endif } virtual RealD MpcDag (const Field &in, Field &out){ return Mpc(in,out); diff --git a/lib/algorithms/iterative/SchurRedBlack.h b/lib/algorithms/iterative/SchurRedBlack.h index a0fd86a6..b9767aa8 100644 --- a/lib/algorithms/iterative/SchurRedBlack.h +++ b/lib/algorithms/iterative/SchurRedBlack.h @@ -90,7 +90,7 @@ namespace Grid { // Take a matrix and form a Red Black solver calling a Herm solver // Use of RB info prevents making SchurRedBlackSolve conform to standard interface /////////////////////////////////////////////////////////////////////////////////////////////////////// - + // Now make the norm reflect extra factor of Mee template class SchurRedBlackStaggeredSolve { private: OperatorFunction & _HermitianRBSolver; @@ -136,8 +136,8 @@ namespace Grid { _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); - src_o = tmp; assert(src_o.checkerboard ==Odd); - // _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source + //src_o = tmp; assert(src_o.checkerboard ==Odd); + _Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source from dumb choice of matrix norm. 
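      // (Why the extra factor is consistent: for staggered fermions Mooee = m is diagonal, so the
      //  usual Schur complement  Moo - Moe Mee^{-1} Meo  can be multiplied through by Mee to give
      //  the inverse-free operator  Mpc = m*m - Meooe*Meooe  implemented in SchurStaggeredOperator
      //  above; scaling the source by the same Mee factor here leaves the odd-site solution unchanged.)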
////////////////////////////////////////////////////////////// // Call the red-black solver From 1860b1698c4b204a332e2fad4dea86c97ffb0abc Mon Sep 17 00:00:00 2001 From: Azusa Yamaguchi Date: Wed, 8 Nov 2017 09:03:01 +0000 Subject: [PATCH 076/145] Fixed the bag on MPI_T at Cam --- lib/communicator/Communicator_mpit.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc index b2fb71a6..bceea0d8 100644 --- a/lib/communicator/Communicator_mpit.cc +++ b/lib/communicator/Communicator_mpit.cc @@ -57,7 +57,7 @@ CartesianCommunicator::~CartesianCommunicator() { int MPI_is_finalised; MPI_Finalized(&MPI_is_finalised); - if (communicator && !MPI_is_finalised) + if (communicator && !MPI_is_finalised){ MPI_Comm_free(&communicator); for(int i=0;i< communicator_halo.size();i++){ MPI_Comm_free(&communicator_halo[i]); @@ -246,7 +246,7 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector Date: Thu, 9 Nov 2017 19:46:57 +0000 Subject: [PATCH 077/145] Declaring virtual functions as pure virtual functions. --- lib/algorithms/iterative/ImplicitlyRestartedLanczos.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index 7a0760c9..7b85c095 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -168,8 +168,8 @@ void basisDeflate(const std::vector &_v,const std::vector& eval,co template class ImplicitlyRestartedLanczosTester { public: - virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox); - virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox); + virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0; + virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0; }; enum IRLdiagonalisation { From 1f1d77b01a308a7716d429b09bb0ffa01cd7f835 Mon Sep 17 00:00:00 2001 From: Guido Cossu Date: Tue, 14 Nov 2017 10:01:48 +0000 Subject: [PATCH 078/145] Performance metrics for the Scalar Action force term --- lib/qcd/action/scalar/ScalarImpl.h | 4 ++ .../action/scalar/ScalarInteractionAction.h | 62 ++++++++++++++----- lib/qcd/hmc/GenericHMCrunner.h | 2 +- 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/lib/qcd/action/scalar/ScalarImpl.h b/lib/qcd/action/scalar/ScalarImpl.h index 650f4d17..55f5049d 100644 --- a/lib/qcd/action/scalar/ScalarImpl.h +++ b/lib/qcd/action/scalar/ScalarImpl.h @@ -168,7 +168,11 @@ class ScalarImplTypes { static inline void update_field(Field &P, Field &U, double ep) { #ifndef USE_FFT_ACCELERATION + double t0=usecond(); U += P * ep; + double t1=usecond(); + double total_time = (t1-t0)/1e6; + std::cout << GridLogIntegrator << "Total time for updating field (s) : " << total_time << std::endl; #else // FFT transform P(x) -> P(p) // divide by (M^2+p^2) M external parameter (how to pass?) 
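The force term in ScalarInteractionAction.h below gets the same instrumentation, plus a flop-rate estimate; the reporting reduces to the following arithmetic (a sketch assuming a usecond()-style microsecond timer, with the flop count supplied by whoever times the kernel):

    #include <iostream>

    // Turn two microsecond timestamps and a flop count into a Gflop/s figure,
    // mirroring the GridLogIntegrator output added in this patch.
    double report_gflops(double t0_usec, double t1_usec, double flops, const char *tag)
    {
      double seconds = (t1_usec - t0_usec) / 1.0e6;
      double gflops  = flops / (seconds * 1.0e9);
      std::cout << tag << ": " << seconds << " s, " << gflops << " Gflop/s" << std::endl;
      return gflops;
    }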
diff --git a/lib/qcd/action/scalar/ScalarInteractionAction.h b/lib/qcd/action/scalar/ScalarInteractionAction.h index 3848751d..8738b647 100644 --- a/lib/qcd/action/scalar/ScalarInteractionAction.h +++ b/lib/qcd/action/scalar/ScalarInteractionAction.h @@ -44,18 +44,18 @@ public: INHERIT_FIELD_TYPES(Impl); private: - RealD mass_square; - RealD lambda; - RealD g; - const unsigned int N = Impl::Group::Dimension; + RealD mass_square; + RealD lambda; + RealD g; + const unsigned int N = Impl::Group::Dimension; typedef typename Field::vector_object vobj; typedef CartesianStencil Stencil; SimpleCompressor compressor; int npoint = 2 * Ndim; - std::vector directions; // = {0,1,2,3,0,1,2,3}; // forcing 4 dimensions - std::vector displacements; // = {1,1,1,1, -1,-1,-1,-1}; + std::vector directions; // + std::vector displacements; // public: ScalarInteractionAction(RealD ms, RealD l, RealD gval) : mass_square(ms), lambda(l), g(gval), displacements(2 * Ndim, 0), directions(2 * Ndim, 0) @@ -124,39 +124,55 @@ public: } // NB the trace in the algebra is normalised to 1/2 // minus sign coming from the antihermitian fields - return -(TensorRemove(sum(trace(action)))).real()*N/g; + return -(TensorRemove(sum(trace(action)))).real() * N / g; }; virtual void deriv(const Field &p, Field &force) { + double t0 = usecond(); assert(p._grid->Nd() == Ndim); force = (2. * Ndim + mass_square) * p - 2. * lambda * p * p * p; + double interm_t = usecond(); + // move this outside static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); - phiStencil.HaloExchange(p, compressor); + phiStencil.HaloExchange(p, compressor); + double halo_t = usecond(); + int chunk = 128; //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1); + + // inverting the order of the loops slows down the code(! g++ 7) + // cannot try to reduce the number of force writes by factor npoint... + // use cache blocking for (int point = 0; point < npoint; point++) { - parallel_for(int i = 0; i < p._grid->oSites(); i++) - { - const vobj *temp; - vobj temp2; + +#pragma omp parallel +{ int permute_type; StencilEntry *SE; + const vobj *temp; + +#pragma omp for schedule(static, chunk) + for (int i = 0; i < p._grid->oSites(); i++) + { SE = phiStencil.GetEntry(permute_type, point, i); + // prefetch next p? if (SE->_is_local) { temp = &p._odata[SE->_offset]; + if (SE->_permute) { + vobj temp2; permute(temp2, *temp, permute_type); force._odata[i] -= temp2; } else { - force._odata[i] -= *temp; + force._odata[i] -= *temp; // slow part. 
Dominated by this read/write (BW) } } else @@ -164,9 +180,27 @@ public: force._odata[i] -= phiStencil.CommBuf()[SE->_offset]; } } + } - force *= N/g; } + force *= N / g; + + double t1 = usecond(); + double total_time = (t1 - t0) / 1e6; + double interm_time = (interm_t - t0) / 1e6; + double halo_time = (halo_t - interm_t) / 1e6; + double stencil_time = (t1 - halo_t) / 1e6; + std::cout << GridLogIntegrator << "Total time for force computation (s) : " << total_time << std::endl; + std::cout << GridLogIntegrator << "Intermediate time for force computation (s): " << interm_time << std::endl; + std::cout << GridLogIntegrator << "Halo time in force computation (s) : " << halo_time << std::endl; + std::cout << GridLogIntegrator << "Stencil time in force computation (s) : " << stencil_time << std::endl; + double flops = p._grid->gSites() * (14 * N * N * N + 18 * N * N + 2); + double flops_no_stencil = p._grid->gSites() * (14 * N * N * N + 6 * N * N + 2); + double Gflops = flops / (total_time * 1e9); + double Gflops_no_stencil = flops_no_stencil / (interm_time * 1e9); + std::cout << GridLogIntegrator << "Flops: " << flops << " - Gflop/s : " << Gflops << std::endl; + std::cout << GridLogIntegrator << "Flops NS: " << flops_no_stencil << " - Gflop/s NS: " << Gflops_no_stencil << std::endl; +} }; } // namespace Grid diff --git a/lib/qcd/hmc/GenericHMCrunner.h b/lib/qcd/hmc/GenericHMCrunner.h index 4f6c1af0..26fec3d5 100644 --- a/lib/qcd/hmc/GenericHMCrunner.h +++ b/lib/qcd/hmc/GenericHMCrunner.h @@ -211,7 +211,7 @@ typedef HMCWrapperTemplate ScalarAdjGenericHMCRunner; template -using ScalarNxNAdjGenericHMCRunner = HMCWrapperTemplate < ScalarNxNAdjImplR, MinimumNorm2, ScalarNxNMatrixFields >; +using ScalarNxNAdjGenericHMCRunner = HMCWrapperTemplate < ScalarNxNAdjImplR, ForceGradient, ScalarNxNMatrixFields >; } // namespace QCD } // namespace Grid From 94b8fb56862289c0663453c0e2d82fa6da310f38 Mon Sep 17 00:00:00 2001 From: paboyle Date: Sun, 19 Nov 2017 01:39:04 +0000 Subject: [PATCH 079/145] Debug in progress --- lib/communicator/Communicator_base.cc | 47 ++++++++++++------- lib/communicator/Communicator_mpi3.cc | 2 +- lib/lattice/Lattice_transfer.h | 67 +++++++++++++++++++++++++++ tests/solver/Test_dwf_mrhs_cg_mpi.cc | 6 +-- 4 files changed, 102 insertions(+), 20 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index 531dd358..223b07fd 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -134,8 +134,18 @@ void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) { _ndimension = processors.size(); - assert(_ndimension = parent._ndimension); - + + int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension); + std::vector parent_processor_coor(_ndimension,0); + std::vector parent_processors (_ndimension,1); + + // Can make 5d grid from 4d etc... 
+ int pad = _ndimension-parent_ndimension; + for(int d=0;d &processors, std::vector ssize(_ndimension); // coor of split within parent for(int d=0;d<_ndimension;d++){ - ccoor[d] = parent._processor_coor[d] % processors[d]; - scoor[d] = parent._processor_coor[d] / processors[d]; - ssize[d] = parent._processors[d] / processors[d]; + ccoor[d] = parent_processor_coor[d] % processors[d]; + scoor[d] = parent_processor_coor[d] / processors[d]; + ssize[d] = parent_processors[d] / processors[d]; } int crank; // rank within subcomm ; srank is rank of subcomm within blocks of subcomms // Mpi uses the reverse Lexico convention to us @@ -166,38 +176,34 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors, MPI_Comm comm_split; if ( Nchild > 1 ) { - /* std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors, // Set up from the new split communicator ////////////////////////////////////////////////////////////////////////////////////////////////////// InitFromMPICommunicator(processors,comm_split); + + std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl; + for(int d=0;d &out, const Lattice &in){ // NB: Easiest to programme if keep in lex order. // ///////////////////////////////////////////////////////// +/* +[0,0,0,0,0] S {V<4>{V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)}}} +[0,0,0,0,1] S {V<4>{V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)}}} +[0,0,0,0,2] S {V<4>{V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)}}} +[0,0,0,0,3] S {V<4>{V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)}}} +[0,0,0,0,4] S {V<4>{V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)}}} +[0,0,0,0,5] S {V<4>{V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)}}} +[0,0,0,0,6] S {V<4>{V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)}}} +[0,0,0,0,7] S {V<4>{V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)}}} +[0,0,0,0,8] S {V<4>{V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)}}} +[0,0,0,0,9] S {V<4>{V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)}}} +[0,0,0,0,10] S {V<4>{V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)}}} +[0,0,0,0,11] S {V<4>{V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)}}} +[0,0,0,0,12] S {V<4>{V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)}}} +[0,0,0,0,13] S {V<4>{V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)}}} +[0,0,0,0,14] S {V<4>{V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)}}} +[0,0,0,0,15] S {V<4>{V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)}}} + + +Process decomp +[A(0 1) A(2 3) B(0 1) B(2 3)] [ A(4 5) A(6 7) B(4 5) B(6 7)] [ A(8 9) A(10 11) B(8 9) B(10 11)] [A(12 13) A(14 15) B(12 13) B(14 15)] + +A2A(Full) + -- divides M*fL into fP segments of size M*fL/fP = fL/sP + -- total is fP * fL/sP = 
M * fL + A(0 1) A(4 5) A(8 9) A(12 13) + A(2 3) A(6 7) A(10 11) A(14 15) + B(0 1) B(4 5) B(8 9) B(12 13) + B(2 3) B(6 7) B(10 11) B(14 15) + + +A2A(Split) + A(0 1) A(4 5) A(2 3) A(6 7) + A(8 9) A(12 13) A(10 11) A(14 15) + B(0 1) B(2 3) B(4 5) B(6 7) + B(8 9) B(10 11) B(12 13) B(14 15) + +-------------------- +-- General case +-------------------- +G global lattice +fP - procs +sP - Procs in split grid +M - subdivisions/vectors - M*sP = fP ** constraint 1 +fL = G/fP per node (full) +sL = G/sP per node split + +[ G * M ] total = G*fP/sP. +[ Subdivide fL*M by fP => fL *M / fP = fL/fP *fP/sP = fL/sP ] +-------------------- +-- 1st A2A chunk is fL*M/fP = G/fP *fP/sP /fP = fL/sP +-- Let cL = fL/sP chunk. ( Divide into fP/sP = M chunks ) + +-- node 0 1st cL of node 0,1,... fP-1 ; vector 0 +-- node 1 2nd cL of node 0,1,... fP-1 +-- node 2 3nd cL of node 0,1,... fP-1 +-- node 3 4th cL of node 0,1,... fP-1 +... when node > sP get vector 1 etc... + +-- 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M) +-- node 0 1st cL of node 0M..(1M-1); 2nd cL of node 0M..(1M-1)).. +-- node 1 1st cL of node 1M..(2M-1); 2nd cL of node 1M..(2M-1).. +-- node 2 1st cL of node 2M..(3M-1); 2nd cL of node 2M..(3M-1).. +-- node 3 1st cL of node 3M..(3M-1); 2nd cL of node 2M..(3M-1).. +-- +-- Insert correctly + */ template void Grid_split(std::vector > & full,Lattice & split) { diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index f640edff..d380f91e 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -95,7 +95,7 @@ int main (int argc, char ** argv) FermionField tmp(FGrid); for(int s=0;s HermOp(Ddwf); MdagMLinearOperator HermOpCk(Dchk); - ConjugateGradient CG((1.0e-5),10000); + ConjugateGradient CG((1.0e-2),10000); s_res = zero; CG(HermOp,s_src,s_res); From f403ab01336b6ec2cdc4260f698a0f5001bb0a3d Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 22 Nov 2017 17:13:09 +0000 Subject: [PATCH 080/145] gitignore update --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d743ee06..399f2f6b 100644 --- a/.gitignore +++ b/.gitignore @@ -93,6 +93,7 @@ build*/* *.xcodeproj/* build.sh .vscode +*.code-workspace # Eigen source # ################ @@ -122,4 +123,3 @@ make-bin-BUCK.sh ##################### lib/qcd/spin/gamma-gen/*.h lib/qcd/spin/gamma-gen/*.cc - From a3fe874a5ba649fecec0a273e4f5a0dd52995a03 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 22 Nov 2017 23:27:19 +0000 Subject: [PATCH 081/145] Hadrons: everything is broken, repairing while implementing the new memory model --- extras/Hadrons/Application.cc | 14 +++ extras/Hadrons/Application.hpp | 2 + extras/Hadrons/Environment.cc | 106 ++++-------------- extras/Hadrons/Environment.hpp | 118 ++++++-------------- extras/Hadrons/Module.hpp | 6 + extras/Hadrons/Modules.hpp | 52 ++++----- extras/Hadrons/Modules/MAction/DWF.hpp | 25 ++--- extras/Hadrons/Modules/MAction/Wilson.hpp | 30 ++--- extras/Hadrons/Modules/MGauge/Unit.cc | 4 +- extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 27 ++--- extras/Hadrons/modules.inc | 74 ++++++------ 11 files changed, 174 insertions(+), 284 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 90ebcfd7..a94b617c 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -316,3 +316,17 @@ void Application::configLoop(void) LOG(Message) << BIG_SEP << " End of measurement " << BIG_SEP << std::endl; env().freeAll(); } + 
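// (At this stage memoryProfile() is a stub: it builds the module graph, topo-sorts it and
//  temporarily silences the message logger; the per-object accounting itself lives in
//  Environment::createObject, which routes allocations through MemoryProfiler and records
//  totalAllocated as the object size.)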
+// memory profile ////////////////////////////////////////////////////////////// +void Application::memoryProfile(void) +{ + auto graph = env().makeModuleGraph(); + auto program = graph.topoSort(); + bool msg; + + msg = HadronsLogMessage.isActive(); + HadronsLogMessage.Active(false); + + HadronsLogMessage.Active(msg); +} + diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp index fce9b6eb..8b11b0c7 100644 --- a/extras/Hadrons/Application.hpp +++ b/extras/Hadrons/Application.hpp @@ -101,6 +101,8 @@ public: private: // environment shortcut Environment & env(void) const; + // memory profile + void memoryProfile(void); private: long unsigned int locVol_; std::string parameterFileName_{""}; diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index 0e7a4326..eb0a6f70 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -67,6 +67,16 @@ bool Environment::isDryRun(void) const return dryRun_; } +void Environment::memoryProfile(const bool doMemoryProfile) +{ + memoryProfile_ = doMemoryProfile; +} + +bool Environment::doMemoryProfile(void) const +{ + return memoryProfile_; +} + // trajectory number /////////////////////////////////////////////////////////// void Environment::setTrajectory(const unsigned int traj) { @@ -349,10 +359,10 @@ Environment::executeProgram(const std::vector &p) auto it = std::find_if(p.rbegin(), p.rend(), pred); if (it != p.rend()) { - freeProg[p.rend() - it - 1].insert(i); + freeProg[std::distance(p.rend(), it) - 1].insert(i); } } - + // program execution for (unsigned int i = 0; i < p.size(); ++i) { @@ -448,6 +458,7 @@ void Environment::addObject(const std::string name, const int moduleAddress) info.name = name; info.module = moduleAddress; + info.data = nullptr; object_.push_back(std::move(info)); objectAddress_[name] = static_cast(object_.size() - 1); } @@ -457,39 +468,6 @@ void Environment::addObject(const std::string name, const int moduleAddress) } } -void Environment::registerObject(const unsigned int address, - const unsigned int size, const unsigned int Ls) -{ - if (!hasRegisteredObject(address)) - { - if (hasObject(address)) - { - object_[address].size = size; - object_[address].Ls = Ls; - object_[address].isRegistered = true; - } - else - { - HADRON_ERROR("no object with address " + std::to_string(address)); - } - } - else - { - HADRON_ERROR("object with address " + std::to_string(address) - + " already registered"); - } -} - -void Environment::registerObject(const std::string name, - const unsigned int size, const unsigned int Ls) -{ - if (!hasObject(name)) - { - addObject(name); - } - registerObject(getObjectAddress(name), size, Ls); -} - unsigned int Environment::getObjectAddress(const std::string name) const { if (hasObject(name)) @@ -516,7 +494,7 @@ std::string Environment::getObjectName(const unsigned int address) const std::string Environment::getObjectType(const unsigned int address) const { - if (hasRegisteredObject(address)) + if (hasObject(address)) { if (object_[address].type) { @@ -527,11 +505,6 @@ std::string Environment::getObjectType(const unsigned int address) const return ""; } } - else if (hasObject(address)) - { - HADRON_ERROR("object with address " + std::to_string(address) - + " exists but is not registered"); - } else { HADRON_ERROR("no object with address " + std::to_string(address)); @@ -545,15 +518,10 @@ std::string Environment::getObjectType(const std::string name) const Environment::Size Environment::getObjectSize(const unsigned int address) const { - if 
(hasRegisteredObject(address)) + if (hasObject(address)) { return object_[address].size; } - else if (hasObject(address)) - { - HADRON_ERROR("object with address " + std::to_string(address) - + " exists but is not registered"); - } else { HADRON_ERROR("no object with address " + std::to_string(address)); @@ -584,15 +552,10 @@ unsigned int Environment::getObjectModule(const std::string name) const unsigned int Environment::getObjectLs(const unsigned int address) const { - if (hasRegisteredObject(address)) + if (hasObject(address)) { return object_[address].Ls; } - else if (hasObject(address)) - { - HADRON_ERROR("object with address " + std::to_string(address) - + " exists but is not registered"); - } else { HADRON_ERROR("no object with address " + std::to_string(address)); @@ -616,30 +579,6 @@ bool Environment::hasObject(const std::string name) const return ((it != objectAddress_.end()) and hasObject(it->second)); } -bool Environment::hasRegisteredObject(const unsigned int address) const -{ - if (hasObject(address)) - { - return object_[address].isRegistered; - } - else - { - return false; - } -} - -bool Environment::hasRegisteredObject(const std::string name) const -{ - if (hasObject(name)) - { - return hasRegisteredObject(getObjectAddress(name)); - } - else - { - return false; - } -} - bool Environment::hasCreatedObject(const unsigned int address) const { if (hasObject(address)) @@ -680,10 +619,7 @@ Environment::Size Environment::getTotalSize(void) const for (auto &o: object_) { - if (o.isRegistered) - { - size += o.size; - } + size += o.size; } return size; @@ -738,7 +674,7 @@ bool Environment::freeObject(const unsigned int address) { if (!hasOwners(address)) { - if (!isDryRun() and object_[address].isRegistered) + if (!isDryRun()) { LOG(Message) << "Destroying object '" << object_[address].name << "'" << std::endl; @@ -747,10 +683,8 @@ bool Environment::freeObject(const unsigned int address) { object_[p].owners.erase(address); } - object_[address].size = 0; - object_[address].Ls = 0; - object_[address].isRegistered = false; - object_[address].type = nullptr; + object_[address].size = 0; + object_[address].type = nullptr; object_[address].owners.clear(); object_[address].properties.clear(); object_[address].data.reset(nullptr); diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 13264bd5..b426fb27 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -83,12 +83,12 @@ private: std::string name; ModPt data{nullptr}; std::vector input; + size_t maxAllocated; }; struct ObjInfo { Size size{0}; unsigned int Ls{0}; - bool isRegistered{false}; const std::type_info *type{nullptr}; std::string name; int module{-1}; @@ -99,6 +99,8 @@ public: // dry run void dryRun(const bool isDry); bool isDryRun(void) const; + void memoryProfile(const bool doMemoryProfile); + bool doMemoryProfile(void) const; // trajectory number void setTrajectory(const unsigned int traj); unsigned int getTrajectory(void) const; @@ -143,32 +145,17 @@ public: // general memory management void addObject(const std::string name, const int moduleAddress = -1); - void registerObject(const unsigned int address, - const unsigned int size, - const unsigned int Ls = 1); - void registerObject(const std::string name, - const unsigned int size, - const unsigned int Ls = 1); + template + void createObject(const std::string name, + const unsigned int Ls, + Ts ... 
args); template - unsigned int lattice4dSize(void) const; - template - void registerLattice(const unsigned int address, - const unsigned int Ls = 1); - template - void registerLattice(const std::string name, - const unsigned int Ls = 1); - template - void setObject(const unsigned int address, T *object); - template - void setObject(const std::string name, T *object); + void createLattice(const std::string name, + const unsigned int Ls = 1); template T * getObject(const unsigned int address) const; template T * getObject(const std::string name) const; - template - T * createLattice(const unsigned int address); - template - T * createLattice(const std::string name); unsigned int getObjectAddress(const std::string name) const; std::string getObjectName(const unsigned int address) const; std::string getObjectType(const unsigned int address) const; @@ -181,8 +168,6 @@ public: unsigned int getObjectLs(const std::string name) const; bool hasObject(const unsigned int address) const; bool hasObject(const std::string name) const; - bool hasRegisteredObject(const unsigned int address) const; - bool hasRegisteredObject(const std::string name) const; bool hasCreatedObject(const unsigned int address) const; bool hasCreatedObject(const std::string name) const; bool isObject5d(const unsigned int address) const; @@ -204,7 +189,7 @@ public: void printContent(void); private: // general - bool dryRun_{false}; + bool dryRun_{false}, memoryProfile_{false}; unsigned int traj_, locVol_; // grids std::vector dim_; @@ -296,56 +281,45 @@ M * Environment::getModule(const std::string name) const return getModule(getModuleAddress(name)); } -template -unsigned int Environment::lattice4dSize(void) const +template +void Environment::createObject(const std::string name, const unsigned int Ls, + Ts ... 
args) { - return sizeof(typename T::vector_object)/getGrid()->Nsimd(); -} - -template -void Environment::registerLattice(const unsigned int address, - const unsigned int Ls) -{ - createGrid(Ls); - registerObject(address, Ls*lattice4dSize(), Ls); -} - -template -void Environment::registerLattice(const std::string name, const unsigned int Ls) -{ - createGrid(Ls); - registerObject(name, Ls*lattice4dSize(), Ls); -} - -template -void Environment::setObject(const unsigned int address, T *object) -{ - if (hasRegisteredObject(address)) + if (!hasObject(name)) { - object_[address].data.reset(new Holder(object)); - object_[address].type = &typeid(T); + addObject(name); } - else if (hasObject(address)) + + unsigned int address = getObjectAddress(name); + + if (!object_[address].data) { - HADRON_ERROR("object with address " + std::to_string(address) + - " exists but is not registered"); + MemoryStats memStats; + + MemoryProfiler::stats = &memStats; + object_[address].Ls = Ls; + object_[address].data.reset(new Holder(new T(args...))); + object_[address].size = memStats.totalAllocated; + object_[address].type = &typeid(T); } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + HADRON_ERROR("object '" + name + "' already allocated"); } } template -void Environment::setObject(const std::string name, T *object) +void Environment::createLattice(const std::string name, const unsigned int Ls) { - setObject(getObjectAddress(name), object); + GridCartesian *g = getGrid(Ls); + + createObject(name, Ls, g); } template T * Environment::getObject(const unsigned int address) const { - if (hasRegisteredObject(address)) + if (hasObject(address)) { if (auto h = dynamic_cast *>(object_[address].data.get())) { @@ -358,11 +332,6 @@ T * Environment::getObject(const unsigned int address) const "' (has type '" + getObjectType(address) + "')"); } } - else if (hasObject(address)) - { - HADRON_ERROR("object with address " + std::to_string(address) + - " exists but is not registered"); - } else { HADRON_ERROR("no object with address " + std::to_string(address)); @@ -375,26 +344,10 @@ T * Environment::getObject(const std::string name) const return getObject(getObjectAddress(name)); } -template -T * Environment::createLattice(const unsigned int address) -{ - GridCartesian *g = getGrid(getObjectLs(address)); - - setObject(address, new T(g)); - - return getObject(address); -} - -template -T * Environment::createLattice(const std::string name) -{ - return createLattice(getObjectAddress(name)); -} - template bool Environment::isObjectOfType(const unsigned int address) const { - if (hasRegisteredObject(address)) + if (hasObject(address)) { if (auto h = dynamic_cast *>(object_[address].data.get())) { @@ -405,11 +358,6 @@ bool Environment::isObjectOfType(const unsigned int address) const return false; } } - else if (hasObject(address)) - { - HADRON_ERROR("object with address " + std::to_string(address) + - " exists but is not registered"); - } else { HADRON_ERROR("no object with address " + std::to_string(address)); diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 071e254a..5500bf36 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -88,6 +88,12 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define ARG(...) 
__VA_ARGS__ +#define mCreateObj(type, name, Ls, ...)\ +env().template createObject(name, Ls, __VA_ARGS__) + +#define mGetObj(type, name)\ +*env().template getObject(name) + /****************************************************************************** * Module class * ******************************************************************************/ diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index e1f06f32..08678671 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -30,31 +30,31 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include +// #include #include -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include #include -#include -#include -#include -#include -#include -#include -#include +// #include +// #include +// #include +// #include +// #include +// #include +// #include diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 78e0916c..a2ed063b 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -102,37 +102,30 @@ std::vector TDWF::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// template void TDWF::setup(void) -{ - unsigned int size; - - size = 2*env().template lattice4dSize(); - env().registerObject(getName(), size, par().Ls); -} - -// execution /////////////////////////////////////////////////////////////////// -template -void TDWF::execute(void) { LOG(Message) << "Setting up domain wall fermion matrix with m= " << par().mass << ", M5= " << par().M5 << " and Ls= " << par().Ls << " using gauge field '" << par().gauge << "'" << std::endl; - LOG(Message) << "Fermion boundary conditions: " << par().boundary + LOG(Message) << "Fermion boundary conditions: " << par().boundary << std::endl; env().createGrid(par().Ls); - auto &U = *env().template getObject(par().gauge); + auto &U = mGetObj(LatticeGaugeField, par().gauge); auto &g4 = *env().getGrid(); auto &grb4 = *env().getRbGrid(); auto &g5 = *env().getGrid(par().Ls); auto &grb5 = *env().getRbGrid(par().Ls); std::vector boundary = strToVec(par().boundary); typename DomainWallFermion::ImplParams implParams(boundary); - FMat *fMatPt = new DomainWallFermion(U, g5, grb5, g4, grb4, - par().mass, par().M5, - implParams); - env().setObject(getName(), fMatPt); + mCreateObj(DomainWallFermion, getName(), par().Ls, + U, g5, grb5, g4, grb4, par().mass, par().M5, implParams); } +// execution /////////////////////////////////////////////////////////////////// +template +void TDWF::execute(void) +{} + END_MODULE_NAMESPACE END_HADRONS_NAMESPACE diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index aab54245..bc892daf 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -101,29 +101,23 @@ std::vector TWilson::getOutput(void) template void TWilson::setup(void) { - unsigned int size; - - size = 2*env().template lattice4dSize(); - env().registerObject(getName(), size); + LOG(Message) << "Setting up TWilson fermion matrix with m= " << par().mass + << " using gauge 
field '" << par().gauge << "'" << std::endl; + LOG(Message) << "Fermion boundary conditions: " << par().boundary + << std::endl; + auto &U = mGetObj(LatticeGaugeField, par().gauge); + auto &grid = *env().getGrid(); + auto &gridRb = *env().getRbGrid(); + std::vector boundary = strToVec(par().boundary); + typename WilsonFermion::ImplParams implParams(boundary); + mCreateObj(WilsonFermion, getName(), 1, U, grid, gridRb, par().mass, + implParams); } // execution /////////////////////////////////////////////////////////////////// template void TWilson::execute() -{ - LOG(Message) << "Setting up TWilson fermion matrix with m= " << par().mass - << " using gauge field '" << par().gauge << "'" << std::endl; - LOG(Message) << "Fermion boundary conditions: " << par().boundary - << std::endl; - auto &U = *env().template getObject(par().gauge); - auto &grid = *env().getGrid(); - auto &gridRb = *env().getRbGrid(); - std::vector boundary = strToVec(par().boundary); - typename WilsonFermion::ImplParams implParams(boundary); - FMat *fMatPt = new WilsonFermion(U, grid, gridRb, par().mass, - implParams); - env().setObject(getName(), fMatPt); -} +{} END_MODULE_NAMESPACE diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index 18d75c59..b259b7d5 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -57,13 +57,13 @@ std::vector TUnit::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TUnit::setup(void) { - env().registerLattice(getName()); + mCreateObj(LatticeGaugeField, getName(), 1, env().getGrid()); } // execution /////////////////////////////////////////////////////////////////// void TUnit::execute(void) { LOG(Message) << "Creating unit gauge configuration" << std::endl; - LatticeGaugeField &U = *env().createLattice(getName()); + auto &U = mGetObj(LatticeGaugeField, getName()); SU3::ColdConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index b1f63a5d..fe6992fc 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -100,17 +100,12 @@ std::vector TRBPrecCG::getOutput(void) template void TRBPrecCG::setup(void) { - auto Ls = env().getObjectLs(par().action); - - env().registerObject(getName(), 0, Ls); - env().addOwnership(getName(), par().action); -} + LOG(Message) << "setting up Schur red-black preconditioned CG for" + << " action '" << par().action << "' with residual " + << par().residual << std::endl; -// execution /////////////////////////////////////////////////////////////////// -template -void TRBPrecCG::execute(void) -{ - auto &mat = *(env().template getObject(par().action)); + auto Ls = env().getObjectLs(par().action); + auto &mat = mGetObj(FMat, par().action); auto solver = [&mat, this](FermionField &sol, const FermionField &source) { ConjugateGradient cg(par().residual, 10000); @@ -118,13 +113,15 @@ void TRBPrecCG::execute(void) schurSolver(mat, source, sol); }; - - LOG(Message) << "setting up Schur red-black preconditioned CG for" - << " action '" << par().action << "' with residual " - << par().residual << std::endl; - env().setObject(getName(), new SolverFn(solver)); + mCreateObj(SolverFn, getName(), Ls, solver); + env().addOwnership(getName(), par().action); } +// execution /////////////////////////////////////////////////////////////////// +template +void TRBPrecCG::execute(void) +{} + END_MODULE_NAMESPACE 
END_HADRONS_NAMESPACE diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index fbbb2eb9..63745baf 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -1,42 +1,44 @@ modules_cc =\ - Modules/MContraction/WeakHamiltonianEye.cc \ - Modules/MContraction/WeakHamiltonianNonEye.cc \ - Modules/MContraction/WeakNeutral4ptDisc.cc \ - Modules/MGauge/Load.cc \ - Modules/MGauge/Random.cc \ - Modules/MGauge/StochEm.cc \ - Modules/MGauge/Unit.cc \ - Modules/MScalar/ChargedProp.cc \ - Modules/MScalar/FreeProp.cc + Modules/MGauge/Unit.cc + # Modules/MContraction/WeakHamiltonianEye.cc \ + # Modules/MContraction/WeakHamiltonianNonEye.cc \ + # Modules/MContraction/WeakNeutral4ptDisc.cc \ + # Modules/MGauge/Load.cc \ + # Modules/MGauge/Random.cc \ + # Modules/MGauge/StochEm.cc \ + # Modules/MScalar/ChargedProp.cc \ + # Modules/MScalar/FreeProp.cc modules_hpp =\ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ - Modules/MContraction/Baryon.hpp \ - Modules/MContraction/DiscLoop.hpp \ - Modules/MContraction/Gamma3pt.hpp \ - Modules/MContraction/Meson.hpp \ - Modules/MContraction/WardIdentity.hpp \ - Modules/MContraction/WeakHamiltonian.hpp \ - Modules/MContraction/WeakHamiltonianEye.hpp \ - Modules/MContraction/WeakHamiltonianNonEye.hpp \ - Modules/MContraction/WeakNeutral4ptDisc.hpp \ - Modules/MFermion/GaugeProp.hpp \ - Modules/MGauge/Load.hpp \ - Modules/MGauge/Random.hpp \ - Modules/MGauge/StochEm.hpp \ Modules/MGauge/Unit.hpp \ - Modules/MLoop/NoiseLoop.hpp \ - Modules/MScalar/ChargedProp.hpp \ - Modules/MScalar/FreeProp.hpp \ - Modules/MScalar/Scalar.hpp \ - Modules/MSink/Point.hpp \ - Modules/MSink/Smear.hpp \ - Modules/MSolver/RBPrecCG.hpp \ - Modules/MSource/Point.hpp \ - Modules/MSource/SeqConserved.hpp \ - Modules/MSource/SeqGamma.hpp \ - Modules/MSource/Wall.hpp \ - Modules/MSource/Z2.hpp \ - Modules/MUtilities/TestSeqConserved.hpp \ - Modules/MUtilities/TestSeqGamma.hpp + Modules/MSolver/RBPrecCG.hpp + + # Modules/MContraction/Baryon.hpp \ + # Modules/MContraction/DiscLoop.hpp \ + # Modules/MContraction/Gamma3pt.hpp \ + # Modules/MContraction/Meson.hpp \ + # Modules/MContraction/WardIdentity.hpp \ + # Modules/MContraction/WeakHamiltonian.hpp \ + # Modules/MContraction/WeakHamiltonianEye.hpp \ + # Modules/MContraction/WeakHamiltonianNonEye.hpp \ + # Modules/MContraction/WeakNeutral4ptDisc.hpp \ + # Modules/MFermion/GaugeProp.hpp \ + # Modules/MGauge/Load.hpp \ + # Modules/MGauge/Random.hpp \ + # Modules/MGauge/StochEm.hpp \ + # Modules/MLoop/NoiseLoop.hpp \ + # Modules/MScalar/ChargedProp.hpp \ + # Modules/MScalar/FreeProp.hpp \ + # Modules/MScalar/Scalar.hpp \ + # Modules/MSink/Point.hpp \ + # Modules/MSink/Smear.hpp \ + # Modules/MSolver/RBPrecCG.hpp \ + # Modules/MSource/Point.hpp \ + # Modules/MSource/SeqConserved.hpp \ + # Modules/MSource/SeqGamma.hpp \ + # Modules/MSource/Wall.hpp \ + # Modules/MSource/Z2.hpp \ + # Modules/MUtilities/TestSeqConserved.hpp \ + # Modules/MUtilities/TestSeqGamma.hpp From e55397bc134ead26dbac8e2ef244406a8b9d6a3b Mon Sep 17 00:00:00 2001 From: azusayamaguchi Date: Fri, 24 Nov 2017 14:18:30 +0000 Subject: [PATCH 082/145] Staggerd cg --- lib/algorithms/LinearOperator.h | 20 +++++++++++--------- lib/algorithms/iterative/SchurRedBlack.h | 9 +++++++-- lib/lattice/Lattice_transfer.h | 16 ++++++---------- tests/solver/Test_staggered_cg_schur.cc | 14 ++++++++++++++ 4 files changed, 38 insertions(+), 21 deletions(-) diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 0fa039c8..26746e6e 100644 --- 
a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -308,32 +308,34 @@ namespace Grid { public: SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + GridLogIterative.TimingMode(1); + std::cout << GridLogIterative << " HermOpAndNorm "< inline void pickCheckerboard(int cb,Lattice &half,const Lattice &full){ half.checkerboard = cb; - int ssh=0; - //parallel_for - for(int ss=0;ssoSites();ss++){ - std::vector coor; + + parallel_for(int ss=0;ssoSites();ss++){ int cbos; - + std::vector coor; full._grid->oCoorFromOindex(coor,ss); cbos=half._grid->CheckerBoard(coor); if (cbos==cb) { + int ssh=half._grid->oIndex(coor); half._odata[ssh] = full._odata[ss]; - ssh++; } } } template inline void setCheckerboard(Lattice &full,const Lattice &half){ int cb = half.checkerboard; - int ssh=0; - //parallel_for - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ std::vector coor; int cbos; @@ -77,8 +73,8 @@ inline void subdivides(GridBase *coarse,GridBase *fine) cbos=half._grid->CheckerBoard(coor); if (cbos==cb) { + int ssh=half._grid->oIndex(coor); full._odata[ss]=half._odata[ssh]; - ssh++; } } } diff --git a/tests/solver/Test_staggered_cg_schur.cc b/tests/solver/Test_staggered_cg_schur.cc index 09044995..a5c25b85 100644 --- a/tests/solver/Test_staggered_cg_schur.cc +++ b/tests/solver/Test_staggered_cg_schur.cc @@ -70,7 +70,21 @@ int main (int argc, char ** argv) ConjugateGradient CG(1.0e-8,10000); SchurRedBlackStaggeredSolve SchurSolver(CG); + double volume=1.0; + for(int mu=0;mu volume * 1146 + double ncall=CG.IterationsToComplete; + double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + == 1146 + + std::cout< &processors, ////////////////////////////////////////////////////////////////////////////////////////////////////// InitFromMPICommunicator(processors,comm_split); - std::cout << " ndim " <<_ndimension<<" " << parent._ndimension << std::endl; - for(int d=0;d &proc MPI_Comm_rank(communicator,&_processor); MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); - if ( communicator_base != communicator_world ) { + if ( 0 && (communicator_base != communicator_world) ) { std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"< Date: Mon, 27 Nov 2017 12:33:08 +0000 Subject: [PATCH 084/145] Believe split/unsplit works, but need to make pretty --- lib/lattice/Lattice_transfer.h | 201 ++++++++++++++++++++++----------- 1 file changed, 133 insertions(+), 68 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index dd03fb4f..3d9289d6 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -890,50 +890,85 @@ void Grid_split(std::vector > & full,Lattice & split) if ( ratio[d] != 1 ) { full_grid ->AllToAll(d,alldata,tmpdata); - // std::cout << GridLogMessage << "Grid_split: dim " <_processors[d]<_processors[d] > 1 ) { + alldata=tmpdata; + split_grid->AllToAll(d,alldata,tmpdata); + } - auto rdims = ldims; rdims[d] *= ratio[d]; - auto rsites= lsites*ratio[d]; - for(int v=0;v_processors[d]; + int fP = full_grid->_processors[d]; - for(int r=0;r_processors[d] > 1 ) { - tmpdata = alldata; - split_grid->AllToAll(d,tmpdata,alldata); - } } } vectorizeFromLexOrdArray(alldata,split); @@ -1008,55 +1043,84 @@ void Grid_unsplit(std::vector > & full,Lattice & split) std::vector rcoor(ndim); int nvec = 1; - lsites = split_grid->lSites(); - std::vector ldims = 
split_grid->_ldimensions; + uint64_t rsites = split_grid->lSites(); + std::vector rdims = split_grid->_ldimensions; - // for(int d=ndim-1;d>=0;d--){ for(int d=0;d_processors[d]; + int fP = full_grid->_processors[d]; - if ( split_grid->_processors[d] > 1 ) { - tmpdata = alldata; - split_grid->AllToAll(d,tmpdata,alldata); - } + int M = ratio[d]; + auto ldims = rdims; ldims[d] /= M; // Decrease local dims by same factor + auto lsites= rsites/M; // Decreases rsites by M - ////////////////////////////////////////// - //Local volume for this dimension is expanded by ratio of processor extents - // Number of vectors is decreased by same factor - // Rearrange to lexico for bigger volume - ////////////////////////////////////////// - auto rsites= lsites/ratio[d]; - auto rdims = ldims; rdims[d]/=ratio[d]; + int fvol = lsites; + int svol = rsites; + int chunk = (nvec*fvol)/sP; + int cL = (nvec*ldims[d])/sP; + + for(int c=0;c= tmpdata.size() ) { - // rsite, rcoor --> smaller local volume - // lsite, lcoor --> bigger original (single node?) volume - // For loop over each site within smaller subvol - for(int rsite=0;rsiteAllToAll(d,tmpdata,alldata); + if ( split_grid->_processors[d] > 1 ) { + split_grid->AllToAll(d,tmpdata,alldata); + tmpdata=alldata; + } + full_grid ->AllToAll(d,tmpdata,alldata); + + rdims[d]/= M; + rsites /= M; + nvec *= M; // Increase nvec by subdivision factor + } } } @@ -1064,12 +1128,13 @@ void Grid_unsplit(std::vector > & full,Lattice & split) for(int v=0;v Date: Mon, 27 Nov 2017 12:34:25 +0000 Subject: [PATCH 085/145] Clean on multinode target after split 1 1 2 4 -> 1 1 2 2 --- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index d380f91e..b3611e01 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -121,12 +121,12 @@ int main (int argc, char ** argv) random(pRNG5,src[s]); tmp = 100.0*s; src[s] = (src[s] * 0.1) + tmp; - std::cout << " src ["< Date: Mon, 27 Nov 2017 15:10:22 +0000 Subject: [PATCH 086/145] Debug --- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index b3611e01..06df58c6 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -173,6 +173,7 @@ int main (int argc, char ** argv) // std::cout << " s_src \n" << s_src << std::endl; // std::cout << " s_src_tmp \n" << s_src_tmp << std::endl; // std::cout << " s_src_diff \n" << s_src_diff << std::endl; + // exit(0); #endif /////////////////////////////////////////////////////////////// From 28ceacec45e052578cf6b4fa1f394c87f417d1d2 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 27 Nov 2017 15:13:29 +0000 Subject: [PATCH 087/145] Split/Unsplit working --- lib/lattice/Lattice_transfer.h | 275 ++++++++------------------------- 1 file changed, 65 insertions(+), 210 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index 78b80ba4..c7e2a507 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -694,30 +694,6 @@ void precisionChange(Lattice &out, const Lattice &in){ //////////////////////////////////////////////////////////////////////////////// // Communicate between grids //////////////////////////////////////////////////////////////////////////////// -// -// All to all plan -// -// Subvolume on fine grid is 
v. Vectors a,b,c,d -// -/////////////////////////////////////////////////////////////////////////////////////////////////////////// -// SIMPLEST CASE: -/////////////////////////////////////////////////////////////////////////////////////////////////////////// -// Mesh of nodes (2) ; subdivide to 1 subdivisions -// -// Lex ord: -// N0 va0 vb0 N1 va1 vb1 -// -// For each dimension do an all to all -// -// full AllToAll(0) -// N0 va0 va1 N1 vb0 vb1 -// -// REARRANGE -// N0 va01 N1 vb01 -// -// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". -// NB: Easiest to programme if keep in lex order. -// /////////////////////////////////////////////////////////////////////////////////////////////////////////// // SIMPLE CASE: /////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -751,75 +727,16 @@ void precisionChange(Lattice &out, const Lattice &in){ // // Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". // NB: Easiest to programme if keep in lex order. -// -///////////////////////////////////////////////////////// /* - -[0,0,0,0,0] S {V<4>{V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)},V<3>{(0,0),(0,0),(0,0)}}} -[0,0,0,0,1] S {V<4>{V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)},V<3>{(1,0),(1,0),(1,0)}}} -[0,0,0,0,2] S {V<4>{V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)},V<3>{(4,0),(4,0),(4,0)}}} -[0,0,0,0,3] S {V<4>{V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)},V<3>{(5,0),(5,0),(5,0)}}} -[0,0,0,0,4] S {V<4>{V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)},V<3>{(2,0),(2,0),(2,0)}}} -[0,0,0,0,5] S {V<4>{V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)},V<3>{(3,0),(3,0),(3,0)}}} -[0,0,0,0,6] S {V<4>{V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)},V<3>{(6,0),(6,0),(6,0)}}} -[0,0,0,0,7] S {V<4>{V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)},V<3>{(7,0),(7,0),(7,0)}}} -[0,0,0,0,8] S {V<4>{V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)},V<3>{(8,0),(8,0),(8,0)}}} -[0,0,0,0,9] S {V<4>{V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)},V<3>{(9,0),(9,0),(9,0)}}} -[0,0,0,0,10] S {V<4>{V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)},V<3>{(12,0),(12,0),(12,0)}}} -[0,0,0,0,11] S {V<4>{V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)},V<3>{(13,0),(13,0),(13,0)}}} -[0,0,0,0,12] S {V<4>{V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)},V<3>{(10,0),(10,0),(10,0)}}} -[0,0,0,0,13] S {V<4>{V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)},V<3>{(11,0),(11,0),(11,0)}}} -[0,0,0,0,14] S {V<4>{V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)},V<3>{(14,0),(14,0),(14,0)}}} -[0,0,0,0,15] S {V<4>{V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)},V<3>{(15,0),(15,0),(15,0)}}} - - -Process decomp -[A(0 1) A(2 3) B(0 1) B(2 3)] [ A(4 5) A(6 7) B(4 5) B(6 7)] [ A(8 9) A(10 11) B(8 9) B(10 11)] [A(12 13) A(14 15) B(12 13) B(14 15)] - -A2A(Full) - -- divides M*fL into fP segments of size M*fL/fP = fL/sP - -- total is fP * fL/sP = M * fL - A(0 1) A(4 5) A(8 9) A(12 13) - A(2 3) A(6 7) A(10 11) A(14 15) - B(0 1) B(4 5) B(8 9) B(12 13) - B(2 3) 
B(6 7) B(10 11) B(14 15) - - -A2A(Split) - A(0 1) A(4 5) A(2 3) A(6 7) - A(8 9) A(12 13) A(10 11) A(14 15) - B(0 1) B(2 3) B(4 5) B(6 7) - B(8 9) B(10 11) B(12 13) B(14 15) - --------------------- --- General case --------------------- -G global lattice -fP - procs -sP - Procs in split grid -M - subdivisions/vectors - M*sP = fP ** constraint 1 -fL = G/fP per node (full) -sL = G/sP per node split - -[ G * M ] total = G*fP/sP. -[ Subdivide fL*M by fP => fL *M / fP = fL/fP *fP/sP = fL/sP ] --------------------- --- 1st A2A chunk is fL*M/fP = G/fP *fP/sP /fP = fL/sP --- Let cL = fL/sP chunk. ( Divide into fP/sP = M chunks ) - --- node 0 1st cL of node 0,1,... fP-1 ; vector 0 --- node 1 2nd cL of node 0,1,... fP-1 --- node 2 3nd cL of node 0,1,... fP-1 --- node 3 4th cL of node 0,1,... fP-1 -... when node > sP get vector 1 etc... - --- 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M) --- node 0 1st cL of node 0M..(1M-1); 2nd cL of node 0M..(1M-1)).. --- node 1 1st cL of node 1M..(2M-1); 2nd cL of node 1M..(2M-1).. --- node 2 1st cL of node 2M..(3M-1); 2nd cL of node 2M..(3M-1).. --- node 3 1st cL of node 3M..(3M-1); 2nd cL of node 2M..(3M-1).. --- --- Insert correctly + * Let chunk = (fvol*nvec)/sP be size of a chunk. ( Divide lexico vol * nvec into fP/sP = M chunks ) + * + * 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M) + * + * node 0 1st chunk of node 0M..(1M-1); 2nd chunk of node 0M..(1M-1).. data chunk x M x sP = fL / sP * M * sP = fL * M growth + * node 1 1st chunk of node 1M..(2M-1); 2nd chunk of node 1M..(2M-1).. + * node 2 1st chunk of node 2M..(3M-1); 2nd chunk of node 2M..(3M-1).. + * node 3 1st chunk of node 3M..(3M-1); 2nd chunk of node 2M..(3M-1).. + * etc... */ template void Grid_split(std::vector > & full,Lattice & split) @@ -879,7 +796,6 @@ void Grid_split(std::vector > & full,Lattice & split) int nvec = nvector; // Counts down to 1 as we collapse dims std::vector ldims = full_grid->_ldimensions; - std::vector lcoor(ndim); for(int d=ndim-1;d>=0;d--){ @@ -891,73 +807,40 @@ void Grid_split(std::vector > & full,Lattice & split) split_grid->AllToAll(d,alldata,tmpdata); } - /* --- Let chunk = (fL*nvec)/sP chunk. ( Divide into fP/sP = M chunks ) --- --- 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M) --- --- node 0 1st chunk of node 0M..(1M-1); 2nd chunk of node 0M..(1M-1).. data chunk x M x sP = fL / sP * M * sP = fL * M growth --- node 1 1st chunk of node 1M..(2M-1); 2nd chunk of node 1M..(2M-1).. --- node 2 1st chunk of node 2M..(3M-1); 2nd chunk of node 2M..(3M-1).. --- node 3 1st chunk of node 3M..(3M-1); 2nd chunk of node 2M..(3M-1).. 
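To make the chunk bookkeeping in the comments above concrete, the sketch below works through the general-case relations (M*sP = fP, fL = G/fP, chunk = nvec*fvol/sP, chunk*sP == nvec*fvol) for a single dimension with invented sizes; it exercises the integer arithmetic only and is not a stand-in for Grid_split itself.

// One-dimensional toy of the split-grid bookkeeping.  G: global extent,
// fP: ranks on the full grid, sP: ranks on the split grid, M = fP/sP:
// number of vectors merged onto each split rank.  All values are examples.
#include <cassert>
#include <cstdio>

int main(void) {
  const int G  = 32;        // hypothetical global extent in this dimension
  const int fP = 4;         // full-grid ranks
  const int sP = 2;         // split-grid ranks
  const int M  = fP / sP;   // subdivisions / vectors
  assert(M * sP == fP);     // constraint 1 from the comment

  const int fL   = G / fP;  // local extent on the full grid
  const int sL   = G / sP;  // local extent on the split grid
  const int nvec = M;       // vectors carried into the first A2A
  const int fvol = fL;      // "local volume" in this 1-d toy

  const int chunk = (nvec * fvol) / sP;  // chunk size used in the A2A reshuffle
  assert(chunk * sP == nvec * fvol);     // same assert as in Grid_unsplit
  std::printf("fL=%d sL=%d M=%d chunk=%d\n", fL, sL, M, chunk);
  return 0;
}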
--- --- Loop over c = 0..chunk-1 --- Loop over n = 0..M --- Loop over j = 0..sP --- total chunk*M*sP = fL/sP*fP/sP*sP = G/sP = sL --- csite = (c+m*chunk)% --- split into m*chunk+o = lsite*nvec/fP --- Must turn to vec, rsite, - */ - auto rdims = ldims; - int M = ratio[d]; - nvec /= M; // Reduce nvec by subdivision factor - rdims[d] *= M; // increase local dims by same factor + auto M = ratio[d]; auto rsites= lsites*M;// increases rsites by M + nvec /= M; // Reduce nvec by subdivision factor + rdims[d] *= M; // increase local dim by same factor int sP = split_grid->_processors[d]; int fP = full_grid->_processors[d]; int fvol = lsites; - int svol = rsites; - int chunk = (nvec*fvol)/sP; - int cL = (nvec*ldims[d])/sP; - - for(int c=0;c coor(ndim); + Lexicographic::CoorFromIndex(coor, lex_fvol, ldims); + coor[d] += m*ldims[d]; + Lexicographic::IndexFromCoor(coor, lex_r, rdims); + lex_r += lex_vec * rsites; - alldata[rsite] = tmpdata[c+chunk*m+chunk*M*s]; - - if ( 0 - &&(lcoor[0]==0) - &&(lcoor[1]==0) - &&(lcoor[2]==0) - &&(lcoor[3]==0) ) { - - std::cout << GridLogMessage << " SPLIT rcoor[d] = "< > & full,Lattice & split) ///////////////////////////////////////////////////////////////// // Start from split grid and work towards full grid ///////////////////////////////////////////////////////////////// - std::vector lcoor(ndim); - std::vector rcoor(ndim); int nvec = 1; uint64_t rsites = split_grid->lSites(); @@ -1046,77 +927,52 @@ void Grid_unsplit(std::vector > & full,Lattice & split) if ( ratio[d] != 1 ) { - { - int sP = split_grid->_processors[d]; - int fP = full_grid->_processors[d]; + auto M = ratio[d]; - int M = ratio[d]; - auto ldims = rdims; ldims[d] /= M; // Decrease local dims by same factor - auto lsites= rsites/M; // Decreases rsites by M - - int fvol = lsites; - int svol = rsites; - int chunk = (nvec*fvol)/sP; - int cL = (nvec*ldims[d])/sP; + int sP = split_grid->_processors[d]; + int fP = full_grid->_processors[d]; + + auto ldims = rdims; ldims[d] /= M; // Decrease local dims by same factor + auto lsites= rsites/M; // Decreases rsites by M + + int fvol = lsites; + int chunk = (nvec*fvol)/sP; assert(chunk*sP == nvec*fvol); + { + // Loop over reordered data post A2A for(int c=0;c= tmpdata.size() ) { - - std::cout << "c "< coor(ndim); + Lexicographic::CoorFromIndex(coor, lex_fvol, ldims); + coor[d] += m*ldims[d]; + Lexicographic::IndexFromCoor(coor, lex_r, rdims); + lex_r += lex_vec * rsites; + // LexicoFind coordinate & vector number within split lattice + tmpdata[lex_c] = alldata[lex_r]; } } } - - if ( split_grid->_processors[d] > 1 ) { - split_grid->AllToAll(d,tmpdata,alldata); - tmpdata=alldata; - } - full_grid ->AllToAll(d,tmpdata,alldata); - - rdims[d]/= M; - rsites /= M; - nvec *= M; // Increase nvec by subdivision factor } + + if ( split_grid->_processors[d] > 1 ) { + split_grid->AllToAll(d,tmpdata,alldata); + tmpdata=alldata; + } + full_grid ->AllToAll(d,tmpdata,alldata); + rdims[d]/= M; + rsites /= M; + nvec *= M; // Increase nvec by subdivision factor } } @@ -1129,7 +985,6 @@ void Grid_unsplit(std::vector > & full,Lattice & split) } vectorizeFromLexOrdArray(scalardata,full[v]); } - } } From 514993ed17671607a33d4b23873fd9f136b776e1 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 1 Dec 2017 19:38:23 +0000 Subject: [PATCH 088/145] Hadrons: progress on the interface, genetic algorithm freezing --- extras/Hadrons/Application.cc | 15 ++++-- extras/Hadrons/Environment.cc | 27 +++++++--- extras/Hadrons/Environment.hpp | 40 +++++++-------- extras/Hadrons/Module.hpp | 49 
+++++++++++++++++-- extras/Hadrons/Modules.hpp | 8 +-- extras/Hadrons/Modules/MAction/DWF.hpp | 7 +-- extras/Hadrons/Modules/MAction/Wilson.hpp | 7 +-- extras/Hadrons/Modules/MContraction/Meson.hpp | 18 +++---- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 29 ++++++----- extras/Hadrons/Modules/MGauge/Unit.cc | 4 +- extras/Hadrons/Modules/MSink/Point.hpp | 36 +++++++++----- extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 4 +- extras/Hadrons/Modules/MSource/Point.hpp | 4 +- extras/Hadrons/modules.inc | 9 ++-- 14 files changed, 164 insertions(+), 93 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index a94b617c..0a7d0290 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -98,6 +98,8 @@ void Application::run(void) { parseParameterFile(parameterFileName_); } + env().checkGraph(); + env().printContent(); if (!scheduled_) { schedule(); @@ -124,8 +126,14 @@ void Application::parseParameterFile(const std::string parameterFileName) LOG(Message) << "Building application from '" << parameterFileName << "'..." << std::endl; read(reader, "parameters", par); setPar(par); - push(reader, "modules"); - push(reader, "module"); + if (!push(reader, "modules")) + { + HADRON_ERROR("Cannot open node 'modules' in parameter file '" + parameterFileName + "'"); + } + if (!push(reader, "module")) + { + HADRON_ERROR("Cannot open node 'modules/module' in parameter file '" + parameterFileName + "'"); + } do { read(reader, "id", id); @@ -186,6 +194,8 @@ void Application::schedule(void) // build module dependency graph LOG(Message) << "Building module graph..." << std::endl; auto graph = env().makeModuleGraph(); + LOG(Debug) << "Module graph:" << std::endl; + LOG(Debug) << graph << std::endl; auto con = graph.getConnectedComponents(); // constrained topological sort using a genetic algorithm @@ -329,4 +339,3 @@ void Application::memoryProfile(void) HadronsLogMessage.Active(msg); } - diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index eb0a6f70..a6855862 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -333,6 +333,17 @@ Graph Environment::makeModuleGraph(void) const return moduleGraph; } +void Environment::checkGraph(void) const +{ + for (auto &o: object_) + { + if (o.module < 0) + { + HADRON_ERROR("object '" + o.name + "' does not have a creator"); + } + } +} + #define BIG_SEP "===============" #define SEP "---------------" #define MEM_MSG(size)\ @@ -346,6 +357,7 @@ Environment::executeProgram(const std::vector &p) bool continueCollect, nothingFreed; // build garbage collection schedule + LOG(Debug) << "Building garbage collection schedule..." << std::endl; freeProg.resize(p.size()); for (unsigned int i = 0; i < object_.size(); ++i) { @@ -359,11 +371,12 @@ Environment::executeProgram(const std::vector &p) auto it = std::find_if(p.rbegin(), p.rend(), pred); if (it != p.rend()) { - freeProg[std::distance(p.rend(), it) - 1].insert(i); + freeProg[std::distance(it, p.rend()) - 1].insert(i); } } // program execution + LOG(Debug) << "Executing program..." 
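The corrected freeProg index above, std::distance(it, p.rend()) - 1, converts a reverse-iterator hit into the forward index of the last program step that still uses the object (the previous argument order produced a negative offset). A minimal, Hadrons-independent sketch of that iterator arithmetic:

// Recover the forward index of the *last* element matching a predicate by
// searching with reverse iterators, as the garbage collection schedule does.
#include <algorithm>
#include <cassert>
#include <vector>

int main(void) {
  std::vector<int> p = {7, 3, 7, 5, 7, 2};   // toy "program"
  auto pred = [](int x) { return x == 7; };  // "this step needs the object"

  auto it = std::find_if(p.rbegin(), p.rend(), pred);
  assert(it != p.rend());
  auto lastUse = std::distance(it, p.rend()) - 1;  // forward index of last match
  assert(lastUse == 4);  // the object can be freed after step 4
  return 0;
}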
<< std::endl; for (unsigned int i = 0; i < p.size(); ++i) { // execute module @@ -712,16 +725,16 @@ void Environment::freeAll(void) void Environment::printContent(void) { - LOG(Message) << "Modules: " << std::endl; + LOG(Debug) << "Modules: " << std::endl; for (unsigned int i = 0; i < module_.size(); ++i) { - LOG(Message) << std::setw(4) << i << ": " - << getModuleName(i) << std::endl; + LOG(Debug) << std::setw(4) << i << ": " + << getModuleName(i) << std::endl; } - LOG(Message) << "Objects: " << std::endl; + LOG(Debug) << "Objects: " << std::endl; for (unsigned int i = 0; i < object_.size(); ++i) { - LOG(Message) << std::setw(4) << i << ": " - << getObjectName(i) << std::endl; + LOG(Debug) << std::setw(4) << i << ": " + << getObjectName(i) << std::endl; } } diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index b426fb27..58e035ac 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -76,6 +76,7 @@ public: typedef std::unique_ptr GridRbPt; typedef std::unique_ptr RngPt; typedef std::unique_ptr LatticePt; + enum class Storage {object, cache, temporary}; private: struct ModuleInfo { @@ -88,6 +89,7 @@ private: struct ObjInfo { Size size{0}; + Storage storage{Storage::object}; unsigned int Ls{0}; const std::type_info *type{nullptr}; std::string name; @@ -140,18 +142,17 @@ public: bool hasModule(const unsigned int address) const; bool hasModule(const std::string name) const; Graph makeModuleGraph(void) const; + void checkGraph(void) const; Size executeProgram(const std::vector &p); Size executeProgram(const std::vector &p); // general memory management void addObject(const std::string name, const int moduleAddress = -1); - template + template void createObject(const std::string name, + const Storage storage, const unsigned int Ls, - Ts ... args); - template - void createLattice(const std::string name, - const unsigned int Ls = 1); + P &&pt); template T * getObject(const unsigned int address) const; template @@ -203,6 +204,7 @@ private: // module and related maps std::vector module_; std::map moduleAddress_; + std::string currentModule_{""}; // lattice store std::map lattice_; // object store @@ -281,9 +283,11 @@ M * Environment::getModule(const std::string name) const return getModule(getModuleAddress(name)); } -template -void Environment::createObject(const std::string name, const unsigned int Ls, - Ts ... 
args) +template +void Environment::createObject(const std::string name, + const Environment::Storage storage, + const unsigned int Ls, + P &&pt) { if (!hasObject(name)) { @@ -296,11 +300,13 @@ void Environment::createObject(const std::string name, const unsigned int Ls, { MemoryStats memStats; - MemoryProfiler::stats = &memStats; - object_[address].Ls = Ls; - object_[address].data.reset(new Holder(new T(args...))); - object_[address].size = memStats.totalAllocated; - object_[address].type = &typeid(T); + MemoryProfiler::stats = &memStats; + object_[address].storage = storage; + object_[address].Ls = Ls; + object_[address].data.reset(new Holder(pt)); + object_[address].size = memStats.totalAllocated; + object_[address].type = &typeid(T); + MemoryProfiler::stats = nullptr; } else { @@ -308,14 +314,6 @@ void Environment::createObject(const std::string name, const unsigned int Ls, } } -template -void Environment::createLattice(const std::string name, const unsigned int Ls) -{ - GridCartesian *g = getGrid(Ls); - - createObject(name, Ls, g); -} - template T * Environment::getObject(const unsigned int address) const { diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 5500bf36..a0b062df 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -87,13 +87,54 @@ public:\ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define ARG(...) __VA_ARGS__ +#define MACRO_REDIRECT(arg1, arg2, arg3, macro, ...) macro -#define mCreateObj(type, name, Ls, ...)\ -env().template createObject(name, Ls, __VA_ARGS__) - -#define mGetObj(type, name)\ +#define envGet(type, name)\ *env().template getObject(name) +#define envGetTmp(type, name)\ +*env().template getObject(getName() + "_tmp_" + name) + +#define envIsType(type, name)\ +env().template getObject(name) + +#define envCreate(type, name, Ls, pt)\ +env().template createObject(name, Environment::Storage::object, Ls, pt) + +#define envCreateLat4(type, name)\ +envCreate(type, name, 1, new type(env().getGrid())) + +#define envCreateLat5(type, name, Ls)\ +envCreate(type, name, Ls, new type(env().getGrid(Ls))) + +#define envCreateLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envCreateLat5, envCreateLat4)(__VA_ARGS__) + +#define envCache(type, name, Ls, pt)\ +env().template createObject(name, Environment::Storage::cache, Ls, pt) + +#define envCacheLat4(type, name)\ +envCache(type, name, 1, new type(env().getGrid())) + +#define envCacheLat5(type, name, Ls)\ +envCache(type, name, Ls, new type(env().getGrid(Ls))) + +#define envCacheLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envCacheLat5, envCacheLat4)(__VA_ARGS__) + +#define envTmp(type, name, Ls, pt)\ +env().template createObject(getName() + "_tmp_" + name, \ + Environment::Storage::temporary, Ls, pt) + +#define envTmpLat4(type, name)\ +envTmp(type, name, 1, new type(env().getGrid())) + +#define envTmpLat5(type, name, Ls)\ +envTmp(type, name, Ls, new type(env().getGrid(Ls))) + +#define envTmpLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envTmpLat5, envTmpLat4)(__VA_ARGS__) + /****************************************************************************** * Module class * ******************************************************************************/ diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 08678671..bb574a14 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -33,13 +33,13 @@ See the full license in the file "LICENSE" in the top level distribution directo // #include // #include // #include -// #include +#include // #include // 
#include // #include // #include // #include -// #include +#include // #include // #include // #include @@ -48,10 +48,10 @@ See the full license in the file "LICENSE" in the top level distribution directo // #include // #include // #include -// #include +#include // #include #include -// #include +#include // #include // #include // #include diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index a2ed063b..7c82fe8b 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -110,15 +110,16 @@ void TDWF::setup(void) LOG(Message) << "Fermion boundary conditions: " << par().boundary << std::endl; env().createGrid(par().Ls); - auto &U = mGetObj(LatticeGaugeField, par().gauge); + auto &U = envGet(LatticeGaugeField, par().gauge); auto &g4 = *env().getGrid(); auto &grb4 = *env().getRbGrid(); auto &g5 = *env().getGrid(par().Ls); auto &grb5 = *env().getRbGrid(par().Ls); std::vector boundary = strToVec(par().boundary); typename DomainWallFermion::ImplParams implParams(boundary); - mCreateObj(DomainWallFermion, getName(), par().Ls, - U, g5, grb5, g4, grb4, par().mass, par().M5, implParams); + envCreate(FMat, getName(), par().Ls, + new DomainWallFermion(U, g5, grb5, g4, grb4, par().mass, + par().M5, implParams)); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index bc892daf..5c334f8d 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -105,13 +105,14 @@ void TWilson::setup(void) << " using gauge field '" << par().gauge << "'" << std::endl; LOG(Message) << "Fermion boundary conditions: " << par().boundary << std::endl; - auto &U = mGetObj(LatticeGaugeField, par().gauge); + auto &U = envGet(LatticeGaugeField, par().gauge); auto &grid = *env().getGrid(); auto &gridRb = *env().getRbGrid(); std::vector boundary = strToVec(par().boundary); typename WilsonFermion::ImplParams implParams(boundary); - mCreateObj(WilsonFermion, getName(), 1, U, grid, gridRb, par().mass, - implParams); + envCreate(FMat, getName(), 1, new WilsonFermion(U, grid, gridRb, + par().mass, + implParams)); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index b71f7c08..ccc6dc55 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -153,7 +153,6 @@ void TMeson::parseGammaString(std::vector &gammaList) } } - // execution /////////////////////////////////////////////////////////////////// #define mesonConnected(q1, q2, gSnk, gSrc) \ (g5*(gSnk))*(q1)*(adj(gSrc)*g5)*adj(q2) @@ -180,11 +179,11 @@ void TMeson::execute(void) result[i].gamma_src = gammaList[i].second; result[i].corr.resize(nt); } - if (env().template isObjectOfType(par().q1) and - env().template isObjectOfType(par().q2)) + if (envIsType(SlicedPropagator1, par().q1) and + envIsType(SlicedPropagator2, par().q2)) { - SlicedPropagator1 &q1 = *env().template getObject(par().q1); - SlicedPropagator2 &q2 = *env().template getObject(par().q2); + SlicedPropagator1 &q1 = envGet(SlicedPropagator1, par().q1); + SlicedPropagator2 &q2 = envGet(SlicedPropagator2, par().q2); LOG(Message) << "(propagator already sinked)" << std::endl; for (unsigned int i = 0; i < result.size(); ++i) @@ -200,8 +199,8 @@ void TMeson::execute(void) } 
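The envCreateLat/envCacheLat/envTmpLat macros added to Module.hpp above are overloaded on argument count via MACRO_REDIRECT, which simply expands to its fourth argument. The self-contained toy below demonstrates the same arity-dispatch trick; the PICK4/MAKE names are invented for illustration and are not part of Hadrons.

// Arity-based macro dispatch: with two user arguments the fourth slot of
// PICK4 is MAKE2, with three it is MAKE3 (mirroring envCreateLat4/envCreateLat5).
#include <cstdio>

#define PICK4(a1, a2, a3, macro, ...) macro
#define MAKE2(type, name)     std::printf("2-arg: %s %s\n", #type, #name)
#define MAKE3(type, name, Ls) std::printf("3-arg: %s %s Ls=%d\n", #type, #name, (Ls))
#define MAKE(...) PICK4(__VA_ARGS__, MAKE3, MAKE2)(__VA_ARGS__)

int main(void) {
  MAKE(LatticeComplex, phase);     // expands to MAKE2(LatticeComplex, phase)
  MAKE(FermionField, source, 12);  // expands to MAKE3(FermionField, source, 12)
  return 0;
}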
else { - PropagatorField1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); + PropagatorField1 &q1 = envGet(PropagatorField1, par().q1); + PropagatorField2 &q2 = envGet(PropagatorField2, par().q2); LatticeComplex c(env().getGrid()); LOG(Message) << "(using sink '" << par().sink << "')" << std::endl; @@ -214,15 +213,14 @@ void TMeson::execute(void) ns = env().getModuleNamespace(env().getObjectModule(par().sink)); if (ns == "MSource") { - PropagatorField1 &sink = - *env().template getObject(par().sink); + PropagatorField1 &sink = envGet(PropagatorField1, par().sink); c = trace(mesonConnected(q1, q2, gSnk, gSrc)*sink); sliceSum(c, buf, Tp); } else if (ns == "MSink") { - SinkFnScalar &sink = *env().template getObject(par().sink); + SinkFnScalar &sink = envGet(SinkFnScalar, par().sink); c = trace(mesonConnected(q1, q2, gSnk, gSrc)); buf = sink(c); diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 8add9a00..59994d0d 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -127,10 +127,13 @@ template void TGaugeProp::setup(void) { Ls_ = env().getObjectLs(par().solver); - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); + envTmpLat(FermionField, "source", Ls_); + envTmpLat(FermionField, "sol", Ls_); + envTmpLat(FermionField, "tmp"); if (Ls_ > 1) { - env().template registerLattice(getName() + "_5d", Ls_); + envCreateLat(PropagatorField, getName() + "_5d", Ls_); } } @@ -139,21 +142,18 @@ template void TGaugeProp::execute(void) { LOG(Message) << "Computing quark propagator '" << getName() << "'" - << std::endl; + << std::endl; - FermionField source(env().getGrid(Ls_)), sol(env().getGrid(Ls_)), - tmp(env().getGrid()); + FermionField &source = envGetTmp(FermionField, "source"); + FermionField &sol = envGetTmp(FermionField, "sol"); + FermionField &tmp = envGetTmp(FermionField, "tmp"); std::string propName = (Ls_ == 1) ? 
getName() : (getName() + "_5d"); - PropagatorField &prop = *env().template createLattice(propName); - PropagatorField &fullSrc = *env().template getObject(par().source); - SolverFn &solver = *env().template getObject(par().solver); - if (Ls_ > 1) - { - env().template createLattice(getName()); - } + PropagatorField &prop = envGet(PropagatorField, propName); + PropagatorField &fullSrc = envGet(PropagatorField, par().source); + SolverFn &solver = envGet(SolverFn, par().solver); LOG(Message) << "Inverting using solver '" << par().solver - << "' on source '" << par().source << "'" << std::endl; + << "' on source '" << par().source << "'" << std::endl; for (unsigned int s = 0; s < Ns; ++s) for (unsigned int c = 0; c < Nc; ++c) { @@ -190,8 +190,7 @@ void TGaugeProp::execute(void) // create 4D propagators from 5D one if necessary if (Ls_ > 1) { - PropagatorField &p4d = - *env().template getObject(getName()); + PropagatorField &p4d = envGet(PropagatorField, getName()); make_4D(sol, tmp, Ls_); FermToProp(p4d, tmp, s, c); } diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index b259b7d5..b3a7d634 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -57,13 +57,13 @@ std::vector TUnit::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TUnit::setup(void) { - mCreateObj(LatticeGaugeField, getName(), 1, env().getGrid()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TUnit::execute(void) { LOG(Message) << "Creating unit gauge configuration" << std::endl; - auto &U = mGetObj(LatticeGaugeField, getName()); + auto &U = envGet(LatticeGaugeField, getName()); SU3::ColdConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp index 0761c4c4..b124e2e5 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -65,6 +65,9 @@ public: virtual void setup(void); // execution virtual void execute(void); +private: + bool hasPhase_{false}; + std::string momphName_; }; MODULE_REGISTER_NS(Point, TPoint, MSink); @@ -77,6 +80,7 @@ MODULE_REGISTER_NS(ScalarPoint, TPoint, MSink); template TPoint::TPoint(const std::string name) : Module(name) +, momphName_ (name + "_momph") {} // dependencies/products /////////////////////////////////////////////////////// @@ -100,30 +104,36 @@ std::vector TPoint::getOutput(void) template void TPoint::setup(void) { - unsigned int size; - - size = env().template lattice4dSize(); - env().registerObject(getName(), size); + envTmpLat(LatticeComplex, "coor"); + envCacheLat(LatticeComplex, momphName_); + envCreate(SinkFn, getName(), 1, nullptr); } // execution /////////////////////////////////////////////////////////////////// template void TPoint::execute(void) { - std::vector p = strToVec(par().mom); - LatticeComplex ph(env().getGrid()), coor(env().getGrid()); + std::vector p = strToVec(par().mom); + LatticeComplex &ph = envGet(LatticeComplex, momphName_); Complex i(0.0,1.0); LOG(Message) << "Setting up point sink function for momentum [" << par().mom << "]" << std::endl; - ph = zero; - for(unsigned int mu = 0; mu < env().getNd(); mu++) + + if (!hasPhase_) { - LatticeCoordinate(coor, mu); - ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor; + LatticeComplex &coor = envGetTmp(LatticeComplex, "coor"); + + ph = zero; + for(unsigned int mu = 0; mu < 
env().getNd(); mu++) + { + LatticeCoordinate(coor, mu); + ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor; + } + ph = exp((Real)(2*M_PI)*i*ph); + hasPhase_ = true; } - ph = exp((Real)(2*M_PI)*i*ph); - auto sink = [ph](const PropagatorField &field) + auto sink = [&ph](const PropagatorField &field) { SlicedPropagator res; PropagatorField tmp = ph*field; @@ -132,7 +142,7 @@ void TPoint::execute(void) return res; }; - env().setObject(getName(), new SinkFn(sink)); + envGet(SinkFn, getName()) = sink; } END_MODULE_NAMESPACE diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index fe6992fc..8063d939 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -105,7 +105,7 @@ void TRBPrecCG::setup(void) << par().residual << std::endl; auto Ls = env().getObjectLs(par().action); - auto &mat = mGetObj(FMat, par().action); + auto &mat = envGet(FMat, par().action); auto solver = [&mat, this](FermionField &sol, const FermionField &source) { ConjugateGradient cg(par().residual, 10000); @@ -113,7 +113,7 @@ void TRBPrecCG::setup(void) schurSolver(mat, source, sol); }; - mCreateObj(SolverFn, getName(), Ls, solver); + envCreate(SolverFn, getName(), Ls, new SolverFn(solver)); env().addOwnership(getName(), par().action); } diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index 7815e5c1..5e16149e 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -111,7 +111,7 @@ std::vector TPoint::getOutput(void) template void TPoint::setup(void) { - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); } // execution /////////////////////////////////////////////////////////////////// @@ -123,7 +123,7 @@ void TPoint::execute(void) LOG(Message) << "Creating point source at position [" << par().position << "]" << std::endl; - PropagatorField &src = *env().template createLattice(getName()); + PropagatorField &src = envGet(PropagatorField, getName()); id = 1.; src = zero; pokeSite(id, src, position); diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 63745baf..5ce2435f 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -12,13 +12,16 @@ modules_cc =\ modules_hpp =\ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ + Modules/MSink/Point.hpp \ + Modules/MSource/Point.hpp \ Modules/MGauge/Unit.hpp \ - Modules/MSolver/RBPrecCG.hpp + Modules/MSolver/RBPrecCG.hpp \ + Modules/MFermion/GaugeProp.hpp \ + Modules/MContraction/Meson.hpp # Modules/MContraction/Baryon.hpp \ # Modules/MContraction/DiscLoop.hpp \ # Modules/MContraction/Gamma3pt.hpp \ - # Modules/MContraction/Meson.hpp \ # Modules/MContraction/WardIdentity.hpp \ # Modules/MContraction/WeakHamiltonian.hpp \ # Modules/MContraction/WeakHamiltonianEye.hpp \ @@ -32,10 +35,8 @@ modules_hpp =\ # Modules/MScalar/ChargedProp.hpp \ # Modules/MScalar/FreeProp.hpp \ # Modules/MScalar/Scalar.hpp \ - # Modules/MSink/Point.hpp \ # Modules/MSink/Smear.hpp \ # Modules/MSolver/RBPrecCG.hpp \ - # Modules/MSource/Point.hpp \ # Modules/MSource/SeqConserved.hpp \ # Modules/MSource/SeqGamma.hpp \ # Modules/MSource/Wall.hpp \ From 2427a21428b6704a08119a24c60f0e77830c55c7 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 1 Dec 2017 19:44:07 +0000 Subject: [PATCH 089/145] minor serial IO fixes, XML now issues warning when trying to read absent nodes, these becomes --- lib/serialisation/JSON_IO.cc | 2 +- 
lib/serialisation/MacroMagic.h | 6 ++++- lib/serialisation/XmlIO.cc | 41 ++++++++++++++++++++++------------ lib/serialisation/XmlIO.h | 10 +++++++-- 4 files changed, 41 insertions(+), 18 deletions(-) diff --git a/lib/serialisation/JSON_IO.cc b/lib/serialisation/JSON_IO.cc index 99a9cdd6..6a01aa84 100644 --- a/lib/serialisation/JSON_IO.cc +++ b/lib/serialisation/JSON_IO.cc @@ -25,7 +25,7 @@ See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include using namespace Grid; using namespace std; diff --git a/lib/serialisation/MacroMagic.h b/lib/serialisation/MacroMagic.h index 774c947f..5df2c780 100644 --- a/lib/serialisation/MacroMagic.h +++ b/lib/serialisation/MacroMagic.h @@ -125,7 +125,11 @@ static inline void write(Writer &WR,const std::string &s, const cname &obj){ }\ template \ static inline void read(Reader &RD,const std::string &s, cname &obj){ \ - push(RD,s);\ + if (!push(RD,s))\ + {\ + std::cout << Grid::GridLogWarning << "IO: Cannot open node '" << s << "'" << std::endl;\ + return;\ + };\ GRID_MACRO_EVAL(GRID_MACRO_MAP(GRID_MACRO_READ_MEMBER,__VA_ARGS__)) \ pop(RD);\ }\ diff --git a/lib/serialisation/XmlIO.cc b/lib/serialisation/XmlIO.cc index 260611a5..8ac7422c 100644 --- a/lib/serialisation/XmlIO.cc +++ b/lib/serialisation/XmlIO.cc @@ -100,13 +100,16 @@ XmlReader::XmlReader(const string &fileName,string toplev) : fileName_(fileName) bool XmlReader::push(const string &s) { + if (node_.child(s.c_str())) + { + node_ = node_.child(s.c_str()); - if (node_.child(s.c_str()) == NULL ) + return true; + } + else + { return false; - - node_ = node_.child(s.c_str()); - return true; - + } } void XmlReader::pop(void) @@ -117,20 +120,30 @@ void XmlReader::pop(void) bool XmlReader::nextElement(const std::string &s) { if (node_.next_sibling(s.c_str())) - { - node_ = node_.next_sibling(s.c_str()); - - return true; - } + { + node_ = node_.next_sibling(s.c_str()); + + return true; + } else - { - return false; - } + { + return false; + } } template <> void XmlReader::readDefault(const string &s, string &output) { - output = node_.child(s.c_str()).first_child().value(); + if (node_.child(s.c_str())) + { + output = node_.child(s.c_str()).first_child().value(); + } + else + { + std::cout << GridLogWarning << "XML: cannot open node '" << s << "'"; + std::cout << std::endl; + + output = ""; + } } diff --git a/lib/serialisation/XmlIO.h b/lib/serialisation/XmlIO.h index fcdbf1e4..e37eb8d9 100644 --- a/lib/serialisation/XmlIO.h +++ b/lib/serialisation/XmlIO.h @@ -39,6 +39,7 @@ Author: paboyle #include #include +#include namespace Grid { @@ -119,7 +120,6 @@ namespace Grid std::string buf; readDefault(s, buf); - // std::cout << s << " " << buf << std::endl; fromString(output, buf); } @@ -132,7 +132,13 @@ namespace Grid std::string buf; unsigned int i = 0; - push(s); + if (!push(s)) + { + std::cout << GridLogWarning << "XML: cannot open node '" << s << "'"; + std::cout << std::endl; + + return; + } while (node_.child("elem")) { output.resize(i + 1); From 2a9ebddad59116151e6db2a0bc8cdbf53dd5741c Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Sun, 3 Dec 2017 19:45:15 +0100 Subject: [PATCH 090/145] Hadrons: scheduler offline, minimal code working again --- extras/Hadrons/Application.cc | 93 ++++++++++--------- extras/Hadrons/Module.hpp | 4 +- extras/Hadrons/Modules/MContraction/Meson.hpp | 4 +- 3 files changed, 51 insertions(+), 50 deletions(-) diff --git 
a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 0a7d0290..0860437b 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -182,7 +182,7 @@ GeneticScheduler::ObjFunc memPeak = \ memPeak = env().executeProgram(program);\ env().dryRun(false);\ env().freeAll();\ - HadronsLogMessage.Active(true);\ + HadronsLogMessage.Active(msg);\ \ return memPeak;\ } @@ -199,58 +199,59 @@ void Application::schedule(void) auto con = graph.getConnectedComponents(); // constrained topological sort using a genetic algorithm - LOG(Message) << "Scheduling computation..." << std::endl; - LOG(Message) << " #module= " << graph.size() << std::endl; - LOG(Message) << " population size= " << par_.genetic.popSize << std::endl; - LOG(Message) << " max. generation= " << par_.genetic.maxGen << std::endl; - LOG(Message) << " max. cst. generation= " << par_.genetic.maxCstGen << std::endl; - LOG(Message) << " mutation rate= " << par_.genetic.mutationRate << std::endl; + // LOG(Message) << "Scheduling computation..." << std::endl; + // LOG(Message) << " #module= " << graph.size() << std::endl; + // LOG(Message) << " population size= " << par_.genetic.popSize << std::endl; + // LOG(Message) << " max. generation= " << par_.genetic.maxGen << std::endl; + // LOG(Message) << " max. cst. generation= " << par_.genetic.maxCstGen << std::endl; + // LOG(Message) << " mutation rate= " << par_.genetic.mutationRate << std::endl; - unsigned int k = 0, gen, prevPeak, nCstPeak = 0; - std::random_device rd; - GeneticScheduler::Parameters par; + // unsigned int k = 0, gen, prevPeak, nCstPeak = 0; + // std::random_device rd; + // GeneticScheduler::Parameters par; - par.popSize = par_.genetic.popSize; - par.mutationRate = par_.genetic.mutationRate; - par.seed = rd(); - memPeak_ = 0; - CartesianCommunicator::BroadcastWorld(0, &(par.seed), sizeof(par.seed)); + // par.popSize = par_.genetic.popSize; + // par.mutationRate = par_.genetic.mutationRate; + // par.seed = rd(); + // memPeak_ = 0; + // CartesianCommunicator::BroadcastWorld(0, &(par.seed), sizeof(par.seed)); for (unsigned int i = 0; i < con.size(); ++i) { - GeneticScheduler scheduler(con[i], memPeak, par); + // GeneticScheduler scheduler(con[i], memPeak, par); - gen = 0; - do - { - LOG(Debug) << "Generation " << gen << ":" << std::endl; - scheduler.nextGeneration(); - if (gen != 0) - { - if (prevPeak == scheduler.getMinValue()) - { - nCstPeak++; - } - else - { - nCstPeak = 0; - } - } + // gen = 0; + // do + // { + // LOG(Debug) << "Generation " << gen << ":" << std::endl; + // scheduler.nextGeneration(); + // if (gen != 0) + // { + // if (prevPeak == scheduler.getMinValue()) + // { + // nCstPeak++; + // } + // else + // { + // nCstPeak = 0; + // } + // } - prevPeak = scheduler.getMinValue(); - if (gen % 10 == 0) - { - LOG(Iterative) << "Generation " << gen << ": " - << MEM_MSG(scheduler.getMinValue()) << std::endl; - } + // prevPeak = scheduler.getMinValue(); + // if (gen % 10 == 0) + // { + // LOG(Iterative) << "Generation " << gen << ": " + // << MEM_MSG(scheduler.getMinValue()) << std::endl; + // } - gen++; - } while ((gen < par_.genetic.maxGen) - and (nCstPeak < par_.genetic.maxCstGen)); - auto &t = scheduler.getMinSchedule(); - if (scheduler.getMinValue() > memPeak_) - { - memPeak_ = scheduler.getMinValue(); - } + // gen++; + // } while ((gen < par_.genetic.maxGen) + // and (nCstPeak < par_.genetic.maxCstGen)); + // auto &t = scheduler.getMinSchedule(); + // if (scheduler.getMinValue() > memPeak_) + // { + // memPeak_ = 
scheduler.getMinValue(); + // } + auto t = con[i].topoSort(); for (unsigned int j = 0; j < t.size(); ++j) { program_.push_back(t[j]); diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index a0b062df..a9525029 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -95,8 +95,8 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define envGetTmp(type, name)\ *env().template getObject(getName() + "_tmp_" + name) -#define envIsType(type, name)\ -env().template getObject(name) +#define envHasType(type, name)\ +env().template isObjectOfType(name) #define envCreate(type, name, Ls, pt)\ env().template createObject(name, Environment::Storage::object, Ls, pt) diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index ccc6dc55..34127da3 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -179,8 +179,8 @@ void TMeson::execute(void) result[i].gamma_src = gammaList[i].second; result[i].corr.resize(nt); } - if (envIsType(SlicedPropagator1, par().q1) and - envIsType(SlicedPropagator2, par().q2)) + if (envHasType(SlicedPropagator1, par().q1) and + envHasType(SlicedPropagator2, par().q2)) { SlicedPropagator1 &q1 = envGet(SlicedPropagator1, par().q1); SlicedPropagator2 &q2 = envGet(SlicedPropagator2, par().q2); From 624246409cc769715c74665d876a4cb4038a9693 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Sun, 3 Dec 2017 19:46:18 +0100 Subject: [PATCH 091/145] Hadrons: module setup/execute protected to forbid user to bypass execution control --- extras/Hadrons/Module.hpp | 5 +++-- extras/Hadrons/Modules/MAction/DWF.hpp | 1 + extras/Hadrons/Modules/MAction/Wilson.hpp | 1 + extras/Hadrons/Modules/MContraction/Baryon.hpp | 1 + extras/Hadrons/Modules/MContraction/DiscLoop.hpp | 1 + extras/Hadrons/Modules/MContraction/Gamma3pt.hpp | 1 + extras/Hadrons/Modules/MContraction/Meson.hpp | 1 + extras/Hadrons/Modules/MContraction/WardIdentity.hpp | 1 + extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp | 4 +++- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 1 + extras/Hadrons/Modules/MGauge/Load.hpp | 1 + extras/Hadrons/Modules/MGauge/Random.hpp | 1 + extras/Hadrons/Modules/MGauge/StochEm.hpp | 1 + extras/Hadrons/Modules/MGauge/Unit.hpp | 1 + extras/Hadrons/Modules/MLoop/NoiseLoop.hpp | 1 + extras/Hadrons/Modules/MScalar/ChargedProp.hpp | 1 + extras/Hadrons/Modules/MScalar/FreeProp.hpp | 1 + extras/Hadrons/Modules/MSink/Point.hpp | 1 + extras/Hadrons/Modules/MSink/Smear.hpp | 1 + extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 1 + extras/Hadrons/Modules/MSource/Point.hpp | 1 + extras/Hadrons/Modules/MSource/SeqConserved.hpp | 1 + extras/Hadrons/Modules/MSource/SeqGamma.hpp | 1 + extras/Hadrons/Modules/MSource/Wall.hpp | 1 + extras/Hadrons/Modules/MSource/Z2.hpp | 1 + extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp | 1 + extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp | 1 + 27 files changed, 31 insertions(+), 3 deletions(-) diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index a9525029..017a9172 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -157,10 +157,11 @@ public: // parse parameters virtual void parseParameters(XmlReader &reader, const std::string name) = 0; virtual void saveParameters(XmlWriter &writer, const std::string name) = 0; - // setup - virtual void setup(void) {}; // execution void operator()(void); +protected: + // setup + virtual void setup(void) {}; virtual void 
execute(void) = 0; private: std::string name_; diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 7c82fe8b..36c70073 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -65,6 +65,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index 5c334f8d..7fe1f44e 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -63,6 +63,7 @@ public: // dependencies/products virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index 78bde5a2..da927391 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -72,6 +72,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // execution virtual void execute(void); }; diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index 4f782cd3..f8da3943 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -68,6 +68,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index 162ab786..a8653186 100644 --- a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -99,6 +99,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 34127da3..31640b7c 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -97,6 +97,7 @@ public: virtual std::vector getInput(void); virtual std::vector getOutput(void); virtual void parseGammaString(std::vector &gammaList); +protected: // execution virtual void execute(void); }; diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index 8a56e0eb..a298c1a1 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -74,6 +74,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp index 302b207e..7df40370 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp @@ -100,11 +100,13 @@ public:\ /* dependency relation */ \ virtual std::vector getInput(void);\ virtual std::vector getOutput(void);\ +public:\ + std::vector VA_label = {"V", "A"};\ +protected:\ /* setup */ \ 
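The change running through these module headers, a public operator() driving protected virtual setup()/execute(), is the non-virtual-interface pattern: user code can only trigger a module through the call operator, so the execution-control logic cannot be bypassed. A minimal sketch of the shape of it, with invented class names:

// Sketch of the access-control pattern: operator() is the single public entry
// point and enforces setup-before-execute; the virtual hooks are protected so
// derived modules implement them but callers cannot invoke them directly.
#include <iostream>

class ModuleSketch {
public:
  virtual ~ModuleSketch(void) = default;
  void operator()(void) {        // only public way to run a module
    setup();
    execute();
  }
protected:
  virtual void setup(void) {}    // optional hook
  virtual void execute(void) = 0;
};

class HelloModule: public ModuleSketch {
protected:
  virtual void setup(void)   { std::cout << "setup"   << std::endl; }
  virtual void execute(void) { std::cout << "execute" << std::endl; }
};

int main(void) {
  HelloModule m;
  m();             // runs setup() then execute()
  // m.execute();  // does not compile: execute() is protected
  return 0;
}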
virtual void setup(void);\ /* execution */ \ virtual void execute(void);\ - std::vector VA_label = {"V", "A"};\ };\ MODULE_REGISTER_NS(modname, T##modname, MContraction); diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 59994d0d..8529825b 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -85,6 +85,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/Load.hpp b/extras/Hadrons/Modules/MGauge/Load.hpp index 5ff6da0f..a338af79 100644 --- a/extras/Hadrons/Modules/MGauge/Load.hpp +++ b/extras/Hadrons/Modules/MGauge/Load.hpp @@ -58,6 +58,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/Random.hpp b/extras/Hadrons/Modules/MGauge/Random.hpp index a97d25cf..a07130e4 100644 --- a/extras/Hadrons/Modules/MGauge/Random.hpp +++ b/extras/Hadrons/Modules/MGauge/Random.hpp @@ -51,6 +51,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/StochEm.hpp b/extras/Hadrons/Modules/MGauge/StochEm.hpp index 12ce9fdc..bacb5172 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.hpp +++ b/extras/Hadrons/Modules/MGauge/StochEm.hpp @@ -60,6 +60,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/Unit.hpp b/extras/Hadrons/Modules/MGauge/Unit.hpp index 7cd15ef7..c1650cc7 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.hpp +++ b/extras/Hadrons/Modules/MGauge/Unit.hpp @@ -51,6 +51,7 @@ public: // dependencies/products virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp index 5d2c4a13..1f40dd48 100644 --- a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp +++ b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp @@ -74,6 +74,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp index fbe75c05..ab6a0184 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp +++ b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp @@ -37,6 +37,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.hpp b/extras/Hadrons/Modules/MScalar/FreeProp.hpp index 97cf288a..38372a0c 100644 --- a/extras/Hadrons/Modules/MScalar/FreeProp.hpp +++ b/extras/Hadrons/Modules/MScalar/FreeProp.hpp @@ -33,6 +33,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSink/Point.hpp 
b/extras/Hadrons/Modules/MSink/Point.hpp index b124e2e5..853a7c32 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -61,6 +61,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSink/Smear.hpp b/extras/Hadrons/Modules/MSink/Smear.hpp index c3973d2b..b51d2f49 100644 --- a/extras/Hadrons/Modules/MSink/Smear.hpp +++ b/extras/Hadrons/Modules/MSink/Smear.hpp @@ -62,6 +62,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index 8063d939..d8a4b95f 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -62,6 +62,7 @@ public: // dependencies/products virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index 5e16149e..b9813688 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -72,6 +72,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSource/SeqConserved.hpp b/extras/Hadrons/Modules/MSource/SeqConserved.hpp index 86a7dfb9..e8f91be1 100644 --- a/extras/Hadrons/Modules/MSource/SeqConserved.hpp +++ b/extras/Hadrons/Modules/MSource/SeqConserved.hpp @@ -83,6 +83,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSource/SeqGamma.hpp b/extras/Hadrons/Modules/MSource/SeqGamma.hpp index e2129a46..8f67f8fa 100644 --- a/extras/Hadrons/Modules/MSource/SeqGamma.hpp +++ b/extras/Hadrons/Modules/MSource/SeqGamma.hpp @@ -81,6 +81,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSource/Wall.hpp b/extras/Hadrons/Modules/MSource/Wall.hpp index 4de37e4d..57dee06d 100644 --- a/extras/Hadrons/Modules/MSource/Wall.hpp +++ b/extras/Hadrons/Modules/MSource/Wall.hpp @@ -73,6 +73,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MSource/Z2.hpp b/extras/Hadrons/Modules/MSource/Z2.hpp index a7f7a3e6..e2cc4f34 100644 --- a/extras/Hadrons/Modules/MSource/Z2.hpp +++ b/extras/Hadrons/Modules/MSource/Z2.hpp @@ -76,6 +76,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index b085eb8c..f8714d88 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -80,6 +80,7 @@ public: // dependency relation virtual 
std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp index 2799e5d0..9736ab54 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp @@ -64,6 +64,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution From 59aae5f5ec97133f4f9ba80f3d2f718284d9e7f7 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Sun, 3 Dec 2017 19:47:11 +0100 Subject: [PATCH 092/145] Hadrons: garbage collector clean temporaries --- extras/Hadrons/Environment.cc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index a6855862..27849bd7 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -422,6 +422,15 @@ Environment::executeProgram(const std::vector &p) } } } while (continueCollect); + // free temporaries + for (unsigned int i = 0; i < object_.size(); ++i) + { + if ((object_[i].storage == Storage::temporary) + and hasCreatedObject(i)) + { + freeObject(i); + } + } // any remaining objects in step i garbage collection schedule // is scheduled for step i + 1 if (i + 1 < p.size()) @@ -687,7 +696,7 @@ bool Environment::freeObject(const unsigned int address) { if (!hasOwners(address)) { - if (!isDryRun()) + if (!isDryRun() and hasCreatedObject(address)) { LOG(Message) << "Destroying object '" << object_[address].name << "'" << std::endl; From 01f00385a4460ea21e09c2beaa77bd20c7a78550 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Sun, 3 Dec 2017 19:47:40 +0100 Subject: [PATCH 093/145] Hadrons: genetic pair selection based on exponential probability --- extras/Hadrons/GeneticScheduler.hpp | 33 ++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/extras/Hadrons/GeneticScheduler.hpp b/extras/Hadrons/GeneticScheduler.hpp index d0c52596..3b0195e7 100644 --- a/extras/Hadrons/GeneticScheduler.hpp +++ b/extras/Hadrons/GeneticScheduler.hpp @@ -212,28 +212,23 @@ typename GeneticScheduler::GenePair GeneticScheduler::selectPair(void) std::vector prob; unsigned int ind; Gene *p1, *p2; + const double max = population_.rbegin()->first; + for (auto &c: population_) { - prob.push_back(1./c.first); - } - do - { - double probCpy; - - std::discrete_distribution dis1(prob.begin(), prob.end()); - auto rIt = population_.begin(); - ind = dis1(gen_); - std::advance(rIt, ind); - p1 = &(rIt->second); - probCpy = prob[ind]; - prob[ind] = 0.; - std::discrete_distribution dis2(prob.begin(), prob.end()); - rIt = population_.begin(); - std::advance(rIt, dis2(gen_)); - p2 = &(rIt->second); - prob[ind] = probCpy; - } while (p1 == p2); + prob.push_back(std::exp((c.first-1.)/max)); + } + std::discrete_distribution dis1(prob.begin(), prob.end()); + auto rIt = population_.begin(); + ind = dis1(gen_); + std::advance(rIt, ind); + p1 = &(rIt->second); + prob[ind] = 0.; + std::discrete_distribution dis2(prob.begin(), prob.end()); + rIt = population_.begin(); + std::advance(rIt, dis2(gen_)); + p2 = &(rIt->second); return std::make_pair(p1, p2); } From 3127b52c907d6055d5d8e044a1e3764cea8c9f6f Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Sun, 3 Dec 2017 19:48:34 +0100 Subject: [PATCH 094/145] bootstrap script does not destroy Eigen is 
working offline --- bootstrap.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/bootstrap.sh b/bootstrap.sh index dfb6735d..bdf748df 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -3,9 +3,7 @@ EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2' echo "-- deploying Eigen source..." -wget ${EIGEN_URL} --no-check-certificate -./scripts/update_eigen.sh `basename ${EIGEN_URL}` -rm `basename ${EIGEN_URL}` +wget ${EIGEN_URL} --no-check-certificate && ./scripts/update_eigen.sh `basename ${EIGEN_URL}` && rm `basename ${EIGEN_URL}` echo '-- generating Make.inc files...' ./scripts/filelist From ae3b7713a9b2d7c095e9e1dd8de396f506580a3f Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 5 Dec 2017 11:36:31 +0000 Subject: [PATCH 095/145] Cold start doesnt need RNG --- lib/qcd/utils/SUn.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/qcd/utils/SUn.h b/lib/qcd/utils/SUn.h index 8f0c0a7b..cdc6c961 100644 --- a/lib/qcd/utils/SUn.h +++ b/lib/qcd/utils/SUn.h @@ -746,7 +746,7 @@ template } } template - static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + static void ColdConfiguration(GaugeField &out){ typedef typename GaugeField::vector_type vector_type; typedef iSUnMatrix vMatrixType; typedef Lattice LatticeMatrixType; @@ -757,6 +757,10 @@ template PokeIndex(out,Umu,mu); } } + template + static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + ColdConfiguration(out); + } template static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){ From d93c6760ec850abb93ee3f94a3444ee0dba84c6f Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 5 Dec 2017 11:39:26 +0000 Subject: [PATCH 096/145] Faster code for split unsplit --- lib/lattice/Lattice_transfer.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index c7e2a507..32c15d22 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -822,6 +822,7 @@ void Grid_split(std::vector > & full,Lattice & split) // Loop over reordered data post A2A parallel_for(int c=0;c coor(ndim); for(int m=0;m > & full,Lattice & split) uint64_t lex_vec = lex_fvol_vec/fvol; // which node sets an adder to the coordinate - std::vector coor(ndim); Lexicographic::CoorFromIndex(coor, lex_fvol, ldims); coor[d] += m*ldims[d]; Lexicographic::IndexFromCoor(coor, lex_r, rdims); @@ -940,10 +940,11 @@ void Grid_unsplit(std::vector > & full,Lattice & split) { // Loop over reordered data post A2A - for(int c=0;c coor(ndim); for(int m=0;m > & full,Lattice & split) uint64_t lex_vec = lex_fvol_vec/fvol; // which node sets an adder to the coordinate - std::vector coor(ndim); Lexicographic::CoorFromIndex(coor, lex_fvol, ldims); coor[d] += m*ldims[d]; Lexicographic::IndexFromCoor(coor, lex_r, rdims); @@ -978,9 +978,9 @@ void Grid_unsplit(std::vector > & full,Lattice & split) lsites = full_grid->lSites(); for(int v=0;v Date: Tue, 5 Dec 2017 11:42:05 +0000 Subject: [PATCH 097/145] Faster RNG init --- lib/lattice/Lattice_rng.h | 35 +++++------------------------------ 1 file changed, 5 insertions(+), 30 deletions(-) diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 6dc50fd2..11d8e325 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -77,9 +77,6 @@ namespace Grid { // merge of April 11 2017 -//<<<<<<< HEAD - - // this function is necessary for the LS vectorised field inline int RNGfillable_general(GridBase *coarse,GridBase *fine) { @@ -91,7 
+88,6 @@ namespace Grid { // all further divisions are local for(int d=0;d_processors[d]==1); for(int d=0;d_processors[d] == fine->_processors[d+lowerdims]); - // then divide the number of local sites // check that the total number of sims agree, meanse the iSites are the same @@ -102,27 +98,6 @@ namespace Grid { return fine->lSites() / coarse->lSites(); } - - /* - // Wrap seed_seq to give common interface with random_device - class fixedSeed { - public: - typedef std::seed_seq::result_type result_type; - std::seed_seq src; - - fixedSeed(const std::vector &seeds) : src(seeds.begin(),seeds.end()) {}; - - result_type operator () (void){ - std::vector list(1); - src.generate(list.begin(),list.end()); - return list[0]; - } - - }; - -======= ->>>>>>> develop - */ // real scalars are one component template @@ -171,7 +146,7 @@ namespace Grid { // support for parallel init /////////////////////// #ifdef RNG_FAST_DISCARD - static void Skip(RngEngine &eng) + static void Skip(RngEngine &eng,uint64_t site) { ///////////////////////////////////////////////////////////////////////////////////// // Skip by 2^40 elements between successive lattice sites @@ -184,7 +159,8 @@ namespace Grid { // and margin of safety is orders of magnitude. // We could hack Sitmo to skip in the higher order words of state if necessary ///////////////////////////////////////////////////////////////////////////////////// - uint64_t skip = 0x1; skip = skip<<40; + uint64_t skip = site; + skip = skip<<40; eng.discard(skip); } #endif @@ -411,9 +387,7 @@ namespace Grid { int rank,o_idx,i_idx; // Everybody loops over global volume. - for(int gidx=0;gidx<_grid->_gsites;gidx++){ - - Skip(master_engine); // Skip to next RNG sequence + parallel_for(int gidx=0;gidx<_grid->_gsites;gidx++){ // Where is it? _grid->GlobalIndexToGlobalCoor(gidx,gcoor); @@ -423,6 +397,7 @@ namespace Grid { if( rank == _grid->ThisRank() ){ int l_idx=generator_idx(o_idx,i_idx); _generators[l_idx] = master_engine; + Skip(_generators[l_idx],gidx); // Skip to next RNG sequence } } From a14038051fbeed58692fe1342f746c045e093585 Mon Sep 17 00:00:00 2001 From: paboyle Date: Tue, 5 Dec 2017 11:43:25 +0000 Subject: [PATCH 098/145] Improved AllToAll asserts --- lib/communicator/Communicator_base.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 73ea6165..548515cd 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -276,10 +276,11 @@ class CartesianCommunicator { assert(in.size()==out.size()); uint64_t bytes=sizeof(T); uint64_t words=in.size()/numnode; - + // std:: cout << "AllToAll buffer size "<< in.size()*sizeof(T)< Date: Tue, 5 Dec 2017 13:01:10 +0000 Subject: [PATCH 099/145] Improved parallel RNG init --- lib/lattice/Lattice_rng.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 11d8e325..d5190e63 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -159,9 +159,11 @@ namespace Grid { // and margin of safety is orders of magnitude. // We could hack Sitmo to skip in the higher order words of state if necessary ///////////////////////////////////////////////////////////////////////////////////// + // uint64_t skip = site+1; // Old init Skipped then drew. 
Checked compat with faster init uint64_t skip = site; skip = skip<<40; eng.discard(skip); + // std::cout << " Engine " < Date: Tue, 5 Dec 2017 13:07:31 +0000 Subject: [PATCH 100/145] Clean up of test --- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 34 ++++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index 06df58c6..7e11d8d1 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -81,21 +81,20 @@ int main (int argc, char ** argv) GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); - + std::cout << GridLogMessage << "Made the grids"< seeds({1,2,3,4}); - GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); - GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); std::vector src(nrhs,FGrid); std::vector src_chk(nrhs,FGrid); std::vector result(nrhs,FGrid); FermionField tmp(FGrid); + std::cout << GridLogMessage << "Made the Fermion Fields"< Date: Tue, 5 Dec 2017 14:12:22 +0000 Subject: [PATCH 101/145] Threading improvement --- lib/lattice/Lattice_rng.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index d5190e63..edf9dd23 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -385,13 +385,14 @@ namespace Grid { // MT implementation does not implement fast discard even though // in principle this is possible //////////////////////////////////////////////// - std::vector gcoor; - int rank,o_idx,i_idx; // Everybody loops over global volume. parallel_for(int gidx=0;gidx<_grid->_gsites;gidx++){ // Where is it? 
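Patches 097, 099 and 101 together turn the RNG seeding into a parallel skip-ahead: each global site copies the single master engine and discards gidx*2^40 numbers, so the per-site streams are disjoint and can be initialised independently, and the surrounding hunk makes the scratch variables loop-local so that every thread of the parallel_for owns private copies. A minimal sketch of the skip-ahead step, with a hypothetical helper name and a generic engine type (anything with a cheap discard(), e.g. a counter-based engine such as Sitmo):

// Sketch only: engineForSite is a hypothetical helper, not Grid's API.
template <class Engine>
Engine engineForSite(const Engine &master, uint64_t gidx)
{
    Engine e = master;      // start from the one master state
    uint64_t skip = gidx;   // one disjoint sub-stream per global site
    skip = skip << 40;      // streams spaced 2^40 draws apart, as in Lattice_rng.h
    e.discard(skip);        // cheap for engines with fast discard (RNG_FAST_DISCARD)
    return e;               // no shared state, so callable from parallel_for
}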
+ int rank,o_idx,i_idx; + std::vector gcoor; + _grid->GlobalIndexToGlobalCoor(gidx,gcoor); _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); From 542225195903b5a54bd2b2768c8153b29fba5230 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 5 Dec 2017 15:31:59 +0100 Subject: [PATCH 102/145] Hadrons: execution part moved in a new virtual machine class --- extras/Hadrons/Application.cc | 41 +- extras/Hadrons/Application.hpp | 11 +- extras/Hadrons/Environment.cc | 387 ++--------------- extras/Hadrons/Environment.hpp | 124 +----- extras/Hadrons/Global.hpp | 4 + extras/Hadrons/Makefile.am | 6 +- extras/Hadrons/Module.cc | 8 +- extras/Hadrons/Module.hpp | 8 +- extras/Hadrons/Modules/MContraction/Meson.hpp | 2 +- extras/Hadrons/VirtualMachine.cc | 388 ++++++++++++++++++ extras/Hadrons/VirtualMachine.hpp | 164 ++++++++ 11 files changed, 647 insertions(+), 496 deletions(-) create mode 100644 extras/Hadrons/VirtualMachine.cc create mode 100644 extras/Hadrons/VirtualMachine.hpp diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 0860437b..af67dff3 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -73,12 +73,6 @@ Application::Application(const std::string parameterFileName) parameterFileName_ = parameterFileName; } -// environment shortcut //////////////////////////////////////////////////////// -Environment & Application::env(void) const -{ - return Environment::getInstance(); -} - // access ////////////////////////////////////////////////////////////////////// void Application::setPar(const Application::GlobalPar &par) { @@ -94,12 +88,13 @@ const Application::GlobalPar & Application::getPar(void) // execute ///////////////////////////////////////////////////////////////////// void Application::run(void) { - if (!parameterFileName_.empty() and (env().getNModule() == 0)) + if (!parameterFileName_.empty() and (vm().getNModule() == 0)) { parseParameterFile(parameterFileName_); } - env().checkGraph(); + //vm().checkGraph(); env().printContent(); + vm().printContent(); if (!scheduled_) { schedule(); @@ -137,7 +132,7 @@ void Application::parseParameterFile(const std::string parameterFileName) do { read(reader, "id", id); - env().createModule(id.name, id.type, reader); + vm().createModule(id.name, id.type, reader); } while (reader.nextElement("module")); pop(reader); pop(reader); @@ -147,7 +142,7 @@ void Application::saveParameterFile(const std::string parameterFileName) { XmlWriter writer(parameterFileName); ObjectId id; - const unsigned int nMod = env().getNModule(); + const unsigned int nMod = vm().getNModule(); LOG(Message) << "Saving application to '" << parameterFileName << "'..." 
<< std::endl; write(writer, "parameters", getPar()); @@ -155,10 +150,10 @@ void Application::saveParameterFile(const std::string parameterFileName) for (unsigned int i = 0; i < nMod; ++i) { push(writer, "module"); - id.name = env().getModuleName(i); - id.type = env().getModule(i)->getRegisteredName(); + id.name = vm().getModuleName(i); + id.type = vm().getModule(i)->getRegisteredName(); write(writer, "id", id); - env().getModule(i)->saveParameters(writer, "options"); + vm().getModule(i)->saveParameters(writer, "options"); pop(writer); } pop(writer); @@ -178,9 +173,9 @@ GeneticScheduler::ObjFunc memPeak = \ \ msg = HadronsLogMessage.isActive();\ HadronsLogMessage.Active(false);\ - env().dryRun(true);\ - memPeak = env().executeProgram(program);\ - env().dryRun(false);\ + vm().dryRun(true);\ + memPeak = vm().executeProgram(program);\ + vm().dryRun(false);\ env().freeAll();\ HadronsLogMessage.Active(msg);\ \ @@ -193,7 +188,7 @@ void Application::schedule(void) // build module dependency graph LOG(Message) << "Building module graph..." << std::endl; - auto graph = env().makeModuleGraph(); + auto graph = vm().makeModuleGraph(); LOG(Debug) << "Module graph:" << std::endl; LOG(Debug) << graph << std::endl; auto con = graph.getConnectedComponents(); @@ -273,7 +268,7 @@ void Application::saveSchedule(const std::string filename) << std::endl; for (auto address: program_) { - program.push_back(env().getModuleName(address)); + program.push_back(vm().getModuleName(address)); } write(writer, "schedule", program); } @@ -291,7 +286,7 @@ void Application::loadSchedule(const std::string filename) program_.clear(); for (auto &name: program) { - program_.push_back(env().getModuleAddress(name)); + program_.push_back(vm().getModuleAddress(name)); } scheduled_ = true; memPeak_ = memPeak(program_); @@ -308,7 +303,7 @@ void Application::printSchedule(void) for (unsigned int i = 0; i < program_.size(); ++i) { LOG(Message) << std::setw(4) << i + 1 << ": " - << env().getModuleName(program_[i]) << std::endl; + << vm().getModuleName(program_[i]) << std::endl; } } @@ -321,8 +316,8 @@ void Application::configLoop(void) { LOG(Message) << BIG_SEP << " Starting measurement for trajectory " << t << " " << BIG_SEP << std::endl; - env().setTrajectory(t); - env().executeProgram(program_); + vm().setTrajectory(t); + vm().executeProgram(program_); } LOG(Message) << BIG_SEP << " End of measurement " << BIG_SEP << std::endl; env().freeAll(); @@ -331,7 +326,7 @@ void Application::configLoop(void) // memory profile ////////////////////////////////////////////////////////////// void Application::memoryProfile(void) { - auto graph = env().makeModuleGraph(); + auto graph = vm().makeModuleGraph(); auto program = graph.topoSort(); bool msg; diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp index 8b11b0c7..66488206 100644 --- a/extras/Hadrons/Application.hpp +++ b/extras/Hadrons/Application.hpp @@ -31,8 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo #define Hadrons_Application_hpp_ #include -#include -#include +#include #include BEGIN_HADRONS_NAMESPACE @@ -100,7 +99,9 @@ public: void configLoop(void); private: // environment shortcut - Environment & env(void) const; + DEFINE_ENV_ALIAS; + // virtual machine shortcut + DEFINE_VM_ALIAS; // memory profile void memoryProfile(void); private: @@ -119,14 +120,14 @@ private: template void Application::createModule(const std::string name) { - env().createModule(name); + vm().createModule(name); } template void 
Application::createModule(const std::string name, const typename M::Par &par) { - env().createModule(name, par); + vm().createModule(name, par); } END_HADRONS_NAMESPACE diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index 27849bd7..ea41f343 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -56,38 +56,6 @@ Environment::Environment(void) rng4d_.reset(new GridParallelRNG(grid4d_.get())); } -// dry run ///////////////////////////////////////////////////////////////////// -void Environment::dryRun(const bool isDry) -{ - dryRun_ = isDry; -} - -bool Environment::isDryRun(void) const -{ - return dryRun_; -} - -void Environment::memoryProfile(const bool doMemoryProfile) -{ - memoryProfile_ = doMemoryProfile; -} - -bool Environment::doMemoryProfile(void) const -{ - return memoryProfile_; -} - -// trajectory number /////////////////////////////////////////////////////////// -void Environment::setTrajectory(const unsigned int traj) -{ - traj_ = traj; -} - -unsigned int Environment::getTrajectory(void) const -{ - return traj_; -} - // grids /////////////////////////////////////////////////////////////////////// void Environment::createGrid(const unsigned int Ls) { @@ -153,6 +121,11 @@ int Environment::getDim(const unsigned int mu) const return dim_[mu]; } +unsigned long int Environment::getLocalVolume(void) const +{ + return locVol_; +} + // random number generator ///////////////////////////////////////////////////// void Environment::setSeed(const std::vector &seed) { @@ -164,313 +137,6 @@ GridParallelRNG * Environment::get4dRng(void) const return rng4d_.get(); } -// module management /////////////////////////////////////////////////////////// -void Environment::pushModule(Environment::ModPt &pt) -{ - std::string name = pt->getName(); - - if (!hasModule(name)) - { - std::vector inputAddress; - unsigned int address; - ModuleInfo m; - - m.data = std::move(pt); - m.type = typeIdPt(*m.data.get()); - m.name = name; - auto input = m.data->getInput(); - for (auto &in: input) - { - if (!hasObject(in)) - { - addObject(in , -1); - } - m.input.push_back(objectAddress_[in]); - } - auto output = m.data->getOutput(); - module_.push_back(std::move(m)); - address = static_cast(module_.size() - 1); - moduleAddress_[name] = address; - for (auto &out: output) - { - if (!hasObject(out)) - { - addObject(out, address); - } - else - { - if (object_[objectAddress_[out]].module < 0) - { - object_[objectAddress_[out]].module = address; - } - else - { - HADRON_ERROR("object '" + out - + "' is already produced by module '" - + module_[object_[getObjectAddress(out)].module].name - + "' (while pushing module '" + name + "')"); - } - } - } - } - else - { - HADRON_ERROR("module '" + name + "' already exists"); - } -} - -unsigned int Environment::getNModule(void) const -{ - return module_.size(); -} - -void Environment::createModule(const std::string name, const std::string type, - XmlReader &reader) -{ - auto &factory = ModuleFactory::getInstance(); - auto pt = factory.create(type, name); - - pt->parseParameters(reader, "options"); - pushModule(pt); -} - -ModuleBase * Environment::getModule(const unsigned int address) const -{ - if (hasModule(address)) - { - return module_[address].data.get(); - } - else - { - HADRON_ERROR("no module with address " + std::to_string(address)); - } -} - -ModuleBase * Environment::getModule(const std::string name) const -{ - return getModule(getModuleAddress(name)); -} - -unsigned int Environment::getModuleAddress(const std::string name) 
const -{ - if (hasModule(name)) - { - return moduleAddress_.at(name); - } - else - { - HADRON_ERROR("no module with name '" + name + "'"); - } -} - -std::string Environment::getModuleName(const unsigned int address) const -{ - if (hasModule(address)) - { - return module_[address].name; - } - else - { - HADRON_ERROR("no module with address " + std::to_string(address)); - } -} - -std::string Environment::getModuleType(const unsigned int address) const -{ - if (hasModule(address)) - { - return typeName(module_[address].type); - } - else - { - HADRON_ERROR("no module with address " + std::to_string(address)); - } -} - -std::string Environment::getModuleType(const std::string name) const -{ - return getModuleType(getModuleAddress(name)); -} - -std::string Environment::getModuleNamespace(const unsigned int address) const -{ - std::string type = getModuleType(address), ns; - - auto pos2 = type.rfind("::"); - auto pos1 = type.rfind("::", pos2 - 2); - - return type.substr(pos1 + 2, pos2 - pos1 - 2); -} - -std::string Environment::getModuleNamespace(const std::string name) const -{ - return getModuleNamespace(getModuleAddress(name)); -} - -bool Environment::hasModule(const unsigned int address) const -{ - return (address < module_.size()); -} - -bool Environment::hasModule(const std::string name) const -{ - return (moduleAddress_.find(name) != moduleAddress_.end()); -} - -Graph Environment::makeModuleGraph(void) const -{ - Graph moduleGraph; - - for (unsigned int i = 0; i < module_.size(); ++i) - { - moduleGraph.addVertex(i); - for (auto &j: module_[i].input) - { - moduleGraph.addEdge(object_[j].module, i); - } - } - - return moduleGraph; -} - -void Environment::checkGraph(void) const -{ - for (auto &o: object_) - { - if (o.module < 0) - { - HADRON_ERROR("object '" + o.name + "' does not have a creator"); - } - } -} - -#define BIG_SEP "===============" -#define SEP "---------------" -#define MEM_MSG(size)\ -sizeString((size)*locVol_) << " (" << sizeString(size) << "/site)" - -Environment::Size -Environment::executeProgram(const std::vector &p) -{ - Size memPeak = 0, sizeBefore, sizeAfter; - std::vector> freeProg; - bool continueCollect, nothingFreed; - - // build garbage collection schedule - LOG(Debug) << "Building garbage collection schedule..." << std::endl; - freeProg.resize(p.size()); - for (unsigned int i = 0; i < object_.size(); ++i) - { - auto pred = [i, this](const unsigned int j) - { - auto &in = module_[j].input; - auto it = std::find(in.begin(), in.end(), i); - - return (it != in.end()) or (j == object_[i].module); - }; - auto it = std::find_if(p.rbegin(), p.rend(), pred); - if (it != p.rend()) - { - freeProg[std::distance(it, p.rend()) - 1].insert(i); - } - } - - // program execution - LOG(Debug) << "Executing program..." << std::endl; - for (unsigned int i = 0; i < p.size(); ++i) - { - // execute module - if (!isDryRun()) - { - LOG(Message) << SEP << " Measurement step " << i+1 << "/" - << p.size() << " (module '" << module_[p[i]].name - << "') " << SEP << std::endl; - } - (*module_[p[i]].data)(); - sizeBefore = getTotalSize(); - // print used memory after execution - if (!isDryRun()) - { - LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore) - << std::endl; - } - if (sizeBefore > memPeak) - { - memPeak = sizeBefore; - } - // garbage collection for step i - if (!isDryRun()) - { - LOG(Message) << "Garbage collection..." 
<< std::endl; - } - nothingFreed = true; - do - { - continueCollect = false; - auto toFree = freeProg[i]; - for (auto &j: toFree) - { - // continue garbage collection while there are still - // objects without owners - continueCollect = continueCollect or !hasOwners(j); - if(freeObject(j)) - { - // if an object has been freed, remove it from - // the garbage collection schedule - freeProg[i].erase(j); - nothingFreed = false; - } - } - } while (continueCollect); - // free temporaries - for (unsigned int i = 0; i < object_.size(); ++i) - { - if ((object_[i].storage == Storage::temporary) - and hasCreatedObject(i)) - { - freeObject(i); - } - } - // any remaining objects in step i garbage collection schedule - // is scheduled for step i + 1 - if (i + 1 < p.size()) - { - for (auto &j: freeProg[i]) - { - freeProg[i + 1].insert(j); - } - } - // print used memory after garbage collection if necessary - if (!isDryRun()) - { - sizeAfter = getTotalSize(); - if (sizeBefore != sizeAfter) - { - LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter) - << std::endl; - } - else - { - LOG(Message) << "Nothing to free" << std::endl; - } - } - } - - return memPeak; -} - -Environment::Size Environment::executeProgram(const std::vector &p) -{ - std::vector pAddress; - - for (auto &n: p) - { - pAddress.push_back(getModuleAddress(n)); - } - - return executeProgram(pAddress); -} - // general memory management /////////////////////////////////////////////////// void Environment::addObject(const std::string name, const int moduleAddress) { @@ -490,6 +156,17 @@ void Environment::addObject(const std::string name, const int moduleAddress) } } +void Environment::setObjectModule(const unsigned int objAddress, + const int modAddress) +{ + object_[objAddress].module = modAddress; +} + +unsigned int Environment::getMaxAddress(void) const +{ + return object_.size(); +} + unsigned int Environment::getObjectAddress(const std::string name) const { if (hasObject(name)) @@ -555,7 +232,24 @@ Environment::Size Environment::getObjectSize(const std::string name) const return getObjectSize(getObjectAddress(name)); } -unsigned int Environment::getObjectModule(const unsigned int address) const +Environment::Storage Environment::getObjectStorage(const unsigned int address) const +{ + if (hasObject(address)) + { + return object_[address].storage; + } + else + { + HADRON_ERROR("no object with address " + std::to_string(address)); + } +} + +Environment::Storage Environment::getObjectStorage(const std::string name) const +{ + return getObjectStorage(getObjectAddress(name)); +} + +int Environment::getObjectModule(const unsigned int address) const { if (hasObject(address)) { @@ -567,7 +261,7 @@ unsigned int Environment::getObjectModule(const unsigned int address) const } } -unsigned int Environment::getObjectModule(const std::string name) const +int Environment::getObjectModule(const std::string name) const { return getObjectModule(getObjectAddress(name)); } @@ -696,7 +390,7 @@ bool Environment::freeObject(const unsigned int address) { if (!hasOwners(address)) { - if (!isDryRun() and hasCreatedObject(address)) + if (hasCreatedObject(address)) { LOG(Message) << "Destroying object '" << object_[address].name << "'" << std::endl; @@ -732,14 +426,9 @@ void Environment::freeAll(void) } } -void Environment::printContent(void) +// print environment content /////////////////////////////////////////////////// +void Environment::printContent(void) const { - LOG(Debug) << "Modules: " << std::endl; - for (unsigned int i = 0; i < module_.size(); ++i) 
- { - LOG(Debug) << std::setw(4) << i << ": " - << getModuleName(i) << std::endl; - } LOG(Debug) << "Objects: " << std::endl; for (unsigned int i = 0; i < object_.size(); ++i) { diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 58e035ac..9d482923 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -31,20 +31,12 @@ See the full license in the file "LICENSE" in the top level distribution directo #define Hadrons_Environment_hpp_ #include -#include - -#ifndef SITE_SIZE_TYPE -#define SITE_SIZE_TYPE unsigned int -#endif BEGIN_HADRONS_NAMESPACE /****************************************************************************** * Global environment * ******************************************************************************/ -// forward declaration of Module -class ModuleBase; - class Object { public: @@ -66,26 +58,22 @@ private: std::unique_ptr objPt_{nullptr}; }; +#define DEFINE_ENV_ALIAS \ +inline Environment & env(void) const\ +{\ + return Environment::getInstance();\ +} + class Environment { SINGLETON(Environment); public: typedef SITE_SIZE_TYPE Size; - typedef std::unique_ptr ModPt; typedef std::unique_ptr GridPt; typedef std::unique_ptr GridRbPt; typedef std::unique_ptr RngPt; - typedef std::unique_ptr LatticePt; enum class Storage {object, cache, temporary}; private: - struct ModuleInfo - { - const std::type_info *type{nullptr}; - std::string name; - ModPt data{nullptr}; - std::vector input; - size_t maxAllocated; - }; struct ObjInfo { Size size{0}; @@ -98,53 +86,17 @@ private: std::unique_ptr data{nullptr}; }; public: - // dry run - void dryRun(const bool isDry); - bool isDryRun(void) const; - void memoryProfile(const bool doMemoryProfile); - bool doMemoryProfile(void) const; - // trajectory number - void setTrajectory(const unsigned int traj); - unsigned int getTrajectory(void) const; // grids void createGrid(const unsigned int Ls); GridCartesian * getGrid(const unsigned int Ls = 1) const; GridRedBlackCartesian * getRbGrid(const unsigned int Ls = 1) const; std::vector getDim(void) const; int getDim(const unsigned int mu) const; + unsigned long int getLocalVolume(void) const; unsigned int getNd(void) const; // random number generator void setSeed(const std::vector &seed); GridParallelRNG * get4dRng(void) const; - // module management - void pushModule(ModPt &pt); - template - void createModule(const std::string name); - template - void createModule(const std::string name, - const typename M::Par &par); - void createModule(const std::string name, - const std::string type, - XmlReader &reader); - unsigned int getNModule(void) const; - ModuleBase * getModule(const unsigned int address) const; - ModuleBase * getModule(const std::string name) const; - template - M * getModule(const unsigned int address) const; - template - M * getModule(const std::string name) const; - unsigned int getModuleAddress(const std::string name) const; - std::string getModuleName(const unsigned int address) const; - std::string getModuleType(const unsigned int address) const; - std::string getModuleType(const std::string name) const; - std::string getModuleNamespace(const unsigned int address) const; - std::string getModuleNamespace(const std::string name) const; - bool hasModule(const unsigned int address) const; - bool hasModule(const std::string name) const; - Graph makeModuleGraph(void) const; - void checkGraph(void) const; - Size executeProgram(const std::vector &p); - Size executeProgram(const std::vector &p); // general memory management void 
addObject(const std::string name, const int moduleAddress = -1); @@ -153,18 +105,23 @@ public: const Storage storage, const unsigned int Ls, P &&pt); + void setObjectModule(const unsigned int objAddress, + const int modAddress); template T * getObject(const unsigned int address) const; template T * getObject(const std::string name) const; + unsigned int getMaxAddress(void) const; unsigned int getObjectAddress(const std::string name) const; std::string getObjectName(const unsigned int address) const; std::string getObjectType(const unsigned int address) const; std::string getObjectType(const std::string name) const; Size getObjectSize(const unsigned int address) const; Size getObjectSize(const std::string name) const; - unsigned int getObjectModule(const unsigned int address) const; - unsigned int getObjectModule(const std::string name) const; + Storage getObjectStorage(const unsigned int address) const; + Storage getObjectStorage(const std::string name) const; + int getObjectModule(const unsigned int address) const; + int getObjectModule(const std::string name) const; unsigned int getObjectLs(const unsigned int address) const; unsigned int getObjectLs(const std::string name) const; bool hasObject(const unsigned int address) const; @@ -187,11 +144,11 @@ public: bool freeObject(const unsigned int address); bool freeObject(const std::string name); void freeAll(void); - void printContent(void); + // print environment content + void printContent(void) const; private: // general - bool dryRun_{false}, memoryProfile_{false}; - unsigned int traj_, locVol_; + unsigned long int locVol_; // grids std::vector dim_; GridPt grid4d_; @@ -201,12 +158,6 @@ private: unsigned int nd_; // random number generator RngPt rng4d_; - // module and related maps - std::vector module_; - std::map moduleAddress_; - std::string currentModule_{""}; - // lattice store - std::map lattice_; // object store std::vector object_; std::map objectAddress_; @@ -243,46 +194,7 @@ void Holder::reset(T *pt) /****************************************************************************** * Environment template implementation * ******************************************************************************/ -// module management /////////////////////////////////////////////////////////// -template -void Environment::createModule(const std::string name) -{ - ModPt pt(new M(name)); - - pushModule(pt); -} - -template -void Environment::createModule(const std::string name, - const typename M::Par &par) -{ - ModPt pt(new M(name)); - - static_cast(pt.get())->setPar(par); - pushModule(pt); -} - -template -M * Environment::getModule(const unsigned int address) const -{ - if (auto *pt = dynamic_cast(getModule(address))) - { - return pt; - } - else - { - HADRON_ERROR("module '" + module_[address].name - + "' does not have type " + typeid(M).name() - + "(object type: " + getModuleType(address) + ")"); - } -} - -template -M * Environment::getModule(const std::string name) const -{ - return getModule(getModuleAddress(name)); -} - +// general memory management /////////////////////////////////////////////////// template void Environment::createObject(const std::string name, const Environment::Storage storage, diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index 371256e8..1f0ce201 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -35,6 +35,10 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include +#ifndef SITE_SIZE_TYPE +#define SITE_SIZE_TYPE unsigned int 
+#endif + #define BEGIN_HADRONS_NAMESPACE \ namespace Grid {\ using namespace QCD;\ diff --git a/extras/Hadrons/Makefile.am b/extras/Hadrons/Makefile.am index 9cb23600..826cb158 100644 --- a/extras/Hadrons/Makefile.am +++ b/extras/Hadrons/Makefile.am @@ -8,7 +8,8 @@ libHadrons_a_SOURCES = \ Application.cc \ Environment.cc \ Global.cc \ - Module.cc + Module.cc \ + VirtualMachine.cc libHadrons_adir = $(pkgincludedir)/Hadrons nobase_libHadrons_a_HEADERS = \ $(modules_hpp) \ @@ -20,7 +21,8 @@ nobase_libHadrons_a_HEADERS = \ Graph.hpp \ Module.hpp \ Modules.hpp \ - ModuleFactory.hpp + ModuleFactory.hpp \ + VirtualMachine.hpp HadronsXmlRun_SOURCES = HadronsXmlRun.cc HadronsXmlRun_LDADD = libHadrons.a -lGrid diff --git a/extras/Hadrons/Module.cc b/extras/Hadrons/Module.cc index 2549a931..bf596bfc 100644 --- a/extras/Hadrons/Module.cc +++ b/extras/Hadrons/Module.cc @@ -39,7 +39,6 @@ using namespace Hadrons; // constructor ///////////////////////////////////////////////////////////////// ModuleBase::ModuleBase(const std::string name) : name_(name) -, env_(Environment::getInstance()) {} // access ////////////////////////////////////////////////////////////////////// @@ -48,11 +47,6 @@ std::string ModuleBase::getName(void) const return name_; } -Environment & ModuleBase::env(void) const -{ - return env_; -} - // get factory registration name if available std::string ModuleBase::getRegisteredName(void) { @@ -64,7 +58,7 @@ std::string ModuleBase::getRegisteredName(void) void ModuleBase::operator()(void) { setup(); - if (!env().isDryRun()) + if (!vm().isDryRun()) { execute(); } diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 017a9172..d1910c9b 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -31,7 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo #define Hadrons_Module_hpp_ #include -#include +#include BEGIN_HADRONS_NAMESPACE @@ -148,7 +148,6 @@ public: virtual ~ModuleBase(void) = default; // access std::string getName(void) const; - Environment &env(void) const; // get factory registration name if available virtual std::string getRegisteredName(void); // dependencies/products @@ -163,9 +162,12 @@ protected: // setup virtual void setup(void) {}; virtual void execute(void) = 0; + // environment shortcut + DEFINE_ENV_ALIAS; + // virtual machine shortcut + DEFINE_VM_ALIAS; private: std::string name_; - Environment &env_; }; // derived class, templating the parameter class diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 31640b7c..7c0012d2 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -211,7 +211,7 @@ void TMeson::execute(void) Gamma gSrc(gammaList[i].second); std::string ns; - ns = env().getModuleNamespace(env().getObjectModule(par().sink)); + ns = vm().getModuleNamespace(env().getObjectModule(par().sink)); if (ns == "MSource") { PropagatorField1 &sink = envGet(PropagatorField1, par().sink); diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc new file mode 100644 index 00000000..f09e2710 --- /dev/null +++ b/extras/Hadrons/VirtualMachine.cc @@ -0,0 +1,388 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/VirtualMachine.cc + +Copyright (C) 2017 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or 
modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include + +using namespace Grid; +using namespace QCD; +using namespace Hadrons; + +/****************************************************************************** + * VirtualMachine implementation * + ******************************************************************************/ +// dry run ///////////////////////////////////////////////////////////////////// +void VirtualMachine::dryRun(const bool isDry) +{ + dryRun_ = isDry; +} + +bool VirtualMachine::isDryRun(void) const +{ + return dryRun_; +} + +void VirtualMachine::memoryProfile(const bool doMemoryProfile) +{ + memoryProfile_ = doMemoryProfile; +} + +bool VirtualMachine::doMemoryProfile(void) const +{ + return memoryProfile_; +} + +// trajectory counter ////////////////////////////////////////////////////////// +void VirtualMachine::setTrajectory(const unsigned int traj) +{ + traj_ = traj; +} + +unsigned int VirtualMachine::getTrajectory(void) const +{ + return traj_; +} + +// module management /////////////////////////////////////////////////////////// +void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) +{ + std::string name = pt->getName(); + + if (!hasModule(name)) + { + std::vector inputAddress; + unsigned int address; + ModuleInfo m; + + m.data = std::move(pt); + m.type = typeIdPt(*m.data.get()); + m.name = name; + auto input = m.data->getInput(); + for (auto &in: input) + { + if (!env().hasObject(in)) + { + env().addObject(in , -1); + } + m.input.push_back(env().getObjectAddress(in)); + } + auto output = m.data->getOutput(); + module_.push_back(std::move(m)); + address = static_cast(module_.size() - 1); + moduleAddress_[name] = address; + for (auto &out: output) + { + if (!env().hasObject(out)) + { + env().addObject(out, address); + } + else + { + if (env().getObjectModule(env().getObjectAddress(out)) < 0) + { + env().setObjectModule(env().getObjectAddress(out), address); + } + else + { + HADRON_ERROR("object '" + out + + "' is already produced by module '" + + module_[env().getObjectModule(out)].name + + "' (while pushing module '" + name + "')"); + } + } + } + } + else + { + HADRON_ERROR("module '" + name + "' already exists"); + } +} + +unsigned int VirtualMachine::getNModule(void) const +{ + return module_.size(); +} + +void VirtualMachine::createModule(const std::string name, const std::string type, + XmlReader &reader) +{ + auto &factory = ModuleFactory::getInstance(); + auto pt = factory.create(type, name); + + pt->parseParameters(reader, "options"); + pushModule(pt); +} + +ModuleBase * VirtualMachine::getModule(const unsigned int address) const +{ + if (hasModule(address)) + { + return module_[address].data.get(); + } + else + { + HADRON_ERROR("no module with address " + 
std::to_string(address)); + } +} + +ModuleBase * VirtualMachine::getModule(const std::string name) const +{ + return getModule(getModuleAddress(name)); +} + +unsigned int VirtualMachine::getModuleAddress(const std::string name) const +{ + if (hasModule(name)) + { + return moduleAddress_.at(name); + } + else + { + HADRON_ERROR("no module with name '" + name + "'"); + } +} + +std::string VirtualMachine::getModuleName(const unsigned int address) const +{ + if (hasModule(address)) + { + return module_[address].name; + } + else + { + HADRON_ERROR("no module with address " + std::to_string(address)); + } +} + +std::string VirtualMachine::getModuleType(const unsigned int address) const +{ + if (hasModule(address)) + { + return typeName(module_[address].type); + } + else + { + HADRON_ERROR("no module with address " + std::to_string(address)); + } +} + +std::string VirtualMachine::getModuleType(const std::string name) const +{ + return getModuleType(getModuleAddress(name)); +} + +std::string VirtualMachine::getModuleNamespace(const unsigned int address) const +{ + std::string type = getModuleType(address), ns; + + auto pos2 = type.rfind("::"); + auto pos1 = type.rfind("::", pos2 - 2); + + return type.substr(pos1 + 2, pos2 - pos1 - 2); +} + +std::string VirtualMachine::getModuleNamespace(const std::string name) const +{ + return getModuleNamespace(getModuleAddress(name)); +} + +bool VirtualMachine::hasModule(const unsigned int address) const +{ + return (address < module_.size()); +} + +bool VirtualMachine::hasModule(const std::string name) const +{ + return (moduleAddress_.find(name) != moduleAddress_.end()); +} + +Graph VirtualMachine::makeModuleGraph(void) const +{ + Graph moduleGraph; + + for (unsigned int i = 0; i < module_.size(); ++i) + { + moduleGraph.addVertex(i); + for (auto &j: module_[i].input) + { + moduleGraph.addEdge(env().getObjectModule(j), i); + } + } + + return moduleGraph; +} + +// void VirtualMachine::checkGraph(void) const +// { +// for (auto &o: object_) +// { +// if (o.module < 0) +// { +// HADRON_ERROR("object '" + o.name + "' does not have a creator"); +// } +// } +// } + +// general execution /////////////////////////////////////////////////////////// +#define BIG_SEP "===============" +#define SEP "---------------" +#define MEM_MSG(size)\ +sizeString((size)*env().getLocalVolume()) << " (" << sizeString(size) << "/site)" + +VirtualMachine::Size +VirtualMachine::executeProgram(const std::vector &p) +{ + Size memPeak = 0, sizeBefore, sizeAfter; + std::vector> freeProg; + bool continueCollect, nothingFreed; + + // build garbage collection schedule + LOG(Debug) << "Building garbage collection schedule..." << std::endl; + freeProg.resize(p.size()); + for (unsigned int i = 0; i < env().getMaxAddress(); ++i) + { + auto pred = [i, this](const unsigned int j) + { + auto &in = module_[j].input; + auto it = std::find(in.begin(), in.end(), i); + + return (it != in.end()) or (j == env().getObjectModule(i)); + }; + auto it = std::find_if(p.rbegin(), p.rend(), pred); + if (it != p.rend()) + { + freeProg[std::distance(it, p.rend()) - 1].insert(i); + } + } + + // program execution + LOG(Debug) << "Executing program..." 
<< std::endl; + for (unsigned int i = 0; i < p.size(); ++i) + { + // execute module + if (!isDryRun()) + { + LOG(Message) << SEP << " Measurement step " << i+1 << "/" + << p.size() << " (module '" << module_[p[i]].name + << "') " << SEP << std::endl; + } + (*module_[p[i]].data)(); + sizeBefore = env().getTotalSize(); + // print used memory after execution + if (!isDryRun()) + { + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore) + << std::endl; + } + if (sizeBefore > memPeak) + { + memPeak = sizeBefore; + } + // garbage collection for step i + if (!isDryRun()) + { + LOG(Message) << "Garbage collection..." << std::endl; + } + nothingFreed = true; + do + { + continueCollect = false; + auto toFree = freeProg[i]; + for (auto &j: toFree) + { + // continue garbage collection while there are still + // objects without owners + continueCollect = continueCollect or !env().hasOwners(j); + if(env().freeObject(j)) + { + // if an object has been freed, remove it from + // the garbage collection schedule + freeProg[i].erase(j); + nothingFreed = false; + } + } + } while (continueCollect); + // free temporaries + for (unsigned int i = 0; i < env().getMaxAddress(); ++i) + { + if ((env().getObjectStorage(i) == Environment::Storage::temporary) + and env().hasCreatedObject(i)) + { + env().freeObject(i); + } + } + // any remaining objects in step i garbage collection schedule + // is scheduled for step i + 1 + if (i + 1 < p.size()) + { + for (auto &j: freeProg[i]) + { + freeProg[i + 1].insert(j); + } + } + // print used memory after garbage collection if necessary + if (!isDryRun()) + { + sizeAfter = env().getTotalSize(); + if (sizeBefore != sizeAfter) + { + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter) + << std::endl; + } + else + { + LOG(Message) << "Nothing to free" << std::endl; + } + } + } + + return memPeak; +} + +VirtualMachine::Size VirtualMachine::executeProgram(const std::vector &p) +{ + std::vector pAddress; + + for (auto &n: p) + { + pAddress.push_back(getModuleAddress(n)); + } + + return executeProgram(pAddress); +} + +// print VM content //////////////////////////////////////////////////////////// +void VirtualMachine::printContent(void) const +{ + LOG(Debug) << "Modules: " << std::endl; + for (unsigned int i = 0; i < module_.size(); ++i) + { + LOG(Debug) << std::setw(4) << i << ": " + << getModuleName(i) << std::endl; + } +} diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp new file mode 100644 index 00000000..357fdb5b --- /dev/null +++ b/extras/Hadrons/VirtualMachine.hpp @@ -0,0 +1,164 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/VirtualMachine.hpp + +Copyright (C) 2017 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#ifndef Hadrons_VirtualMachine_hpp_ +#define Hadrons_VirtualMachine_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +#define DEFINE_VM_ALIAS \ +inline VirtualMachine & vm(void) const\ +{\ + return VirtualMachine::getInstance();\ +} + +/****************************************************************************** + * Virtual machine for module execution * + ******************************************************************************/ +// forward declaration of Module +class ModuleBase; + +class VirtualMachine +{ + SINGLETON_DEFCTOR(VirtualMachine); +public: + typedef SITE_SIZE_TYPE Size; + typedef std::unique_ptr ModPt; +private: + struct ModuleInfo + { + const std::type_info *type{nullptr}; + std::string name; + ModPt data{nullptr}; + std::vector input; + size_t maxAllocated; + }; +public: + // dry run + void dryRun(const bool isDry); + bool isDryRun(void) const; + void memoryProfile(const bool doMemoryProfile); + bool doMemoryProfile(void) const; + // trajectory counter + void setTrajectory(const unsigned int traj); + unsigned int getTrajectory(void) const; + // module management + void pushModule(ModPt &pt); + template + void createModule(const std::string name); + template + void createModule(const std::string name, + const typename M::Par &par); + void createModule(const std::string name, + const std::string type, + XmlReader &reader); + unsigned int getNModule(void) const; + ModuleBase * getModule(const unsigned int address) const; + ModuleBase * getModule(const std::string name) const; + template + M * getModule(const unsigned int address) const; + template + M * getModule(const std::string name) const; + unsigned int getModuleAddress(const std::string name) const; + std::string getModuleName(const unsigned int address) const; + std::string getModuleType(const unsigned int address) const; + std::string getModuleType(const std::string name) const; + std::string getModuleNamespace(const unsigned int address) const; + std::string getModuleNamespace(const std::string name) const; + bool hasModule(const unsigned int address) const; + bool hasModule(const std::string name) const; + Graph makeModuleGraph(void) const; + void checkGraph(void) const; + // print VM content + void printContent(void) const; + // general execution + Size executeProgram(const std::vector &p); + Size executeProgram(const std::vector &p); +private: + // environment shortcut + DEFINE_ENV_ALIAS; +private: + // general + bool dryRun_{false}, memoryProfile_{false}; + unsigned int traj_; + // module and related maps + std::vector module_; + std::map moduleAddress_; + std::string currentModule_{""}; +}; + +/****************************************************************************** + * VirtualMachine template implementation * + ******************************************************************************/ +// module management /////////////////////////////////////////////////////////// +template +void VirtualMachine::createModule(const std::string name) +{ + ModPt pt(new M(name)); + + pushModule(pt); +} + +template +void VirtualMachine::createModule(const std::string name, + const typename M::Par &par) +{ + ModPt pt(new M(name)); + + static_cast(pt.get())->setPar(par); + pushModule(pt); +} + +template +M * VirtualMachine::getModule(const unsigned int address) const +{ + if (auto *pt = 
dynamic_cast(getModule(address))) + { + return pt; + } + else + { + HADRON_ERROR("module '" + module_[address].name + + "' does not have type " + typeid(M).name() + + "(has type: " + getModuleType(address) + ")"); + } +} + +template +M * VirtualMachine::getModule(const std::string name) const +{ + return getModule(getModuleAddress(name)); +} + +END_HADRONS_NAMESPACE + +#endif // Hadrons_VirtualMachine_hpp_ From 62eb1f0e593042f9f7665a55c0f17fe3e196beae Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 6 Dec 2017 16:48:17 +0100 Subject: [PATCH 103/145] FermionOperator virtual destructor needed for polymorphism --- lib/qcd/action/fermion/FermionOperator.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/qcd/action/fermion/FermionOperator.h b/lib/qcd/action/fermion/FermionOperator.h index 676a0e83..ddd2272a 100644 --- a/lib/qcd/action/fermion/FermionOperator.h +++ b/lib/qcd/action/fermion/FermionOperator.h @@ -47,6 +47,7 @@ namespace Grid { INHERIT_IMPL_TYPES(Impl); FermionOperator(const ImplParams &p= ImplParams()) : Impl(p) {}; + virtual ~FermionOperator(void) = default; virtual FermionField &tmp(void) = 0; From e78794688a676131ecf88902ae923a7d32b7cb96 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 6 Dec 2017 16:50:25 +0100 Subject: [PATCH 104/145] memory profiler improvement --- lib/allocator/AlignedAllocator.cc | 3 +- lib/allocator/AlignedAllocator.h | 79 ++++++++++++++++++------------- 2 files changed, 47 insertions(+), 35 deletions(-) diff --git a/lib/allocator/AlignedAllocator.cc b/lib/allocator/AlignedAllocator.cc index 944e287f..dfdb1050 100644 --- a/lib/allocator/AlignedAllocator.cc +++ b/lib/allocator/AlignedAllocator.cc @@ -3,7 +3,8 @@ namespace Grid { -MemoryStats *MemoryProfiler::stats = nullptr; +MemoryStats *MemoryProfiler::stats = nullptr; +bool MemoryProfiler::debug = false; int PointerCache::victim; diff --git a/lib/allocator/AlignedAllocator.h b/lib/allocator/AlignedAllocator.h index bdccacec..85e2b240 100644 --- a/lib/allocator/AlignedAllocator.h +++ b/lib/allocator/AlignedAllocator.h @@ -74,8 +74,47 @@ namespace Grid { { public: static MemoryStats *stats; + static bool debug; }; + #define profilerDebugPrint \ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + std::cout << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\ + std::cout << "[Memory debug] Total : " << s->totalAllocated << "B" << std::endl;\ + std::cout << "[Memory debug] Max : " << s->maxAllocated << "B" << std::endl;\ + std::cout << "[Memory debug] Current: " << s->totalAllocated << "B" << std::endl;\ + std::cout << "[Memory debug] Freed : " << s->totalFreed << "B" << std::endl;\ + } + + #define profilerAllocate(bytes)\ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + s->totalAllocated += (bytes);\ + s->currentlyAllocated += (bytes);\ + s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated);\ + }\ + if (MemoryProfiler::debug)\ + {\ + std::cout << "[Memory debug] allocating " << bytes << "B" << std::endl;\ + profilerDebugPrint;\ + } + + #define profilerFree(bytes)\ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + s->totalFreed += (bytes);\ + s->currentlyAllocated -= (bytes);\ + }\ + if (MemoryProfiler::debug)\ + {\ + std::cout << "[Memory debug] freeing " << bytes << "B" << std::endl;\ + profilerDebugPrint;\ + } + void check_huge_pages(void *Buf,uint64_t BYTES); //////////////////////////////////////////////////////////////////// @@ -104,13 +143,7 @@ public: pointer allocate(size_type __n, 
const void* _p= 0) { size_type bytes = __n*sizeof(_Tp); - - if (auto s = MemoryProfiler::stats) - { - s->totalAllocated += bytes; - s->currentlyAllocated += bytes; - s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); - } + profilerAllocate(bytes); _Tp *ptr = (_Tp *) PointerCache::Lookup(bytes); // if ( ptr != NULL ) @@ -141,11 +174,7 @@ public: void deallocate(pointer __p, size_type __n) { size_type bytes = __n * sizeof(_Tp); - if (auto s = MemoryProfiler::stats) - { - s->totalFreed += bytes; - s->currentlyAllocated -= bytes; - } + profilerFree(bytes); pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes); @@ -199,12 +228,7 @@ public: { size_type bytes = __n*sizeof(_Tp); - if (auto s = MemoryProfiler::stats) - { - s->totalAllocated += bytes; - s->currentlyAllocated += bytes; - s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); - } + profilerAllocate(bytes); #ifdef CRAY _Tp *ptr = (_Tp *) shmem_align(bytes,64); #else @@ -229,11 +253,7 @@ public: void deallocate(pointer __p, size_type __n) { size_type bytes = __n*sizeof(_Tp); - if (auto s = MemoryProfiler::stats) - { - s->totalFreed += bytes; - s->currentlyAllocated -= bytes; - } + profilerFree(bytes); shmem_free((void *)__p); } #else @@ -241,12 +261,7 @@ public: { size_type bytes = __n*sizeof(_Tp); - if (auto s = MemoryProfiler::stats) - { - s->totalAllocated += bytes; - s->currentlyAllocated += bytes; - s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated); - } + profilerAllocate(bytes); #ifdef HAVE_MM_MALLOC_H _Tp * ptr = (_Tp *) _mm_malloc(bytes, GRID_ALLOC_ALIGN); #else @@ -265,11 +280,7 @@ public: void deallocate(pointer __p, size_type __n) { size_type bytes = __n*sizeof(_Tp); - if (auto s = MemoryProfiler::stats) - { - s->totalFreed += bytes; - s->currentlyAllocated -= bytes; - } + profilerFree(bytes); #ifdef HAVE_MM_MALLOC_H _mm_free((void *)__p); #else From 0fbf445edd90be7ac6363a77bc93c8b7325c45fe Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 6 Dec 2017 16:51:48 +0100 Subject: [PATCH 105/145] Hadrons: object creation that get properly captured by the memory profiler --- extras/Hadrons/Environment.hpp | 32 +++++++++++++++------ extras/Hadrons/Module.hpp | 27 +++++++++-------- extras/Hadrons/Modules/MAction/DWF.hpp | 5 ++-- extras/Hadrons/Modules/MAction/Wilson.hpp | 5 ++-- extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 2 +- extras/Hadrons/VirtualMachine.cc | 3 +- 6 files changed, 44 insertions(+), 30 deletions(-) diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 9d482923..5058a820 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -100,11 +100,16 @@ public: // general memory management void addObject(const std::string name, const int moduleAddress = -1); - template + template + void createDerivedObject(const std::string name, + const Environment::Storage storage, + const unsigned int Ls, + Ts && ... args); + template void createObject(const std::string name, - const Storage storage, + const Environment::Storage storage, const unsigned int Ls, - P &&pt); + Ts && ... 
args); void setObjectModule(const unsigned int objAddress, const int modAddress); template @@ -195,11 +200,11 @@ void Holder::reset(T *pt) * Environment template implementation * ******************************************************************************/ // general memory management /////////////////////////////////////////////////// -template -void Environment::createObject(const std::string name, +template +void Environment::createDerivedObject(const std::string name, const Environment::Storage storage, const unsigned int Ls, - P &&pt) + Ts && ... args) { if (!hasObject(name)) { @@ -210,13 +215,13 @@ void Environment::createObject(const std::string name, if (!object_[address].data) { - MemoryStats memStats; + MemoryStats memStats; MemoryProfiler::stats = &memStats; object_[address].storage = storage; object_[address].Ls = Ls; - object_[address].data.reset(new Holder(pt)); - object_[address].size = memStats.totalAllocated; + object_[address].data.reset(new Holder(new T(std::forward(args)...))); + object_[address].size = memStats.maxAllocated; object_[address].type = &typeid(T); MemoryProfiler::stats = nullptr; } @@ -226,6 +231,15 @@ void Environment::createObject(const std::string name, } } +template +void Environment::createObject(const std::string name, + const Environment::Storage storage, + const unsigned int Ls, + Ts && ... args) +{ + createDerivedObject(name, storage, Ls, std::forward(args)...); +} + template T * Environment::getObject(const unsigned int address) const { diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index d1910c9b..14d98bfb 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -98,39 +98,42 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define envHasType(type, name)\ env().template isObjectOfType(name) -#define envCreate(type, name, Ls, pt)\ -env().template createObject(name, Environment::Storage::object, Ls, pt) +#define envCreate(type, name, Ls, ...)\ +env().template createObject(name, Environment::Storage::object, Ls, __VA_ARGS__) + +#define envCreateDerived(base, type, name, Ls, ...)\ +env().template createDerivedObject(name, Environment::Storage::object, Ls, __VA_ARGS__) #define envCreateLat4(type, name)\ -envCreate(type, name, 1, new type(env().getGrid())) +envCreate(type, name, 1, env().getGrid()) #define envCreateLat5(type, name, Ls)\ -envCreate(type, name, Ls, new type(env().getGrid(Ls))) +envCreate(type, name, Ls, env().getGrid(Ls)) #define envCreateLat(...)\ MACRO_REDIRECT(__VA_ARGS__, envCreateLat5, envCreateLat4)(__VA_ARGS__) -#define envCache(type, name, Ls, pt)\ -env().template createObject(name, Environment::Storage::cache, Ls, pt) +#define envCache(type, name, Ls, ...)\ +env().template createObject(name, Environment::Storage::cache, Ls, __VA_ARGS__) #define envCacheLat4(type, name)\ -envCache(type, name, 1, new type(env().getGrid())) +envCache(type, name, 1, env().getGrid()) #define envCacheLat5(type, name, Ls)\ -envCache(type, name, Ls, new type(env().getGrid(Ls))) +envCache(type, name, Ls, env().getGrid(Ls)) #define envCacheLat(...)\ MACRO_REDIRECT(__VA_ARGS__, envCacheLat5, envCacheLat4)(__VA_ARGS__) -#define envTmp(type, name, Ls, pt)\ +#define envTmp(type, name, Ls, ...)\ env().template createObject(getName() + "_tmp_" + name, \ - Environment::Storage::temporary, Ls, pt) + Environment::Storage::temporary, Ls, __VA_ARGS__) #define envTmpLat4(type, name)\ -envTmp(type, name, 1, new type(env().getGrid())) +envTmp(type, name, 1, env().getGrid()) #define envTmpLat5(type, name, Ls)\ 
-envTmp(type, name, Ls, new type(env().getGrid(Ls))) +envTmp(type, name, Ls, env().getGrid(Ls)) #define envTmpLat(...)\ MACRO_REDIRECT(__VA_ARGS__, envTmpLat5, envTmpLat4)(__VA_ARGS__) diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 36c70073..e7d28476 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -118,9 +118,8 @@ void TDWF::setup(void) auto &grb5 = *env().getRbGrid(par().Ls); std::vector boundary = strToVec(par().boundary); typename DomainWallFermion::ImplParams implParams(boundary); - envCreate(FMat, getName(), par().Ls, - new DomainWallFermion(U, g5, grb5, g4, grb4, par().mass, - par().M5, implParams)); + envCreateDerived(FMat, DomainWallFermion, getName(), par().Ls, U, g5, + grb5, g4, grb4, par().mass, par().M5, implParams); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index 7fe1f44e..591a3fed 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -111,9 +111,8 @@ void TWilson::setup(void) auto &gridRb = *env().getRbGrid(); std::vector boundary = strToVec(par().boundary); typename WilsonFermion::ImplParams implParams(boundary); - envCreate(FMat, getName(), 1, new WilsonFermion(U, grid, gridRb, - par().mass, - implParams)); + envCreateDerived(FMat, WilsonFermion, getName(), 1, U, grid, gridRb, + par().mass, implParams); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index d8a4b95f..d6c21412 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -114,7 +114,7 @@ void TRBPrecCG::setup(void) schurSolver(mat, source, sol); }; - envCreate(SolverFn, getName(), Ls, new SolverFn(solver)); + envCreate(SolverFn, getName(), Ls, solver); env().addOwnership(getName(), par().action); } diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index f09e2710..ae1d5b6b 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -251,8 +251,7 @@ Graph VirtualMachine::makeModuleGraph(void) const // general execution /////////////////////////////////////////////////////////// #define BIG_SEP "===============" #define SEP "---------------" -#define MEM_MSG(size)\ -sizeString((size)*env().getLocalVolume()) << " (" << sizeString(size) << "/site)" +#define MEM_MSG(size) sizeString(size) VirtualMachine::Size VirtualMachine::executeProgram(const std::vector &p) From f9aa39e1c458652185ea81d2cfa16b9e47119e4e Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Thu, 7 Dec 2017 14:40:58 +0100 Subject: [PATCH 106/145] global memory debug through command line flag --- extras/Hadrons/Environment.hpp | 15 +++++++++++---- extras/Hadrons/Global.cc | 27 --------------------------- extras/Hadrons/Global.hpp | 3 --- lib/allocator/AlignedAllocator.cc | 25 +++++++++++++++++++++++++ lib/allocator/AlignedAllocator.h | 21 ++++++++++++++------- lib/util/Init.cc | 8 +++++++- 6 files changed, 57 insertions(+), 42 deletions(-) diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 5058a820..5177b312 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -216,14 +216,21 @@ void Environment::createDerivedObject(const std::string name, if 
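// editorial sketch, not part of the patch ////////////////////////////////////
// Intended use of the variadic envCreate/envTmp macros rewritten above:
// constructor arguments are now forwarded to createObject instead of a
// caller-allocated pointer being passed in. "PropagatorField" and the grid
// accessors are taken from the surrounding Hadrons code; everything else
// here is illustrative only.
//
//   old style: envCreate(PropagatorField, getName(), 1,
//                        new PropagatorField(env().getGrid()));
//   new style: envCreate(PropagatorField, getName(), 1, env().getGrid());
//
// Derived objects keep both the stored base type and the constructed type,
// as in the domain-wall action change above:
//
//   envCreateDerived(FMat, DomainWallFermion, getName(), par().Ls, U, g5,
//                    grb5, g4, grb4, par().mass, par().M5, implParams);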
(!object_[address].data) { MemoryStats memStats; - - MemoryProfiler::stats = &memStats; + + if (!MemoryProfiler::stats) + { + MemoryProfiler::stats = &memStats; + } + size_t initMem = MemoryProfiler::stats->currentlyAllocated; object_[address].storage = storage; object_[address].Ls = Ls; object_[address].data.reset(new Holder(new T(std::forward(args)...))); - object_[address].size = memStats.maxAllocated; + object_[address].size = MemoryProfiler::stats->maxAllocated - initMem; object_[address].type = &typeid(T); - MemoryProfiler::stats = nullptr; + if (MemoryProfiler::stats == &memStats) + { + MemoryProfiler::stats = nullptr; + } } else { diff --git a/extras/Hadrons/Global.cc b/extras/Hadrons/Global.cc index 7b0b8fb6..130ede96 100644 --- a/extras/Hadrons/Global.cc +++ b/extras/Hadrons/Global.cc @@ -39,33 +39,6 @@ HadronsLogger Hadrons::HadronsLogMessage(1,"Message"); HadronsLogger Hadrons::HadronsLogIterative(1,"Iterative"); HadronsLogger Hadrons::HadronsLogDebug(1,"Debug"); -// pretty size formatting ////////////////////////////////////////////////////// -std::string Hadrons::sizeString(long unsigned int bytes) - -{ - constexpr unsigned int bufSize = 256; - const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"}; - char buf[256]; - long unsigned int s = 0; - double count = bytes; - - while (count >= 1024 && s < 7) - { - s++; - count /= 1024; - } - if (count - floor(count) == 0.0) - { - snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]); - } - else - { - snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]); - } - - return std::string(buf); -} - // type utilities ////////////////////////////////////////////////////////////// constexpr unsigned int maxNameSize = 1024u; diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index 1f0ce201..4c37b961 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -138,9 +138,6 @@ public:\ private:\ name(void) = default; -// pretty size formating -std::string sizeString(long unsigned int bytes); - // type utilities template const std::type_info * typeIdPt(const T &x) diff --git a/lib/allocator/AlignedAllocator.cc b/lib/allocator/AlignedAllocator.cc index dfdb1050..10b49f4b 100644 --- a/lib/allocator/AlignedAllocator.cc +++ b/lib/allocator/AlignedAllocator.cc @@ -97,4 +97,29 @@ void check_huge_pages(void *Buf,uint64_t BYTES) #endif } +std::string sizeString(const size_t bytes) +{ + constexpr unsigned int bufSize = 256; + const char *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"}; + char buf[256]; + size_t s = 0; + double count = bytes; + + while (count >= 1024 && s < 7) + { + s++; + count /= 1024; + } + if (count - floor(count) == 0.0) + { + snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]); + } + else + { + snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]); + } + + return std::string(buf); +} + } diff --git a/lib/allocator/AlignedAllocator.h b/lib/allocator/AlignedAllocator.h index 85e2b240..3b27aec9 100644 --- a/lib/allocator/AlignedAllocator.h +++ b/lib/allocator/AlignedAllocator.h @@ -64,6 +64,8 @@ namespace Grid { }; + std::string sizeString(size_t bytes); + struct MemoryStats { size_t totalAllocated{0}, maxAllocated{0}, @@ -77,15 +79,20 @@ namespace Grid { static bool debug; }; + #define memString(bytes) std::to_string(bytes) + " (" + sizeString(bytes) + ")" #define profilerDebugPrint \ if (MemoryProfiler::stats)\ {\ auto s = MemoryProfiler::stats;\ - std::cout << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\ - std::cout << "[Memory debug] Total : " << s->totalAllocated << 
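// editorial sketch, not part of the patch ////////////////////////////////////
// Expected behaviour of the sizeString() helper relocated above, worked out
// from its function body; the snippet is illustrative only.
//
//   std::cout << Grid::sizeString(512)     << std::endl;  // prints "512 B"
//   std::cout << Grid::sizeString(1536)    << std::endl;  // prints "1.5 KB"
//   std::cout << Grid::sizeString(2621440) << std::endl;  // prints "2.5 MB"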
"B" << std::endl;\ - std::cout << "[Memory debug] Max : " << s->maxAllocated << "B" << std::endl;\ - std::cout << "[Memory debug] Current: " << s->totalAllocated << "B" << std::endl;\ - std::cout << "[Memory debug] Freed : " << s->totalFreed << "B" << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] total : " << memString(s->totalAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] max : " << memString(s->maxAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] current: " << memString(s->currentlyAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] freed : " << memString(s->totalFreed) \ + << std::endl;\ } #define profilerAllocate(bytes)\ @@ -98,7 +105,7 @@ namespace Grid { }\ if (MemoryProfiler::debug)\ {\ - std::cout << "[Memory debug] allocating " << bytes << "B" << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] allocating " << memString(bytes) << std::endl;\ profilerDebugPrint;\ } @@ -111,7 +118,7 @@ namespace Grid { }\ if (MemoryProfiler::debug)\ {\ - std::cout << "[Memory debug] freeing " << bytes << "B" << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] freeing " << memString(bytes) << std::endl;\ profilerDebugPrint;\ } diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 031f8f5a..20367293 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -204,7 +204,7 @@ std::string GridCmdVectorIntToString(const std::vector & vec){ // Reinit guard ///////////////////////////////////////////////////////// static int Grid_is_initialised = 0; - +static MemoryStats dbgMemStats; void Grid_init(int *argc,char ***argv) { @@ -251,6 +251,11 @@ void Grid_init(int *argc,char ***argv) assert(fp!=(FILE *)NULL); } + if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-mem") ){ + MemoryProfiler::debug = true; + MemoryProfiler::stats = &dbgMemStats; + } + //////////////////////////////////// // Banner //////////////////////////////////// @@ -324,6 +329,7 @@ void Grid_init(int *argc,char ***argv) std::cout< Date: Tue, 12 Dec 2017 13:08:01 +0000 Subject: [PATCH 107/145] Hadrons: much simpler reference dependency --- extras/Hadrons/Environment.cc | 78 +++---------------- extras/Hadrons/Environment.hpp | 27 +++---- extras/Hadrons/Graph.hpp | 12 +-- extras/Hadrons/Module.hpp | 1 + extras/Hadrons/Modules/MAction/DWF.hpp | 9 +++ extras/Hadrons/Modules/MAction/Wilson.hpp | 9 +++ extras/Hadrons/Modules/MContraction/Meson.hpp | 9 +++ extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 9 +++ extras/Hadrons/Modules/MGauge/Unit.cc | 7 ++ extras/Hadrons/Modules/MGauge/Unit.hpp | 1 + extras/Hadrons/Modules/MSink/Point.hpp | 9 +++ extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 12 ++- extras/Hadrons/Modules/MSource/Point.hpp | 9 +++ .../Modules/templates/Module.cc.template | 8 ++ .../Modules/templates/Module.hpp.template | 1 + .../templates/Module_in_NS.cc.template | 8 ++ .../templates/Module_in_NS.hpp.template | 1 + .../Modules/templates/Module_tmp.hpp.template | 1 + .../templates/Module_tmp_in_NS.hpp.template | 9 +++ extras/Hadrons/VirtualMachine.cc | 77 +++++++++--------- 20 files changed, 171 insertions(+), 126 deletions(-) diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index ea41f343..66291966 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -341,81 +341,21 @@ Environment::Size Environment::getTotalSize(void) const return size; } -void Environment::addOwnership(const 
unsigned int owner, - const unsigned int property) +void Environment::freeObject(const unsigned int address) { - if (hasObject(property)) + if (hasCreatedObject(address)) { - object_[property].owners.insert(owner); - } - else - { - HADRON_ERROR("no object with address " + std::to_string(property)); - } - if (hasObject(owner)) - { - object_[owner].properties.insert(property); - } - else - { - HADRON_ERROR("no object with address " + std::to_string(owner)); + LOG(Message) << "Destroying object '" << object_[address].name + << "'" << std::endl; } + object_[address].size = 0; + object_[address].type = nullptr; + object_[address].data.reset(nullptr); } -void Environment::addOwnership(const std::string owner, - const std::string property) +void Environment::freeObject(const std::string name) { - addOwnership(getObjectAddress(owner), getObjectAddress(property)); -} - -bool Environment::hasOwners(const unsigned int address) const -{ - - if (hasObject(address)) - { - return (!object_[address].owners.empty()); - } - else - { - HADRON_ERROR("no object with address " + std::to_string(address)); - } -} - -bool Environment::hasOwners(const std::string name) const -{ - return hasOwners(getObjectAddress(name)); -} - -bool Environment::freeObject(const unsigned int address) -{ - if (!hasOwners(address)) - { - if (hasCreatedObject(address)) - { - LOG(Message) << "Destroying object '" << object_[address].name - << "'" << std::endl; - } - for (auto &p: object_[address].properties) - { - object_[p].owners.erase(address); - } - object_[address].size = 0; - object_[address].type = nullptr; - object_[address].owners.clear(); - object_[address].properties.clear(); - object_[address].data.reset(nullptr); - - return true; - } - else - { - return false; - } -} - -bool Environment::freeObject(const std::string name) -{ - return freeObject(getObjectAddress(name)); + freeObject(getObjectAddress(name)); } void Environment::freeAll(void) diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 5177b312..811ee14e 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -82,7 +82,6 @@ private: const std::type_info *type{nullptr}; std::string name; int module{-1}; - std::set owners, properties; std::unique_ptr data{nullptr}; }; public: @@ -140,14 +139,8 @@ public: template bool isObjectOfType(const std::string name) const; Environment::Size getTotalSize(void) const; - void addOwnership(const unsigned int owner, - const unsigned int property); - void addOwnership(const std::string owner, - const std::string property); - bool hasOwners(const unsigned int address) const; - bool hasOwners(const std::string name) const; - bool freeObject(const unsigned int address); - bool freeObject(const std::string name); + void freeObject(const unsigned int address); + void freeObject(const std::string name); void freeAll(void); // print environment content void printContent(void) const; @@ -252,15 +245,23 @@ T * Environment::getObject(const unsigned int address) const { if (hasObject(address)) { - if (auto h = dynamic_cast *>(object_[address].data.get())) + if (hasCreatedObject(address)) { - return h->getPt(); + if (auto h = dynamic_cast *>(object_[address].data.get())) + { + return h->getPt(); + } + else + { + HADRON_ERROR("object with address " + std::to_string(address) + + " does not have type '" + typeName(&typeid(T)) + + "' (has type '" + getObjectType(address) + "')"); + } } else { HADRON_ERROR("object with address " + std::to_string(address) + - " does not have type '" + 
typeName(&typeid(T)) + - "' (has type '" + getObjectType(address) + "')"); + " is empty"); } } else diff --git a/extras/Hadrons/Graph.hpp b/extras/Hadrons/Graph.hpp index df255517..bb9ae679 100644 --- a/extras/Hadrons/Graph.hpp +++ b/extras/Hadrons/Graph.hpp @@ -430,7 +430,7 @@ std::vector Graph::getAdjacentVertices(const T &value) const { return ((e.first == value) or (e.second == value)); }; - auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred); + auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred); while (eIt != edgeSet_.end()) { @@ -442,7 +442,7 @@ std::vector Graph::getAdjacentVertices(const T &value) const { adjacentVertex.push_back((*eIt).first); } - eIt = find_if(++eIt, edgeSet_.end(), pred); + eIt = std::find_if(++eIt, edgeSet_.end(), pred); } return adjacentVertex; @@ -458,12 +458,12 @@ std::vector Graph::getChildren(const T &value) const { return (e.first == value); }; - auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred); + auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred); while (eIt != edgeSet_.end()) { child.push_back((*eIt).second); - eIt = find_if(++eIt, edgeSet_.end(), pred); + eIt = std::find_if(++eIt, edgeSet_.end(), pred); } return child; @@ -479,12 +479,12 @@ std::vector Graph::getParents(const T &value) const { return (e.second == value); }; - auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred); + auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred); while (eIt != edgeSet_.end()) { parent.push_back((*eIt).first); - eIt = find_if(++eIt, edgeSet_.end(), pred); + eIt = std::find_if(++eIt, edgeSet_.end(), pred); } return parent; diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 14d98bfb..c6b58e9f 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -155,6 +155,7 @@ public: virtual std::string getRegisteredName(void); // dependencies/products virtual std::vector getInput(void) = 0; + virtual std::vector getReference(void) = 0; virtual std::vector getOutput(void) = 0; // parse parameters virtual void parseParameters(XmlReader &reader, const std::string name) = 0; diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index e7d28476..91e4ec94 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -64,6 +64,7 @@ public: virtual ~TDWF(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -92,6 +93,14 @@ std::vector TDWF::getInput(void) return in; } +template +std::vector TDWF::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TDWF::getOutput(void) { diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index 591a3fed..1ca3bf59 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -62,6 +62,7 @@ public: virtual ~TWilson(void) = default; // dependencies/products virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -90,6 +91,14 @@ std::vector TWilson::getInput(void) return in; } +template +std::vector TWilson::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TWilson::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 7c0012d2..7d19feb8 100644 
--- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -95,6 +95,7 @@ public: virtual ~TMeson(void) = default; // dependencies/products virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); virtual void parseGammaString(std::vector &gammaList); protected: @@ -122,6 +123,14 @@ std::vector TMeson::getInput(void) return input; } +template +std::vector TMeson::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TMeson::getOutput(void) { diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 8529825b..f860c403 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -84,6 +84,7 @@ public: virtual ~TGaugeProp(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -115,6 +116,14 @@ std::vector TGaugeProp::getInput(void) return in; } +template +std::vector TGaugeProp::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TGaugeProp::getOutput(void) { diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index b3a7d634..bc05a785 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -47,6 +47,13 @@ std::vector TUnit::getInput(void) return std::vector(); } +std::vector TUnit::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + std::vector TUnit::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/MGauge/Unit.hpp b/extras/Hadrons/Modules/MGauge/Unit.hpp index c1650cc7..4b69f0ce 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.hpp +++ b/extras/Hadrons/Modules/MGauge/Unit.hpp @@ -50,6 +50,7 @@ public: virtual ~TUnit(void) = default; // dependencies/products virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp index 853a7c32..16b89434 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -60,6 +60,7 @@ public: virtual ~TPoint(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -93,6 +94,14 @@ std::vector TPoint::getInput(void) return in; } +template +std::vector TPoint::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TPoint::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index d6c21412..bb4f3f62 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -61,6 +61,7 @@ public: virtual ~TRBPrecCG(void) = default; // dependencies/products virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -84,11 +85,19 @@ TRBPrecCG::TRBPrecCG(const std::string name) template std::vector TRBPrecCG::getInput(void) { - std::vector in = {par().action}; + std::vector in = {}; return in; } +template +std::vector TRBPrecCG::getReference(void) +{ + std::vector ref = {par().action}; + + return ref; +} + 
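// editorial sketch, not part of the patch ////////////////////////////////////
// The getInput()/getReference() split introduced in this commit, shown for a
// hypothetical solver-like module TMyModule: objects listed as references are
// not read directly at execution time, but the virtual machine still records
// them as dependencies and propagates them to every module consuming this
// module's output (cf. the pushModule() changes below), so they stay alive
// for as long as they are needed.
template <typename FImpl>
std::vector<std::string> TMyModule<FImpl>::getInput(void)
{
    std::vector<std::string> in = {par().source}; // data read during execution
    
    return in;
}

template <typename FImpl>
std::vector<std::string> TMyModule<FImpl>::getReference(void)
{
    std::vector<std::string> ref = {par().action}; // only captured/referenced
    
    return ref;
}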
template std::vector TRBPrecCG::getOutput(void) { @@ -115,7 +124,6 @@ void TRBPrecCG::setup(void) schurSolver(mat, source, sol); }; envCreate(SolverFn, getName(), Ls, solver); - env().addOwnership(getName(), par().action); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index b9813688..3fab41c0 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -71,6 +71,7 @@ public: virtual ~TPoint(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -100,6 +101,14 @@ std::vector TPoint::getInput(void) return in; } +template +std::vector TPoint::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TPoint::getOutput(void) { diff --git a/extras/Hadrons/Modules/templates/Module.cc.template b/extras/Hadrons/Modules/templates/Module.cc.template index 0c509d6d..29edadfb 100644 --- a/extras/Hadrons/Modules/templates/Module.cc.template +++ b/extras/Hadrons/Modules/templates/Module.cc.template @@ -19,6 +19,14 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } +template +std::vector T___FILEBASENAME___::getReference(void) +{ + std::vector in = {}; + + return in; +} + std::vector T___FILEBASENAME___::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/templates/Module.hpp.template b/extras/Hadrons/Modules/templates/Module.hpp.template index fb43260f..b59e168f 100644 --- a/extras/Hadrons/Modules/templates/Module.hpp.template +++ b/extras/Hadrons/Modules/templates/Module.hpp.template @@ -26,6 +26,7 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_in_NS.cc.template b/extras/Hadrons/Modules/templates/Module_in_NS.cc.template index 8b2a0ec0..880129bd 100644 --- a/extras/Hadrons/Modules/templates/Module_in_NS.cc.template +++ b/extras/Hadrons/Modules/templates/Module_in_NS.cc.template @@ -20,6 +20,14 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } +template +std::vector T___FILEBASENAME___::getReference(void) +{ + std::vector in = {}; + + return in; +} + std::vector T___FILEBASENAME___::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template b/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template index ea77b12a..f90cb052 100644 --- a/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template @@ -28,6 +28,7 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_tmp.hpp.template b/extras/Hadrons/Modules/templates/Module_tmp.hpp.template index 2ee053a9..b4e7f87f 100644 --- a/extras/Hadrons/Modules/templates/Module_tmp.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_tmp.hpp.template @@ -27,6 +27,7 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); + virtual 
std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template b/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template index b79c0ad3..9aef1c92 100644 --- a/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template @@ -29,6 +29,7 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); @@ -56,6 +57,14 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } +template +std::vector T___FILEBASENAME___::getReference(void) +{ + std::vector in = {}; + + return in; +} + template std::vector T___FILEBASENAME___::getOutput(void) { diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index ae1d5b6b..7f967f66 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -82,8 +82,7 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) m.data = std::move(pt); m.type = typeIdPt(*m.data.get()); m.name = name; - auto input = m.data->getInput(); - for (auto &in: input) + for (auto &in: m.data->getInput()) { if (!env().hasObject(in)) { @@ -91,11 +90,18 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } m.input.push_back(env().getObjectAddress(in)); } - auto output = m.data->getOutput(); + for (auto &ref: m.data->getReference()) + { + if (!env().hasObject(ref)) + { + env().addObject(ref , -1); + } + m.input.push_back(env().getObjectAddress(ref)); + } module_.push_back(std::move(m)); address = static_cast(module_.size() - 1); moduleAddress_[name] = address; - for (auto &out: output) + for (auto &out: getModule(address)->getOutput()) { if (!env().hasObject(out)) { @@ -114,6 +120,25 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) + module_[env().getObjectModule(out)].name + "' (while pushing module '" + name + "')"); } + if (getModule(address)->getReference().size() > 0) + { + auto pred = [this, out](const ModuleInfo &n) + { + auto &in = n.input; + auto it = std::find(in.begin(), in.end(), env().getObjectAddress(out)); + + return (it != in.end()); + }; + auto it = std::find_if(module_.begin(), module_.end(), pred); + while (it != module_.end()) + { + for (auto &ref: getModule(address)->getReference()) + { + it->input.push_back(env().getObjectAddress(ref)); + } + it = std::find_if(++it, module_.end(), pred); + } + } } } } @@ -225,12 +250,17 @@ Graph VirtualMachine::makeModuleGraph(void) const { Graph moduleGraph; - for (unsigned int i = 0; i < module_.size(); ++i) + // create vertices + for (unsigned int m = 0; m < module_.size(); ++m) { - moduleGraph.addVertex(i); - for (auto &j: module_[i].input) + moduleGraph.addVertex(m); + } + // create edges + for (unsigned int m = 0; m < module_.size(); ++m) + { + for (auto &in: module_[m].input) { - moduleGraph.addEdge(env().getObjectModule(j), i); + moduleGraph.addEdge(env().getObjectModule(in), m); } } @@ -258,7 +288,6 @@ VirtualMachine::executeProgram(const std::vector &p) { Size memPeak = 0, sizeBefore, sizeAfter; std::vector> freeProg; - bool continueCollect, nothingFreed; // build garbage collection schedule LOG(Debug) << "Building garbage collection schedule..." << std::endl; @@ -307,25 +336,10 @@ VirtualMachine::executeProgram(const std::vector &p) { LOG(Message) << "Garbage collection..." 
<< std::endl; } - nothingFreed = true; - do + for (auto &j: freeProg[i]) { - continueCollect = false; - auto toFree = freeProg[i]; - for (auto &j: toFree) - { - // continue garbage collection while there are still - // objects without owners - continueCollect = continueCollect or !env().hasOwners(j); - if(env().freeObject(j)) - { - // if an object has been freed, remove it from - // the garbage collection schedule - freeProg[i].erase(j); - nothingFreed = false; - } - } - } while (continueCollect); + env().freeObject(j); + } // free temporaries for (unsigned int i = 0; i < env().getMaxAddress(); ++i) { @@ -335,15 +349,6 @@ VirtualMachine::executeProgram(const std::vector &p) env().freeObject(i); } } - // any remaining objects in step i garbage collection schedule - // is scheduled for step i + 1 - if (i + 1 < p.size()) - { - for (auto &j: freeProg[i]) - { - freeProg[i + 1].insert(j); - } - } // print used memory after garbage collection if necessary if (!isDryRun()) { From 26d7b829a076fa74df370789e9f723f8b793fa67 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 12 Dec 2017 14:04:28 +0000 Subject: [PATCH 108/145] Hadrons: error managed through expections --- extras/Hadrons/Application.cc | 8 +-- extras/Hadrons/Environment.cc | 23 +++--- extras/Hadrons/Environment.hpp | 10 +-- extras/Hadrons/Exceptions.cc | 57 +++++++++++++++ extras/Hadrons/Exceptions.hpp | 72 +++++++++++++++++++ extras/Hadrons/Factory.hpp | 2 +- extras/Hadrons/Global.hpp | 7 +- extras/Hadrons/Graph.hpp | 12 ++-- extras/Hadrons/Makefile.am | 2 + extras/Hadrons/Module.cc | 2 +- .../Modules/MContraction/WardIdentity.hpp | 2 +- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 2 +- .../Modules/MUtilities/TestSeqConserved.hpp | 2 +- extras/Hadrons/VirtualMachine.cc | 14 ++-- extras/Hadrons/VirtualMachine.hpp | 2 +- 15 files changed, 174 insertions(+), 43 deletions(-) create mode 100644 extras/Hadrons/Exceptions.cc create mode 100644 extras/Hadrons/Exceptions.hpp diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index af67dff3..aa66d36f 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -123,11 +123,11 @@ void Application::parseParameterFile(const std::string parameterFileName) setPar(par); if (!push(reader, "modules")) { - HADRON_ERROR("Cannot open node 'modules' in parameter file '" + parameterFileName + "'"); + HADRON_ERROR(Parsing, "Cannot open node 'modules' in parameter file '" + parameterFileName + "'"); } if (!push(reader, "module")) { - HADRON_ERROR("Cannot open node 'modules/module' in parameter file '" + parameterFileName + "'"); + HADRON_ERROR(Parsing, "Cannot open node 'modules/module' in parameter file '" + parameterFileName + "'"); } do { @@ -262,7 +262,7 @@ void Application::saveSchedule(const std::string filename) if (!scheduled_) { - HADRON_ERROR("Computation not scheduled"); + HADRON_ERROR(Definition, "Computation not scheduled"); } LOG(Message) << "Saving current schedule to '" << filename << "'..." 
<< std::endl; @@ -296,7 +296,7 @@ void Application::printSchedule(void) { if (!scheduled_) { - HADRON_ERROR("Computation not scheduled"); + HADRON_ERROR(Definition, "Computation not scheduled"); } LOG(Message) << "Schedule (memory peak: " << MEM_MSG(memPeak_) << "):" << std::endl; diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index 66291966..403476d0 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -35,6 +35,9 @@ using namespace Grid; using namespace QCD; using namespace Hadrons; +#define ERROR_NO_ADDRESS(address)\ +HADRON_ERROR(Definition, "no object with address " + std::to_string(address)); + /****************************************************************************** * Environment implementation * ******************************************************************************/ @@ -83,7 +86,7 @@ GridCartesian * Environment::getGrid(const unsigned int Ls) const } catch(std::out_of_range &) { - HADRON_ERROR("no grid with Ls= " << Ls); + HADRON_ERROR(Definition, "no grid with Ls= " + std::to_string(Ls)); } } @@ -102,7 +105,7 @@ GridRedBlackCartesian * Environment::getRbGrid(const unsigned int Ls) const } catch(std::out_of_range &) { - HADRON_ERROR("no red-black 5D grid with Ls= " << Ls); + HADRON_ERROR(Definition, "no red-black 5D grid with Ls= " + std::to_string(Ls)); } } @@ -152,7 +155,7 @@ void Environment::addObject(const std::string name, const int moduleAddress) } else { - HADRON_ERROR("object '" + name + "' already exists"); + HADRON_ERROR(Definition, "object '" + name + "' already exists"); } } @@ -175,7 +178,7 @@ unsigned int Environment::getObjectAddress(const std::string name) const } else { - HADRON_ERROR("no object with name '" + name + "'"); + HADRON_ERROR(Definition, "no object with name '" + name + "'"); } } @@ -187,7 +190,7 @@ std::string Environment::getObjectName(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } @@ -206,7 +209,7 @@ std::string Environment::getObjectType(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } @@ -223,7 +226,7 @@ Environment::Size Environment::getObjectSize(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } @@ -240,7 +243,7 @@ Environment::Storage Environment::getObjectStorage(const unsigned int address) c } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } @@ -257,7 +260,7 @@ int Environment::getObjectModule(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } @@ -274,7 +277,7 @@ unsigned int Environment::getObjectLs(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + ERROR_NO_ADDRESS(address); } } diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 811ee14e..60371c20 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -227,7 +227,7 @@ void Environment::createDerivedObject(const std::string name, } else { - HADRON_ERROR("object '" + name + "' already allocated"); + HADRON_ERROR(Definition, "object '" + name + "' already allocated"); } } @@ -253,20 +253,20 @@ T * Environment::getObject(const unsigned int address) const } else { - 
HADRON_ERROR("object with address " + std::to_string(address) + + HADRON_ERROR(Definition, "object with address " + std::to_string(address) + " does not have type '" + typeName(&typeid(T)) + "' (has type '" + getObjectType(address) + "')"); } } else { - HADRON_ERROR("object with address " + std::to_string(address) + + HADRON_ERROR(Definition, "object with address " + std::to_string(address) + " is empty"); } } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + HADRON_ERROR(Definition, "no object with address " + std::to_string(address)); } } @@ -292,7 +292,7 @@ bool Environment::isObjectOfType(const unsigned int address) const } else { - HADRON_ERROR("no object with address " + std::to_string(address)); + HADRON_ERROR(Definition, "no object with address " + std::to_string(address)); } } diff --git a/extras/Hadrons/Exceptions.cc b/extras/Hadrons/Exceptions.cc new file mode 100644 index 00000000..bf532c21 --- /dev/null +++ b/extras/Hadrons/Exceptions.cc @@ -0,0 +1,57 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Exceptions.cc + +Copyright (C) 2017 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +#ifndef ERR_SUFF +#define ERR_SUFF " (" + loc + ")" +#endif + +#define CONST_EXC(name, init) \ +name::name(std::string msg, std::string loc)\ +:init\ +{} + +using namespace Grid; +using namespace Hadrons; +using namespace Exceptions; + +// logic errors +CONST_EXC(Logic, logic_error(msg + ERR_SUFF)) +CONST_EXC(Definition, Logic("definition error: " + msg, loc)) +CONST_EXC(Implementation, Logic("implementation error: " + msg, loc)) +CONST_EXC(Range, Logic("range error: " + msg, loc)) +CONST_EXC(Size, Logic("size error: " + msg, loc)) +// runtime errors +CONST_EXC(Runtime, runtime_error(msg + ERR_SUFF)) +CONST_EXC(Argument, Runtime("argument error: " + msg, loc)) +CONST_EXC(Io, Runtime("IO error: " + msg, loc)) +CONST_EXC(Memory, Runtime("memory error: " + msg, loc)) +CONST_EXC(Parsing, Runtime("parsing error: " + msg, loc)) +CONST_EXC(Program, Runtime("program error: " + msg, loc)) +CONST_EXC(System, Runtime("system error: " + msg, loc)) \ No newline at end of file diff --git a/extras/Hadrons/Exceptions.hpp b/extras/Hadrons/Exceptions.hpp new file mode 100644 index 00000000..8f04ab41 --- /dev/null +++ b/extras/Hadrons/Exceptions.hpp @@ -0,0 +1,72 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Exceptions.hpp + +Copyright (C) 2017 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
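// editorial sketch, not part of the patch ////////////////////////////////////
// Typical use of the exception hierarchy defined here: HADRON_ERROR(class,
// message), declared in Exceptions.hpp below, logs the message and throws
// Exceptions::class, so callers can catch either the specific class or the
// std::logic_error/std::runtime_error bases. The object name 'phi' is
// illustrative only.
try
{
    HADRON_ERROR(Definition, "object 'phi' already exists");
}
catch (Exceptions::Definition &e)
{
    // what() carries the prefixed message plus the source location, e.g.
    // "definition error: object 'phi' already exists (<function> at <file>:<line>)"
    LOG(Error) << e.what() << std::endl;
}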
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#ifndef Hadrons_Exceptions_hpp_ +#define Hadrons_Exceptions_hpp_ + +#include +#ifndef Hadrons_Global_hpp_ +#include +#endif + +#define SRC_LOC std::string(__FUNCTION__) + " at " + std::string(__FILE__) + ":"\ + + std::to_string(__LINE__) +#define HADRON_ERROR(exc, msg)\ +LOG(Error) << msg << std::endl;\ +throw(Exceptions::exc(msg, SRC_LOC)); + +#define DECL_EXC(name, base) \ +class name: public base\ +{\ +public:\ + name(std::string msg, std::string loc);\ +} + +BEGIN_HADRONS_NAMESPACE + +namespace Exceptions +{ + // logic errors + DECL_EXC(Logic, std::logic_error); + DECL_EXC(Definition, Logic); + DECL_EXC(Implementation, Logic); + DECL_EXC(Range, Logic); + DECL_EXC(Size, Logic); + // runtime errors + DECL_EXC(Runtime, std::runtime_error); + DECL_EXC(Argument, Runtime); + DECL_EXC(Io, Runtime); + DECL_EXC(Memory, Runtime); + DECL_EXC(Parsing, Runtime); + DECL_EXC(Program, Runtime); + DECL_EXC(System, Runtime); +} + +END_HADRONS_NAMESPACE + +#endif // Hadrons_Exceptions_hpp_ diff --git a/extras/Hadrons/Factory.hpp b/extras/Hadrons/Factory.hpp index da86acae..65ce03ca 100644 --- a/extras/Hadrons/Factory.hpp +++ b/extras/Hadrons/Factory.hpp @@ -95,7 +95,7 @@ std::unique_ptr Factory::create(const std::string type, } catch (std::out_of_range &) { - HADRON_ERROR("object of type '" + type + "' unknown"); + HADRON_ERROR(Argument, "object of type '" + type + "' unknown"); } return func(name); diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index 4c37b961..c3d60bf2 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -100,11 +100,6 @@ public: }; #define LOG(channel) std::cout << HadronsLog##channel -#define HADRON_ERROR(msg)\ -LOG(Error) << msg << " (" << __FUNCTION__ << " at " << __FILE__ << ":"\ - << __LINE__ << ")" << std::endl;\ -abort(); - #define DEBUG_VAR(var) LOG(Debug) << #var << "= " << (var) << std::endl; extern HadronsLogger HadronsLogError; @@ -176,4 +171,6 @@ typedef XmlWriter CorrWriter; END_HADRONS_NAMESPACE +#include + #endif // Hadrons_Global_hpp_ diff --git a/extras/Hadrons/Graph.hpp b/extras/Hadrons/Graph.hpp index bb9ae679..a9c240fa 100644 --- a/extras/Hadrons/Graph.hpp +++ b/extras/Hadrons/Graph.hpp @@ -185,7 +185,7 @@ void Graph::removeVertex(const T &value) } else { - HADRON_ERROR("vertex " << value << " does not exists"); + HADRON_ERROR(Range, "vertex does not exists"); } // remove all edges containing the vertex @@ -214,7 +214,7 @@ void Graph::removeEdge(const Edge &e) } else { - HADRON_ERROR("edge " << e << " does not exists"); + HADRON_ERROR(Range, "edge does not exists"); } } @@ -260,7 +260,7 @@ void Graph::mark(const T &value, const bool doMark) } else { - HADRON_ERROR("vertex " << value << " does not exists"); + HADRON_ERROR(Range, "vertex does not exists"); } } @@ -298,7 +298,7 @@ bool Graph::isMarked(const T &value) const } else { - HADRON_ERROR("vertex " << value << " does not exists"); + HADRON_ERROR(Range, "vertex does not exists"); return false; } @@ -544,7 +544,7 @@ std::vector Graph::topoSort(void) { if (tmpMarked.at(v)) { - HADRON_ERROR("cannot topologically sort a cyclic graph"); + HADRON_ERROR(Range, "cannot topologically sort a cyclic graph"); } if (!isMarked(v)) { @@ -603,7 +603,7 @@ std::vector Graph::topoSort(Gen &gen) { if (tmpMarked.at(v)) { - HADRON_ERROR("cannot topologically sort a cyclic graph"); + HADRON_ERROR(Range, 
"cannot topologically sort a cyclic graph"); } if (!isMarked(v)) { diff --git a/extras/Hadrons/Makefile.am b/extras/Hadrons/Makefile.am index 826cb158..3d07679a 100644 --- a/extras/Hadrons/Makefile.am +++ b/extras/Hadrons/Makefile.am @@ -7,6 +7,7 @@ libHadrons_a_SOURCES = \ $(modules_cc) \ Application.cc \ Environment.cc \ + Exceptions.cc \ Global.cc \ Module.cc \ VirtualMachine.cc @@ -15,6 +16,7 @@ nobase_libHadrons_a_HEADERS = \ $(modules_hpp) \ Application.hpp \ Environment.hpp \ + Exceptions.hpp \ Factory.hpp \ GeneticScheduler.hpp \ Global.hpp \ diff --git a/extras/Hadrons/Module.cc b/extras/Hadrons/Module.cc index bf596bfc..383a5c2e 100644 --- a/extras/Hadrons/Module.cc +++ b/extras/Hadrons/Module.cc @@ -50,7 +50,7 @@ std::string ModuleBase::getName(void) const // get factory registration name if available std::string ModuleBase::getRegisteredName(void) { - HADRON_ERROR("module '" + getName() + "' has a type not registered" + HADRON_ERROR(Definition, "module '" + getName() + "' has no registered type" + " in the factory"); } diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index a298c1a1..90922c27 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -118,7 +118,7 @@ void TWardIdentity::setup(void) Ls_ = env().getObjectLs(par().q); if (Ls_ != env().getObjectLs(par().action)) { - HADRON_ERROR("Ls mismatch between quark action and propagator"); + HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); } } diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index f860c403..4d08841d 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -187,7 +187,7 @@ void TGaugeProp::execute(void) { if (Ls_ != env().getObjectLs(par().source)) { - HADRON_ERROR("Ls mismatch between quark action and source"); + HADRON_ERROR(Size, "Ls mismatch between quark action and source"); } else { diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index f8714d88..b0f2846f 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -122,7 +122,7 @@ void TTestSeqConserved::setup(void) auto Ls = env().getObjectLs(par().q); if (Ls != env().getObjectLs(par().action)) { - HADRON_ERROR("Ls mismatch between quark action and propagator"); + HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); } } diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 7f967f66..0c3eca20 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -115,7 +115,7 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } else { - HADRON_ERROR("object '" + out + HADRON_ERROR(Definition, "object '" + out + "' is already produced by module '" + module_[env().getObjectModule(out)].name + "' (while pushing module '" + name + "')"); @@ -144,7 +144,7 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } else { - HADRON_ERROR("module '" + name + "' already exists"); + HADRON_ERROR(Definition, "module '" + name + "' already exists"); } } @@ -171,7 +171,7 @@ ModuleBase * VirtualMachine::getModule(const unsigned int address) const } else { - HADRON_ERROR("no module with address " + std::to_string(address)); + HADRON_ERROR(Definition, "no module with 
address " + std::to_string(address)); } } @@ -188,7 +188,7 @@ unsigned int VirtualMachine::getModuleAddress(const std::string name) const } else { - HADRON_ERROR("no module with name '" + name + "'"); + HADRON_ERROR(Definition, "no module with name '" + name + "'"); } } @@ -200,7 +200,7 @@ std::string VirtualMachine::getModuleName(const unsigned int address) const } else { - HADRON_ERROR("no module with address " + std::to_string(address)); + HADRON_ERROR(Definition, "no module with address " + std::to_string(address)); } } @@ -212,7 +212,7 @@ std::string VirtualMachine::getModuleType(const unsigned int address) const } else { - HADRON_ERROR("no module with address " + std::to_string(address)); + HADRON_ERROR(Definition, "no module with address " + std::to_string(address)); } } @@ -273,7 +273,7 @@ Graph VirtualMachine::makeModuleGraph(void) const // { // if (o.module < 0) // { -// HADRON_ERROR("object '" + o.name + "' does not have a creator"); +// HADRON_ERROR(Runtime, "object '" + o.name + "' does not have a creator"); // } // } // } diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index 357fdb5b..c5557add 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -147,7 +147,7 @@ M * VirtualMachine::getModule(const unsigned int address) const } else { - HADRON_ERROR("module '" + module_[address].name + HADRON_ERROR(Definition, "module '" + module_[address].name + "' does not have type " + typeid(M).name() + "(has type: " + getModuleType(address) + ")"); } From 259d504ef0325879d19d8283a4cd97a4dabd8c1d Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 12 Dec 2017 19:32:58 +0000 Subject: [PATCH 109/145] Hadrons: first full implementation of the module memory profiler --- extras/Hadrons/Application.cc | 5 +- extras/Hadrons/Environment.cc | 13 +- extras/Hadrons/Environment.hpp | 9 +- extras/Hadrons/Global.hpp | 2 +- extras/Hadrons/Module.hpp | 6 +- extras/Hadrons/Modules/MContraction/Meson.hpp | 2 +- extras/Hadrons/VirtualMachine.cc | 120 ++++++++++++++++++ extras/Hadrons/VirtualMachine.hpp | 22 +++- 8 files changed, 166 insertions(+), 13 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index aa66d36f..135e4df4 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -92,9 +92,10 @@ void Application::run(void) { parseParameterFile(parameterFileName_); } - //vm().checkGraph(); - env().printContent(); vm().printContent(); + env().printContent(); + //vm().checkGraph(); + vm().memoryProfile(); if (!scheduled_) { schedule(); diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index 403476d0..6de13e86 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -369,6 +369,16 @@ void Environment::freeAll(void) } } +void Environment::protectObjects(const bool protect) +{ + protect_ = protect; +} + +bool Environment::objectsProtected(void) const +{ + return protect_; +} + // print environment content /////////////////////////////////////////////////// void Environment::printContent(void) const { @@ -376,6 +386,7 @@ void Environment::printContent(void) const for (unsigned int i = 0; i < object_.size(); ++i) { LOG(Debug) << std::setw(4) << i << ": " - << getObjectName(i) << std::endl; + << getObjectName(i) << " (" + << sizeString(getObjectSize(i)) << ")" << std::endl; } } diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 60371c20..adea13ce 100644 --- a/extras/Hadrons/Environment.hpp +++ 
b/extras/Hadrons/Environment.hpp @@ -142,11 +142,14 @@ public: void freeObject(const unsigned int address); void freeObject(const std::string name); void freeAll(void); + void protectObjects(const bool protect); + bool objectsProtected(void) const; // print environment content void printContent(void) const; private: // general unsigned long int locVol_; + bool protect_{true}; // grids std::vector dim_; GridPt grid4d_; @@ -195,8 +198,8 @@ void Holder::reset(T *pt) // general memory management /////////////////////////////////////////////////// template void Environment::createDerivedObject(const std::string name, - const Environment::Storage storage, - const unsigned int Ls, + const Environment::Storage storage, + const unsigned int Ls, Ts && ... args) { if (!hasObject(name)) @@ -206,7 +209,7 @@ void Environment::createDerivedObject(const std::string name, unsigned int address = getObjectAddress(name); - if (!object_[address].data) + if (!object_[address].data or !objectsProtected()) { MemoryStats memStats; diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index c3d60bf2..ebfe94dc 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -36,7 +36,7 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #ifndef SITE_SIZE_TYPE -#define SITE_SIZE_TYPE unsigned int +#define SITE_SIZE_TYPE size_t #endif #define BEGIN_HADRONS_NAMESPACE \ diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index c6b58e9f..25c0ac05 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -160,12 +160,12 @@ public: // parse parameters virtual void parseParameters(XmlReader &reader, const std::string name) = 0; virtual void saveParameters(XmlWriter &writer, const std::string name) = 0; - // execution - void operator()(void); -protected: // setup virtual void setup(void) {}; virtual void execute(void) = 0; + // execution + void operator()(void); +protected: // environment shortcut DEFINE_ENV_ALIAS; // virtual machine shortcut diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 7d19feb8..3c179d44 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -134,7 +134,7 @@ std::vector TMeson::getReference(void) template std::vector TMeson::getOutput(void) { - std::vector output = {getName()}; + std::vector output = {}; return output; } diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 0c3eca20..15e53dbf 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -390,3 +390,123 @@ void VirtualMachine::printContent(void) const << getModuleName(i) << std::endl; } } + +// memory profile ////////////////////////////////////////////////////////////// +VirtualMachine::MemoryProfile VirtualMachine::memoryProfile(void) const +{ + bool protect = env().objectsProtected(); + bool hmsg = HadronsLogMessage.isActive(); + bool gmsg = GridLogMessage.isActive(); + bool err = HadronsLogError.isActive(); + MemoryProfile profile; + auto program = makeModuleGraph().topoSort(); + + profile.module.resize(getNModule()); + env().protectObjects(false); + GridLogMessage.Active(false); + HadronsLogMessage.Active(false); + HadronsLogError.Active(false); + for (auto it = program.rbegin(); it != program.rend(); ++it) + { + auto a = *it; + + if (profile.module[a].empty()) + { + LOG(Debug) << "Profiling memory for module '" << module_[a].name << "' (" << a << ")..." 
<< std::endl; + memoryProfile(profile, a); + env().freeAll(); + } + } + env().protectObjects(protect); + GridLogMessage.Active(gmsg); + HadronsLogMessage.Active(hmsg); + HadronsLogError.Active(err); + LOG(Debug) << "Memory profile:" << std::endl; + LOG(Debug) << "----------------" << std::endl; + for (unsigned int a = 0; a < profile.module.size(); ++a) + { + LOG(Debug) << getModuleName(a) << " (" << a << ")" << std::endl; + for (auto &o: profile.module[a]) + { + LOG(Debug) << "|__ " << env().getObjectName(o.first) << " (" + << sizeString(o.second) << ")" << std::endl; + } + LOG(Debug) << std::endl; + } + LOG(Debug) << "----------------" << std::endl; + + return profile; +} + +void VirtualMachine::resizeProfile(MemoryProfile &profile) const +{ + if (env().getMaxAddress() > profile.object.size()) + { + MemoryPrint empty; + + empty.size = 0; + empty.module = -1; + profile.object.resize(env().getMaxAddress(), empty); + } +} + +void VirtualMachine::updateProfile(MemoryProfile &profile, + const unsigned int address) const +{ + resizeProfile(profile); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile.object[a].module == -1)) + { + profile.object[a].size = env().getObjectSize(a); + profile.object[a].module = address; + profile.module[address][a] = profile.object[a].size; + } + } +} + +void VirtualMachine::cleanEnvironment(MemoryProfile &profile) const +{ + resizeProfile(profile); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile.object[a].module == -1)) + { + env().freeObject(a); + } + } +} + +void VirtualMachine::memoryProfile(MemoryProfile &profile, + const unsigned int address) const +{ + auto m = getModule(address); + + LOG(Debug) << "Setting up module '" << m->getName() << "' (" << address << ")..." 
<< std::endl; + + try + { + m->setup(); + updateProfile(profile, address); + } + catch (Exceptions::Definition &) + { + cleanEnvironment(profile); + for (auto &in: m->getInput()) + { + memoryProfile(profile, env().getObjectModule(in)); + } + for (auto &ref: m->getReference()) + { + memoryProfile(profile, env().getObjectModule(ref)); + } + m->setup(); + updateProfile(profile, address); + } +} + +void VirtualMachine::memoryProfile(MemoryProfile &profile, + const std::string name) const +{ + memoryProfile(profile, getModuleAddress(name)); +} diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index c5557add..56e5a8cf 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -51,8 +51,18 @@ class VirtualMachine { SINGLETON_DEFCTOR(VirtualMachine); public: - typedef SITE_SIZE_TYPE Size; - typedef std::unique_ptr ModPt; + typedef SITE_SIZE_TYPE Size; + typedef std::unique_ptr ModPt; + struct MemoryPrint + { + Size size; + unsigned int module; + }; + struct MemoryProfile + { + std::vector> module; + std::vector object; + }; private: struct ModuleInfo { @@ -100,12 +110,20 @@ public: void checkGraph(void) const; // print VM content void printContent(void) const; + // memory profile + MemoryProfile memoryProfile(void) const; // general execution Size executeProgram(const std::vector &p); Size executeProgram(const std::vector &p); private: // environment shortcut DEFINE_ENV_ALIAS; + // memory profile + void resizeProfile(MemoryProfile &profile) const; + void updateProfile(MemoryProfile &profile, const unsigned int address) const; + void cleanEnvironment(MemoryProfile &profile) const; + void memoryProfile(MemoryProfile &profile, const std::string name) const; + void memoryProfile(MemoryProfile &profile, const unsigned int address) const; private: // general bool dryRun_{false}, memoryProfile_{false}; From a9c8d7dad03f1b39acb5e081c3424d03ee035e07 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 13 Dec 2017 12:13:40 +0000 Subject: [PATCH 110/145] Hadrons: code cleaning --- extras/Hadrons/VirtualMachine.cc | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 15e53dbf..1f4772a6 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -79,21 +79,26 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) unsigned int address; ModuleInfo m; + // module registration ------------------------------------------------- m.data = std::move(pt); m.type = typeIdPt(*m.data.get()); m.name = name; + // input dependencies for (auto &in: m.data->getInput()) { if (!env().hasObject(in)) { + // if object does not exist, add it with no creator module env().addObject(in , -1); } m.input.push_back(env().getObjectAddress(in)); } + // reference dependencies for (auto &ref: m.data->getReference()) { if (!env().hasObject(ref)) { + // if object does not exist, add it with no creator module env().addObject(ref , -1); } m.input.push_back(env().getObjectAddress(ref)); @@ -101,20 +106,24 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) module_.push_back(std::move(m)); address = static_cast(module_.size() - 1); moduleAddress_[name] = address; + // connecting outputs to potential inputs ------------------------------ for (auto &out: getModule(address)->getOutput()) { if (!env().hasObject(out)) { + // output does not exists, add it env().addObject(out, address); } else { if 
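// editorial sketch, not part of the patch ////////////////////////////////////
// One way the MemoryProfile declared above can be consumed, e.g. to report
// the footprint of a single module; 'address' (a module address) is
// illustrative. profile.module[a] maps object addresses to measured sizes,
// and profile.object[o] records the size and creator module of each object.
VirtualMachine::MemoryProfile profile = vm().memoryProfile();
VirtualMachine::Size          total   = 0;

for (auto &o: profile.module[address])
{
    total += o.second;
}
LOG(Message) << "footprint of module '" << vm().getModuleName(address)
             << "': " << sizeString(total) << std::endl;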
(env().getObjectModule(env().getObjectAddress(out)) < 0) { + // output exists but without creator, correct it env().setObjectModule(env().getObjectAddress(out), address); } else { + // output already fully registered, error HADRON_ERROR(Definition, "object '" + out + "' is already produced by module '" + module_[env().getObjectModule(out)].name @@ -122,10 +131,14 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } if (getModule(address)->getReference().size() > 0) { + // module has references, dependency should be propagated + // to children modules; find module with `out` as an input + // and add references to their input auto pred = [this, out](const ModuleInfo &n) { auto &in = n.input; - auto it = std::find(in.begin(), in.end(), env().getObjectAddress(out)); + auto it = std::find(in.begin(), in.end(), + env().getObjectAddress(out)); return (it != in.end()); }; @@ -154,7 +167,7 @@ unsigned int VirtualMachine::getNModule(void) const } void VirtualMachine::createModule(const std::string name, const std::string type, - XmlReader &reader) + XmlReader &reader) { auto &factory = ModuleFactory::getInstance(); auto pt = factory.create(type, name); @@ -267,17 +280,6 @@ Graph VirtualMachine::makeModuleGraph(void) const return moduleGraph; } -// void VirtualMachine::checkGraph(void) const -// { -// for (auto &o: object_) -// { -// if (o.module < 0) -// { -// HADRON_ERROR(Runtime, "object '" + o.name + "' does not have a creator"); -// } -// } -// } - // general execution /////////////////////////////////////////////////////////// #define BIG_SEP "===============" #define SEP "---------------" @@ -412,7 +414,8 @@ VirtualMachine::MemoryProfile VirtualMachine::memoryProfile(void) const if (profile.module[a].empty()) { - LOG(Debug) << "Profiling memory for module '" << module_[a].name << "' (" << a << ")..." << std::endl; + LOG(Debug) << "Profiling memory for module '" << module_[a].name + << "' (" << a << ")..." << std::endl; memoryProfile(profile, a); env().freeAll(); } From 61fc50d616674e198b503d177ed86adef0e2260b Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 13 Dec 2017 13:44:23 +0000 Subject: [PATCH 111/145] Hadrons: better organisation of the VM --- extras/Hadrons/Application.cc | 23 +- extras/Hadrons/Application.hpp | 2 - extras/Hadrons/Module.cc | 5 +- extras/Hadrons/VirtualMachine.cc | 403 +++++++++++++++--------------- extras/Hadrons/VirtualMachine.hpp | 47 ++-- 5 files changed, 236 insertions(+), 244 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 135e4df4..24618447 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -94,8 +94,6 @@ void Application::run(void) } vm().printContent(); env().printContent(); - //vm().checkGraph(); - vm().memoryProfile(); if (!scheduled_) { schedule(); @@ -185,11 +183,11 @@ GeneticScheduler::ObjFunc memPeak = \ void Application::schedule(void) { - DEFINE_MEMPEAK; + //DEFINE_MEMPEAK; // build module dependency graph LOG(Message) << "Building module graph..." 
<< std::endl; - auto graph = vm().makeModuleGraph(); + auto graph = vm().getModuleGraph(); LOG(Debug) << "Module graph:" << std::endl; LOG(Debug) << graph << std::endl; auto con = graph.getConnectedComponents(); @@ -276,7 +274,7 @@ void Application::saveSchedule(const std::string filename) void Application::loadSchedule(const std::string filename) { - DEFINE_MEMPEAK; + //DEFINE_MEMPEAK; TextReader reader(filename); std::vector program; @@ -290,7 +288,7 @@ void Application::loadSchedule(const std::string filename) program_.push_back(vm().getModuleAddress(name)); } scheduled_ = true; - memPeak_ = memPeak(program_); + //memPeak_ = memPeak(program_); } void Application::printSchedule(void) @@ -323,16 +321,3 @@ void Application::configLoop(void) LOG(Message) << BIG_SEP << " End of measurement " << BIG_SEP << std::endl; env().freeAll(); } - -// memory profile ////////////////////////////////////////////////////////////// -void Application::memoryProfile(void) -{ - auto graph = vm().makeModuleGraph(); - auto program = graph.topoSort(); - bool msg; - - msg = HadronsLogMessage.isActive(); - HadronsLogMessage.Active(false); - - HadronsLogMessage.Active(msg); -} diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp index 66488206..8d2537d0 100644 --- a/extras/Hadrons/Application.hpp +++ b/extras/Hadrons/Application.hpp @@ -102,8 +102,6 @@ private: DEFINE_ENV_ALIAS; // virtual machine shortcut DEFINE_VM_ALIAS; - // memory profile - void memoryProfile(void); private: long unsigned int locVol_; std::string parameterFileName_{""}; diff --git a/extras/Hadrons/Module.cc b/extras/Hadrons/Module.cc index 383a5c2e..e5ef0fe4 100644 --- a/extras/Hadrons/Module.cc +++ b/extras/Hadrons/Module.cc @@ -58,8 +58,5 @@ std::string ModuleBase::getRegisteredName(void) void ModuleBase::operator()(void) { setup(); - if (!vm().isDryRun()) - { - execute(); - } + execute(); } diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 1f4772a6..8667a51c 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -36,27 +36,6 @@ using namespace Hadrons; /****************************************************************************** * VirtualMachine implementation * ******************************************************************************/ -// dry run ///////////////////////////////////////////////////////////////////// -void VirtualMachine::dryRun(const bool isDry) -{ - dryRun_ = isDry; -} - -bool VirtualMachine::isDryRun(void) const -{ - return dryRun_; -} - -void VirtualMachine::memoryProfile(const bool doMemoryProfile) -{ - memoryProfile_ = doMemoryProfile; -} - -bool VirtualMachine::doMemoryProfile(void) const -{ - return memoryProfile_; -} - // trajectory counter ////////////////////////////////////////////////////////// void VirtualMachine::setTrajectory(const unsigned int traj) { @@ -259,40 +238,192 @@ bool VirtualMachine::hasModule(const std::string name) const return (moduleAddress_.find(name) != moduleAddress_.end()); } -Graph VirtualMachine::makeModuleGraph(void) const +// print VM content //////////////////////////////////////////////////////////// +void VirtualMachine::printContent(void) const { - Graph moduleGraph; + LOG(Debug) << "Modules: " << std::endl; + for (unsigned int i = 0; i < module_.size(); ++i) + { + LOG(Debug) << std::setw(4) << i << ": " + << getModuleName(i) << std::endl; + } +} + +// module graph //////////////////////////////////////////////////////////////// +Graph VirtualMachine::getModuleGraph(void) +{ + if 
(graphOutdated_) + { + makeModuleGraph(); + graphOutdated_ = false; + } + + return graph_; +} + +void VirtualMachine::makeModuleGraph(void) +{ + Graph graph; // create vertices for (unsigned int m = 0; m < module_.size(); ++m) { - moduleGraph.addVertex(m); + graph.addVertex(m); } // create edges for (unsigned int m = 0; m < module_.size(); ++m) { for (auto &in: module_[m].input) { - moduleGraph.addEdge(env().getObjectModule(in), m); + graph.addEdge(env().getObjectModule(in), m); } } - - return moduleGraph; + graph_ = graph; } -// general execution /////////////////////////////////////////////////////////// -#define BIG_SEP "===============" -#define SEP "---------------" -#define MEM_MSG(size) sizeString(size) - -VirtualMachine::Size -VirtualMachine::executeProgram(const std::vector &p) +// memory profile ////////////////////////////////////////////////////////////// +const VirtualMachine::MemoryProfile & VirtualMachine::getMemoryProfile(void) { - Size memPeak = 0, sizeBefore, sizeAfter; - std::vector> freeProg; + if (memoryProfileOutdated_) + { + makeMemoryProfile(); + memoryProfileOutdated_ = false; + } + + return profile_; +} + +void VirtualMachine::makeMemoryProfile(void) +{ + bool protect = env().objectsProtected(); + bool hmsg = HadronsLogMessage.isActive(); + bool gmsg = GridLogMessage.isActive(); + bool err = HadronsLogError.isActive(); + auto program = getModuleGraph().topoSort(); + + resetProfile(); + profile_.module.resize(getNModule()); + env().protectObjects(false); + GridLogMessage.Active(false); + HadronsLogMessage.Active(false); + HadronsLogError.Active(false); + for (auto it = program.rbegin(); it != program.rend(); ++it) + { + auto a = *it; + + if (profile_.module[a].empty()) + { + LOG(Debug) << "Profiling memory for module '" << module_[a].name + << "' (" << a << ")..." 
<< std::endl; + memoryProfile(a); + env().freeAll(); + } + } + env().protectObjects(protect); + GridLogMessage.Active(gmsg); + HadronsLogMessage.Active(hmsg); + HadronsLogError.Active(err); + LOG(Debug) << "Memory profile:" << std::endl; + LOG(Debug) << "----------------" << std::endl; + for (unsigned int a = 0; a < profile_.module.size(); ++a) + { + LOG(Debug) << getModuleName(a) << " (" << a << ")" << std::endl; + for (auto &o: profile_.module[a]) + { + LOG(Debug) << "|__ " << env().getObjectName(o.first) << " (" + << sizeString(o.second) << ")" << std::endl; + } + LOG(Debug) << std::endl; + } + LOG(Debug) << "----------------" << std::endl; +} + +void VirtualMachine::resetProfile(void) +{ + profile_.module.clear(); + profile_.object.clear(); +} + +void VirtualMachine::resizeProfile(void) +{ + if (env().getMaxAddress() > profile_.object.size()) + { + MemoryPrint empty; + + empty.size = 0; + empty.module = -1; + profile_.object.resize(env().getMaxAddress(), empty); + } +} + +void VirtualMachine::updateProfile(const unsigned int address) +{ + resizeProfile(); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile_.object[a].module == -1)) + { + profile_.object[a].size = env().getObjectSize(a); + profile_.object[a].module = address; + profile_.module[address][a] = profile_.object[a].size; + if (env().getObjectModule(a) < 0) + { + env().setObjectModule(a, address); + } + } + } +} + +void VirtualMachine::cleanEnvironment(void) +{ + resizeProfile(); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile_.object[a].module == -1)) + { + env().freeObject(a); + } + } +} + +void VirtualMachine::memoryProfile(const unsigned int address) +{ + auto m = getModule(address); + + LOG(Debug) << "Setting up module '" << m->getName() + << "' (" << address << ")..." << std::endl; + try + { + m->setup(); + updateProfile(address); + } + catch (Exceptions::Definition &) + { + cleanEnvironment(); + for (auto &in: m->getInput()) + { + memoryProfile(env().getObjectModule(in)); + } + for (auto &ref: m->getReference()) + { + memoryProfile(env().getObjectModule(ref)); + } + m->setup(); + updateProfile(address); + } +} + +void VirtualMachine::memoryProfile(const std::string name) +{ + memoryProfile(getModuleAddress(name)); +} + +// garbage collector /////////////////////////////////////////////////////////// +VirtualMachine::GarbageSchedule +VirtualMachine::makeGarbageSchedule(const std::vector &p) const +{ + GarbageSchedule freeProg; - // build garbage collection schedule - LOG(Debug) << "Building garbage collection schedule..." << std::endl; freeProg.resize(p.size()); for (unsigned int i = 0; i < env().getMaxAddress(); ++i) { @@ -310,34 +441,42 @@ VirtualMachine::executeProgram(const std::vector &p) } } + return freeProg; +} + +// general execution /////////////////////////////////////////////////////////// +#define BIG_SEP "===============" +#define SEP "---------------" +#define MEM_MSG(size) sizeString(size) + +void VirtualMachine::executeProgram(const std::vector &p) const +{ + Size memPeak = 0, sizeBefore, sizeAfter; + GarbageSchedule freeProg; + + // build garbage collection schedule + LOG(Debug) << "Building garbage collection schedule..." << std::endl; + freeProg = makeGarbageSchedule(p); + // program execution LOG(Debug) << "Executing program..." 
<< std::endl; for (unsigned int i = 0; i < p.size(); ++i) { // execute module - if (!isDryRun()) - { - LOG(Message) << SEP << " Measurement step " << i+1 << "/" - << p.size() << " (module '" << module_[p[i]].name - << "') " << SEP << std::endl; - } + LOG(Message) << SEP << " Measurement step " << i + 1 << "/" + << p.size() << " (module '" << module_[p[i]].name + << "') " << SEP << std::endl; (*module_[p[i]].data)(); sizeBefore = env().getTotalSize(); // print used memory after execution - if (!isDryRun()) - { - LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore) - << std::endl; - } + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore) + << std::endl; if (sizeBefore > memPeak) { memPeak = sizeBefore; } // garbage collection for step i - if (!isDryRun()) - { - LOG(Message) << "Garbage collection..." << std::endl; - } + LOG(Message) << "Garbage collection..." << std::endl; for (auto &j: freeProg[i]) { env().freeObject(j); @@ -352,25 +491,20 @@ VirtualMachine::executeProgram(const std::vector &p) } } // print used memory after garbage collection if necessary - if (!isDryRun()) + sizeAfter = env().getTotalSize(); + if (sizeBefore != sizeAfter) { - sizeAfter = env().getTotalSize(); - if (sizeBefore != sizeAfter) - { - LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter) - << std::endl; - } - else - { - LOG(Message) << "Nothing to free" << std::endl; - } + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter) + << std::endl; + } + else + { + LOG(Message) << "Nothing to free" << std::endl; } } - - return memPeak; } -VirtualMachine::Size VirtualMachine::executeProgram(const std::vector &p) +void VirtualMachine::executeProgram(const std::vector &p) const { std::vector pAddress; @@ -378,138 +512,5 @@ VirtualMachine::Size VirtualMachine::executeProgram(const std::vector profile.object.size()) - { - MemoryPrint empty; - - empty.size = 0; - empty.module = -1; - profile.object.resize(env().getMaxAddress(), empty); - } -} - -void VirtualMachine::updateProfile(MemoryProfile &profile, - const unsigned int address) const -{ - resizeProfile(profile); - for (unsigned int a = 0; a < env().getMaxAddress(); ++a) - { - if (env().hasCreatedObject(a) and (profile.object[a].module == -1)) - { - profile.object[a].size = env().getObjectSize(a); - profile.object[a].module = address; - profile.module[address][a] = profile.object[a].size; - } - } -} - -void VirtualMachine::cleanEnvironment(MemoryProfile &profile) const -{ - resizeProfile(profile); - for (unsigned int a = 0; a < env().getMaxAddress(); ++a) - { - if (env().hasCreatedObject(a) and (profile.object[a].module == -1)) - { - env().freeObject(a); - } - } -} - -void VirtualMachine::memoryProfile(MemoryProfile &profile, - const unsigned int address) const -{ - auto m = getModule(address); - - LOG(Debug) << "Setting up module '" << m->getName() << "' (" << address << ")..." 
<< std::endl; - - try - { - m->setup(); - updateProfile(profile, address); - } - catch (Exceptions::Definition &) - { - cleanEnvironment(profile); - for (auto &in: m->getInput()) - { - memoryProfile(profile, env().getObjectModule(in)); - } - for (auto &ref: m->getReference()) - { - memoryProfile(profile, env().getObjectModule(ref)); - } - m->setup(); - updateProfile(profile, address); - } -} - -void VirtualMachine::memoryProfile(MemoryProfile &profile, - const std::string name) const -{ - memoryProfile(profile, getModuleAddress(name)); + executeProgram(pAddress); } diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index 56e5a8cf..88e70b55 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -51,8 +51,9 @@ class VirtualMachine { SINGLETON_DEFCTOR(VirtualMachine); public: - typedef SITE_SIZE_TYPE Size; - typedef std::unique_ptr ModPt; + typedef SITE_SIZE_TYPE Size; + typedef std::unique_ptr ModPt; + typedef std::vector> GarbageSchedule; struct MemoryPrint { Size size; @@ -73,11 +74,6 @@ private: size_t maxAllocated; }; public: - // dry run - void dryRun(const bool isDry); - bool isDryRun(void) const; - void memoryProfile(const bool doMemoryProfile); - bool doMemoryProfile(void) const; // trajectory counter void setTrajectory(const unsigned int traj); unsigned int getTrajectory(void) const; @@ -106,32 +102,47 @@ public: std::string getModuleNamespace(const std::string name) const; bool hasModule(const unsigned int address) const; bool hasModule(const std::string name) const; - Graph makeModuleGraph(void) const; - void checkGraph(void) const; // print VM content void printContent(void) const; + // module graph (could be a const reference if topoSort was const) + Graph getModuleGraph(void); // memory profile - MemoryProfile memoryProfile(void) const; + const MemoryProfile &getMemoryProfile(void); + // garbage collector + GarbageSchedule makeGarbageSchedule(const std::vector &p) const; + // high-water memory function + Size memoryNeeded(const std::vector &p, + const GarbageSchedule &g); + Size memoryNeeded(const std::vector &p); // general execution - Size executeProgram(const std::vector &p); - Size executeProgram(const std::vector &p); + void executeProgram(const std::vector &p) const; + void executeProgram(const std::vector &p) const; private: // environment shortcut DEFINE_ENV_ALIAS; + // module graph + void makeModuleGraph(void); // memory profile - void resizeProfile(MemoryProfile &profile) const; - void updateProfile(MemoryProfile &profile, const unsigned int address) const; - void cleanEnvironment(MemoryProfile &profile) const; - void memoryProfile(MemoryProfile &profile, const std::string name) const; - void memoryProfile(MemoryProfile &profile, const unsigned int address) const; + void makeMemoryProfile(void); + void resetProfile(void); + void resizeProfile(void); + void updateProfile(const unsigned int address); + void cleanEnvironment(void); + void memoryProfile(const std::string name); + void memoryProfile(const unsigned int address); private: // general - bool dryRun_{false}, memoryProfile_{false}; unsigned int traj_; // module and related maps std::vector module_; std::map moduleAddress_; std::string currentModule_{""}; + // module graph + bool graphOutdated_{true}; + Graph graph_; + // memory profile + bool memoryProfileOutdated_{true}; + MemoryProfile profile_; }; /****************************************************************************** From 0887566134b7cd7b1a4fb3af69180c0dd9dbed91 Mon Sep 17 00:00:00 
2001 From: Antonin Portelli Date: Wed, 13 Dec 2017 16:36:15 +0000 Subject: [PATCH 112/145] Hadrons: scheduler back! --- extras/Hadrons/Application.cc | 110 +++-------------------- extras/Hadrons/Application.hpp | 31 +++---- extras/Hadrons/GeneticScheduler.hpp | 66 +++++++------- extras/Hadrons/VirtualMachine.cc | 134 +++++++++++++++++++++++----- extras/Hadrons/VirtualMachine.hpp | 34 ++++--- 5 files changed, 189 insertions(+), 186 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 24618447..9a3366d4 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -94,10 +94,7 @@ void Application::run(void) } vm().printContent(); env().printContent(); - if (!scheduled_) - { - schedule(); - } + schedule(); printSchedule(); configLoop(); } @@ -122,11 +119,13 @@ void Application::parseParameterFile(const std::string parameterFileName) setPar(par); if (!push(reader, "modules")) { - HADRON_ERROR(Parsing, "Cannot open node 'modules' in parameter file '" + parameterFileName + "'"); + HADRON_ERROR(Parsing, "Cannot open node 'modules' in parameter file '" + + parameterFileName + "'"); } if (!push(reader, "module")) { - HADRON_ERROR(Parsing, "Cannot open node 'modules/module' in parameter file '" + parameterFileName + "'"); + HADRON_ERROR(Parsing, "Cannot open node 'modules/module' in parameter file '" + + parameterFileName + "'"); } do { @@ -160,98 +159,13 @@ void Application::saveParameterFile(const std::string parameterFileName) } // schedule computation //////////////////////////////////////////////////////// -#define MEM_MSG(size)\ -sizeString((size)*locVol_) << " (" << sizeString(size) << "/site)" - -#define DEFINE_MEMPEAK \ -GeneticScheduler::ObjFunc memPeak = \ -[this](const std::vector &program)\ -{\ - unsigned int memPeak;\ - bool msg;\ - \ - msg = HadronsLogMessage.isActive();\ - HadronsLogMessage.Active(false);\ - vm().dryRun(true);\ - memPeak = vm().executeProgram(program);\ - vm().dryRun(false);\ - env().freeAll();\ - HadronsLogMessage.Active(msg);\ - \ - return memPeak;\ -} - void Application::schedule(void) { - //DEFINE_MEMPEAK; - - // build module dependency graph - LOG(Message) << "Building module graph..." << std::endl; - auto graph = vm().getModuleGraph(); - LOG(Debug) << "Module graph:" << std::endl; - LOG(Debug) << graph << std::endl; - auto con = graph.getConnectedComponents(); - - // constrained topological sort using a genetic algorithm - // LOG(Message) << "Scheduling computation..." << std::endl; - // LOG(Message) << " #module= " << graph.size() << std::endl; - // LOG(Message) << " population size= " << par_.genetic.popSize << std::endl; - // LOG(Message) << " max. generation= " << par_.genetic.maxGen << std::endl; - // LOG(Message) << " max. cst. 
generation= " << par_.genetic.maxCstGen << std::endl; - // LOG(Message) << " mutation rate= " << par_.genetic.mutationRate << std::endl; - - // unsigned int k = 0, gen, prevPeak, nCstPeak = 0; - // std::random_device rd; - // GeneticScheduler::Parameters par; - - // par.popSize = par_.genetic.popSize; - // par.mutationRate = par_.genetic.mutationRate; - // par.seed = rd(); - // memPeak_ = 0; - // CartesianCommunicator::BroadcastWorld(0, &(par.seed), sizeof(par.seed)); - for (unsigned int i = 0; i < con.size(); ++i) + if (!scheduled_ and !loadedSchedule_) { - // GeneticScheduler scheduler(con[i], memPeak, par); - - // gen = 0; - // do - // { - // LOG(Debug) << "Generation " << gen << ":" << std::endl; - // scheduler.nextGeneration(); - // if (gen != 0) - // { - // if (prevPeak == scheduler.getMinValue()) - // { - // nCstPeak++; - // } - // else - // { - // nCstPeak = 0; - // } - // } - - // prevPeak = scheduler.getMinValue(); - // if (gen % 10 == 0) - // { - // LOG(Iterative) << "Generation " << gen << ": " - // << MEM_MSG(scheduler.getMinValue()) << std::endl; - // } - - // gen++; - // } while ((gen < par_.genetic.maxGen) - // and (nCstPeak < par_.genetic.maxCstGen)); - // auto &t = scheduler.getMinSchedule(); - // if (scheduler.getMinValue() > memPeak_) - // { - // memPeak_ = scheduler.getMinValue(); - // } - auto t = con[i].topoSort(); - for (unsigned int j = 0; j < t.size(); ++j) - { - program_.push_back(t[j]); - } + program_ = vm().schedule(par_.genetic); + scheduled_ = true; } - scheduled_ = true; } void Application::saveSchedule(const std::string filename) @@ -274,8 +188,6 @@ void Application::saveSchedule(const std::string filename) void Application::loadSchedule(const std::string filename) { - //DEFINE_MEMPEAK; - TextReader reader(filename); std::vector program; @@ -287,8 +199,7 @@ void Application::loadSchedule(const std::string filename) { program_.push_back(vm().getModuleAddress(name)); } - scheduled_ = true; - //memPeak_ = memPeak(program_); + loadedSchedule_ = true; } void Application::printSchedule(void) @@ -297,7 +208,8 @@ void Application::printSchedule(void) { HADRON_ERROR(Definition, "Computation not scheduled"); } - LOG(Message) << "Schedule (memory peak: " << MEM_MSG(memPeak_) << "):" + auto peak = vm().memoryNeeded(program_); + LOG(Message) << "Schedule (memory needed: " << sizeString(peak) << "):" << std::endl; for (unsigned int i = 0; i < program_.size(); ++i) { diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp index 8d2537d0..4b2ce77b 100644 --- a/extras/Hadrons/Application.hpp +++ b/extras/Hadrons/Application.hpp @@ -50,25 +50,13 @@ public: unsigned int, end, unsigned int, step); }; - class GeneticPar: Serializable - { - public: - GeneticPar(void): - popSize{20}, maxGen{1000}, maxCstGen{100}, mutationRate{.1} {}; - public: - GRID_SERIALIZABLE_CLASS_MEMBERS(GeneticPar, - unsigned int, popSize, - unsigned int, maxGen, - unsigned int, maxCstGen, - double , mutationRate); - }; class GlobalPar: Serializable { public: GRID_SERIALIZABLE_CLASS_MEMBERS(GlobalPar, - TrajRange, trajCounter, - GeneticPar, genetic, - std::string, seed); + TrajRange, trajCounter, + VirtualMachine::GeneticPar, genetic, + std::string, seed); }; public: // constructors @@ -103,12 +91,11 @@ private: // virtual machine shortcut DEFINE_VM_ALIAS; private: - long unsigned int locVol_; - std::string parameterFileName_{""}; - GlobalPar par_; - std::vector program_; - Environment::Size memPeak_; - bool scheduled_{false}; + long unsigned int locVol_; + std::string 
parameterFileName_{""}; + GlobalPar par_; + VirtualMachine::Program program_; + bool scheduled_{false}, loadedSchedule_{false}; }; /****************************************************************************** @@ -119,6 +106,7 @@ template void Application::createModule(const std::string name) { vm().createModule(name); + scheduled_ = false; } template @@ -126,6 +114,7 @@ void Application::createModule(const std::string name, const typename M::Par &par) { vm().createModule(name, par); + scheduled_ = false; } END_HADRONS_NAMESPACE diff --git a/extras/Hadrons/GeneticScheduler.hpp b/extras/Hadrons/GeneticScheduler.hpp index 3b0195e7..f199f1ed 100644 --- a/extras/Hadrons/GeneticScheduler.hpp +++ b/extras/Hadrons/GeneticScheduler.hpp @@ -38,13 +38,13 @@ BEGIN_HADRONS_NAMESPACE /****************************************************************************** * Scheduler based on a genetic algorithm * ******************************************************************************/ -template +template class GeneticScheduler { public: - typedef std::vector Gene; - typedef std::pair GenePair; - typedef std::function ObjFunc; + typedef std::vector Gene; + typedef std::pair GenePair; + typedef std::function ObjFunc; struct Parameters { double mutationRate; @@ -65,7 +65,7 @@ public: void benchmarkCrossover(const unsigned int nIt); // print population friend std::ostream & operator<<(std::ostream &out, - const GeneticScheduler &s) + const GeneticScheduler &s) { out << "["; for (auto &p: s.population_) @@ -87,19 +87,19 @@ private: void mutation(Gene &m, const Gene &c); private: - Graph &graph_; - const ObjFunc &func_; - const Parameters par_; - std::multimap population_; - std::mt19937 gen_; + Graph &graph_; + const ObjFunc &func_; + const Parameters par_; + std::multimap population_; + std::mt19937 gen_; }; /****************************************************************************** * template implementation * ******************************************************************************/ // constructor ///////////////////////////////////////////////////////////////// -template -GeneticScheduler::GeneticScheduler(Graph &graph, const ObjFunc &func, +template +GeneticScheduler::GeneticScheduler(Graph &graph, const ObjFunc &func, const Parameters &par) : graph_(graph) , func_(func) @@ -109,22 +109,22 @@ GeneticScheduler::GeneticScheduler(Graph &graph, const ObjFunc &func, } // access ////////////////////////////////////////////////////////////////////// -template -const typename GeneticScheduler::Gene & -GeneticScheduler::getMinSchedule(void) +template +const typename GeneticScheduler::Gene & +GeneticScheduler::getMinSchedule(void) { return population_.begin()->second; } -template -int GeneticScheduler::getMinValue(void) +template +int GeneticScheduler::getMinValue(void) { return population_.begin()->first; } // breed a new generation ////////////////////////////////////////////////////// -template -void GeneticScheduler::nextGeneration(void) +template +void GeneticScheduler::nextGeneration(void) { // random initialization of the population if necessary if (population_.size() != par_.popSize) @@ -158,8 +158,8 @@ void GeneticScheduler::nextGeneration(void) } // evolution steps ///////////////////////////////////////////////////////////// -template -void GeneticScheduler::initPopulation(void) +template +void GeneticScheduler::initPopulation(void) { population_.clear(); for (unsigned int i = 0; i < par_.popSize; ++i) @@ -170,8 +170,8 @@ void GeneticScheduler::initPopulation(void) } } -template -void 
GeneticScheduler::doCrossover(void) +template +void GeneticScheduler::doCrossover(void) { auto p = selectPair(); Gene &p1 = *(p.first), &p2 = *(p.second); @@ -185,8 +185,8 @@ void GeneticScheduler::doCrossover(void) } } -template -void GeneticScheduler::doMutation(void) +template +void GeneticScheduler::doMutation(void) { std::uniform_real_distribution mdis(0., 1.); std::uniform_int_distribution pdis(0, population_.size() - 1); @@ -206,8 +206,8 @@ void GeneticScheduler::doMutation(void) } // genetic operators /////////////////////////////////////////////////////////// -template -typename GeneticScheduler::GenePair GeneticScheduler::selectPair(void) +template +typename GeneticScheduler::GenePair GeneticScheduler::selectPair(void) { std::vector prob; unsigned int ind; @@ -233,8 +233,8 @@ typename GeneticScheduler::GenePair GeneticScheduler::selectPair(void) return std::make_pair(p1, p2); } -template -void GeneticScheduler::crossover(Gene &c1, Gene &c2, const Gene &p1, +template +void GeneticScheduler::crossover(Gene &c1, Gene &c2, const Gene &p1, const Gene &p2) { Gene buf; @@ -268,8 +268,8 @@ void GeneticScheduler::crossover(Gene &c1, Gene &c2, const Gene &p1, } } -template -void GeneticScheduler::mutation(Gene &m, const Gene &c) +template +void GeneticScheduler::mutation(Gene &m, const Gene &c) { Gene buf; std::uniform_int_distribution dis(0, c.size() - 1); @@ -298,8 +298,8 @@ void GeneticScheduler::mutation(Gene &m, const Gene &c) } } -template -void GeneticScheduler::benchmarkCrossover(const unsigned int nIt) +template +void GeneticScheduler::benchmarkCrossover(const unsigned int nIt) { Gene p1, p2, c1, c2; double neg = 0., eq = 0., pos = 0., total; diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 8667a51c..8a6bd149 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -27,6 +27,7 @@ See the full license in the file "LICENSE" in the top level distribution directo /* END LEGAL */ #include +#include #include using namespace Grid; @@ -133,6 +134,8 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } } } + graphOutdated_ = true; + memoryProfileOutdated_ = true; } else { @@ -364,6 +367,7 @@ void VirtualMachine::updateProfile(const unsigned int address) if (env().hasCreatedObject(a) and (profile_.object[a].module == -1)) { profile_.object[a].size = env().getObjectSize(a); + profile_.object[a].storage = env().getObjectStorage(a); profile_.object[a].module = address; profile_.module[address][a] = profile_.object[a].size; if (env().getObjectModule(a) < 0) @@ -419,37 +423,130 @@ void VirtualMachine::memoryProfile(const std::string name) } // garbage collector /////////////////////////////////////////////////////////// -VirtualMachine::GarbageSchedule -VirtualMachine::makeGarbageSchedule(const std::vector &p) const +VirtualMachine::GarbageSchedule +VirtualMachine::makeGarbageSchedule(const Program &p) const { GarbageSchedule freeProg; freeProg.resize(p.size()); - for (unsigned int i = 0; i < env().getMaxAddress(); ++i) + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) { - auto pred = [i, this](const unsigned int j) + if (env().getObjectStorage(a) == Environment::Storage::temporary) { - auto &in = module_[j].input; - auto it = std::find(in.begin(), in.end(), i); - - return (it != in.end()) or (j == env().getObjectModule(i)); - }; - auto it = std::find_if(p.rbegin(), p.rend(), pred); - if (it != p.rend()) + auto it = std::find(p.begin(), p.end(), env().getObjectModule(a)); + + if (it != p.end()) + { + 
freeProg[std::distance(p.begin(), it)].insert(a); + } + } + else if (env().getObjectStorage(a) == Environment::Storage::object) { - freeProg[std::distance(it, p.rend()) - 1].insert(i); + auto pred = [a, this](const unsigned int b) + { + auto &in = module_[b].input; + auto it = std::find(in.begin(), in.end(), a); + + return (it != in.end()) or (b == env().getObjectModule(a)); + }; + auto it = std::find_if(p.rbegin(), p.rend(), pred); + if (it != p.rend()) + { + freeProg[std::distance(it, p.rend()) - 1].insert(a); + } } } return freeProg; } +// high-water memory function ////////////////////////////////////////////////// +VirtualMachine::Size VirtualMachine::memoryNeeded(const Program &p) +{ + const MemoryProfile &profile = getMemoryProfile(); + GarbageSchedule freep = makeGarbageSchedule(p); + Size current = 0, max = 0; + + for (unsigned int i = 0; i < p.size(); ++i) + { + for (auto &o: profile.module[p[i]]) + { + current += o.second; + } + max = std::max(current, max); + for (auto &o: freep[i]) + { + current -= profile.object[o].size; + } + } + + return max; +} + +// genetic scheduler /////////////////////////////////////////////////////////// +VirtualMachine::Program VirtualMachine::schedule(const GeneticPar &par) +{ + typedef GeneticScheduler Scheduler; + + auto graph = getModuleGraph(); + + //constrained topological sort using a genetic algorithm + LOG(Message) << "Scheduling computation..." << std::endl; + LOG(Message) << " #module= " << graph.size() << std::endl; + LOG(Message) << " population size= " << par.popSize << std::endl; + LOG(Message) << " max. generation= " << par.maxGen << std::endl; + LOG(Message) << " max. cst. generation= " << par.maxCstGen << std::endl; + LOG(Message) << " mutation rate= " << par.mutationRate << std::endl; + + unsigned int k = 0, gen, prevPeak, nCstPeak = 0; + std::random_device rd; + Scheduler::Parameters gpar; + + gpar.popSize = par.popSize; + gpar.mutationRate = par.mutationRate; + gpar.seed = rd(); + CartesianCommunicator::BroadcastWorld(0, &(gpar.seed), sizeof(gpar.seed)); + Scheduler::ObjFunc memPeak = [this](const Program &p)->Size + { + return memoryNeeded(p); + }; + Scheduler scheduler(graph, memPeak, gpar); + gen = 0; + do + { + LOG(Debug) << "Generation " << gen << ":" << std::endl; + scheduler.nextGeneration(); + if (gen != 0) + { + if (prevPeak == scheduler.getMinValue()) + { + nCstPeak++; + } + else + { + nCstPeak = 0; + } + } + + prevPeak = scheduler.getMinValue(); + if (gen % 10 == 0) + { + LOG(Iterative) << "Generation " << gen << ": " + << sizeString(scheduler.getMinValue()) << std::endl; + } + + gen++; + } while ((gen < par.maxGen) and (nCstPeak < par.maxCstGen)); + + return scheduler.getMinSchedule(); +} + // general execution /////////////////////////////////////////////////////////// #define BIG_SEP "===============" #define SEP "---------------" #define MEM_MSG(size) sizeString(size) -void VirtualMachine::executeProgram(const std::vector &p) const +void VirtualMachine::executeProgram(const Program &p) const { Size memPeak = 0, sizeBefore, sizeAfter; GarbageSchedule freeProg; @@ -481,15 +578,6 @@ void VirtualMachine::executeProgram(const std::vector &p) const { env().freeObject(j); } - // free temporaries - for (unsigned int i = 0; i < env().getMaxAddress(); ++i) - { - if ((env().getObjectStorage(i) == Environment::Storage::temporary) - and env().hasCreatedObject(i)) - { - env().freeObject(i); - } - } // print used memory after garbage collection if necessary sizeAfter = env().getTotalSize(); if (sizeBefore != sizeAfter) @@ -506,7 
+594,7 @@ void VirtualMachine::executeProgram(const std::vector &p) const void VirtualMachine::executeProgram(const std::vector &p) const { - std::vector pAddress; + Program pAddress; for (auto &n: p) { diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index 88e70b55..a411c108 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -51,19 +51,33 @@ class VirtualMachine { SINGLETON_DEFCTOR(VirtualMachine); public: - typedef SITE_SIZE_TYPE Size; - typedef std::unique_ptr ModPt; - typedef std::vector> GarbageSchedule; + typedef SITE_SIZE_TYPE Size; + typedef std::unique_ptr ModPt; + typedef std::vector> GarbageSchedule; + typedef std::vector Program; struct MemoryPrint { - Size size; - unsigned int module; + Size size; + Environment::Storage storage; + unsigned int module; }; struct MemoryProfile { std::vector> module; std::vector object; }; + class GeneticPar: Serializable + { + public: + GeneticPar(void): + popSize{20}, maxGen{1000}, maxCstGen{100}, mutationRate{.1} {}; + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(GeneticPar, + unsigned int, popSize, + unsigned int, maxGen, + unsigned int, maxCstGen, + double , mutationRate); + }; private: struct ModuleInfo { @@ -109,13 +123,13 @@ public: // memory profile const MemoryProfile &getMemoryProfile(void); // garbage collector - GarbageSchedule makeGarbageSchedule(const std::vector &p) const; + GarbageSchedule makeGarbageSchedule(const Program &p) const; // high-water memory function - Size memoryNeeded(const std::vector &p, - const GarbageSchedule &g); - Size memoryNeeded(const std::vector &p); + Size memoryNeeded(const Program &p); + // genetic scheduler + Program schedule(const GeneticPar &par); // general execution - void executeProgram(const std::vector &p) const; + void executeProgram(const Program &p) const; void executeProgram(const std::vector &p) const; private: // environment shortcut From 842754bea9f8c4ba42c9295854342c6857f061bf Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 13 Dec 2017 19:41:41 +0000 Subject: [PATCH 113/145] Hadrons: most modules ported to the new interface, compiles but untested --- extras/Hadrons/Module.hpp | 4 +- extras/Hadrons/Modules.hpp | 38 +++++------ extras/Hadrons/Modules/MAction/DWF.hpp | 11 ++-- extras/Hadrons/Modules/MAction/Wilson.hpp | 1 + .../Hadrons/Modules/MContraction/Baryon.hpp | 32 +++++++--- .../Hadrons/Modules/MContraction/DiscLoop.hpp | 17 +++-- .../Hadrons/Modules/MContraction/Gamma3pt.hpp | 21 +++++-- extras/Hadrons/Modules/MContraction/Meson.hpp | 11 ++-- .../Modules/MContraction/WardIdentity.hpp | 36 ++++++++--- .../Modules/MContraction/WeakHamiltonian.hpp | 1 + .../MContraction/WeakHamiltonianEye.cc | 41 ++++++++---- .../MContraction/WeakHamiltonianNonEye.cc | 44 ++++++++----- .../MContraction/WeakNeutral4ptDisc.cc | 39 ++++++++---- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 16 ++--- extras/Hadrons/Modules/MGauge/Load.cc | 19 ++++-- extras/Hadrons/Modules/MGauge/Load.hpp | 1 + extras/Hadrons/Modules/MGauge/Random.cc | 16 ++++- extras/Hadrons/Modules/MGauge/Random.hpp | 1 + extras/Hadrons/Modules/MGauge/StochEm.cc | 29 +++++---- extras/Hadrons/Modules/MGauge/StochEm.hpp | 1 + extras/Hadrons/Modules/MGauge/Unit.cc | 1 + extras/Hadrons/Modules/MLoop/NoiseLoop.hpp | 18 ++++-- extras/Hadrons/Modules/MSink/Point.hpp | 13 ++-- extras/Hadrons/Modules/MSink/Smear.hpp | 23 ++++--- extras/Hadrons/Modules/MSource/Point.hpp | 11 ++-- .../Hadrons/Modules/MSource/SeqConserved.hpp | 17 +++-- 
extras/Hadrons/Modules/MSource/SeqGamma.hpp | 53 +++++++++++----- extras/Hadrons/Modules/MSource/Wall.hpp | 49 ++++++++++----- extras/Hadrons/Modules/MSource/Z2.hpp | 38 ++++++++--- .../Modules/MUtilities/TestSeqConserved.hpp | 44 ++++++++----- .../Modules/MUtilities/TestSeqGamma.hpp | 28 ++++++--- extras/Hadrons/modules.inc | 63 +++++++++---------- tests/hadrons/Test_hadrons.hpp | 40 ++++++------ 33 files changed, 504 insertions(+), 273 deletions(-) diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 25c0ac05..390573d8 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -92,8 +92,8 @@ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define envGet(type, name)\ *env().template getObject(name) -#define envGetTmp(type, name)\ -*env().template getObject(getName() + "_tmp_" + name) +#define envGetTmp(type, var)\ +type &var = *env().template getObject(getName() + "_tmp_" + #var) #define envHasType(type, name)\ env().template isObjectOfType(name) diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index bb574a14..61a20058 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -30,31 +30,31 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include -// #include -// #include -// #include +#include +#include +#include #include -// #include -// #include -// #include -// #include -// #include +#include +#include +#include +#include +#include #include -// #include -// #include -// #include +#include +#include +#include #include -// #include +#include // #include // #include // #include #include -// #include +#include #include #include -// #include -// #include -// #include -// #include -// #include -// #include +#include +#include +#include +#include +#include +#include diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 91e4ec94..0cb9a4cb 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -119,12 +119,13 @@ void TDWF::setup(void) << std::endl; LOG(Message) << "Fermion boundary conditions: " << par().boundary << std::endl; + env().createGrid(par().Ls); - auto &U = envGet(LatticeGaugeField, par().gauge); - auto &g4 = *env().getGrid(); - auto &grb4 = *env().getRbGrid(); - auto &g5 = *env().getGrid(par().Ls); - auto &grb5 = *env().getRbGrid(par().Ls); + auto &U = envGet(LatticeGaugeField, par().gauge); + auto &g4 = *env().getGrid(); + auto &grb4 = *env().getRbGrid(); + auto &g5 = *env().getGrid(par().Ls); + auto &grb5 = *env().getRbGrid(par().Ls); std::vector boundary = strToVec(par().boundary); typename DomainWallFermion::ImplParams implParams(boundary); envCreateDerived(FMat, DomainWallFermion, getName(), par().Ls, U, g5, diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index 1ca3bf59..a6b3f0d6 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -115,6 +115,7 @@ void TWilson::setup(void) << " using gauge field '" << par().gauge << "'" << std::endl; LOG(Message) << "Fermion boundary conditions: " << par().boundary << std::endl; + auto &U = envGet(LatticeGaugeField, par().gauge); auto &grid = *env().getGrid(); auto &gridRb = *env().getRbGrid(); diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index da927391..28f6aa51 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ 
b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -71,8 +71,11 @@ public: virtual ~TBaryon(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: + // setup + virtual void setup(void); // execution virtual void execute(void); }; @@ -97,14 +100,29 @@ std::vector TBaryon::getInput(void) return input; } +template +std::vector TBaryon::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TBaryon::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } +// setup /////////////////////////////////////////////////////////////////////// +template +void TBaryon::setup(void) +{ + envTmpLat(LatticeComplex, "c"); +} + // execution /////////////////////////////////////////////////////////////////// template void TBaryon::execute(void) @@ -113,12 +131,12 @@ void TBaryon::execute(void) << " quarks '" << par().q1 << "', '" << par().q2 << "', and '" << par().q3 << "'" << std::endl; - CorrWriter writer(par().output); - PropagatorField1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); - PropagatorField3 &q3 = *env().template getObject(par().q2); - LatticeComplex c(env().getGrid()); - Result result; + CorrWriter writer(par().output); + auto &q1 = envGet(PropagatorField1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + auto &q3 = envGet(PropagatorField3, par().q2); + envGetTmp(LatticeComplex, c); + Result result; // FIXME: do contractions diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index f8da3943..c0fbe296 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -67,6 +67,7 @@ public: virtual ~TDiscLoop(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -95,10 +96,18 @@ std::vector TDiscLoop::getInput(void) return in; } +template +std::vector TDiscLoop::getReference(void) +{ + std::vector out = {}; + + return out; +} + template std::vector TDiscLoop::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -107,7 +116,7 @@ std::vector TDiscLoop::getOutput(void) template void TDiscLoop::setup(void) { - + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// @@ -119,12 +128,12 @@ void TDiscLoop::execute(void) << " insertion." 
<< std::endl; CorrWriter writer(par().output); - PropagatorField &q_loop = *env().template getObject(par().q_loop); - LatticeComplex c(env().getGrid()); + auto &q_loop = envGet(PropagatorField, par().q_loop); Gamma gamma(par().gamma); std::vector buf; Result result; + envGetTmp(LatticeComplex, c); c = trace(gamma*q_loop); sliceSum(c, buf, Tp); diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index a8653186..4a6baf3e 100644 --- a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -98,6 +98,7 @@ public: virtual ~TGamma3pt(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -126,10 +127,18 @@ std::vector TGamma3pt::getInput(void) return in; } +template +std::vector TGamma3pt::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TGamma3pt::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -138,7 +147,7 @@ std::vector TGamma3pt::getOutput(void) template void TGamma3pt::setup(void) { - + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// @@ -153,10 +162,9 @@ void TGamma3pt::execute(void) // Initialise variables. q2 and q3 are normal propagators, q1 may be // sink smeared. CorrWriter writer(par().output); - SlicedPropagator1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); - PropagatorField3 &q3 = *env().template getObject(par().q3); - LatticeComplex c(env().getGrid()); + auto &q1 = envGet(SlicedPropagator1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + auto &q3 = envGet(PropagatorField2, par().q3); Gamma g5(Gamma::Algebra::Gamma5); Gamma gamma(par().gamma); std::vector buf; @@ -165,6 +173,7 @@ void TGamma3pt::execute(void) // Extract relevant timeslice of sinked propagator q1, then contract & // sum over all spacial positions of gamma insertion. SitePropagator1 q1Snk = q1[par().tSnk]; + envGetTmp(LatticeComplex, c); c = trace(g5*q1Snk*adj(q2)*(g5*gamma)*q3); sliceSum(c, buf, Tp); diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 3c179d44..1fd86d3a 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -161,6 +161,7 @@ void TMeson::parseGammaString(std::vector &gammaList) // Parse individual contractions from input string. 
gammaList = strToVec(par().gammas); } + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// @@ -192,8 +193,8 @@ void TMeson::execute(void) if (envHasType(SlicedPropagator1, par().q1) and envHasType(SlicedPropagator2, par().q2)) { - SlicedPropagator1 &q1 = envGet(SlicedPropagator1, par().q1); - SlicedPropagator2 &q2 = envGet(SlicedPropagator2, par().q2); + auto &q1 = envGet(SlicedPropagator1, par().q1); + auto &q2 = envGet(SlicedPropagator2, par().q2); LOG(Message) << "(propagator already sinked)" << std::endl; for (unsigned int i = 0; i < result.size(); ++i) @@ -209,10 +210,10 @@ void TMeson::execute(void) } else { - PropagatorField1 &q1 = envGet(PropagatorField1, par().q1); - PropagatorField2 &q2 = envGet(PropagatorField2, par().q2); - LatticeComplex c(env().getGrid()); + auto &q1 = envGet(PropagatorField1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + envGetTmp(LatticeComplex, c); LOG(Message) << "(using sink '" << par().sink << "')" << std::endl; for (unsigned int i = 0; i < result.size(); ++i) { diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index 90922c27..c92c7243 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -73,6 +73,7 @@ public: virtual ~TWardIdentity(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -103,10 +104,18 @@ std::vector TWardIdentity::getInput(void) return in; } +template +std::vector TWardIdentity::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TWardIdentity::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -120,6 +129,15 @@ void TWardIdentity::setup(void) { HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); } + envTmpLat(PropagatorField, "tmp"); + envTmpLat(PropagatorField, "vector_WI"); + if (par().test_axial) + { + envTmpLat(PropagatorField, "psi"); + envTmpLat(LatticeComplex, "PP"); + envTmpLat(LatticeComplex, "axial_defect"); + envTmpLat(LatticeComplex, "PJ5q"); + } } // execution /////////////////////////////////////////////////////////////////// @@ -129,12 +147,13 @@ void TWardIdentity::execute(void) LOG(Message) << "Performing Ward Identity checks for quark '" << par().q << "'." << std::endl; - PropagatorField tmp(env().getGrid()), vector_WI(env().getGrid()); - PropagatorField &q = *env().template getObject(par().q); - FMat &act = *(env().template getObject(par().action)); - Gamma g5(Gamma::Algebra::Gamma5); + auto &q = envGet(PropagatorField, par().q); + auto &act = envGet(FMat, par().action); + Gamma g5(Gamma::Algebra::Gamma5); // Compute D_mu V_mu, D here is backward derivative. + envGetTmp(PropagatorField, tmp); + envGetTmp(PropagatorField, vector_WI); vector_WI = zero; for (unsigned int mu = 0; mu < Nd; ++mu) { @@ -149,9 +168,10 @@ void TWardIdentity::execute(void) if (par().test_axial) { - PropagatorField psi(env().getGrid()); - LatticeComplex PP(env().getGrid()), axial_defect(env().getGrid()), - PJ5q(env().getGrid()); + envGetTmp(PropagatorField, psi); + envGetTmp(LatticeComplex, PP); + envGetTmp(LatticeComplex, axial_defect); + envGetTmp(LatticeComplex, PJ5q); std::vector axial_buf; // Compute , D is backwards derivative. 
diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp index 7df40370..2b53c87a 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp @@ -99,6 +99,7 @@ public:\ virtual ~T##modname(void) = default;\ /* dependency relation */ \ virtual std::vector getInput(void);\ + virtual std::vector getReference(void);\ virtual std::vector getOutput(void);\ public:\ std::vector VA_label = {"V", "A"};\ diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc index 314b080a..7a73a7e3 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc @@ -74,9 +74,16 @@ std::vector TWeakHamiltonianEye::getInput(void) return in; } +std::vector TWeakHamiltonianEye::getReference(void) +{ + std::vector out = {}; + + return out; +} + std::vector TWeakHamiltonianEye::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -84,7 +91,15 @@ std::vector TWeakHamiltonianEye::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakHamiltonianEye::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp1"); + envTmpLat(LatticeComplex, "tmp2"); + envTmp(std::vector, "S_body", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "S_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "E_body", 1, ndim, LatticeComplex(env().getGrid())); + envTmp(std::vector, "E_loop", 1, ndim, LatticeComplex(env().getGrid())); } // execution /////////////////////////////////////////////////////////////////// @@ -96,22 +111,22 @@ void TWeakHamiltonianEye::execute(void) << "'." << std::endl; CorrWriter writer(par().output); - SlicedPropagator &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + auto &q1 = envGet(SlicedPropagator, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_eye_diag); unsigned int ndim = env().getNd(); - PropagatorField tmp1(env().getGrid()); - LatticeComplex tmp2(env().getGrid()); - std::vector S_body(ndim, tmp1); - std::vector S_loop(ndim, tmp1); - std::vector E_body(ndim, tmp2); - std::vector E_loop(ndim, tmp2); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp1); + envGetTmp(LatticeComplex, tmp2); + envGetTmp(std::vector, S_body); + envGetTmp(std::vector, S_loop); + envGetTmp(std::vector, E_body); + envGetTmp(std::vector, E_loop); // Get sink timeslice of q1. 
SitePropagator q1Snk = q1[par().tSnk]; diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc index 2c4df68a..c333713d 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc @@ -74,9 +74,15 @@ std::vector TWeakHamiltonianNonEye::getInput(void) return in; } +std::vector TWeakHamiltonianNonEye::getReference(void) +{ + std::vector out = {}; + + return out; +} std::vector TWeakHamiltonianNonEye::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -84,7 +90,15 @@ std::vector TWeakHamiltonianNonEye::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakHamiltonianNonEye::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp1"); + envTmpLat(LatticeComplex, "tmp2"); + envTmp(std::vector, "C_i_side_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "C_f_side_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "W_i_side_loop", 1, ndim, LatticeComplex(env().getGrid())); + envTmp(std::vector, "W_f_side_loop", 1, ndim, LatticeComplex(env().getGrid())); } // execution /////////////////////////////////////////////////////////////////// @@ -95,23 +109,23 @@ void TWeakHamiltonianNonEye::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); - PropagatorField &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + CorrWriter writer(par().output); + auto &q1 = envGet(PropagatorField, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_noneye_diag); - unsigned int ndim = env().getNd(); + unsigned int ndim = env().getNd(); - PropagatorField tmp1(env().getGrid()); - LatticeComplex tmp2(env().getGrid()); - std::vector C_i_side_loop(ndim, tmp1); - std::vector C_f_side_loop(ndim, tmp1); - std::vector W_i_side_loop(ndim, tmp2); - std::vector W_f_side_loop(ndim, tmp2); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp1); + envGetTmp(LatticeComplex, tmp2); + envGetTmp(std::vector, C_i_side_loop); + envGetTmp(std::vector, C_f_side_loop); + envGetTmp(std::vector, W_i_side_loop); + envGetTmp(std::vector, W_f_side_loop); // Setup for C-type contractions. 
for (int mu = 0; mu < ndim; ++mu) diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc index 6685f292..e0f07f6c 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc @@ -76,9 +76,16 @@ std::vector TWeakNeutral4ptDisc::getInput(void) return in; } +std::vector TWeakNeutral4ptDisc::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + std::vector TWeakNeutral4ptDisc::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -86,7 +93,13 @@ std::vector TWeakNeutral4ptDisc::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakNeutral4ptDisc::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp"); + envTmpLat(LatticeComplex, "curr"); + envTmp(std::vector, "meson", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "loop", 1, ndim, PropagatorField(env().getGrid())); } // execution /////////////////////////////////////////////////////////////////// @@ -97,21 +110,21 @@ void TWeakNeutral4ptDisc::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); - PropagatorField &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + CorrWriter writer(par().output); + auto &q1 = envGet(PropagatorField, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_neut_disc_diag); - unsigned int ndim = env().getNd(); + unsigned int ndim = env().getNd(); - PropagatorField tmp(env().getGrid()); - std::vector meson(ndim, tmp); - std::vector loop(ndim, tmp); - LatticeComplex curr(env().getGrid()); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp); + envGetTmp(LatticeComplex, curr); + envGetTmp(std::vector, meson); + envGetTmp(std::vector, loop); // Setup for type 1 contractions. for (int mu = 0; mu < ndim; ++mu) diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 4d08841d..e77df287 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -154,21 +154,21 @@ void TGaugeProp::execute(void) LOG(Message) << "Computing quark propagator '" << getName() << "'" << std::endl; - FermionField &source = envGetTmp(FermionField, "source"); - FermionField &sol = envGetTmp(FermionField, "sol"); - FermionField &tmp = envGetTmp(FermionField, "tmp"); - std::string propName = (Ls_ == 1) ? getName() : (getName() + "_5d"); - PropagatorField &prop = envGet(PropagatorField, propName); - PropagatorField &fullSrc = envGet(PropagatorField, par().source); - SolverFn &solver = envGet(SolverFn, par().solver); + std::string propName = (Ls_ == 1) ? 
getName() : (getName() + "_5d"); + auto &prop = envGet(PropagatorField, propName); + auto &fullSrc = envGet(PropagatorField, par().source); + auto &solver = envGet(SolverFn, par().solver); + envGetTmp(FermionField, source); + envGetTmp(FermionField, sol); + envGetTmp(FermionField, tmp); LOG(Message) << "Inverting using solver '" << par().solver << "' on source '" << par().source << "'" << std::endl; for (unsigned int s = 0; s < Ns; ++s) for (unsigned int c = 0; c < Nc; ++c) { LOG(Message) << "Inversion for spin= " << s << ", color= " << c - << std::endl; + << std::endl; // source conversion for 4D sources if (!env().isObject5d(par().source)) { diff --git a/extras/Hadrons/Modules/MGauge/Load.cc b/extras/Hadrons/Modules/MGauge/Load.cc index 062e7e98..c2fd49de 100644 --- a/extras/Hadrons/Modules/MGauge/Load.cc +++ b/extras/Hadrons/Modules/MGauge/Load.cc @@ -49,6 +49,13 @@ std::vector TLoad::getInput(void) return in; } +std::vector TLoad::getReference(void) +{ + std::vector ref; + + return ref; +} + std::vector TLoad::getOutput(void) { std::vector out = {getName()}; @@ -59,19 +66,19 @@ std::vector TLoad::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TLoad::setup(void) { - env().registerLattice(getName()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TLoad::execute(void) { - FieldMetaData header; - std::string fileName = par().file + "." - + std::to_string(env().getTrajectory()); - + FieldMetaData header; + std::string fileName = par().file + "." + + std::to_string(vm().getTrajectory()); LOG(Message) << "Loading NERSC configuration from file '" << fileName << "'" << std::endl; - LatticeGaugeField &U = *env().createLattice(getName()); + + auto &U = envGet(LatticeGaugeField, getName()); NerscIO::readConfiguration(U, header, fileName); LOG(Message) << "NERSC header:" << std::endl; dump_meta_data(header, LOG(Message)); diff --git a/extras/Hadrons/Modules/MGauge/Load.hpp b/extras/Hadrons/Modules/MGauge/Load.hpp index a338af79..a967d714 100644 --- a/extras/Hadrons/Modules/MGauge/Load.hpp +++ b/extras/Hadrons/Modules/MGauge/Load.hpp @@ -57,6 +57,7 @@ public: virtual ~TLoad(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/Random.cc b/extras/Hadrons/Modules/MGauge/Random.cc index c10fdfc3..fdb0d145 100644 --- a/extras/Hadrons/Modules/MGauge/Random.cc +++ b/extras/Hadrons/Modules/MGauge/Random.cc @@ -44,7 +44,16 @@ TRandom::TRandom(const std::string name) // dependencies/products /////////////////////////////////////////////////////// std::vector TRandom::getInput(void) { - return std::vector(); + std::vector in; + + return in; +} + +std::vector TRandom::getReference(void) +{ + std::vector ref; + + return ref; } std::vector TRandom::getOutput(void) @@ -57,13 +66,14 @@ std::vector TRandom::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TRandom::setup(void) { - env().registerLattice(getName()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TRandom::execute(void) { LOG(Message) << "Generating random gauge configuration" << std::endl; - LatticeGaugeField &U = *env().createLattice(getName()); + + auto &U = envGet(LatticeGaugeField, getName()); 
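Module products follow the same discipline as temporaries: Load and Random above now declare their output with envCreateLat() in setup() and fetch the already-allocated object with envGet() in execute(), rather than calling env().registerLattice()/createLattice() by hand. A minimal sketch, assuming a hypothetical gauge module TMyGauge with the standard boilerplate:

// Sketch of the create-in-setup / get-in-execute pattern for module products.
void TMyGauge::setup(void)
{
    envCreateLat(LatticeGaugeField, getName());  // the environment owns the product
}

void TMyGauge::execute(void)
{
    LOG(Message) << "Generating random gauge configuration" << std::endl;

    auto &U = envGet(LatticeGaugeField, getName());
    SU3::HotConfiguration(*env().get4dRng(), U); // fill the product in place
}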
SU3::HotConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MGauge/Random.hpp b/extras/Hadrons/Modules/MGauge/Random.hpp index a07130e4..30525113 100644 --- a/extras/Hadrons/Modules/MGauge/Random.hpp +++ b/extras/Hadrons/Modules/MGauge/Random.hpp @@ -50,6 +50,7 @@ public: virtual ~TRandom(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/StochEm.cc b/extras/Hadrons/Modules/MGauge/StochEm.cc index c7a9fc4f..a878ae2f 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.cc +++ b/extras/Hadrons/Modules/MGauge/StochEm.cc @@ -47,6 +47,13 @@ std::vector TStochEm::getInput(void) return in; } +std::vector TStochEm::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + std::vector TStochEm::getOutput(void) { std::vector out = {getName()}; @@ -57,32 +64,28 @@ std::vector TStochEm::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TStochEm::setup(void) { - if (!env().hasRegisteredObject("_" + getName() + "_weight")) + if (!env().hasCreatedObject("_" + getName() + "_weight")) { - env().registerLattice("_" + getName() + "_weight"); + envCacheLat(EmComp, "_" + getName() + "_weight"); } - env().registerLattice(getName()); + envCreateLat(EmField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TStochEm::execute(void) { + LOG(Message) << "Generating stochatic EM potential..." << std::endl; + PhotonR photon(par().gauge, par().zmScheme); - EmField &a = *env().createLattice(getName()); - EmComp *w; + auto &a = envGet(EmField, getName()); + auto &w = envGet(EmComp, "_" + getName() + "_weight"); if (!env().hasCreatedObject("_" + getName() + "_weight")) { LOG(Message) << "Caching stochatic EM potential weight (gauge: " << par().gauge << ", zero-mode scheme: " << par().zmScheme << ")..." << std::endl; - w = env().createLattice("_" + getName() + "_weight"); - photon.StochasticWeight(*w); + photon.StochasticWeight(w); } - else - { - w = env().getObject("_" + getName() + "_weight"); - } - LOG(Message) << "Generating stochatic EM potential..." 
<< std::endl; - photon.StochasticField(a, *env().get4dRng(), *w); + photon.StochasticField(a, *env().get4dRng(), w); } diff --git a/extras/Hadrons/Modules/MGauge/StochEm.hpp b/extras/Hadrons/Modules/MGauge/StochEm.hpp index bacb5172..efc2e39b 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.hpp +++ b/extras/Hadrons/Modules/MGauge/StochEm.hpp @@ -59,6 +59,7 @@ public: virtual ~TStochEm(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index bc05a785..af31f124 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -71,6 +71,7 @@ void TUnit::setup(void) void TUnit::execute(void) { LOG(Message) << "Creating unit gauge configuration" << std::endl; + auto &U = envGet(LatticeGaugeField, getName()); SU3::ColdConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp index 1f40dd48..0feb5efb 100644 --- a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp +++ b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp @@ -73,6 +73,7 @@ public: virtual ~TNoiseLoop(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -101,6 +102,15 @@ std::vector TNoiseLoop::getInput(void) return in; } + +template +std::vector TNoiseLoop::getReference(void) +{ + std::vector out = {}; + + return out; +} + template std::vector TNoiseLoop::getOutput(void) { @@ -113,16 +123,16 @@ std::vector TNoiseLoop::getOutput(void) template void TNoiseLoop::setup(void) { - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); } // execution /////////////////////////////////////////////////////////////////// template void TNoiseLoop::execute(void) { - PropagatorField &loop = *env().template createLattice(getName()); - PropagatorField &q = *env().template getObject(par().q); - PropagatorField &eta = *env().template getObject(par().eta); + auto &loop = envGet(PropagatorField, getName()); + auto &q = envGet(PropagatorField, par().q); + auto &eta = envGet(PropagatorField, par().eta); loop = q*adj(eta); } diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp index 16b89434..42cae4f6 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -122,18 +122,19 @@ void TPoint::setup(void) // execution /////////////////////////////////////////////////////////////////// template void TPoint::execute(void) -{ - std::vector p = strToVec(par().mom); - LatticeComplex &ph = envGet(LatticeComplex, momphName_); - Complex i(0.0,1.0); - +{ LOG(Message) << "Setting up point sink function for momentum [" << par().mom << "]" << std::endl; + auto &ph = envGet(LatticeComplex, momphName_); + if (!hasPhase_) { - LatticeComplex &coor = envGetTmp(LatticeComplex, "coor"); + Complex i(0.0,1.0); + std::vector p; + envGetTmp(LatticeComplex, coor); + p = strToVec(par().mom); ph = zero; for(unsigned int mu = 0; mu < env().getNd(); mu++) { diff --git a/extras/Hadrons/Modules/MSink/Smear.hpp b/extras/Hadrons/Modules/MSink/Smear.hpp index b51d2f49..03cc861a 100644 --- a/extras/Hadrons/Modules/MSink/Smear.hpp +++ b/extras/Hadrons/Modules/MSink/Smear.hpp @@ -61,6 +61,7 @@ public: virtual ~TSmear(void) = default; // dependency 
relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -89,6 +90,14 @@ std::vector TSmear::getInput(void) return in; } +template +std::vector TSmear::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TSmear::getOutput(void) { @@ -101,9 +110,7 @@ std::vector TSmear::getOutput(void) template void TSmear::setup(void) { - unsigned int nt = env().getDim(Tp); - unsigned int size = nt * sizeof(SitePropagator); - env().registerObject(getName(), size); + envCreate(SlicedPropagator, getName(), 1, env().getDim(Tp)); } // execution /////////////////////////////////////////////////////////////////// @@ -114,11 +121,11 @@ void TSmear::execute(void) << "' using sink function '" << par().sink << "'." << std::endl; - SinkFn &sink = *env().template getObject(par().sink); - PropagatorField &q = *env().template getObject(par().q); - SlicedPropagator *out = new SlicedPropagator(env().getDim(Tp)); - *out = sink(q); - env().setObject(getName(), out); + auto &sink = envGet(SinkFn, par().sink); + auto &q = envGet(PropagatorField, par().q); + auto &out = envGet(SlicedPropagator, getName()); + + out = sink(q); } END_MODULE_NAMESPACE diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index 3fab41c0..6470c77f 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -128,12 +128,13 @@ void TPoint::setup(void) template void TPoint::execute(void) { - std::vector position = strToVec(par().position); - SitePropagator id; - LOG(Message) << "Creating point source at position [" << par().position - << "]" << std::endl; - PropagatorField &src = envGet(PropagatorField, getName()); + << "]" << std::endl; + + std::vector position = strToVec(par().position); + auto &src = envGet(PropagatorField, getName()); + SitePropagator id; + id = 1.; src = zero; pokeSite(id, src, position); diff --git a/extras/Hadrons/Modules/MSource/SeqConserved.hpp b/extras/Hadrons/Modules/MSource/SeqConserved.hpp index e8f91be1..9ccbee1b 100644 --- a/extras/Hadrons/Modules/MSource/SeqConserved.hpp +++ b/extras/Hadrons/Modules/MSource/SeqConserved.hpp @@ -82,6 +82,7 @@ public: virtual ~TSeqConserved(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -110,6 +111,14 @@ std::vector TSeqConserved::getInput(void) return in; } +template +std::vector TSeqConserved::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TSeqConserved::getOutput(void) { @@ -123,7 +132,7 @@ template void TSeqConserved::setup(void) { auto Ls_ = env().getObjectLs(par().action); - env().template registerLattice(getName(), Ls_); + envCreateLat(PropagatorField, getName(), Ls_); } // execution /////////////////////////////////////////////////////////////////// @@ -143,9 +152,9 @@ void TSeqConserved::execute(void) << par().mu << ") for " << par().tA << " <= t <= " << par().tB << std::endl; } - PropagatorField &src = *env().template createLattice(getName()); - PropagatorField &q = *env().template getObject(par().q); - FMat &mat = *(env().template getObject(par().action)); + auto &src = envGet(PropagatorField, getName()); + auto &q = envGet(PropagatorField, par().q); + auto &mat = envGet(FMat, par().action); std::vector mom = strToVec(par().mom); mat.SeqConservedCurrent(q, src, par().curr_type, par().mu, diff --git 
a/extras/Hadrons/Modules/MSource/SeqGamma.hpp b/extras/Hadrons/Modules/MSource/SeqGamma.hpp index 8f67f8fa..d2b3c958 100644 --- a/extras/Hadrons/Modules/MSource/SeqGamma.hpp +++ b/extras/Hadrons/Modules/MSource/SeqGamma.hpp @@ -80,12 +80,16 @@ public: virtual ~TSeqGamma(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup virtual void setup(void); // execution virtual void execute(void); +private: + bool hasPhase_{false}; + std::string momphName_, tName_; }; MODULE_REGISTER_NS(SeqGamma, TSeqGamma, MSource); @@ -97,6 +101,8 @@ MODULE_REGISTER_NS(SeqGamma, TSeqGamma, MSource); template TSeqGamma::TSeqGamma(const std::string name) : Module(name) +, momphName_ (name + "_momph") +, tName_ (name + "_t") {} // dependencies/products /////////////////////////////////////////////////////// @@ -108,6 +114,14 @@ std::vector TSeqGamma::getInput(void) return in; } +template +std::vector TSeqGamma::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TSeqGamma::getOutput(void) { @@ -120,7 +134,10 @@ std::vector TSeqGamma::getOutput(void) template void TSeqGamma::setup(void) { - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); + envCacheLat(Lattice>, tName_); + envCacheLat(LatticeComplex, momphName_); + envTmpLat(LatticeComplex, "coor"); } // execution /////////////////////////////////////////////////////////////////// @@ -138,23 +155,29 @@ void TSeqGamma::execute(void) << " sequential source for " << par().tA << " <= t <= " << par().tB << std::endl; } - PropagatorField &src = *env().template createLattice(getName()); - PropagatorField &q = *env().template getObject(par().q); - Lattice> t(env().getGrid()); - LatticeComplex ph(env().getGrid()), coor(env().getGrid()); - Gamma g(par().gamma); - std::vector p; - Complex i(0.0,1.0); + auto &src = envGet(PropagatorField, getName()); + auto &q = envGet(PropagatorField, par().q); + auto &ph = envGet(LatticeComplex, momphName_); + auto &t = envGet(Lattice>, tName_); + Gamma g(par().gamma); - p = strToVec(par().mom); - ph = zero; - for(unsigned int mu = 0; mu < env().getNd(); mu++) + if (!hasPhase_) { - LatticeCoordinate(coor, mu); - ph = ph + p[mu]*coor*((1./(env().getGrid()->_fdimensions[mu]))); + Complex i(0.0,1.0); + std::vector p; + + envGetTmp(LatticeComplex, coor); + p = strToVec(par().mom); + ph = zero; + for(unsigned int mu = 0; mu < env().getNd(); mu++) + { + LatticeCoordinate(coor, mu); + ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor; + } + ph = exp((Real)(2*M_PI)*i*ph); + LatticeCoordinate(t, Tp); + hasPhase_ = true; } - ph = exp((Real)(2*M_PI)*i*ph); - LatticeCoordinate(t, Tp); src = where((t >= par().tA) and (t <= par().tB), ph*(g*q), 0.*q); } diff --git a/extras/Hadrons/Modules/MSource/Wall.hpp b/extras/Hadrons/Modules/MSource/Wall.hpp index 57dee06d..d9814d9e 100644 --- a/extras/Hadrons/Modules/MSource/Wall.hpp +++ b/extras/Hadrons/Modules/MSource/Wall.hpp @@ -72,12 +72,16 @@ public: virtual ~TWall(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup virtual void setup(void); // execution virtual void execute(void); +private: + bool hasPhase_{false}; + std::string momphName_, tName_; }; MODULE_REGISTER_NS(Wall, TWall, MSource); @@ -89,17 +93,27 @@ MODULE_REGISTER_NS(Wall, TWall, MSource); template TWall::TWall(const std::string 
name) : Module(name) +, momphName_ (name + "_momph") +, tName_ (name + "_t") {} // dependencies/products /////////////////////////////////////////////////////// template std::vector TWall::getInput(void) { - std::vector in; + std::vector in = {}; return in; } +template +std::vector TWall::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TWall::getOutput(void) { @@ -112,7 +126,7 @@ std::vector TWall::getOutput(void) template void TWall::setup(void) { - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); } // execution /////////////////////////////////////////////////////////////////// @@ -122,21 +136,28 @@ void TWall::execute(void) LOG(Message) << "Generating wall source at t = " << par().tW << " with momentum " << par().mom << std::endl; - PropagatorField &src = *env().template createLattice(getName()); - Lattice> t(env().getGrid()); - LatticeComplex ph(env().getGrid()), coor(env().getGrid()); - std::vector p; - Complex i(0.0,1.0); + auto &src = envGet(PropagatorField, getName()); + auto &ph = envGet(LatticeComplex, momphName_); + auto &t = envGet(Lattice>, tName_); - p = strToVec(par().mom); - ph = zero; - for(unsigned int mu = 0; mu < Nd; mu++) + if (!hasPhase_) { - LatticeCoordinate(coor, mu); - ph = ph + p[mu]*coor*((1./(env().getGrid()->_fdimensions[mu]))); + Complex i(0.0,1.0); + std::vector p; + + envGetTmp(LatticeComplex, coor); + p = strToVec(par().mom); + ph = zero; + for(unsigned int mu = 0; mu < env().getNd(); mu++) + { + LatticeCoordinate(coor, mu); + ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor; + } + ph = exp((Real)(2*M_PI)*i*ph); + LatticeCoordinate(t, Tp); + hasPhase_ = true; } - ph = exp((Real)(2*M_PI)*i*ph); - LatticeCoordinate(t, Tp); + src = 1.; src = where((t == par().tW), src*ph, 0.*src); } diff --git a/extras/Hadrons/Modules/MSource/Z2.hpp b/extras/Hadrons/Modules/MSource/Z2.hpp index e2cc4f34..2e864ff0 100644 --- a/extras/Hadrons/Modules/MSource/Z2.hpp +++ b/extras/Hadrons/Modules/MSource/Z2.hpp @@ -75,12 +75,16 @@ public: virtual ~TZ2(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup virtual void setup(void); // execution virtual void execute(void); +private: + bool hasT_{false}; + std::string tName_; }; MODULE_REGISTER_NS(Z2, TZ2, MSource); @@ -93,6 +97,7 @@ MODULE_REGISTER_NS(ScalarZ2, TZ2, MSource); template TZ2::TZ2(const std::string name) : Module(name) +, tName_ (name + "_t") {} // dependencies/products /////////////////////////////////////////////////////// @@ -104,6 +109,14 @@ std::vector TZ2::getInput(void) return in; } +template +std::vector TZ2::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TZ2::getOutput(void) { @@ -116,29 +129,36 @@ std::vector TZ2::getOutput(void) template void TZ2::setup(void) { - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); + envCacheLat(Lattice>, tName_); + envTmpLat(LatticeComplex, "eta"); } // execution /////////////////////////////////////////////////////////////////// template void TZ2::execute(void) { - Lattice> t(env().getGrid()); - LatticeComplex eta(env().getGrid()); - Complex shift(1., 1.); - if (par().tA == par().tB) { LOG(Message) << "Generating Z_2 wall source at t= " << par().tA - << std::endl; + << std::endl; } else { LOG(Message) << "Generating Z_2 band for " << par().tA << " <= t <= " - << par().tB << std::endl; + << par().tB 
<< std::endl; } - PropagatorField &src = *env().template createLattice(getName()); - LatticeCoordinate(t, Tp); + + auto &src = envGet(PropagatorField, getName()); + auto &t = envGet(Lattice>, getName()); + Complex shift(1., 1.); + + if (!hasT_) + { + LatticeCoordinate(t, Tp); + hasT_ = true; + } + envGetTmp(LatticeComplex, eta); bernoulli(*env().get4dRng(), eta); eta = (2.*eta - shift)*(1./::sqrt(2.)); eta = where((t >= par().tA) and (t <= par().tB), eta, 0.*eta); diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index b0f2846f..081d2911 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -79,6 +79,7 @@ public: virtual ~TTestSeqConserved(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -107,6 +108,14 @@ std::vector TTestSeqConserved::getInput(void) return in; } +template +std::vector TTestSeqConserved::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TTestSeqConserved::getOutput(void) { @@ -124,36 +133,37 @@ void TTestSeqConserved::setup(void) { HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); } + envTmpLat(PropagatorField, "tmp"); + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// template void TTestSeqConserved::execute(void) { - PropagatorField tmp(env().getGrid()); - PropagatorField &q = *env().template getObject(par().q); - PropagatorField &qSeq = *env().template getObject(par().qSeq); - FMat &act = *(env().template getObject(par().action)); - Gamma g5(Gamma::Algebra::Gamma5); - Gamma::Algebra gA = (par().curr == Current::Axial) ? - Gamma::Algebra::Gamma5 : - Gamma::Algebra::Identity; - Gamma g(gA); - SitePropagator qSite; - Complex test_S, test_V, check_S, check_V; - std::vector check_buf; - LatticeComplex c(env().getGrid()); - // Check sequential insertion of current gives same result as conserved // current sink upon contraction. Assume q uses a point source. - std::vector siteCoord; + + auto &q = envGet(PropagatorField, par().q); + auto &qSeq = envGet(PropagatorField, par().qSeq); + auto &act = envGet(FMat, par().action); + Gamma g5(Gamma::Algebra::Gamma5); + Gamma::Algebra gA = (par().curr == Current::Axial) ? 
+ Gamma::Algebra::Gamma5 : + Gamma::Algebra::Identity; + Gamma g(gA); + SitePropagator qSite; + Complex test_S, test_V, check_S, check_V; + std::vector check_buf; + std::vector siteCoord; + + envGetTmp(PropagatorField, tmp); + envGetTmp(LatticeComplex, c); siteCoord = strToVec(par().origin); peekSite(qSite, qSeq, siteCoord); test_S = trace(qSite*g); test_V = trace(qSite*g*Gamma::gmu[par().mu]); - act.ContractConservedCurrent(q, q, tmp, par().curr, par().mu); - c = trace(tmp*g); sliceSum(c, check_buf, Tp); check_S = TensorRemove(check_buf[par().t_J]); diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp index 9736ab54..30bd4b69 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp @@ -63,6 +63,7 @@ public: virtual ~TTestSeqGamma(void) = default; // dependency relation virtual std::vector getInput(void); + virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -91,6 +92,14 @@ std::vector TTestSeqGamma::getInput(void) return in; } +template +std::vector TTestSeqGamma::getReference(void) +{ + std::vector ref = {}; + + return ref; +} + template std::vector TTestSeqGamma::getOutput(void) { @@ -103,26 +112,27 @@ std::vector TTestSeqGamma::getOutput(void) template void TTestSeqGamma::setup(void) { - + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// template void TTestSeqGamma::execute(void) { - PropagatorField &q = *env().template getObject(par().q); - PropagatorField &qSeq = *env().template getObject(par().qSeq); - LatticeComplex c(env().getGrid()); - Gamma g5(Gamma::Algebra::Gamma5); - Gamma g(par().gamma); - SitePropagator qSite; - Complex test, check; + auto &q = envGet(PropagatorField, par().q); + auto &qSeq = envGet(PropagatorField, par().qSeq); + Gamma g5(Gamma::Algebra::Gamma5); + Gamma g(par().gamma); + SitePropagator qSite; + Complex test, check; std::vector check_buf; + std::vector siteCoord; // Check sequential insertion of gamma matrix gives same result as // insertion of gamma at sink upon contraction. Assume q uses a point // source. 
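The sequential and wall sources earlier in this patch also introduce a caching idiom: the momentum phase and the time coordinate are registered with envCacheLat() so they persist between calls, and a hasPhase_ member guards the one-time computation. The sketch below assembles that pattern for a hypothetical source TMySource, with template arguments (Lattice<iScalar<vInteger>>, std::vector<Real>, strToVec<Real>) assumed from context.

// Sketch of the cached momentum-phase idiom used by the source modules above.
template <typename FImpl>
void TMySource<FImpl>::setup(void)
{
    envCreateLat(PropagatorField, getName());
    envCacheLat(Lattice<iScalar<vInteger>>, tName_);  // cached across calls
    envCacheLat(LatticeComplex, momphName_);
    envTmpLat(LatticeComplex, "coor");
}

template <typename FImpl>
void TMySource<FImpl>::execute(void)
{
    auto &ph = envGet(LatticeComplex, momphName_);
    auto &t  = envGet(Lattice<iScalar<vInteger>>, tName_);

    if (!hasPhase_)  // compute the phase only on the first call
    {
        Complex           i(0.0, 1.0);
        std::vector<Real> p = strToVec<Real>(par().mom);

        envGetTmp(LatticeComplex, coor);
        ph = zero;
        for (unsigned int mu = 0; mu < env().getNd(); mu++)
        {
            LatticeCoordinate(coor, mu);
            ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor;
        }
        ph = exp((Real)(2*M_PI)*i*ph);
        LatticeCoordinate(t, Tp);
        hasPhase_ = true;
    }
    // the cached ph and t are then used to build the source, as above
}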
- std::vector siteCoord; + + envGetTmp(LatticeComplex, c); siteCoord = strToVec(par().origin); peekSite(qSite, qSeq, siteCoord); test = trace(g*qSite); diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 5ce2435f..2f4d183e 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -1,45 +1,38 @@ modules_cc =\ - Modules/MGauge/Unit.cc - # Modules/MContraction/WeakHamiltonianEye.cc \ - # Modules/MContraction/WeakHamiltonianNonEye.cc \ - # Modules/MContraction/WeakNeutral4ptDisc.cc \ - # Modules/MGauge/Load.cc \ - # Modules/MGauge/Random.cc \ - # Modules/MGauge/StochEm.cc \ - # Modules/MScalar/ChargedProp.cc \ - # Modules/MScalar/FreeProp.cc + Modules/MGauge/Unit.cc \ + Modules/MContraction/WeakHamiltonianEye.cc \ + Modules/MContraction/WeakHamiltonianNonEye.cc \ + Modules/MContraction/WeakNeutral4ptDisc.cc \ + Modules/MGauge/Load.cc \ + Modules/MGauge/Random.cc \ + Modules/MGauge/StochEm.cc modules_hpp =\ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ Modules/MSink/Point.hpp \ Modules/MSource/Point.hpp \ + Modules/MGauge/Load.hpp \ + Modules/MGauge/Random.hpp \ + Modules/MGauge/StochEm.hpp \ Modules/MGauge/Unit.hpp \ Modules/MSolver/RBPrecCG.hpp \ Modules/MFermion/GaugeProp.hpp \ - Modules/MContraction/Meson.hpp - - # Modules/MContraction/Baryon.hpp \ - # Modules/MContraction/DiscLoop.hpp \ - # Modules/MContraction/Gamma3pt.hpp \ - # Modules/MContraction/WardIdentity.hpp \ - # Modules/MContraction/WeakHamiltonian.hpp \ - # Modules/MContraction/WeakHamiltonianEye.hpp \ - # Modules/MContraction/WeakHamiltonianNonEye.hpp \ - # Modules/MContraction/WeakNeutral4ptDisc.hpp \ - # Modules/MFermion/GaugeProp.hpp \ - # Modules/MGauge/Load.hpp \ - # Modules/MGauge/Random.hpp \ - # Modules/MGauge/StochEm.hpp \ - # Modules/MLoop/NoiseLoop.hpp \ - # Modules/MScalar/ChargedProp.hpp \ - # Modules/MScalar/FreeProp.hpp \ - # Modules/MScalar/Scalar.hpp \ - # Modules/MSink/Smear.hpp \ - # Modules/MSolver/RBPrecCG.hpp \ - # Modules/MSource/SeqConserved.hpp \ - # Modules/MSource/SeqGamma.hpp \ - # Modules/MSource/Wall.hpp \ - # Modules/MSource/Z2.hpp \ - # Modules/MUtilities/TestSeqConserved.hpp \ - # Modules/MUtilities/TestSeqGamma.hpp + Modules/MContraction/Baryon.hpp \ + Modules/MContraction/DiscLoop.hpp \ + Modules/MContraction/Gamma3pt.hpp \ + Modules/MContraction/Meson.hpp \ + Modules/MContraction/WardIdentity.hpp \ + Modules/MContraction/WeakHamiltonian.hpp \ + Modules/MContraction/WeakHamiltonianEye.hpp \ + Modules/MContraction/WeakHamiltonianNonEye.hpp \ + Modules/MContraction/WeakNeutral4ptDisc.hpp \ + Modules/MLoop/NoiseLoop.hpp \ + Modules/MSink/Smear.hpp \ + Modules/MSolver/RBPrecCG.hpp \ + Modules/MSource/SeqConserved.hpp \ + Modules/MSource/SeqGamma.hpp \ + Modules/MSource/Wall.hpp \ + Modules/MSource/Z2.hpp \ + Modules/MUtilities/TestSeqConserved.hpp \ + Modules/MUtilities/TestSeqGamma.hpp diff --git a/tests/hadrons/Test_hadrons.hpp b/tests/hadrons/Test_hadrons.hpp index 9bd3ee0a..0265f5a6 100644 --- a/tests/hadrons/Test_hadrons.hpp +++ b/tests/hadrons/Test_hadrons.hpp @@ -118,7 +118,7 @@ inline void makeWilsonAction(Application &application, std::string actionName, std::string &gaugeField, double mass, std::string boundary = "1 1 1 -1") { - if (!(Environment::getInstance().hasModule(actionName))) + if (!(VirtualMachine::getInstance().hasModule(actionName))) { MAction::Wilson::Par actionPar; actionPar.gauge = gaugeField; @@ -144,7 +144,7 @@ inline void makeDWFAction(Application &application, std::string actionName, std::string &gaugeField, double 
mass, double M5, unsigned int Ls, std::string boundary = "1 1 1 -1") { - if (!(Environment::getInstance().hasModule(actionName))) + if (!(VirtualMachine::getInstance().hasModule(actionName))) { MAction::DWF::Par actionPar; actionPar.gauge = gaugeField; @@ -173,7 +173,7 @@ inline void makeDWFAction(Application &application, std::string actionName, inline void makeRBPrecCGSolver(Application &application, std::string &solverName, std::string &actionName, double residual = 1e-8) { - if (!(Environment::getInstance().hasModule(solverName))) + if (!(VirtualMachine::getInstance().hasModule(solverName))) { MSolver::RBPrecCG::Par solverPar; solverPar.action = actionName; @@ -195,7 +195,7 @@ inline void makePointSource(Application &application, std::string srcName, std::string pos) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Point::Par pointPar; pointPar.position = pos; @@ -219,7 +219,7 @@ inline void makeSequentialSource(Application &application, std::string srcName, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::SeqGamma::Par seqPar; seqPar.q = qSrc; @@ -255,7 +255,7 @@ inline void makeConservedSequentialSource(Application &application, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::SeqConserved::Par seqPar; seqPar.q = qSrc; @@ -280,7 +280,7 @@ inline void makeConservedSequentialSource(Application &application, inline void makeNoiseSource(Application &application, std::string &srcName, unsigned int tA, unsigned int tB) { - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Z2::Par noisePar; noisePar.tA = tA; @@ -302,7 +302,7 @@ inline void makeWallSource(Application &application, std::string &srcName, unsigned int tW, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Wall::Par wallPar; wallPar.tW = tW; @@ -324,7 +324,7 @@ inline void makePointSink(Application &application, std::string &sinkFnct, std::string mom = ZERO_MOM) { // If the sink function already exists, don't make it again. - if (!(Environment::getInstance().hasModule(sinkFnct))) + if (!(VirtualMachine::getInstance().hasModule(sinkFnct))) { MSink::Point::Par pointPar; pointPar.mom = mom; @@ -345,7 +345,7 @@ inline void sinkSmear(Application &application, std::string &sinkFnct, std::string &propName, std::string &smearedProp) { // If the propagator has already been smeared, don't smear it again. - if (!(Environment::getInstance().hasModule(smearedProp))) + if (!(VirtualMachine::getInstance().hasModule(smearedProp))) { MSink::Smear::Par smearPar; smearPar.q = propName; @@ -367,7 +367,7 @@ inline void makePropagator(Application &application, std::string &propName, std::string &srcName, std::string &solver) { // If the propagator already exists, don't make the module again. 
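In Test_hadrons.hpp every helper now asks the VirtualMachine singleton, rather than the Environment, whether a module already exists before creating it. A minimal sketch of one such guard follows; the Application::createModule call is assumed to be the usual Hadrons API, since the helper bodies are not shown in full in the hunks above.

// Sketch of a guarded test helper after the change above (point source shown).
inline void makePointSource(Application &application, std::string srcName,
                            std::string pos)
{
    // only create the module if the virtual machine does not already have it
    if (!(VirtualMachine::getInstance().hasModule(srcName)))
    {
        MSource::Point::Par pointPar;

        pointPar.position = pos;
        application.createModule<MSource::Point>(srcName, pointPar);
    }
}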
- if (!(Environment::getInstance().hasModule(propName))) + if (!(VirtualMachine::getInstance().hasModule(propName))) { MFermion::GaugeProp::Par quarkPar; quarkPar.source = srcName; @@ -390,7 +390,7 @@ inline void makeLoop(Application &application, std::string &propName, std::string &srcName, std::string &resName) { // If the loop propagator already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(propName))) + if (!(VirtualMachine::getInstance().hasModule(propName))) { MLoop::NoiseLoop::Par loopPar; loopPar.q = resName; @@ -421,7 +421,7 @@ inline void mesonContraction(Application &application, std::string &sink, std::string gammas = "") { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::Meson::Par mesPar; mesPar.output = output; @@ -453,7 +453,7 @@ inline void gamma3ptContraction(Application &application, unsigned int npt, Gamma::Algebra gamma = Gamma::Algebra::Identity) { std::string modName = std::to_string(npt) + "pt_" + label; - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::Gamma3pt::Par gamma3ptPar; gamma3ptPar.output = std::to_string(npt) + "pt/" + label; @@ -487,7 +487,7 @@ inline void weakContraction##top(Application &application, unsigned int npt,\ std::string &label, unsigned int tSnk = 0)\ {\ std::string modName = std::to_string(npt) + "pt_" + label;\ - if (!(Environment::getInstance().hasModule(modName)))\ + if (!(VirtualMachine::getInstance().hasModule(modName)))\ {\ MContraction::WeakHamiltonian##top::Par weakPar;\ weakPar.output = std::to_string(npt) + "pt/" + label;\ @@ -521,7 +521,7 @@ inline void disc0Contraction(Application &application, std::string &label) { std::string modName = "4pt_" + label; - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::WeakNeutral4ptDisc::Par disc0Par; disc0Par.output = "4pt/" + label; @@ -547,7 +547,7 @@ inline void discLoopContraction(Application &application, std::string &q_loop, std::string &modName, Gamma::Algebra gamma = Gamma::Algebra::Identity) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::DiscLoop::Par discPar; discPar.output = "disc/" + modName; @@ -574,7 +574,7 @@ inline void makeWITest(Application &application, std::string &modName, std::string &propName, std::string &actionName, double mass, unsigned int Ls = 1, bool test_axial = false) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::WardIdentity::Par wiPar; if (Ls > 1) @@ -613,7 +613,7 @@ inline void makeSeqCurrComparison(Application &application, std::string &modName std::string &actionName, std::string &origin, unsigned int t_J, unsigned int mu, Current curr) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MUtilities::TestSeqConserved::Par seqPar; seqPar.q = propName; @@ -646,7 +646,7 @@ inline void makeSeqGamComparison(Application &application, std::string &modName, std::string &origin, Gamma::Algebra gamma, unsigned int t_g) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MUtilities::TestSeqGamma::Par seqPar; seqPar.q = propName; From 591a38c487acc0f0abc7dd099e09f26f661f913c Mon Sep 17 00:00:00 
2001 From: Antonin Portelli Date: Thu, 14 Dec 2017 19:42:16 +0000 Subject: [PATCH 114/145] Hadrons: VM fixes --- extras/Hadrons/Modules/MSource/Z2.hpp | 2 +- extras/Hadrons/VirtualMachine.cc | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/extras/Hadrons/Modules/MSource/Z2.hpp b/extras/Hadrons/Modules/MSource/Z2.hpp index 2e864ff0..39840319 100644 --- a/extras/Hadrons/Modules/MSource/Z2.hpp +++ b/extras/Hadrons/Modules/MSource/Z2.hpp @@ -150,7 +150,7 @@ void TZ2::execute(void) } auto &src = envGet(PropagatorField, getName()); - auto &t = envGet(Lattice>, getName()); + auto &t = envGet(Lattice>, tName_); Complex shift(1., 1.); if (!hasT_) diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 8a6bd149..8b2ea516 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -83,6 +83,24 @@ void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) } m.input.push_back(env().getObjectAddress(ref)); } + auto inCopy = m.input; + // if module has inputs with references, they need to be added as + // an input + for (auto &in: inCopy) + { + int inm = env().getObjectModule(in); + + if (inm > 0) + { + if (getModule(inm)->getReference().size() > 0) + { + for (auto &rin: getModule(inm)->getReference()) + { + m.input.push_back(env().getObjectAddress(rin)); + } + } + } + } module_.push_back(std::move(m)); address = static_cast(module_.size() - 1); moduleAddress_[name] = address; From bcf6f3890c38420eba6449f5190fc688b006fbf7 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Thu, 14 Dec 2017 21:14:10 +0000 Subject: [PATCH 115/145] Hadrons: more fixes after test --- extras/Hadrons/Modules/MContraction/Meson.hpp | 12 ++++++++++-- extras/Hadrons/VirtualMachine.cc | 4 ++-- extras/Hadrons/VirtualMachine.hpp | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 1fd86d3a..3b250a3b 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -99,6 +99,8 @@ public: virtual std::vector getOutput(void); virtual void parseGammaString(std::vector &gammaList); protected: + // execution + virtual void setup(void); // execution virtual void execute(void); }; @@ -160,8 +162,14 @@ void TMeson::parseGammaString(std::vector &gammaList) { // Parse individual contractions from input string. 
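The pushModule() change above makes references transitive: whenever a module consumes an object, the references declared by that object's producer are appended to the consumer's own input list. With the empty default added to Module.hpp further below, only modules that actually need this behaviour override getReference(). A hypothetical override is sketched here; the module TMySolver and the choice of par().gauge as the referenced object are assumptions, not taken from the patch.

// Hypothetical getReference() override: declare that this module's output is
// only meaningful while the object named in par().gauge stays resident, so
// pushModule() also adds that object to the inputs of downstream consumers.
template <typename FImpl>
std::vector<std::string> TMySolver<FImpl>::getReference(void)
{
    std::vector<std::string> ref = {par().gauge};

    return ref;
}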
gammaList = strToVec(par().gammas); - } - envTmpLat(LatticeComplex, "c"); + } +} + +// execution /////////////////////////////////////////////////////////////////// +template +void TMeson::setup(void) +{ + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index 8b2ea516..e0035bc1 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -579,8 +579,8 @@ void VirtualMachine::executeProgram(const Program &p) const { // execute module LOG(Message) << SEP << " Measurement step " << i + 1 << "/" - << p.size() << " (module '" << module_[p[i]].name - << "') " << SEP << std::endl; + << p.size() << " (module '" << module_[p[i]].name + << "') " << SEP << std::endl; (*module_[p[i]].data)(); sizeBefore = env().getTotalSize(); // print used memory after execution diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index a411c108..3af7d914 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -59,7 +59,7 @@ public: { Size size; Environment::Storage storage; - unsigned int module; + int module; }; struct MemoryProfile { From e2fe97277bc0dcc65700645f16c547fe3d6b429e Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 19 Dec 2017 20:28:04 +0000 Subject: [PATCH 116/145] Hadrons: getReference use is rare, empty by default --- extras/Hadrons/Module.hpp | 5 ++++- extras/Hadrons/Modules/MAction/DWF.hpp | 9 --------- extras/Hadrons/Modules/MAction/Wilson.hpp | 9 --------- extras/Hadrons/Modules/MContraction/Baryon.hpp | 9 --------- extras/Hadrons/Modules/MContraction/DiscLoop.hpp | 9 --------- extras/Hadrons/Modules/MContraction/Gamma3pt.hpp | 9 --------- extras/Hadrons/Modules/MContraction/Meson.hpp | 9 --------- extras/Hadrons/Modules/MContraction/WardIdentity.hpp | 9 --------- .../Hadrons/Modules/MContraction/WeakHamiltonian.hpp | 1 - .../Hadrons/Modules/MContraction/WeakHamiltonianEye.cc | 7 ------- .../Modules/MContraction/WeakHamiltonianNonEye.cc | 6 ------ .../Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc | 7 ------- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 9 --------- extras/Hadrons/Modules/MGauge/Load.cc | 7 ------- extras/Hadrons/Modules/MGauge/Load.hpp | 1 - extras/Hadrons/Modules/MGauge/Random.cc | 7 ------- extras/Hadrons/Modules/MGauge/Random.hpp | 1 - extras/Hadrons/Modules/MGauge/StochEm.cc | 7 ------- extras/Hadrons/Modules/MGauge/StochEm.hpp | 1 - extras/Hadrons/Modules/MGauge/Unit.cc | 7 ------- extras/Hadrons/Modules/MGauge/Unit.hpp | 1 - extras/Hadrons/Modules/MLoop/NoiseLoop.hpp | 10 ---------- extras/Hadrons/Modules/MSink/Point.hpp | 9 --------- extras/Hadrons/Modules/MSink/Smear.hpp | 9 --------- extras/Hadrons/Modules/MSource/Point.hpp | 9 --------- extras/Hadrons/Modules/MSource/SeqConserved.hpp | 9 --------- extras/Hadrons/Modules/MSource/SeqGamma.hpp | 9 --------- extras/Hadrons/Modules/MSource/Wall.hpp | 9 --------- extras/Hadrons/Modules/MSource/Z2.hpp | 9 --------- extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp | 9 --------- extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp | 9 --------- extras/Hadrons/Modules/templates/Module.cc.template | 8 -------- extras/Hadrons/Modules/templates/Module.hpp.template | 1 - .../Hadrons/Modules/templates/Module_in_NS.cc.template | 8 -------- .../Modules/templates/Module_in_NS.hpp.template | 1 - .../Hadrons/Modules/templates/Module_tmp.hpp.template | 1 - 
.../Modules/templates/Module_tmp_in_NS.hpp.template | 9 --------- 37 files changed, 4 insertions(+), 245 deletions(-) diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 390573d8..b71f779d 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -155,7 +155,10 @@ public: virtual std::string getRegisteredName(void); // dependencies/products virtual std::vector getInput(void) = 0; - virtual std::vector getReference(void) = 0; + virtual std::vector getReference(void) + { + return std::vector(0); + }; virtual std::vector getOutput(void) = 0; // parse parameters virtual void parseParameters(XmlReader &reader, const std::string name) = 0; diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 0cb9a4cb..d99f1165 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -64,7 +64,6 @@ public: virtual ~TDWF(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -93,14 +92,6 @@ std::vector TDWF::getInput(void) return in; } -template -std::vector TDWF::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TDWF::getOutput(void) { diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index a6b3f0d6..8ef755bb 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -62,7 +62,6 @@ public: virtual ~TWilson(void) = default; // dependencies/products virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -91,14 +90,6 @@ std::vector TWilson::getInput(void) return in; } -template -std::vector TWilson::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TWilson::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index 28f6aa51..1ef2e257 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -71,7 +71,6 @@ public: virtual ~TBaryon(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -100,14 +99,6 @@ std::vector TBaryon::getInput(void) return input; } -template -std::vector TBaryon::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TBaryon::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index c0fbe296..ef50061c 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -67,7 +67,6 @@ public: virtual ~TDiscLoop(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -96,14 +95,6 @@ std::vector TDiscLoop::getInput(void) return in; } -template -std::vector TDiscLoop::getReference(void) -{ - std::vector out = {}; - - return out; -} - template std::vector TDiscLoop::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index 4a6baf3e..fb9a9d4b 100644 --- 
a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -98,7 +98,6 @@ public: virtual ~TGamma3pt(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -127,14 +126,6 @@ std::vector TGamma3pt::getInput(void) return in; } -template -std::vector TGamma3pt::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TGamma3pt::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 3b250a3b..46bbdb2e 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -95,7 +95,6 @@ public: virtual ~TMeson(void) = default; // dependencies/products virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); virtual void parseGammaString(std::vector &gammaList); protected: @@ -125,14 +124,6 @@ std::vector TMeson::getInput(void) return input; } -template -std::vector TMeson::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TMeson::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index c92c7243..556450a8 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -73,7 +73,6 @@ public: virtual ~TWardIdentity(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -104,14 +103,6 @@ std::vector TWardIdentity::getInput(void) return in; } -template -std::vector TWardIdentity::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TWardIdentity::getOutput(void) { diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp index 2b53c87a..7df40370 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp @@ -99,7 +99,6 @@ public:\ virtual ~T##modname(void) = default;\ /* dependency relation */ \ virtual std::vector getInput(void);\ - virtual std::vector getReference(void);\ virtual std::vector getOutput(void);\ public:\ std::vector VA_label = {"V", "A"};\ diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc index 7a73a7e3..43dfa609 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc @@ -74,13 +74,6 @@ std::vector TWeakHamiltonianEye::getInput(void) return in; } -std::vector TWeakHamiltonianEye::getReference(void) -{ - std::vector out = {}; - - return out; -} - std::vector TWeakHamiltonianEye::getOutput(void) { std::vector out = {}; diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc index c333713d..8a7113e3 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc @@ -74,12 +74,6 @@ std::vector TWeakHamiltonianNonEye::getInput(void) return in; } -std::vector TWeakHamiltonianNonEye::getReference(void) -{ - std::vector out = 
{}; - - return out; -} std::vector TWeakHamiltonianNonEye::getOutput(void) { std::vector out = {}; diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc index e0f07f6c..18423f3e 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc @@ -76,13 +76,6 @@ std::vector TWeakNeutral4ptDisc::getInput(void) return in; } -std::vector TWeakNeutral4ptDisc::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - std::vector TWeakNeutral4ptDisc::getOutput(void) { std::vector out = {}; diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index e77df287..05b3d17a 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -84,7 +84,6 @@ public: virtual ~TGaugeProp(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -116,14 +115,6 @@ std::vector TGaugeProp::getInput(void) return in; } -template -std::vector TGaugeProp::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TGaugeProp::getOutput(void) { diff --git a/extras/Hadrons/Modules/MGauge/Load.cc b/extras/Hadrons/Modules/MGauge/Load.cc index c2fd49de..b168a010 100644 --- a/extras/Hadrons/Modules/MGauge/Load.cc +++ b/extras/Hadrons/Modules/MGauge/Load.cc @@ -49,13 +49,6 @@ std::vector TLoad::getInput(void) return in; } -std::vector TLoad::getReference(void) -{ - std::vector ref; - - return ref; -} - std::vector TLoad::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/MGauge/Load.hpp b/extras/Hadrons/Modules/MGauge/Load.hpp index a967d714..a338af79 100644 --- a/extras/Hadrons/Modules/MGauge/Load.hpp +++ b/extras/Hadrons/Modules/MGauge/Load.hpp @@ -57,7 +57,6 @@ public: virtual ~TLoad(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/Random.cc b/extras/Hadrons/Modules/MGauge/Random.cc index fdb0d145..97afd338 100644 --- a/extras/Hadrons/Modules/MGauge/Random.cc +++ b/extras/Hadrons/Modules/MGauge/Random.cc @@ -49,13 +49,6 @@ std::vector TRandom::getInput(void) return in; } -std::vector TRandom::getReference(void) -{ - std::vector ref; - - return ref; -} - std::vector TRandom::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/MGauge/Random.hpp b/extras/Hadrons/Modules/MGauge/Random.hpp index 30525113..a07130e4 100644 --- a/extras/Hadrons/Modules/MGauge/Random.hpp +++ b/extras/Hadrons/Modules/MGauge/Random.hpp @@ -50,7 +50,6 @@ public: virtual ~TRandom(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/StochEm.cc b/extras/Hadrons/Modules/MGauge/StochEm.cc index a878ae2f..c5318573 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.cc +++ b/extras/Hadrons/Modules/MGauge/StochEm.cc @@ -47,13 +47,6 @@ std::vector TStochEm::getInput(void) return in; } -std::vector TStochEm::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - std::vector TStochEm::getOutput(void) { std::vector out = {getName()}; diff --git 
a/extras/Hadrons/Modules/MGauge/StochEm.hpp b/extras/Hadrons/Modules/MGauge/StochEm.hpp index efc2e39b..bacb5172 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.hpp +++ b/extras/Hadrons/Modules/MGauge/StochEm.hpp @@ -59,7 +59,6 @@ public: virtual ~TStochEm(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index af31f124..8bee1ecc 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -47,13 +47,6 @@ std::vector TUnit::getInput(void) return std::vector(); } -std::vector TUnit::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - std::vector TUnit::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/MGauge/Unit.hpp b/extras/Hadrons/Modules/MGauge/Unit.hpp index 4b69f0ce..c1650cc7 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.hpp +++ b/extras/Hadrons/Modules/MGauge/Unit.hpp @@ -50,7 +50,6 @@ public: virtual ~TUnit(void) = default; // dependencies/products virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup diff --git a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp index 0feb5efb..512c731a 100644 --- a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp +++ b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp @@ -73,7 +73,6 @@ public: virtual ~TNoiseLoop(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -102,15 +101,6 @@ std::vector TNoiseLoop::getInput(void) return in; } - -template -std::vector TNoiseLoop::getReference(void) -{ - std::vector out = {}; - - return out; -} - template std::vector TNoiseLoop::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp index 42cae4f6..43be3009 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -60,7 +60,6 @@ public: virtual ~TPoint(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -94,14 +93,6 @@ std::vector TPoint::getInput(void) return in; } -template -std::vector TPoint::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TPoint::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSink/Smear.hpp b/extras/Hadrons/Modules/MSink/Smear.hpp index 03cc861a..e85ab263 100644 --- a/extras/Hadrons/Modules/MSink/Smear.hpp +++ b/extras/Hadrons/Modules/MSink/Smear.hpp @@ -61,7 +61,6 @@ public: virtual ~TSmear(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -90,14 +89,6 @@ std::vector TSmear::getInput(void) return in; } -template -std::vector TSmear::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TSmear::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index 6470c77f..1d8241cf 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -71,7 +71,6 @@ public: virtual ~TPoint(void) = 
default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -101,14 +100,6 @@ std::vector TPoint::getInput(void) return in; } -template -std::vector TPoint::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TPoint::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSource/SeqConserved.hpp b/extras/Hadrons/Modules/MSource/SeqConserved.hpp index 9ccbee1b..3e8ef457 100644 --- a/extras/Hadrons/Modules/MSource/SeqConserved.hpp +++ b/extras/Hadrons/Modules/MSource/SeqConserved.hpp @@ -82,7 +82,6 @@ public: virtual ~TSeqConserved(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -111,14 +110,6 @@ std::vector TSeqConserved::getInput(void) return in; } -template -std::vector TSeqConserved::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TSeqConserved::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSource/SeqGamma.hpp b/extras/Hadrons/Modules/MSource/SeqGamma.hpp index d2b3c958..abad5ace 100644 --- a/extras/Hadrons/Modules/MSource/SeqGamma.hpp +++ b/extras/Hadrons/Modules/MSource/SeqGamma.hpp @@ -80,7 +80,6 @@ public: virtual ~TSeqGamma(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -114,14 +113,6 @@ std::vector TSeqGamma::getInput(void) return in; } -template -std::vector TSeqGamma::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TSeqGamma::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSource/Wall.hpp b/extras/Hadrons/Modules/MSource/Wall.hpp index d9814d9e..9d5f1f46 100644 --- a/extras/Hadrons/Modules/MSource/Wall.hpp +++ b/extras/Hadrons/Modules/MSource/Wall.hpp @@ -72,7 +72,6 @@ public: virtual ~TWall(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -106,14 +105,6 @@ std::vector TWall::getInput(void) return in; } -template -std::vector TWall::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TWall::getOutput(void) { diff --git a/extras/Hadrons/Modules/MSource/Z2.hpp b/extras/Hadrons/Modules/MSource/Z2.hpp index 39840319..3593cb34 100644 --- a/extras/Hadrons/Modules/MSource/Z2.hpp +++ b/extras/Hadrons/Modules/MSource/Z2.hpp @@ -75,7 +75,6 @@ public: virtual ~TZ2(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -109,14 +108,6 @@ std::vector TZ2::getInput(void) return in; } -template -std::vector TZ2::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TZ2::getOutput(void) { diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index 081d2911..0647884c 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -79,7 +79,6 @@ public: virtual ~TTestSeqConserved(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ 
-108,14 +107,6 @@ std::vector TTestSeqConserved::getInput(void) return in; } -template -std::vector TTestSeqConserved::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TTestSeqConserved::getOutput(void) { diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp index 30bd4b69..fd53eab8 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp @@ -63,7 +63,6 @@ public: virtual ~TTestSeqGamma(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); protected: // setup @@ -92,14 +91,6 @@ std::vector TTestSeqGamma::getInput(void) return in; } -template -std::vector TTestSeqGamma::getReference(void) -{ - std::vector ref = {}; - - return ref; -} - template std::vector TTestSeqGamma::getOutput(void) { diff --git a/extras/Hadrons/Modules/templates/Module.cc.template b/extras/Hadrons/Modules/templates/Module.cc.template index 29edadfb..0c509d6d 100644 --- a/extras/Hadrons/Modules/templates/Module.cc.template +++ b/extras/Hadrons/Modules/templates/Module.cc.template @@ -19,14 +19,6 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } -template -std::vector T___FILEBASENAME___::getReference(void) -{ - std::vector in = {}; - - return in; -} - std::vector T___FILEBASENAME___::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/templates/Module.hpp.template b/extras/Hadrons/Modules/templates/Module.hpp.template index b59e168f..fb43260f 100644 --- a/extras/Hadrons/Modules/templates/Module.hpp.template +++ b/extras/Hadrons/Modules/templates/Module.hpp.template @@ -26,7 +26,6 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_in_NS.cc.template b/extras/Hadrons/Modules/templates/Module_in_NS.cc.template index 880129bd..8b2a0ec0 100644 --- a/extras/Hadrons/Modules/templates/Module_in_NS.cc.template +++ b/extras/Hadrons/Modules/templates/Module_in_NS.cc.template @@ -20,14 +20,6 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } -template -std::vector T___FILEBASENAME___::getReference(void) -{ - std::vector in = {}; - - return in; -} - std::vector T___FILEBASENAME___::getOutput(void) { std::vector out = {getName()}; diff --git a/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template b/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template index f90cb052..ea77b12a 100644 --- a/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_in_NS.hpp.template @@ -28,7 +28,6 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_tmp.hpp.template b/extras/Hadrons/Modules/templates/Module_tmp.hpp.template index b4e7f87f..2ee053a9 100644 --- a/extras/Hadrons/Modules/templates/Module_tmp.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_tmp.hpp.template @@ -27,7 +27,6 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); - virtual 
std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); diff --git a/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template b/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template index 9aef1c92..b79c0ad3 100644 --- a/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template +++ b/extras/Hadrons/Modules/templates/Module_tmp_in_NS.hpp.template @@ -29,7 +29,6 @@ public: virtual ~T___FILEBASENAME___(void) = default; // dependency relation virtual std::vector getInput(void); - virtual std::vector getReference(void); virtual std::vector getOutput(void); // setup virtual void setup(void); @@ -57,14 +56,6 @@ std::vector T___FILEBASENAME___::getInput(void) return in; } -template -std::vector T___FILEBASENAME___::getReference(void) -{ - std::vector in = {}; - - return in; -} - template std::vector T___FILEBASENAME___::getOutput(void) { From 65d4f17976ec7920aeccb880a50ad852e7fe7290 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 19 Dec 2017 20:28:32 +0000 Subject: [PATCH 117/145] Hadrons: no errors when trying to recreate a cache --- extras/Hadrons/Environment.hpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index adea13ce..7f1bc26d 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -228,7 +228,11 @@ void Environment::createDerivedObject(const std::string name, MemoryProfiler::stats = nullptr; } } - else + // object already exists, no error if it is a cache, error otherwise + else if ((object_[address].storage != Storage::cache) or + (object_[address].storage != storage) or + (object_[address].name != name) or + (object_[address].type != &typeid(T))) { HADRON_ERROR(Definition, "object '" + name + "' already allocated"); } From 67c3fa0f5f2adda473b7543121a7ad6041547259 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Thu, 21 Dec 2017 11:39:07 +0000 Subject: [PATCH 118/145] Hadrons: all modules are now ported, more tests need to be done --- extras/Hadrons/Modules.hpp | 40 ++--- extras/Hadrons/Modules/MScalar/ChargedProp.cc | 153 +++++++++--------- .../Hadrons/Modules/MScalar/ChargedProp.hpp | 6 +- extras/Hadrons/Modules/MScalar/FreeProp.cc | 27 ++-- extras/Hadrons/Modules/MScalar/FreeProp.hpp | 1 + extras/Hadrons/make_module_list.sh | 30 ++++ extras/Hadrons/modules.inc | 49 +++--- 7 files changed, 165 insertions(+), 141 deletions(-) diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 61a20058..cf381d0f 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -28,33 +28,33 @@ See the full license in the file "LICENSE" in the top level distribution directo *************************************************************************************/ /* END LEGAL */ -#include -#include #include -#include -#include #include -#include #include -#include #include +#include #include +#include +#include +#include #include -#include -#include -#include -#include -#include -// #include -// #include -// #include -#include -#include -#include -#include -#include #include +#include #include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.cc b/extras/Hadrons/Modules/MScalar/ChargedProp.cc index cd8dc244..6cb75a28 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.cc +++ 
b/extras/Hadrons/Modules/MScalar/ChargedProp.cc @@ -37,90 +37,44 @@ void TChargedProp::setup(void) { phaseName_.push_back("_shiftphase_" + std::to_string(mu)); } - GFSrcName_ = "_" + getName() + "_DinvSrc"; - if (!env().hasRegisteredObject(freeMomPropName_)) + GFSrcName_ = getName() + "_DinvSrc"; + fftName_ = getName() + "_fft"; + + freeMomPropDone_ = env().hasCreatedObject(freeMomPropName_); + GFSrcDone_ = env().hasCreatedObject(GFSrcName_); + phasesDone_ = env().hasCreatedObject(phaseName_[0]); + envCacheLat(ScalarField, freeMomPropName_); + for (unsigned int mu = 0; mu < env().getNd(); ++mu) { - env().registerLattice(freeMomPropName_); + envCacheLat(ScalarField, phaseName_[mu]); } - if (!env().hasRegisteredObject(phaseName_[0])) - { - for (unsigned int mu = 0; mu < env().getNd(); ++mu) - { - env().registerLattice(phaseName_[mu]); - } - } - if (!env().hasRegisteredObject(GFSrcName_)) - { - env().registerLattice(GFSrcName_); - } - env().registerLattice(getName()); + envCacheLat(ScalarField, GFSrcName_); + envCreateLat(ScalarField, getName()); + envTmpLat(ScalarField, "buf"); + envTmpLat(ScalarField, "result"); + envTmpLat(ScalarField, "Amu"); + envCache(FFT, fftName_, 1, env().getGrid()); } // execution /////////////////////////////////////////////////////////////////// void TChargedProp::execute(void) { // CACHING ANALYTIC EXPRESSIONS - ScalarField &source = *env().getObject(par().source); - Complex ci(0.0,1.0); - FFT fft(env().getGrid()); - - // cache free scalar propagator - if (!env().hasCreatedObject(freeMomPropName_)) - { - LOG(Message) << "Caching momentum space free scalar propagator" - << " (mass= " << par().mass << ")..." << std::endl; - freeMomProp_ = env().createLattice(freeMomPropName_); - SIMPL::MomentumSpacePropagator(*freeMomProp_, par().mass); - } - else - { - freeMomProp_ = env().getObject(freeMomPropName_); - } - // cache G*F*src - if (!env().hasCreatedObject(GFSrcName_)) - - { - GFSrc_ = env().createLattice(GFSrcName_); - fft.FFT_all_dim(*GFSrc_, source, FFT::forward); - *GFSrc_ = (*freeMomProp_)*(*GFSrc_); - } - else - { - GFSrc_ = env().getObject(GFSrcName_); - } - // cache phases - if (!env().hasCreatedObject(phaseName_[0])) - { - std::vector &l = env().getGrid()->_fdimensions; - - LOG(Message) << "Caching shift phases..." << std::endl; - for (unsigned int mu = 0; mu < env().getNd(); ++mu) - { - Real twoPiL = M_PI*2./l[mu]; - - phase_.push_back(env().createLattice(phaseName_[mu])); - LatticeCoordinate(*(phase_[mu]), mu); - *(phase_[mu]) = exp(ci*twoPiL*(*(phase_[mu]))); - } - } - else - { - for (unsigned int mu = 0; mu < env().getNd(); ++mu) - { - phase_.push_back(env().getObject(phaseName_[mu])); - } - } + makeCaches(); // PROPAGATOR CALCULATION LOG(Message) << "Computing charged scalar propagator" << " (mass= " << par().mass << ", charge= " << par().charge << ")..." << std::endl; - ScalarField &prop = *env().createLattice(getName()); - ScalarField buf(env().getGrid()); - ScalarField &GFSrc = *GFSrc_, &G = *freeMomProp_; - double q = par().charge; - + auto &prop = envGet(ScalarField, getName()); + auto &GFSrc = envGet(ScalarField, GFSrcName_); + auto &G = envGet(ScalarField, freeMomPropName_); + auto &fft = envGet(FFT, fftName_); + double q = par().charge; + envGetTmp(ScalarField, result); + envGetTmp(ScalarField, buf); + // G*F*Src prop = GFSrc; @@ -146,7 +100,7 @@ void TChargedProp::execute(void) if (!par().output.empty()) { std::string filename = par().output + "." 
+ - std::to_string(env().getTrajectory()); + std::to_string(vm().getTrajectory()); LOG(Message) << "Saving zero-momentum projection to '" << filename << "'..." << std::endl; @@ -166,15 +120,55 @@ void TChargedProp::execute(void) } } +void TChargedProp::makeCaches(void) +{ + auto &freeMomProp = envGet(ScalarField, freeMomPropName_); + auto &GFSrc = envGet(ScalarField, GFSrcName_); + auto &fft = envGet(FFT, fftName_); + + if (!freeMomPropDone_) + { + LOG(Message) << "Caching momentum space free scalar propagator" + << " (mass= " << par().mass << ")..." << std::endl; + SIMPL::MomentumSpacePropagator(freeMomProp, par().mass); + } + if (!GFSrcDone_) + { + FFT fft(env().getGrid()); + auto &source = envGet(ScalarField, par().source); + + LOG(Message) << "Caching G*F*src..." << std::endl; + fft.FFT_all_dim(GFSrc, source, FFT::forward); + GFSrc = freeMomProp*GFSrc; + } + if (!phasesDone_) + { + std::vector &l = env().getGrid()->_fdimensions; + Complex ci(0.0,1.0); + + LOG(Message) << "Caching shift phases..." << std::endl; + for (unsigned int mu = 0; mu < env().getNd(); ++mu) + { + Real twoPiL = M_PI*2./l[mu]; + auto &phmu = envGet(ScalarField, phaseName_[mu]); + + LatticeCoordinate(phmu, mu); + phmu = exp(ci*twoPiL*phmu); + phase_.push_back(&phmu); + } + } +} + void TChargedProp::momD1(ScalarField &s, FFT &fft) { - EmField &A = *env().getObject(par().emField); - ScalarField buf(env().getGrid()), result(env().getGrid()), - Amu(env().getGrid()); + auto &A = envGet(EmField, par().emField); Complex ci(0.0,1.0); - result = zero; + envGetTmp(ScalarField, buf); + envGetTmp(ScalarField, result); + envGetTmp(ScalarField, Amu); + result = zero; for (unsigned int mu = 0; mu < env().getNd(); ++mu) { Amu = peekLorentz(A, mu); @@ -198,12 +192,13 @@ void TChargedProp::momD1(ScalarField &s, FFT &fft) void TChargedProp::momD2(ScalarField &s, FFT &fft) { - EmField &A = *env().getObject(par().emField); - ScalarField buf(env().getGrid()), result(env().getGrid()), - Amu(env().getGrid()); + auto &A = envGet(EmField, par().emField); + + envGetTmp(ScalarField, buf); + envGetTmp(ScalarField, result); + envGetTmp(ScalarField, Amu); result = zero; - for (unsigned int mu = 0; mu < env().getNd(); ++mu) { Amu = peekLorentz(A, mu); diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp index ab6a0184..cfcce28e 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp +++ b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp @@ -43,14 +43,14 @@ protected: // execution virtual void execute(void); private: + void makeCaches(void); void momD1(ScalarField &s, FFT &fft); void momD2(ScalarField &s, FFT &fft); private: - std::string freeMomPropName_, GFSrcName_; + bool freeMomPropDone_, GFSrcDone_, phasesDone_; + std::string freeMomPropName_, GFSrcName_, fftName_; std::vector phaseName_; - ScalarField *freeMomProp_, *GFSrc_; std::vector phase_; - EmField *A; }; MODULE_REGISTER_NS(ChargedProp, TChargedProp, MScalar); diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.cc b/extras/Hadrons/Modules/MScalar/FreeProp.cc index 674867e3..924db288 100644 --- a/extras/Hadrons/Modules/MScalar/FreeProp.cc +++ b/extras/Hadrons/Modules/MScalar/FreeProp.cc @@ -33,38 +33,31 @@ void TFreeProp::setup(void) { freeMomPropName_ = FREEMOMPROP(par().mass); - if (!env().hasRegisteredObject(freeMomPropName_)) - { - env().registerLattice(freeMomPropName_); - } - env().registerLattice(getName()); + freePropDone_ = env().hasCreatedObject(freeMomPropName_); + envCacheLat(ScalarField, freeMomPropName_); + 
envCreateLat(ScalarField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TFreeProp::execute(void) { - ScalarField &prop = *env().createLattice(getName()); - ScalarField &source = *env().getObject(par().source); - ScalarField *freeMomProp; + auto &freeMomProp = envGet(ScalarField, freeMomPropName_); + auto &prop = envGet(ScalarField, getName()); + auto &source = envGet(ScalarField, par().source); - if (!env().hasCreatedObject(freeMomPropName_)) + if (!freePropDone_) { LOG(Message) << "Caching momentum space free scalar propagator" << " (mass= " << par().mass << ")..." << std::endl; - freeMomProp = env().createLattice(freeMomPropName_); - SIMPL::MomentumSpacePropagator(*freeMomProp, par().mass); - } - else - { - freeMomProp = env().getObject(freeMomPropName_); + SIMPL::MomentumSpacePropagator(freeMomProp, par().mass); } LOG(Message) << "Computing free scalar propagator..." << std::endl; - SIMPL::FreePropagator(source, prop, *freeMomProp); + SIMPL::FreePropagator(source, prop, freeMomProp); if (!par().output.empty()) { TextWriter writer(par().output + "." + - std::to_string(env().getTrajectory())); + std::to_string(vm().getTrajectory())); std::vector buf; std::vector result; diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.hpp b/extras/Hadrons/Modules/MScalar/FreeProp.hpp index 38372a0c..6b956134 100644 --- a/extras/Hadrons/Modules/MScalar/FreeProp.hpp +++ b/extras/Hadrons/Modules/MScalar/FreeProp.hpp @@ -40,6 +40,7 @@ protected: virtual void execute(void); private: std::string freeMomPropName_; + bool freePropDone_; }; MODULE_REGISTER_NS(FreeProp, TFreeProp, MScalar); diff --git a/extras/Hadrons/make_module_list.sh b/extras/Hadrons/make_module_list.sh index ddc56ff6..8c6fa4da 100755 --- a/extras/Hadrons/make_module_list.sh +++ b/extras/Hadrons/make_module_list.sh @@ -7,6 +7,36 @@ echo 'modules_hpp =\' >> modules.inc find Modules -name '*.hpp' -type f -print | sed 's/^/ /;$q;s/$/ \\/' >> modules.inc echo '' >> modules.inc rm -f Modules.hpp +echo "/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules.hpp + +Copyright (C) 2015 +Copyright (C) 2016 +Copyright (C) 2017 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
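PATCH 117/145 and PATCH 118/145 work together: the first makes Environment::createDerivedObject tolerate a repeated creation request when the existing entry is a cache with the same storage, name and type, and the second relies on that through the envCache/envCacheLat macros so that several modules (for example FreeProp and ChargedProp, which both cache the momentum-space free propagator under FREEMOMPROP(mass), plus the shared FFT plan and shift phases) can reuse cached lattices instead of failing with "object already allocated". The standalone sketch below illustrates that check with plain C++ containers; the names (Storage, ObjInfo, registry, createObject) are illustrative only and are not the Hadrons API.

    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <typeinfo>

    // Schematic model of the "no error when recreating a cache" rule:
    // a second creation request is accepted only if the existing entry
    // is a cache and the requested storage and type match it.
    enum class Storage { object, cache, temporary };

    struct ObjInfo
    {
        Storage               storage;
        const std::type_info *type;
    };

    std::map<std::string, ObjInfo> registry;

    template <typename T>
    void createObject(const std::string &name, Storage storage)
    {
        auto it = registry.find(name);

        if (it == registry.end())
        {
            registry[name] = ObjInfo{storage, &typeid(T)}; // first creation
        }
        else if ((it->second.storage != Storage::cache) or
                 (it->second.storage != storage) or
                 (it->second.type != &typeid(T)))
        {
            throw std::runtime_error("object '" + name + "' already allocated");
        }
        // otherwise: the same cache is requested again, reuse it silently
    }

    int main(void)
    {
        createObject<double>("freeMomProp", Storage::cache);
        createObject<double>("freeMomProp", Storage::cache);   // OK: cache reuse
        try
        {
            createObject<double>("freeMomProp", Storage::object); // rejected
        }
        catch (const std::exception &e)
        {
            std::cerr << e.what() << std::endl;
        }
        return 0;
    }

The exemption for caches matters because cached objects are keyed by their parameters (the mass in FREEMOMPROP(mass), the FFT plan name, the shift-phase names), so two modules asking for the same cache are asking for the same data, and the second request is a reuse rather than a conflict.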
+ +See the full license in the file \"LICENSE\" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +" > Modules.hpp for f in `find Modules -name '*.hpp'`; do echo "#include " >> Modules.hpp done diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 2f4d183e..199bb5cd 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -1,38 +1,43 @@ modules_cc =\ - Modules/MGauge/Unit.cc \ Modules/MContraction/WeakHamiltonianEye.cc \ - Modules/MContraction/WeakHamiltonianNonEye.cc \ Modules/MContraction/WeakNeutral4ptDisc.cc \ + Modules/MContraction/WeakHamiltonianNonEye.cc \ Modules/MGauge/Load.cc \ + Modules/MGauge/Unit.cc \ + Modules/MGauge/StochEm.cc \ Modules/MGauge/Random.cc \ - Modules/MGauge/StochEm.cc + Modules/MScalar/FreeProp.cc \ + Modules/MScalar/ChargedProp.cc modules_hpp =\ - Modules/MAction/DWF.hpp \ - Modules/MAction/Wilson.hpp \ - Modules/MSink/Point.hpp \ - Modules/MSource/Point.hpp \ - Modules/MGauge/Load.hpp \ - Modules/MGauge/Random.hpp \ - Modules/MGauge/StochEm.hpp \ - Modules/MGauge/Unit.hpp \ - Modules/MSolver/RBPrecCG.hpp \ - Modules/MFermion/GaugeProp.hpp \ Modules/MContraction/Baryon.hpp \ - Modules/MContraction/DiscLoop.hpp \ - Modules/MContraction/Gamma3pt.hpp \ Modules/MContraction/Meson.hpp \ - Modules/MContraction/WardIdentity.hpp \ Modules/MContraction/WeakHamiltonian.hpp \ - Modules/MContraction/WeakHamiltonianEye.hpp \ Modules/MContraction/WeakHamiltonianNonEye.hpp \ + Modules/MContraction/DiscLoop.hpp \ Modules/MContraction/WeakNeutral4ptDisc.hpp \ - Modules/MLoop/NoiseLoop.hpp \ - Modules/MSink/Smear.hpp \ - Modules/MSolver/RBPrecCG.hpp \ - Modules/MSource/SeqConserved.hpp \ + Modules/MContraction/Gamma3pt.hpp \ + Modules/MContraction/WardIdentity.hpp \ + Modules/MContraction/WeakHamiltonianEye.hpp \ + Modules/MFermion/GaugeProp.hpp \ Modules/MSource/SeqGamma.hpp \ + Modules/MSource/Point.hpp \ Modules/MSource/Wall.hpp \ Modules/MSource/Z2.hpp \ + Modules/MSource/SeqConserved.hpp \ + Modules/MSink/Smear.hpp \ + Modules/MSink/Point.hpp \ + Modules/MSolver/RBPrecCG.hpp \ + Modules/MGauge/Load.hpp \ + Modules/MGauge/Unit.hpp \ + Modules/MGauge/Random.hpp \ + Modules/MGauge/StochEm.hpp \ + Modules/MUtilities/TestSeqGamma.hpp \ Modules/MUtilities/TestSeqConserved.hpp \ - Modules/MUtilities/TestSeqGamma.hpp + Modules/MLoop/NoiseLoop.hpp \ + Modules/MScalar/FreeProp.hpp \ + Modules/MScalar/Scalar.hpp \ + Modules/MScalar/ChargedProp.hpp \ + Modules/MAction/DWF.hpp \ + Modules/MAction/Wilson.hpp + From 185da83454961773a4666d4fff45724abb426f5b Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 26 Dec 2017 14:05:17 +0100 Subject: [PATCH 119/145] Hadrons: new MIO module namespace, NERSC loader moved there --- extras/Hadrons/Modules.hpp | 9 +++--- .../{MGauge/Load.cc => MIO/LoadNersc.cc} | 25 +++++++--------- .../{MGauge/Load.hpp => MIO/LoadNersc.hpp} | 30 ++++++++----------- extras/Hadrons/modules.inc | 8 ++--- 4 files changed, 32 insertions(+), 40 deletions(-) rename extras/Hadrons/Modules/{MGauge/Load.cc => MIO/LoadNersc.cc} (81%) rename extras/Hadrons/Modules/{MGauge/Load.hpp => MIO/LoadNersc.hpp} (75%) diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index cf381d0f..3ae2f9a7 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -2,13 +2,12 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: extras/Hadrons/Modules.hpp +Source file: Modules.hpp -Copyright (C) 2015 -Copyright 
(C) 2016 -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -46,7 +45,6 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include #include -#include #include #include #include @@ -58,3 +56,4 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include #include +#include diff --git a/extras/Hadrons/Modules/MGauge/Load.cc b/extras/Hadrons/Modules/MIO/LoadNersc.cc similarity index 81% rename from extras/Hadrons/Modules/MGauge/Load.cc rename to extras/Hadrons/Modules/MIO/LoadNersc.cc index b168a010..2c35d2e1 100644 --- a/extras/Hadrons/Modules/MGauge/Load.cc +++ b/extras/Hadrons/Modules/MIO/LoadNersc.cc @@ -2,12 +2,10 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: extras/Hadrons/Modules/MGauge/Load.cc +Source file: LoadNersc.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 -Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -26,30 +24,29 @@ with this program; if not, write to the Free Software Foundation, Inc., See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ - -#include +#include using namespace Grid; using namespace Hadrons; -using namespace MGauge; +using namespace MIO; /****************************************************************************** -* TLoad implementation * +* TLoadNersc implementation * ******************************************************************************/ // constructor ///////////////////////////////////////////////////////////////// -TLoad::TLoad(const std::string name) -: Module(name) +TLoadNersc::TLoadNersc(const std::string name) +: Module(name) {} // dependencies/products /////////////////////////////////////////////////////// -std::vector TLoad::getInput(void) +std::vector TLoadNersc::getInput(void) { std::vector in; return in; } -std::vector TLoad::getOutput(void) +std::vector TLoadNersc::getOutput(void) { std::vector out = {getName()}; @@ -57,13 +54,13 @@ std::vector TLoad::getOutput(void) } // setup /////////////////////////////////////////////////////////////////////// -void TLoad::setup(void) +void TLoadNersc::setup(void) { envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// -void TLoad::execute(void) +void TLoadNersc::execute(void) { FieldMetaData header; std::string fileName = par().file + "." 
diff --git a/extras/Hadrons/Modules/MGauge/Load.hpp b/extras/Hadrons/Modules/MIO/LoadNersc.hpp similarity index 75% rename from extras/Hadrons/Modules/MGauge/Load.hpp rename to extras/Hadrons/Modules/MIO/LoadNersc.hpp index a338af79..5bd251c3 100644 --- a/extras/Hadrons/Modules/MGauge/Load.hpp +++ b/extras/Hadrons/Modules/MIO/LoadNersc.hpp @@ -2,12 +2,10 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: extras/Hadrons/Modules/MGauge/Load.hpp +Source file: LoadNersc.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 -Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -26,9 +24,8 @@ with this program; if not, write to the Free Software Foundation, Inc., See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ - -#ifndef Hadrons_MGauge_Load_hpp_ -#define Hadrons_MGauge_Load_hpp_ +#ifndef Hadrons_MIO_LoadNersc_hpp_ +#define Hadrons_MIO_LoadNersc_hpp_ #include #include @@ -37,38 +34,37 @@ See the full license in the file "LICENSE" in the top level distribution directo BEGIN_HADRONS_NAMESPACE /****************************************************************************** - * Load a NERSC configuration * + * Load a NERSC configuration * ******************************************************************************/ -BEGIN_MODULE_NAMESPACE(MGauge) +BEGIN_MODULE_NAMESPACE(MIO) -class LoadPar: Serializable +class LoadNerscPar: Serializable { public: - GRID_SERIALIZABLE_CLASS_MEMBERS(LoadPar, + GRID_SERIALIZABLE_CLASS_MEMBERS(LoadNerscPar, std::string, file); }; -class TLoad: public Module +class TLoadNersc: public Module { public: // constructor - TLoad(const std::string name); + TLoadNersc(const std::string name); // destructor - virtual ~TLoad(void) = default; + virtual ~TLoadNersc(void) = default; // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); -protected: // setup virtual void setup(void); // execution virtual void execute(void); }; -MODULE_REGISTER_NS(Load, TLoad, MGauge); +MODULE_REGISTER_NS(LoadNersc, TLoadNersc, MIO); END_MODULE_NAMESPACE END_HADRONS_NAMESPACE -#endif // Hadrons_MGauge_Load_hpp_ +#endif // Hadrons_MIO_LoadNersc_hpp_ diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 199bb5cd..85fa0971 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -2,12 +2,12 @@ modules_cc =\ Modules/MContraction/WeakHamiltonianEye.cc \ Modules/MContraction/WeakNeutral4ptDisc.cc \ Modules/MContraction/WeakHamiltonianNonEye.cc \ - Modules/MGauge/Load.cc \ Modules/MGauge/Unit.cc \ Modules/MGauge/StochEm.cc \ Modules/MGauge/Random.cc \ Modules/MScalar/FreeProp.cc \ - Modules/MScalar/ChargedProp.cc + Modules/MScalar/ChargedProp.cc \ + Modules/MIO/LoadNersc.cc modules_hpp =\ Modules/MContraction/Baryon.hpp \ @@ -28,7 +28,6 @@ modules_hpp =\ Modules/MSink/Smear.hpp \ Modules/MSink/Point.hpp \ Modules/MSolver/RBPrecCG.hpp \ - Modules/MGauge/Load.hpp \ Modules/MGauge/Unit.hpp \ Modules/MGauge/Random.hpp \ Modules/MGauge/StochEm.hpp \ @@ -39,5 +38,6 @@ modules_hpp =\ Modules/MScalar/Scalar.hpp \ Modules/MScalar/ChargedProp.hpp \ Modules/MAction/DWF.hpp \ - Modules/MAction/Wilson.hpp + Modules/MAction/Wilson.hpp \ + Modules/MIO/LoadNersc.hpp From 8b30c5956c7e78250303a1a80e6fd0cd79a7682a Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Tue, 
26 Dec 2017 14:16:47 +0100 Subject: [PATCH 120/145] Hadrons: copyright update --- extras/Hadrons/Application.cc | 3 +- extras/Hadrons/Application.hpp | 3 +- extras/Hadrons/Environment.cc | 3 +- extras/Hadrons/Environment.hpp | 3 +- extras/Hadrons/Exceptions.cc | 4 +-- extras/Hadrons/Exceptions.hpp | 2 +- extras/Hadrons/Factory.hpp | 3 +- extras/Hadrons/GeneticScheduler.hpp | 3 +- extras/Hadrons/Global.cc | 3 +- extras/Hadrons/Global.hpp | 4 +-- extras/Hadrons/Graph.hpp | 3 +- extras/Hadrons/HadronsXmlRun.cc | 3 +- extras/Hadrons/HadronsXmlSchedule.cc | 3 +- extras/Hadrons/Module.cc | 3 +- extras/Hadrons/Module.hpp | 3 +- extras/Hadrons/ModuleFactory.hpp | 3 +- extras/Hadrons/Modules.hpp | 2 +- extras/Hadrons/Modules/MAction/DWF.hpp | 4 +-- extras/Hadrons/Modules/MAction/Wilson.hpp | 4 +-- .../Hadrons/Modules/MContraction/Baryon.hpp | 4 +-- .../Hadrons/Modules/MContraction/DiscLoop.hpp | 5 ++-- .../Hadrons/Modules/MContraction/Gamma3pt.hpp | 5 ++-- extras/Hadrons/Modules/MContraction/Meson.hpp | 6 ++-- .../Modules/MContraction/WardIdentity.hpp | 5 ++-- .../Modules/MContraction/WeakHamiltonian.hpp | 5 ++-- .../MContraction/WeakHamiltonianEye.cc | 5 ++-- .../MContraction/WeakHamiltonianEye.hpp | 5 ++-- .../MContraction/WeakHamiltonianNonEye.cc | 5 ++-- .../MContraction/WeakHamiltonianNonEye.hpp | 5 ++-- .../MContraction/WeakNeutral4ptDisc.cc | 5 ++-- .../MContraction/WeakNeutral4ptDisc.hpp | 5 ++-- extras/Hadrons/Modules/MFermion/GaugeProp.hpp | 6 ++-- extras/Hadrons/Modules/MGauge/Random.cc | 3 +- extras/Hadrons/Modules/MGauge/Random.hpp | 3 +- extras/Hadrons/Modules/MGauge/StochEm.cc | 4 +-- extras/Hadrons/Modules/MGauge/StochEm.hpp | 4 +-- extras/Hadrons/Modules/MGauge/Unit.cc | 3 +- extras/Hadrons/Modules/MGauge/Unit.hpp | 3 +- extras/Hadrons/Modules/MIO/LoadNersc.cc | 3 +- extras/Hadrons/Modules/MIO/LoadNersc.hpp | 3 +- extras/Hadrons/Modules/MLoop/NoiseLoop.hpp | 5 ++-- extras/Hadrons/Modules/MScalar/ChargedProp.cc | 28 +++++++++++++++++++ .../Hadrons/Modules/MScalar/ChargedProp.hpp | 27 ++++++++++++++++++ extras/Hadrons/Modules/MScalar/FreeProp.cc | 27 ++++++++++++++++++ extras/Hadrons/Modules/MScalar/FreeProp.hpp | 27 ++++++++++++++++++ extras/Hadrons/Modules/MScalar/Scalar.hpp | 27 ++++++++++++++++++ extras/Hadrons/Modules/MSink/Point.hpp | 3 +- extras/Hadrons/Modules/MSink/Smear.hpp | 5 ++-- extras/Hadrons/Modules/MSolver/RBPrecCG.hpp | 3 +- extras/Hadrons/Modules/MSource/Point.hpp | 4 +-- .../Hadrons/Modules/MSource/SeqConserved.hpp | 7 +++-- extras/Hadrons/Modules/MSource/SeqGamma.hpp | 5 ++-- extras/Hadrons/Modules/MSource/Wall.hpp | 5 ++-- extras/Hadrons/Modules/MSource/Z2.hpp | 3 +- .../Modules/MUtilities/TestSeqConserved.hpp | 5 ++-- .../Modules/MUtilities/TestSeqGamma.hpp | 5 ++-- extras/Hadrons/VirtualMachine.cc | 2 +- extras/Hadrons/VirtualMachine.hpp | 2 +- scripts/copyright | 5 ++-- 59 files changed, 238 insertions(+), 108 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 9a3366d4..6d5d6776 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Application.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp index 4b2ce77b..8cd15433 100644 --- a/extras/Hadrons/Application.hpp +++ b/extras/Hadrons/Application.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: 
extras/Hadrons/Application.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc index 6de13e86..82b0dda1 100644 --- a/extras/Hadrons/Environment.cc +++ b/extras/Hadrons/Environment.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Environment.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp index 7f1bc26d..e9bfffe1 100644 --- a/extras/Hadrons/Environment.hpp +++ b/extras/Hadrons/Environment.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Environment.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Exceptions.cc b/extras/Hadrons/Exceptions.cc index bf532c21..eedc03b1 100644 --- a/extras/Hadrons/Exceptions.cc +++ b/extras/Hadrons/Exceptions.cc @@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Exceptions.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -54,4 +54,4 @@ CONST_EXC(Io, Runtime("IO error: " + msg, loc)) CONST_EXC(Memory, Runtime("memory error: " + msg, loc)) CONST_EXC(Parsing, Runtime("parsing error: " + msg, loc)) CONST_EXC(Program, Runtime("program error: " + msg, loc)) -CONST_EXC(System, Runtime("system error: " + msg, loc)) \ No newline at end of file +CONST_EXC(System, Runtime("system error: " + msg, loc)) diff --git a/extras/Hadrons/Exceptions.hpp b/extras/Hadrons/Exceptions.hpp index 8f04ab41..ab588e5e 100644 --- a/extras/Hadrons/Exceptions.hpp +++ b/extras/Hadrons/Exceptions.hpp @@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Exceptions.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Factory.hpp b/extras/Hadrons/Factory.hpp index 65ce03ca..705a639e 100644 --- a/extras/Hadrons/Factory.hpp +++ b/extras/Hadrons/Factory.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Factory.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/GeneticScheduler.hpp b/extras/Hadrons/GeneticScheduler.hpp index f199f1ed..9a6476c3 100644 --- a/extras/Hadrons/GeneticScheduler.hpp +++ b/extras/Hadrons/GeneticScheduler.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/GeneticScheduler.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Global.cc b/extras/Hadrons/Global.cc index 130ede96..fc41424c 100644 --- a/extras/Hadrons/Global.cc +++ b/extras/Hadrons/Global.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Global.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index ebfe94dc..1b4e5f9a 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Global.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under 
the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Graph.hpp b/extras/Hadrons/Graph.hpp index a9c240fa..67694aa8 100644 --- a/extras/Hadrons/Graph.hpp +++ b/extras/Hadrons/Graph.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Graph.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/HadronsXmlRun.cc b/extras/Hadrons/HadronsXmlRun.cc index 0dff8f9a..07eb096e 100644 --- a/extras/Hadrons/HadronsXmlRun.cc +++ b/extras/Hadrons/HadronsXmlRun.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/HadronsXmlRun.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/HadronsXmlSchedule.cc b/extras/Hadrons/HadronsXmlSchedule.cc index a8ca9a63..6b167690 100644 --- a/extras/Hadrons/HadronsXmlSchedule.cc +++ b/extras/Hadrons/HadronsXmlSchedule.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/HadronsXmlSchedule.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Module.cc b/extras/Hadrons/Module.cc index e5ef0fe4..54978f93 100644 --- a/extras/Hadrons/Module.cc +++ b/extras/Hadrons/Module.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Module.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index b71f779d..2ba425e4 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Module.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/ModuleFactory.hpp b/extras/Hadrons/ModuleFactory.hpp index 48ab305c..d5c703fa 100644 --- a/extras/Hadrons/ModuleFactory.hpp +++ b/extras/Hadrons/ModuleFactory.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/ModuleFactory.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 3ae2f9a7..7f7c5dc7 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -2,7 +2,7 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: Modules.hpp +Source file: extras/Hadrons/Modules.hpp Copyright (C) 2015-2018 diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index d99f1165..4dfd06cf 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MAction/DWF.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index 8ef755bb..6467b3ee 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: 
extras/Hadrons/Modules/MAction/Wilson.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index 1ef2e257..625c7108 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Baryon.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index ef50061c..3d08f0eb 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/DiscLoop.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index fb9a9d4b..68701aeb 100644 --- a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Gamma3pt.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 46bbdb2e..5cf504e3 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -4,12 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Meson.hpp -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli - Andrew Lawson +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index 556450a8..2801d88c 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WardIdentity.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp 
b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp index 7df40370..9d8ada98 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc index 43dfa609..b79c09e7 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp index 3a2b9309..24f39f6c 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc index 8a7113e3..e66b6ee7 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp index eb5abe3c..c4cd66f1 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc index 18423f3e..e0a00472 100644 --- 
a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp index f26d4636..5de2a751 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 05b3d17a..33787a0b 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -4,12 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MFermion/GaugeProp.hpp -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli - Andrew Lawson +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MGauge/Random.cc b/extras/Hadrons/Modules/MGauge/Random.cc index 97afd338..962fc243 100644 --- a/extras/Hadrons/Modules/MGauge/Random.cc +++ b/extras/Hadrons/Modules/MGauge/Random.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Random.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MGauge/Random.hpp b/extras/Hadrons/Modules/MGauge/Random.hpp index a07130e4..51a08dbb 100644 --- a/extras/Hadrons/Modules/MGauge/Random.hpp +++ b/extras/Hadrons/Modules/MGauge/Random.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Random.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MGauge/StochEm.cc b/extras/Hadrons/Modules/MGauge/StochEm.cc index c5318573..21b7f626 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.cc +++ b/extras/Hadrons/Modules/MGauge/StochEm.cc @@ -4,9 +4,9 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/StochEm.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MGauge/StochEm.hpp b/extras/Hadrons/Modules/MGauge/StochEm.hpp index bacb5172..87b70880 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.hpp +++ b/extras/Hadrons/Modules/MGauge/StochEm.hpp @@ -4,9 +4,9 @@ Grid physics library, 
www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/StochEm.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index 8bee1ecc..38b5f3aa 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Unit.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MGauge/Unit.hpp b/extras/Hadrons/Modules/MGauge/Unit.hpp index c1650cc7..d6ce5a6b 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.hpp +++ b/extras/Hadrons/Modules/MGauge/Unit.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Unit.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MIO/LoadNersc.cc b/extras/Hadrons/Modules/MIO/LoadNersc.cc index 2c35d2e1..f20606fc 100644 --- a/extras/Hadrons/Modules/MIO/LoadNersc.cc +++ b/extras/Hadrons/Modules/MIO/LoadNersc.cc @@ -2,10 +2,11 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: LoadNersc.cc +Source file: extras/Hadrons/Modules/MIO/LoadNersc.cc Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MIO/LoadNersc.hpp b/extras/Hadrons/Modules/MIO/LoadNersc.hpp index 5bd251c3..d6742e1e 100644 --- a/extras/Hadrons/Modules/MIO/LoadNersc.hpp +++ b/extras/Hadrons/Modules/MIO/LoadNersc.hpp @@ -2,10 +2,11 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: LoadNersc.hpp +Source file: extras/Hadrons/Modules/MIO/LoadNersc.hpp Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp index 512c731a..e61bf163 100644 --- a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp +++ b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MLoop/NoiseLoop.hpp -Copyright (C) 2016 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.cc b/extras/Hadrons/Modules/MScalar/ChargedProp.cc index 6cb75a28..da82617f 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.cc +++ b/extras/Hadrons/Modules/MScalar/ChargedProp.cc @@ -1,3 +1,31 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalar/ChargedProp.cc + +Copyright (C) 2015-2018 + +Author: Antonin Portelli +Author: James Harrison + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published 
by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #include #include diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp index cfcce28e..4d43aec2 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp +++ b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalar/ChargedProp.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #ifndef Hadrons_MScalar_ChargedProp_hpp_ #define Hadrons_MScalar_ChargedProp_hpp_ diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.cc b/extras/Hadrons/Modules/MScalar/FreeProp.cc index 924db288..ee86b9db 100644 --- a/extras/Hadrons/Modules/MScalar/FreeProp.cc +++ b/extras/Hadrons/Modules/MScalar/FreeProp.cc @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalar/FreeProp.cc + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #include #include diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.hpp b/extras/Hadrons/Modules/MScalar/FreeProp.hpp index 6b956134..df17f44e 100644 --- a/extras/Hadrons/Modules/MScalar/FreeProp.hpp +++ b/extras/Hadrons/Modules/MScalar/FreeProp.hpp @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalar/FreeProp.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #ifndef Hadrons_MScalar_FreeProp_hpp_ #define Hadrons_MScalar_FreeProp_hpp_ diff --git a/extras/Hadrons/Modules/MScalar/Scalar.hpp b/extras/Hadrons/Modules/MScalar/Scalar.hpp index db702ff2..7272f1b3 100644 --- a/extras/Hadrons/Modules/MScalar/Scalar.hpp +++ b/extras/Hadrons/Modules/MScalar/Scalar.hpp @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalar/Scalar.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #ifndef Hadrons_Scalar_hpp_ #define Hadrons_Scalar_hpp_ diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp index 43be3009..c5f6eff0 100644 --- a/extras/Hadrons/Modules/MSink/Point.hpp +++ b/extras/Hadrons/Modules/MSink/Point.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSink/Point.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSink/Smear.hpp b/extras/Hadrons/Modules/MSink/Smear.hpp index e85ab263..e72dece0 100644 --- a/extras/Hadrons/Modules/MSink/Smear.hpp +++ b/extras/Hadrons/Modules/MSink/Smear.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSink/Smear.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp index bb4f3f62..54c0f2d8 100644 --- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp +++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSolver/RBPrecCG.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp index 1d8241cf..ac6df252 100644 --- a/extras/Hadrons/Modules/MSource/Point.hpp +++ b/extras/Hadrons/Modules/MSource/Point.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSource/Point.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSource/SeqConserved.hpp b/extras/Hadrons/Modules/MSource/SeqConserved.hpp index 3e8ef457..ee8d8d56 100644 --- a/extras/Hadrons/Modules/MSource/SeqConserved.hpp +++ b/extras/Hadrons/Modules/MSource/SeqConserved.hpp @@ -2,11 +2,12 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: extras/Hadrons/Modules/MContraction/SeqConserved.hpp +Source file: extras/Hadrons/Modules/MSource/SeqConserved.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSource/SeqGamma.hpp b/extras/Hadrons/Modules/MSource/SeqGamma.hpp index abad5ace..40eda29f 100644 --- a/extras/Hadrons/Modules/MSource/SeqGamma.hpp +++ b/extras/Hadrons/Modules/MSource/SeqGamma.hpp @@ -4,11 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSource/SeqGamma.hpp -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 +Copyright 
(C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSource/Wall.hpp b/extras/Hadrons/Modules/MSource/Wall.hpp index 9d5f1f46..5853b11a 100644 --- a/extras/Hadrons/Modules/MSource/Wall.hpp +++ b/extras/Hadrons/Modules/MSource/Wall.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSource/Wall.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MSource/Z2.hpp b/extras/Hadrons/Modules/MSource/Z2.hpp index 3593cb34..4414e37f 100644 --- a/extras/Hadrons/Modules/MSource/Z2.hpp +++ b/extras/Hadrons/Modules/MSource/Z2.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MSource/Z2.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index 0647884c..6ee1e3c2 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp index fd53eab8..df35d887 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/VirtualMachine.cc b/extras/Hadrons/VirtualMachine.cc index e0035bc1..d47bafb7 100644 --- a/extras/Hadrons/VirtualMachine.cc +++ b/extras/Hadrons/VirtualMachine.cc @@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/VirtualMachine.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp index 3af7d914..19a74f94 100644 --- a/extras/Hadrons/VirtualMachine.hpp +++ b/extras/Hadrons/VirtualMachine.hpp @@ -4,7 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/VirtualMachine.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/scripts/copyright b/scripts/copyright index cc9ed6e5..a461b54c 100755 --- a/scripts/copyright +++ b/scripts/copyright @@ -11,8 +11,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: $1 -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 EOF @@ -60,4 
+59,4 @@ shift done - +rm message tmp.fil From e8ac75055c0566fc56663240e07d2d8340ad5799 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 27 Dec 2017 14:24:29 +0100 Subject: [PATCH 121/145] Hadrons: binary configuration loader --- extras/Hadrons/Global.hpp | 6 +- extras/Hadrons/Modules.hpp | 1 + extras/Hadrons/Modules/MIO/LoadBinary.hpp | 140 ++++++++++++++++++++++ extras/Hadrons/modules.inc | 3 +- 4 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 extras/Hadrons/Modules/MIO/LoadBinary.hpp diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index 1b4e5f9a..c68edafd 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -61,6 +61,9 @@ using Grid::operator<<; #ifndef SIMPL #define SIMPL ScalarImplCR #endif +#ifndef GIMPL +#define GIMPL GimplTypesR +#endif BEGIN_HADRONS_NAMESPACE @@ -84,7 +87,8 @@ typedef std::function SolverFn##suffix; #define SINK_TYPE_ALIASES(suffix)\ -typedef std::function SinkFn##suffix; +typedef std::function SinkFn##suffix; #define FGS_TYPE_ALIASES(FImpl, suffix)\ FERM_TYPE_ALIASES(FImpl, suffix)\ diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 7f7c5dc7..e50d2b0b 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -57,3 +57,4 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include #include +#include diff --git a/extras/Hadrons/Modules/MIO/LoadBinary.hpp b/extras/Hadrons/Modules/MIO/LoadBinary.hpp new file mode 100644 index 00000000..5e45dfd8 --- /dev/null +++ b/extras/Hadrons/Modules/MIO/LoadBinary.hpp @@ -0,0 +1,140 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MIO/LoadBinary.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef Hadrons_MIO_LoadBinary_hpp_ +#define Hadrons_MIO_LoadBinary_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +/****************************************************************************** + * Load a binary configurations * + ******************************************************************************/ +BEGIN_MODULE_NAMESPACE(MIO) + +class LoadBinaryPar: Serializable +{ +public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LoadBinaryPar, + std::string, file, + std::string, format); +}; + +template +class TLoadBinary: public Module +{ +public: + typedef typename Impl::Field Field; + typedef typename Impl::Simd Simd; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + typedef typename sobj::DoublePrecision sobj_double; + typedef BinarySimpleMunger Munger; +public: + // constructor + TLoadBinary(const std::string name); + // destructor + virtual ~TLoadBinary(void) = default; + // dependency relation + virtual std::vector getInput(void); + virtual std::vector getOutput(void); + // setup + virtual void setup(void); + // execution + virtual void execute(void); +}; + +MODULE_REGISTER_NS(LoadBinary, TLoadBinary, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU2, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU3, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU4, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU5, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU6, TLoadBinary>, MIO); + +/****************************************************************************** + * TLoadBinary implementation * + ******************************************************************************/ +// constructor ///////////////////////////////////////////////////////////////// +template +TLoadBinary::TLoadBinary(const std::string name) +: Module(name) +{} + +// dependencies/products /////////////////////////////////////////////////////// +template +std::vector TLoadBinary::getInput(void) +{ + std::vector in; + + return in; +} + +template +std::vector TLoadBinary::getOutput(void) +{ + std::vector out = {getName()}; + + return out; +} + +// setup /////////////////////////////////////////////////////////////////////// +template +void TLoadBinary::setup(void) +{ + envCreateLat(Field, getName()); +} + +// execution /////////////////////////////////////////////////////////////////// +template +void TLoadBinary::execute(void) +{ + Munger munge; + uint32_t nersc_csum, scidac_csuma, scidac_csumb; + auto &U = envGet(Field, getName()); + std::string filename = par().file + "." 
+ + std::to_string(vm().getTrajectory()); + + LOG(Message) << "Loading " << par().format + << "binary configuration from file '" << filename + << "'" << std::endl; + BinaryIO::readLatticeObject(U, filename, munge, 0, + par().format, nersc_csum, + scidac_csuma, scidac_csumb); + LOG(Message) << "Checksums:" << std::endl; + LOG(Message) << " NERSC " << nersc_csum << std::endl; + LOG(Message) << " SciDAC A " << scidac_csuma << std::endl; + LOG(Message) << " SciDAC B " << scidac_csumb << std::endl; +} + +END_MODULE_NAMESPACE + +END_HADRONS_NAMESPACE + +#endif // Hadrons_MIO_LoadBinary_hpp_ diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 85fa0971..6e1ef6dc 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -39,5 +39,6 @@ modules_hpp =\ Modules/MScalar/ChargedProp.hpp \ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ - Modules/MIO/LoadNersc.hpp + Modules/MIO/LoadNersc.hpp \ + Modules/MIO/LoadBinary.hpp From 0d612039ed4c16c5dc0234bc243a631f6e7173b8 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 29 Dec 2017 16:58:23 +0100 Subject: [PATCH 122/145] Hadrons: prettier Grid logging (non-intrusive) --- extras/Hadrons/Application.cc | 1 + extras/Hadrons/Global.cc | 15 +++++++++++++++ extras/Hadrons/Global.hpp | 2 ++ extras/Hadrons/HadronsXmlRun.cc | 6 ------ extras/Hadrons/HadronsXmlSchedule.cc | 6 ------ lib/log/Log.h | 9 ++++++++- 6 files changed, 26 insertions(+), 13 deletions(-) diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc index 6d5d6776..7ba98ade 100644 --- a/extras/Hadrons/Application.cc +++ b/extras/Hadrons/Application.cc @@ -42,6 +42,7 @@ using namespace Hadrons; // constructors //////////////////////////////////////////////////////////////// Application::Application(void) { + initLogger(); LOG(Message) << "Modules available:" << std::endl; auto list = ModuleFactory::getInstance().getBuilderList(); for (auto &m: list) diff --git a/extras/Hadrons/Global.cc b/extras/Hadrons/Global.cc index fc41424c..942a4243 100644 --- a/extras/Hadrons/Global.cc +++ b/extras/Hadrons/Global.cc @@ -38,6 +38,21 @@ HadronsLogger Hadrons::HadronsLogMessage(1,"Message"); HadronsLogger Hadrons::HadronsLogIterative(1,"Iterative"); HadronsLogger Hadrons::HadronsLogDebug(1,"Debug"); +void Hadrons::initLogger(void) +{ + auto w = std::string("Hadrons").length(); + GridLogError.setTopWidth(w); + GridLogWarning.setTopWidth(w); + GridLogMessage.setTopWidth(w); + GridLogIterative.setTopWidth(w); + GridLogDebug.setTopWidth(w); + HadronsLogError.Active(GridLogError.isActive()); + HadronsLogWarning.Active(GridLogWarning.isActive()); + HadronsLogMessage.Active(GridLogMessage.isActive()); + HadronsLogIterative.Active(GridLogIterative.isActive()); + HadronsLogDebug.Active(GridLogDebug.isActive()); +} + // type utilities ////////////////////////////////////////////////////////////// constexpr unsigned int maxNameSize = 1024u; diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index c68edafd..274e1934 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -112,6 +112,8 @@ extern HadronsLogger HadronsLogMessage; extern HadronsLogger HadronsLogIterative; extern HadronsLogger HadronsLogDebug; +void initLogger(void); + // singleton pattern #define SINGLETON(name)\ public:\ diff --git a/extras/Hadrons/HadronsXmlRun.cc b/extras/Hadrons/HadronsXmlRun.cc index 07eb096e..680f234b 100644 --- a/extras/Hadrons/HadronsXmlRun.cc +++ b/extras/Hadrons/HadronsXmlRun.cc @@ -54,12 +54,6 @@ int main(int argc, char *argv[]) // 
initialization Grid_init(&argc, &argv); - HadronsLogError.Active(GridLogError.isActive()); - HadronsLogWarning.Active(GridLogWarning.isActive()); - HadronsLogMessage.Active(GridLogMessage.isActive()); - HadronsLogIterative.Active(GridLogIterative.isActive()); - HadronsLogDebug.Active(GridLogDebug.isActive()); - LOG(Message) << "Grid initialized" << std::endl; // execution Application application(parameterFileName); diff --git a/extras/Hadrons/HadronsXmlSchedule.cc b/extras/Hadrons/HadronsXmlSchedule.cc index 6b167690..55f3b231 100644 --- a/extras/Hadrons/HadronsXmlSchedule.cc +++ b/extras/Hadrons/HadronsXmlSchedule.cc @@ -48,12 +48,6 @@ int main(int argc, char *argv[]) // initialization Grid_init(&argc, &argv); - HadronsLogError.Active(GridLogError.isActive()); - HadronsLogWarning.Active(GridLogWarning.isActive()); - HadronsLogMessage.Active(GridLogMessage.isActive()); - HadronsLogIterative.Active(GridLogIterative.isActive()); - HadronsLogDebug.Active(GridLogDebug.isActive()); - LOG(Message) << "Grid initialized" << std::endl; // execution Application application; diff --git a/lib/log/Log.h b/lib/log/Log.h index ddff4c1d..011a7250 100644 --- a/lib/log/Log.h +++ b/lib/log/Log.h @@ -86,6 +86,7 @@ protected: Colours &Painter; int active; int timing_mode; + int topWidth{-1}; static int timestamp; std::string name, topName; std::string COLOUR; @@ -124,11 +125,17 @@ public: Reset(); } } + void setTopWidth(const int w) {topWidth = w;} friend std::ostream& operator<< (std::ostream& stream, Logger& log){ if ( log.active ) { - stream << log.background()<< std::left << log.topName << log.background()<< " : "; + stream << log.background()<< std::left; + if (log.topWidth > 0) + { + stream << std::setw(log.topWidth); + } + stream << log.topName << log.background()<< " : "; stream << log.colour() << std::left << log.name << log.background() << " : "; if ( log.timestamp ) { log.StopWatch->Stop(); From dd62f2f371cce3236f4e38dfd8502b0a68fa01ba Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Fri, 29 Dec 2017 16:58:44 +0100 Subject: [PATCH 123/145] Hadrons: log message fix --- extras/Hadrons/Modules/MIO/LoadBinary.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/Hadrons/Modules/MIO/LoadBinary.hpp b/extras/Hadrons/Modules/MIO/LoadBinary.hpp index 5e45dfd8..d9a8b5f8 100644 --- a/extras/Hadrons/Modules/MIO/LoadBinary.hpp +++ b/extras/Hadrons/Modules/MIO/LoadBinary.hpp @@ -122,7 +122,7 @@ void TLoadBinary::execute(void) + std::to_string(vm().getTrajectory()); LOG(Message) << "Loading " << par().format - << "binary configuration from file '" << filename + << " binary configuration from file '" << filename << "'" << std::endl; BinaryIO::readLatticeObject(U, filename, munge, 0, par().format, nersc_csum, From 1a0163f45c43a304271bb9e8a99052c012b4b736 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:26:11 +0000 Subject: [PATCH 124/145] Updated to do list --- TODO | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/TODO b/TODO index 83bfda5e..95ccf1df 100644 --- a/TODO +++ b/TODO @@ -4,17 +4,17 @@ TODO: Large item work list: 1)- BG/Q port and check ; Andrew says ok. 
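With the logging patch above, any driver that constructs an Application gets the prettier output for free, since the constructor now calls initLogger(); only programs that use the Hadrons log channels without an Application need the one call themselves, which is what replaces the five HadronsLog*.Active(...) lines deleted from HadronsXmlRun.cc and HadronsXmlSchedule.cc. A minimal sketch (include paths assumed to follow the usual Hadrons convention; the initLogger()/LOG() usage is taken from the hunks above):

#include <Grid/Grid.h>
#include <Grid/Hadrons/Global.hpp>

using namespace Grid;
using namespace Hadrons;

int main(int argc, char *argv[])
{
    Grid_init(&argc, &argv);
    initLogger();   // mirrors Grid's active channels and aligns the "Grid"/"Hadrons" prefixes
    LOG(Message) << "Grid initialized" << std::endl;
    Grid_finalize();

    return 0;
}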
-2)- Christoph's local basis expansion Lanczos --- 3a)- RNG I/O in ILDG/SciDAC (minor) -3b)- Precision conversion and sort out localConvert <-- partial/easy 3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet 4)- Physical propagator interface -5)- Conserved currents 6)- Multigrid Wilson and DWF, compare to other Multigrid implementations 7)- HDCR resume - +---------------------------- Recent DONE +-- Precision conversion and sort out localConvert <-- partial/easy +-- Conserved currents (Andrew) +-- Split grid +-- Christoph's local basis expansion Lanczos -- MultiRHS with spread out extra dim -- Go through filesystem with SciDAC I/O ; <-- DONE ; bmark cori -- Lanczos Remove DenseVector, DenseMatrix; Use Eigen instead. <-- DONE -- GaugeFix into central location <-- DONE From 7b3ed160aa22c11cfb8e5acbd7e4414a4d718305 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:26:48 +0000 Subject: [PATCH 125/145] Rationalise MPI options --- configure.ac | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/configure.ac b/configure.ac index 496f7fd7..468d9d5f 100644 --- a/configure.ac +++ b/configure.ac @@ -337,15 +337,11 @@ case ${ac_PRECISION} in esac ###################### Shared memory allocation technique under MPI3 -AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmget|shmopen|hugetlbfs], +AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|hugetlbfs], [Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=shmopen]) case ${ac_SHM} in - shmget) - AC_DEFINE([GRID_MPI3_SHMGET],[1],[GRID_MPI3_SHMGET] ) - ;; - shmopen) AC_DEFINE([GRID_MPI3_SHMOPEN],[1],[GRID_MPI3_SHMOPEN] ) ;; @@ -367,7 +363,7 @@ AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path], AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing]) ############### communication type selection -AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto|mpi3|mpi3-auto|shmem], +AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto], [Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none]) case ${ac_COMMS} in @@ -375,22 +371,10 @@ case ${ac_COMMS} in AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] ) comms_type='none' ;; - mpi3*) + mpi*) AC_DEFINE([GRID_COMMS_MPI3],[1],[GRID_COMMS_MPI3] ) comms_type='mpi3' ;; - mpit) - AC_DEFINE([GRID_COMMS_MPIT],[1],[GRID_COMMS_MPIT] ) - comms_type='mpit' - ;; - mpi*) - AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] ) - comms_type='mpi' - ;; - shmem) - AC_DEFINE([GRID_COMMS_SHMEM],[1],[GRID_COMMS_SHMEM] ) - comms_type='shmem' - ;; *) AC_MSG_ERROR([${ac_COMMS} unsupported --enable-comms option]); ;; From 9b32d51cd1a7ec710239ed280a94a3d836117e7a Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:27:14 +0000 Subject: [PATCH 126/145] Simplify comms layer proliferatoin --- benchmarks/Benchmark_comms.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/benchmarks/Benchmark_comms.cc b/benchmarks/Benchmark_comms.cc index a270e3fa..29ccf96c 100644 --- a/benchmarks/Benchmark_comms.cc +++ b/benchmarks/Benchmark_comms.cc @@ -106,7 +106,7 @@ int main (int argc, char ** argv) for(int i=0;i requests; + std::vector requests; ncomm=0; for(int mu=0;mu<4;mu++){ @@ -202,7 +202,7 @@ int main (int argc, char ** argv) int recv_from_rank; { - std::vector requests; + std::vector requests; Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); Grid.SendToRecvFromBegin(requests, (void *)&xbuf[mu][0], @@ 
-215,7 +215,7 @@ int main (int argc, char ** argv) comm_proc = mpi_layout[mu]-1; { - std::vector requests; + std::vector requests; Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank); Grid.SendToRecvFromBegin(requests, (void *)&xbuf[mu+4][0], @@ -290,7 +290,7 @@ int main (int argc, char ** argv) dbytes=0; ncomm=0; - std::vector requests; + std::vector requests; for(int mu=0;mu<4;mu++){ @@ -383,7 +383,7 @@ int main (int argc, char ** argv) for(int i=0;i requests; + std::vector requests; dbytes=0; ncomm=0; for(int mu=0;mu<4;mu++){ @@ -481,7 +481,7 @@ int main (int argc, char ** argv) for(int i=0;i requests; + std::vector requests; dbytes=0; ncomm=0; From 7eeab7f995332ae2a2ce60c318beb77c449fe0db Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:27:43 +0000 Subject: [PATCH 127/145] Simplify comms layers --- lib/communicator/Communicator.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/communicator/Communicator.h b/lib/communicator/Communicator.h index 09ce50dc..d4ec5a13 100644 --- a/lib/communicator/Communicator.h +++ b/lib/communicator/Communicator.h @@ -28,6 +28,7 @@ Author: Peter Boyle #ifndef GRID_COMMUNICATOR_H #define GRID_COMMUNICATOR_H +#include #include #endif From 6ecf2807237f7b476c495cc97d42cd2c9a1c5c72 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:28:04 +0000 Subject: [PATCH 128/145] Simplify comms layer proliferation --- lib/qcd/action/fermion/WilsonCompressor.h | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/qcd/action/fermion/WilsonCompressor.h b/lib/qcd/action/fermion/WilsonCompressor.h index cc5c3c63..b47700ac 100644 --- a/lib/qcd/action/fermion/WilsonCompressor.h +++ b/lib/qcd/action/fermion/WilsonCompressor.h @@ -265,7 +265,6 @@ public: if ( timer3 ) std::cout << GridLogMessage << " timer3 (commsMergeShm) " < same_node; std::vector surface_list; From 0a68470f9a2042af2c5bc443bc3fdb33bfc58e77 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:28:30 +0000 Subject: [PATCH 129/145] Simplify comms layers --- lib/stencil/Stencil.h | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/stencil/Stencil.h b/lib/stencil/Stencil.h index 887d8a7c..69c010f4 100644 --- a/lib/stencil/Stencil.h +++ b/lib/stencil/Stencil.h @@ -105,7 +105,6 @@ template class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal fill in. 
public: - typedef CartesianCommunicator::CommsRequest_t CommsRequest_t; typedef typename cobj::vector_type vector_type; typedef typename cobj::scalar_type scalar_type; typedef typename cobj::scalar_object scalar_object; From b91282ad46630f006c9c678b33ec0d9448cec8d6 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:28:52 +0000 Subject: [PATCH 130/145] Simplify comms layer proliferation --- lib/util/Init.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 031f8f5a..4f99e491 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -220,11 +220,11 @@ void Grid_init(int *argc,char ***argv) arg= GridCmdOptionPayload(*argv,*argv+*argc,"--shm"); GridCmdOptionInt(arg,MB); uint64_t MB64 = MB; - CartesianCommunicator::MAX_MPI_SHM_BYTES = MB64*1024LL*1024LL; + GlobalSharedMemory::MAX_MPI_SHM_BYTES = MB64*1024LL*1024LL; } if( GridCmdOptionExists(*argv,*argv+*argc,"--shm-hugepages") ){ - CartesianCommunicator::Hugepages = 1; + GlobalSharedMemory::Hugepages = 1; } @@ -392,8 +392,8 @@ void Grid_init(int *argc,char ***argv) Grid_default_latt, Grid_default_mpi); - std::cout << GridLogMessage << "Requesting "<< CartesianCommunicator::MAX_MPI_SHM_BYTES <<" byte stencil comms buffers "< Date: Mon, 8 Jan 2018 11:29:20 +0000 Subject: [PATCH 131/145] Simplify proliferation of comms layers --- scripts/filelist | 2 +- tests/solver/Test_dwf_mrhs_cg_mpi.cc | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/filelist b/scripts/filelist index 8d4b8e1a..74f8e334 100755 --- a/scripts/filelist +++ b/scripts/filelist @@ -6,7 +6,7 @@ home=`pwd` cd $home/lib HFILES=`find . -type f -name '*.h' -not -name '*Hdf5*' -not -path '*/gamma-gen/*' -not -path '*/Old/*' -not -path '*/Eigen/*'` HFILES="$HFILES" -CCFILES=`find . -type f -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*Hdf5*'` +CCFILES=`find . -type f -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'` HPPFILES=`find . 
-type f -name '*.hpp'` echo HFILES=$HFILES $HPPFILES > Make.inc echo >> Make.inc diff --git a/tests/solver/Test_dwf_mrhs_cg_mpi.cc b/tests/solver/Test_dwf_mrhs_cg_mpi.cc index 7e11d8d1..aa36ebbc 100644 --- a/tests/solver/Test_dwf_mrhs_cg_mpi.cc +++ b/tests/solver/Test_dwf_mrhs_cg_mpi.cc @@ -72,14 +72,17 @@ int main (int argc, char ** argv) int nrhs = 1; int me; for(int i=0;i Date: Mon, 8 Jan 2018 11:30:22 +0000 Subject: [PATCH 132/145] Simplify comms layer proliferation --- lib/Makefile.am | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/lib/Makefile.am b/lib/Makefile.am index 6dd7899e..dc33e7cf 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,28 +1,18 @@ extra_sources= extra_headers= -if BUILD_COMMS_MPI - extra_sources+=communicator/Communicator_mpi.cc - extra_sources+=communicator/Communicator_base.cc -endif if BUILD_COMMS_MPI3 extra_sources+=communicator/Communicator_mpi3.cc extra_sources+=communicator/Communicator_base.cc -endif - -if BUILD_COMMS_MPIT - extra_sources+=communicator/Communicator_mpit.cc - extra_sources+=communicator/Communicator_base.cc -endif - -if BUILD_COMMS_SHMEM - extra_sources+=communicator/Communicator_shmem.cc - extra_sources+=communicator/Communicator_base.cc + extra_sources+=communicator/SharedMemoryMPI.cc + extra_sources+=communicator/SharedMemory.cc endif if BUILD_COMMS_NONE extra_sources+=communicator/Communicator_none.cc extra_sources+=communicator/Communicator_base.cc + extra_sources+=communicator/SharedMemoryNone.cc + extra_sources+=communicator/SharedMemory.cc endif if BUILD_HDF5 From 0091eec23a08fd94ed5711d887019d2359e3503a Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:31:32 +0000 Subject: [PATCH 133/145] Simplify communicator cases --- lib/communicator/Communicator_base.h | 112 ++++----------------------- 1 file changed, 13 insertions(+), 99 deletions(-) diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index 548515cd..a9b99c17 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -32,117 +32,33 @@ Author: Peter Boyle /////////////////////////////////// // Processor layout information /////////////////////////////////// -#ifdef GRID_COMMS_MPI -#include -#endif -#ifdef GRID_COMMS_MPI3 -#include -#endif -#ifdef GRID_COMMS_MPIT -#include -#endif -#ifdef GRID_COMMS_SHMEM -#include -#endif +#include namespace Grid { -class CartesianCommunicator { - public: +class CartesianCommunicator : public SharedMemory { +public: //////////////////////////////////////////// - // Isend/Irecv/Wait, or Sendrecv blocking + // Policies //////////////////////////////////////////// enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential }; static CommunicatorPolicy_t CommunicatorPolicy; static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; } - - /////////////////////////////////////////// - // Up to 65536 ranks per node adequate for now - // 128MB shared memory for comms enought for 48^4 local vol comms - // Give external control (command line override?) of this - /////////////////////////////////////////// - static const int MAXLOG2RANKSPERNODE = 16; - static uint64_t MAX_MPI_SHM_BYTES; static int nCommThreads; - // use explicit huge pages - static int Hugepages; + //////////////////////////////////////////// // Communicator should know nothing of the physics grid, only processor grid. 
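The state stripped out of CartesianCommunicator in this header does not disappear: the shared-memory size and hugepage knobs become statics on GlobalSharedMemory (already used by the Init.cc hunk above), and the buffers themselves belong to the SharedMemory base class the communicator now inherits from. As far as these hunks show, the user-visible configuration reduces to the sketch below (names are taken from the patches; anything more about GlobalSharedMemory is defined in SharedMemory.h, added later in this series):

// Effect of the --shm 1024 and --shm-hugepages command-line flags handled in Init.cc:
GlobalSharedMemory::MAX_MPI_SHM_BYTES = 1024LL * 1024LL * 1024LL;  // 1 GB of stencil comms buffers
GlobalSharedMemory::Hugepages         = 1;                         // back the buffers with huge pages
// CartesianCommunicator itself keeps only the processor-grid state:
// _processors, _processor_coor, communicator and communicator_halo.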
+ //////////////////////////////////////////// int _Nprocessors; // How many in all std::vector _processors; // Which dimensions get relayed out over processors lanes. int _processor; // linear processor rank std::vector _processor_coor; // linear processor coordinate - unsigned long _ndimension; - -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT) - static MPI_Comm communicator_world; - - MPI_Comm communicator; - std::vector communicator_halo; - - typedef MPI_Request CommsRequest_t; - -#else - typedef int CommsRequest_t; -#endif - - - //////////////////////////////////////////////////////////////////// - // Helper functionality for SHM Windows common to all other impls - //////////////////////////////////////////////////////////////////// - // Longer term; drop this in favour of a master / slave model with - // cartesian communicator on a subset of ranks, slave ranks controlled - // by group leader with data xfer via shared memory - //////////////////////////////////////////////////////////////////// -#ifdef GRID_COMMS_MPI3 - - static int ShmRank; - static int ShmSize; - static int GroupRank; - static int GroupSize; - static int WorldRank; - static int WorldSize; - - std::vector WorldDims; - std::vector GroupDims; - std::vector ShmDims; - - std::vector GroupCoor; - std::vector ShmCoor; - std::vector WorldCoor; - - static std::vector GroupRanks; - static std::vector MyGroup; - static int ShmSetup; - static MPI_Win ShmWindow; - static MPI_Comm ShmComm; - - std::vector LexicographicToWorldRank; - - static std::vector ShmCommBufs; - -#else - static void ShmInitGeneric(void); - static commVector ShmBufStorageVector; -#endif - - ///////////////////////////////// - // Grid information and queries - // Implemented in Communicator_base.C - ///////////////////////////////// - static void * ShmCommBuf; - - - size_t heap_top; - size_t heap_bytes; - - void *ShmBufferSelf(void); - void *ShmBuffer(int rank); - void *ShmBufferTranslate(int rank,void * local_p); - void *ShmBufferMalloc(size_t bytes); - void ShmBufferFreeAll(void) ; + unsigned long _ndimension; + static Grid_MPI_Comm communicator_world; + Grid_MPI_Comm communicator; + std::vector communicator_halo; //////////////////////////////////////////////// // Must call in Grid startup @@ -158,13 +74,13 @@ class CartesianCommunicator { virtual ~CartesianCommunicator(); private: -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) || defined (GRID_COMMS_MPI3) + //////////////////////////////////////////////// // Private initialise from an MPI communicator // Can use after an MPI_Comm_split, but hidden from user so private //////////////////////////////////////////////// - void InitFromMPICommunicator(const std::vector &processors, MPI_Comm communicator_base); -#endif + void InitFromMPICommunicator(const std::vector &processors, Grid_MPI_Comm communicator_base); + public: //////////////////////////////////////////////////////////////////////////////////////// @@ -181,8 +97,6 @@ class CartesianCommunicator { const std::vector & ThisProcessorCoor(void) ; const std::vector & ProcessorGrid(void) ; int ProcessorCount(void) ; - int NodeCount(void) ; - int RankCount(void) ; //////////////////////////////////////////////////////////////////////////////// // very VERY rarely (Log, serial RNG) we need world without a grid From 357badce5ed7efac2df4c3f5bc5cf71815334c3a Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:32:16 +0000 Subject: [PATCH 134/145] Simplify communicator case proliferation --- 
lib/communicator/Communicator_base.cc | 288 -------------------------- 1 file changed, 288 deletions(-) diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index 3e561405..edbf26af 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -36,33 +36,9 @@ namespace Grid { /////////////////////////////////////////////////////////////// // Info that is setup once and indept of cartesian layout /////////////////////////////////////////////////////////////// -void * CartesianCommunicator::ShmCommBuf; -uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL; CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent; int CartesianCommunicator::nCommThreads = -1; -int CartesianCommunicator::Hugepages = 0; - -///////////////////////////////// -// Alloc, free shmem region -///////////////////////////////// -void *CartesianCommunicator::ShmBufferMalloc(size_t bytes){ - // bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes - void *ptr = (void *)heap_top; - heap_top += bytes; - heap_bytes+= bytes; - if (heap_bytes >= MAX_MPI_SHM_BYTES) { - std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < row(_ndimension,1); - assert(dim>=0 && dim<_ndimension); - - // Split the communicator - row[dim] = _processors[dim]; - - int me; - CartesianCommunicator Comm(row,*this,me); - Comm.AllToAll(in,out,words,bytes); -} -void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) -{ - // MPI is a pain and uses "int" arguments - // 64*64*64*128*16 == 500Million elements of data. - // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. - // (Turns up on 32^3 x 64 Gparity too) - MPI_Datatype object; - int iwords; - int ibytes; - iwords = words; - ibytes = bytes; - assert(words == iwords); // safe to cast to int ? - assert(bytes == ibytes); // safe to cast to int ? - MPI_Type_contiguous(ibytes,MPI_BYTE,&object); - MPI_Type_commit(&object); - MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); - MPI_Type_free(&object); -} -#endif - -#if defined( GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) -{ - _ndimension = processors.size(); - - int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension); - std::vector parent_processor_coor(_ndimension,0); - std::vector parent_processors (_ndimension,1); - - // Can make 5d grid from 4d etc... 
- int pad = _ndimension-parent_ndimension; - for(int d=0;d ccoor(_ndimension); // coor within subcommunicator - std::vector scoor(_ndimension); // coor of split within parent - std::vector ssize(_ndimension); // coor of split within parent - - for(int d=0;d<_ndimension;d++){ - ccoor[d] = parent_processor_coor[d] % processors[d]; - scoor[d] = parent_processor_coor[d] / processors[d]; - ssize[d] = parent_processors[d] / processors[d]; - } - int crank; // rank within subcomm ; srank is rank of subcomm within blocks of subcomms - // Mpi uses the reverse Lexico convention to us - Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); - Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); - - MPI_Comm comm_split; - if ( Nchild > 1 ) { - - if(0){ - std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors, MPI_Comm communicator_base) -{ - _ndimension = processors.size(); - _processor_coor.resize(_ndimension); - - ///////////////////////////////// - // Count the requested nodes - ///////////////////////////////// - _Nprocessors=1; - _processors = processors; - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - std::vector periodic(_ndimension,1); - MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator); - MPI_Comm_rank(communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); - - if ( 0 && (communicator_base != communicator_world) ) { - std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"< &processors) -{ - InitFromMPICommunicator(processors,communicator_world); -} - -#endif - -#if !defined( GRID_COMMS_MPI3) -int CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; -int CartesianCommunicator::RankCount(void) { return ProcessorCount();}; -#endif - -#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT) -double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes, int dir) -{ - std::vector list; - // Discard the "dir" - SendToRecvFromBegin (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); - SendToRecvFromComplete(list); - return 2.0*bytes; -} -double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes, int dir) -{ - // Discard the "dir" - SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); - return 2.0*bytes; -} -void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall,int dir) -{ - SendToRecvFromComplete(waitall); -} -#endif - -#if !defined( GRID_COMMS_MPI3) - -void CartesianCommunicator::StencilBarrier(void){}; - -commVector CartesianCommunicator::ShmBufStorageVector; - -void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; } - -void *CartesianCommunicator::ShmBuffer(int rank) { - return NULL; -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { - return NULL; -} -void CartesianCommunicator::ShmInitGeneric(void){ -#if 1 - int mmap_flag =0; -#ifdef MAP_ANONYMOUS - mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; -#endif -#ifdef MAP_ANON - mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; -#endif -#ifdef MAP_HUGETLB - if ( Hugepages ) mmap_flag |= MAP_HUGETLB; -#endif - ShmCommBuf =(void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); - if (ShmCommBuf == (void *)MAP_FAILED) { - 
perror("mmap failed "); - exit(EXIT_FAILURE); - } -#ifdef MADV_HUGEPAGE - if (!Hugepages ) madvise(ShmCommBuf,MAX_MPI_SHM_BYTES,MADV_HUGEPAGE); -#endif -#else - ShmBufStorageVector.resize(MAX_MPI_SHM_BYTES); - ShmCommBuf=(void *)&ShmBufStorageVector[0]; -#endif - bzero(ShmCommBuf,MAX_MPI_SHM_BYTES); -} - -#endif } From 9947cfbf14de0bc323c0a791f21267aadf9488ab Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:33:01 +0000 Subject: [PATCH 135/145] Simplify number of communicator cases --- lib/communicator/Communicator_mpi3.cc | 751 ++++++++------------------ 1 file changed, 213 insertions(+), 538 deletions(-) diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index e41749d4..ef47d617 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -26,89 +26,20 @@ Author: Peter Boyle *************************************************************************************/ /* END LEGAL */ #include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef HAVE_NUMAIF_H -#include -#endif - +#include namespace Grid { -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -int CartesianCommunicator::ShmSetup = 0; +Grid_MPI_Comm CartesianCommunicator::communicator_world; -int CartesianCommunicator::ShmRank; -int CartesianCommunicator::ShmSize; -int CartesianCommunicator::GroupRank; -int CartesianCommunicator::GroupSize; -int CartesianCommunicator::WorldRank; -int CartesianCommunicator::WorldSize; - -MPI_Comm CartesianCommunicator::communicator_world; -MPI_Comm CartesianCommunicator::ShmComm; -MPI_Win CartesianCommunicator::ShmWindow; - -std::vector CartesianCommunicator::GroupRanks; -std::vector CartesianCommunicator::MyGroup; -std::vector CartesianCommunicator::ShmCommBufs; - -int CartesianCommunicator::NodeCount(void) { return GroupSize;}; -int CartesianCommunicator::RankCount(void) { return WorldSize;}; - - -#undef FORCE_COMMS -void *CartesianCommunicator::ShmBufferSelf(void) +//////////////////////////////////////////// +// First initialise of comms system +//////////////////////////////////////////// +void CartesianCommunicator::Init(int *argc, char ***argv) { - return ShmCommBufs[ShmRank]; -} -void *CartesianCommunicator::ShmBuffer(int rank) -{ - int gpeer = GroupRanks[rank]; -#ifdef FORCE_COMMS - return NULL; -#endif - if (gpeer == MPI_UNDEFINED){ - return NULL; - } else { - return ShmCommBufs[gpeer]; - } -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) -{ - static int count =0; - int gpeer = GroupRanks[rank]; - assert(gpeer!=ShmRank); // never send to self - assert(rank!=WorldRank);// never send to self -#ifdef FORCE_COMMS - return NULL; -#endif - if (gpeer == MPI_UNDEFINED){ - return NULL; - } else { - uint64_t offset = (uint64_t)local_p - (uint64_t)ShmCommBufs[ShmRank]; - uint64_t remote = (uint64_t)ShmCommBufs[gpeer]+offset; - return (void *) remote; - } -} - -void CartesianCommunicator::Init(int *argc, char ***argv) { int flag; int provided; - // mtrace(); MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { @@ -119,487 +50,202 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { Grid_quiesce_nodes(); MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - 
MPI_Comm_rank(communicator_world,&WorldRank); - MPI_Comm_size(communicator_world,&WorldSize); - if ( WorldRank == 0 ) { - std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"< world_ranks(WorldSize); - GroupRanks.resize(WorldSize); - for(int r=0;r()); - int myleader = MyGroup[0]; - - std::vector leaders_1hot(WorldSize,0); - std::vector leaders_group(GroupSize,0); - leaders_1hot [ myleader ] = 1; - - /////////////////////////////////////////////////////////////////// - // global sum leaders over comm world - /////////////////////////////////////////////////////////////////// - int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world); - assert(ierr==0); - /////////////////////////////////////////////////////////////////// - // find the group leaders world rank - /////////////////////////////////////////////////////////////////// - int group=0; - for(int l=0;l shmids(ShmSize); - - if ( ShmRank == 0 ) { - for(int r=0;r coor = _processor_coor; // my coord - assert(std::abs(shift) <_processors[dim]); - - coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,source,_processors); - source = LexicographicToWorldRank[source]; - - coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,dest,_processors); - dest = LexicographicToWorldRank[dest]; - -}// rank is world rank. - + int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); + assert(ierr==0); +} int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) { int rank; - Lexicographic::IndexFromCoor(coor,rank,_processors); - rank = LexicographicToWorldRank[rank]; + int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); + assert(ierr==0); return rank; -}// rank is world rank - +} void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) { - int lr=-1; - for(int r=0;r &processors) +{ + MPI_Comm optimal_comm; + GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm); // Remap using the shared memory optimising routine + InitFromMPICommunicator(processors,optimal_comm); + SetCommunicator(optimal_comm); } ////////////////////////////////// // Try to subdivide communicator ////////////////////////////////// -/* - * Use default in MPI compile - */ -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) - : CartesianCommunicator(processors) +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) { - std::cout << "Attempts to split MPI3 communicators will fail until implemented" <= parent._ndimension); + std::vector parent_processor_coor(_ndimension,0); + std::vector parent_processors (_ndimension,1); + + // Can make 5d grid from 4d etc... 
+ int pad = _ndimension-parent_ndimension; + for(int d=0;d ccoor(_ndimension); // coor within subcommunicator + std::vector scoor(_ndimension); // coor of split within parent + std::vector ssize(_ndimension); // coor of split within parent + + for(int d=0;d<_ndimension;d++){ + ccoor[d] = parent_processor_coor[d] % processors[d]; + scoor[d] = parent_processor_coor[d] / processors[d]; + ssize[d] = parent_processors[d] / processors[d]; + } + + // rank within subcomm ; srank is rank of subcomm within blocks of subcomms + int crank; + // Mpi uses the reverse Lexico convention to us; so reversed routines called + Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions + Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); // ssize is the number of split grids + + MPI_Comm comm_split; + if ( Nchild > 1 ) { + + if(0){ + std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors) -{ - int ierr; - communicator=communicator_world; - +void CartesianCommunicator::InitFromMPICommunicator(const std::vector &processors, MPI_Comm communicator_base) +{ _ndimension = processors.size(); + _processor_coor.resize(_ndimension); + + ///////////////////////////////// + // Count the requested nodes + ///////////////////////////////// + _Nprocessors=1; + _processors = processors; + for(int i=0;i<_ndimension;i++){ + _Nprocessors*=_processors[i]; + } + + std::vector periodic(_ndimension,1); + MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator); + MPI_Comm_rank(communicator,&_processor); + MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); + + if ( 0 && (communicator_base != communicator_world) ) { + std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"< WorldDims = processors; - - ShmDims.resize (_ndimension,1); - GroupDims.resize(_ndimension); - ShmCoor.resize (_ndimension); - GroupCoor.resize(_ndimension); - WorldCoor.resize(_ndimension); - - int dim = 0; - for(int l2=0;l2 coor(_ndimension); - ProcessorCoorFromRank(wr,coor); // from world rank - int ck = RankFromProcessorCoor(coor); - assert(ck==wr); - - if ( wr == WorldRank ) { - for(int j=0;j mcoor = coor; - this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int)); - for(int d = 0 ; d< _ndimension; d++) { - assert(coor[d] == mcoor[d]); - } - } -}; CartesianCommunicator::~CartesianCommunicator() { int MPI_is_finalised; @@ -734,19 +380,15 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector row(_ndimension,1); + assert(dim>=0 && dim<_ndimension); + + // Split the communicator + row[dim] = _processors[dim]; + + int me; + CartesianCommunicator Comm(row,*this,me); + Comm.AllToAll(in,out,words,bytes); +} +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) +{ + // MPI is a pain and uses "int" arguments + // 64*64*64*128*16 == 500Million elements of data. + // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. + // (Turns up on 32^3 x 64 Gparity too) + MPI_Datatype object; + int iwords; + int ibytes; + iwords = words; + ibytes = bytes; + assert(words == iwords); // safe to cast to int ? + assert(bytes == ibytes); // safe to cast to int ? 
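A concrete case makes the split constructor above easier to follow (the numbers are purely illustrative, not taken from the patch). Take a parent communicator laid out as {2,2,1,1} and request child grids of {2,1,1,1}, and consider the rank whose parent coordinate is (1,1,0,0):

// Worked example for the split constructor (illustrative values only)
// parent _processors = {2,2,1,1}, requested processors = {2,1,1,1}
// childsize = 2, so Nchild = 4/2 = 2 child communicators
//
// ccoor[d] = parent_coor[d] % processors[d]        -> (1,0,0,0)  coordinate inside the child grid
// scoor[d] = parent_coor[d] / processors[d]        -> (0,1,0,0)  which child grid this rank sits in
// ssize[d] = parent_processors[d] / processors[d]  -> (1,2,1,1)  layout of the child grids
//
// Reversed-lexicographic indexing then gives crank = 1 (rank inside the child communicator)
// and srank = 1 (index of the child communicator); the MPI split uses srank as the colour and
// crank as the key, and InitFromMPICommunicator builds the Cartesian layout on the result.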
+ MPI_Type_contiguous(ibytes,MPI_BYTE,&object); + MPI_Type_commit(&object); + MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); + MPI_Type_free(&object); +} + + + } From 0b85f1bfc8d6ceec46150ae1c75dc048f20629a3 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:33:47 +0000 Subject: [PATCH 136/145] Simplify the communicator proliferation: mpi and none. --- lib/communicator/Communicator_none.cc | 43 +++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/lib/communicator/Communicator_none.cc b/lib/communicator/Communicator_none.cc index 26b330a7..c3763d53 100644 --- a/lib/communicator/Communicator_none.cc +++ b/lib/communicator/Communicator_none.cc @@ -32,14 +32,22 @@ namespace Grid { /////////////////////////////////////////////////////////////////////////////////////////////////// // Info that is setup once and indept of cartesian layout /////////////////////////////////////////////////////////////////////////////////////////////////// +Grid_MPI_Comm CartesianCommunicator::communicator_world; void CartesianCommunicator::Init(int *argc, char *** arv) { - ShmInitGeneric(); + GlobalSharedMemory::Init(communicator_world); + GlobalSharedMemory::SharedMemoryAllocate( + GlobalSharedMemory::MAX_MPI_SHM_BYTES, + GlobalSharedMemory::Hugepages); } CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) - : CartesianCommunicator(processors) { srank=0;} + : CartesianCommunicator(processors) +{ + srank=0; + SetCommunicator(communicator_world); +} CartesianCommunicator::CartesianCommunicator(const std::vector &processors) { @@ -54,6 +62,7 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(_processors[d]==1); _processor_coor[d] = 0; } + SetCommunicator(communicator_world); } CartesianCommunicator::~CartesianCommunicator(){} @@ -121,6 +130,36 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest dest=0; } +double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, + int xmit_to_rank, + void *recv, + int recv_from_rank, + int bytes, int dir) +{ + std::vector list; + // Discard the "dir" + SendToRecvFromBegin (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); + SendToRecvFromComplete(list); + return 2.0*bytes; +} +double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, + void *xmit, + int xmit_to_rank, + void *recv, + int recv_from_rank, + int bytes, int dir) +{ + // Discard the "dir" + SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); + return 2.0*bytes; +} +void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall,int dir) +{ + SendToRecvFromComplete(waitall); +} + +void CartesianCommunicator::StencilBarrier(void){}; + } From 44f65526e01369c193a8754e97ec959ed8d0a1d4 Mon Sep 17 00:00:00 2001 From: paboyle Date: Mon, 8 Jan 2018 11:35:43 +0000 Subject: [PATCH 137/145] Simplify communicators --- lib/communicator/Communicator_mpi.cc | 222 ----- lib/communicator/Communicator_mpi3_leader.cc | 988 ------------------- lib/communicator/Communicator_mpit.cc | 273 ----- lib/communicator/Communicator_shmem.cc | 357 ------- lib/communicator/SharedMemory.cc | 54 + lib/communicator/SharedMemory.h | 158 +++ lib/communicator/SharedMemoryMPI.cc | 415 ++++++++ lib/communicator/SharedMemoryNone.cc | 150 +++ 8 files changed, 777 insertions(+), 1840 deletions(-) delete mode 100644 lib/communicator/Communicator_mpi.cc delete mode 100644 
lib/communicator/Communicator_mpi3_leader.cc delete mode 100644 lib/communicator/Communicator_mpit.cc delete mode 100644 lib/communicator/Communicator_shmem.cc create mode 100644 lib/communicator/SharedMemory.cc create mode 100644 lib/communicator/SharedMemory.h create mode 100644 lib/communicator/SharedMemoryMPI.cc create mode 100644 lib/communicator/SharedMemoryNone.cc diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc deleted file mode 100644 index 2075e4bf..00000000 --- a/lib/communicator/Communicator_mpi.cc +++ /dev/null @@ -1,222 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include -#include - -namespace Grid { - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -MPI_Comm CartesianCommunicator::communicator_world; - -// Should error check all MPI calls. 
-void CartesianCommunicator::Init(int *argc, char ***argv) { - int flag; - int provided; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); - if ( provided != MPI_THREAD_MULTIPLE ) { - QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; - } - } - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - ShmInitGeneric(); -} - -CartesianCommunicator::~CartesianCommunicator() -{ - int MPI_is_finalised; - MPI_Finalized(&MPI_is_finalised); - if (communicator && !MPI_is_finalised) - MPI_Comm_free(&communicator); -} - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - int myrank = _processor; - int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - MPI_Request xrq; - MPI_Request rrq; - - ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - - 
assert(ierr==0); - list.push_back(xrq); - list.push_back(rrq); - } else { - // Give the CPU to MPI immediately; can use threads to overlap optionally - ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, - recv,bytes,MPI_CHAR,from, from, - communicator,MPI_STATUS_IGNORE); - assert(ierr==0); - } -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); - } -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - /////////////////////////////////////////////////////// - // Should only be used prior to Grid Init finished. - // Check for this? - /////////////////////////////////////////////////////// -int CartesianCommunicator::RankWorld(void){ - int r; - MPI_Comm_rank(communicator_world,&r); - return r; -} -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - - - -} - diff --git a/lib/communicator/Communicator_mpi3_leader.cc b/lib/communicator/Communicator_mpi3_leader.cc deleted file mode 100644 index 6e26bd3e..00000000 --- a/lib/communicator/Communicator_mpi3_leader.cc +++ /dev/null @@ -1,988 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include "Grid.h" -#include -//#include - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////// -/// Workarounds: -/// i) bloody mac os doesn't implement unnamed semaphores since it is "optional" posix. -/// darwin dispatch semaphores don't seem to be multiprocess. -/// -/// ii) openmpi under --mca shmem posix works with two squadrons per node; -/// openmpi under default mca settings (I think --mca shmem mmap) on MacOS makes two squadrons map the SAME -/// memory as each other, despite their living on different communicators. This appears to be a bug in OpenMPI. 
-/// -//////////////////////////////////////////////////////////////////////////////////////////////////////////////// -#include -#include -#include -#include -typedef sem_t *Grid_semaphore; - - -#error /*THis is deprecated*/ - -#if 0 -#define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED ); -#define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED ); -#define SEM_POST(S) assert ( sem_post(S) == 0 ); -#define SEM_WAIT(S) assert ( sem_wait(S) == 0 ); -#else -#define SEM_INIT(S) ; -#define SEM_INIT_EXCL(S) ; -#define SEM_POST(S) ; -#define SEM_WAIT(S) ; -#endif -#include - -namespace Grid { - -enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV }; - -struct Descriptor { - uint64_t buf; - size_t bytes; - int rank; - int tag; - int command; - uint64_t xbuf; - uint64_t rbuf; - int xtag; - int rtag; - int src; - int dest; - MPI_Request request; -}; - -const int pool = 48; - -class SlaveState { -public: - volatile int head; - volatile int start; - volatile int tail; - volatile Descriptor Descrs[pool]; -}; - -class Slave { -public: - Grid_semaphore sem_head; - Grid_semaphore sem_tail; - SlaveState *state; - MPI_Comm squadron; - uint64_t base; - int universe_rank; - int vertical_rank; - char sem_name [NAME_MAX]; - //////////////////////////////////////////////////////////// - // Descriptor circular pointers - //////////////////////////////////////////////////////////// - Slave() {}; - - void Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank); - - void SemInit(void) { - sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - SEM_INIT(sem_head); - sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - SEM_INIT(sem_tail); - } - void SemInitExcl(void) { - sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - SEM_INIT_EXCL(sem_head); - sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - SEM_INIT_EXCL(sem_tail); - } - void WakeUpDMA(void) { - SEM_POST(sem_head); - }; - void WakeUpCompute(void) { - SEM_POST(sem_tail); - }; - void WaitForCommand(void) { - SEM_WAIT(sem_head); - }; - void WaitForComplete(void) { - SEM_WAIT(sem_tail); - }; - void EventLoop (void) { - // std::cout<< " Entering event loop "<head,0,0); - int s=state->start; - if ( s != state->head ) { - _mm_mwait(0,0); - } -#endif - Event(); - } - } - - int Event (void) ; - - uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ; - void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ; - - void WaitAll() { - // std::cout << "Queueing WAIT command "<tail != state->head ); - } -}; - -//////////////////////////////////////////////////////////////////////// -// One instance of a data mover. 
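// Illustrative sketch of the descriptor FIFO this data mover is built on: SlaveState above
// keeps head/start/tail indices over a fixed Descrs[pool] ring, and QueueCommand/Event below
// advance them with the PERI_PLUS modulo step. The version here uses std::atomic for the
// indices instead of the volatile fields plus semaphores in the original; field and type
// names are illustrative only.
#include <atomic>
#include <cstdint>

struct Descr { uint64_t buf; int bytes, rank, tag, command; };
constexpr int pool = 48;                       // same FIFO depth as the code below

struct DescriptorRing {
  Descr slots[pool];
  std::atomic<int> head{0};                    // next slot the producer will fill
  std::atomic<int> tail{0};                    // next slot the consumer will drain
  static int next(int i) { return (i + 1) % pool; }   // the PERI_PLUS idiom

  bool push(const Descr &d) {                  // producer side (QueueCommand analogue)
    int h = head.load(std::memory_order_relaxed);
    if (next(h) == tail.load(std::memory_order_acquire)) return false; // full: caller spins
    slots[h] = d;
    head.store(next(h), std::memory_order_release);
    return true;
  }
  bool pop(Descr &d) {                         // consumer side (Event() analogue)
    int t = tail.load(std::memory_order_relaxed);
    if (t == head.load(std::memory_order_acquire)) return false;       // empty
    d = slots[t];
    tail.store(next(t), std::memory_order_release);
    return true;
  }
};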
-// Master and Slave must agree on location in shared memory -//////////////////////////////////////////////////////////////////////// - -class MPIoffloadEngine { -public: - - static std::vector Slaves; - - static int ShmSetup; - - static int UniverseRank; - static int UniverseSize; - - static MPI_Comm communicator_universe; - static MPI_Comm communicator_cached; - - static MPI_Comm HorizontalComm; - static int HorizontalRank; - static int HorizontalSize; - - static MPI_Comm VerticalComm; - static MPI_Win VerticalWindow; - static int VerticalSize; - static int VerticalRank; - - static std::vector VerticalShmBufs; - static std::vector > UniverseRanks; - static std::vector UserCommunicatorToWorldRanks; - - static MPI_Group WorldGroup, CachedGroup; - - static void CommunicatorInit (MPI_Comm &communicator_world, - MPI_Comm &ShmComm, - void * &ShmCommBuf); - - static void MapCommRankToWorldRank(int &hashtag, int & comm_world_peer,int tag, MPI_Comm comm,int commrank); - - ///////////////////////////////////////////////////////// - // routines for master proc must handle any communicator - ///////////////////////////////////////////////////////// - - static void QueueSend(int slave,void *buf, int bytes, int tag, MPI_Comm comm,int rank) { - // std::cout<< " Queueing send "<< bytes<< " slave "<< slave << " to comm "<= units ) { - mywork = myoff = 0; - } else { - mywork = (nwork+me)/units; - myoff = basework * me; - if ( me > backfill ) - myoff+= (me-backfill); - } - return; - }; - - static void QueueRoundRobinSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) { - uint8_t * cxbuf = (uint8_t *) xbuf; - uint8_t * crbuf = (uint8_t *) rbuf; - static int rrp=0; - int procs = VerticalSize-1; - int myoff=0; - int mywork=bytes; - QueueSendRecv(rrp+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src); - rrp = rrp+1; - if ( rrp == (VerticalSize-1) ) rrp = 0; - } - - static void QueueMultiplexedSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) { - uint8_t * cxbuf = (uint8_t *) xbuf; - uint8_t * crbuf = (uint8_t *) rbuf; - int mywork, myoff, procs; - procs = VerticalSize-1; - for(int s=0;s MPIoffloadEngine::Slaves; - -int MPIoffloadEngine::UniverseRank; -int MPIoffloadEngine::UniverseSize; - -MPI_Comm MPIoffloadEngine::communicator_universe; -MPI_Comm MPIoffloadEngine::communicator_cached; -MPI_Group MPIoffloadEngine::WorldGroup; -MPI_Group MPIoffloadEngine::CachedGroup; - -MPI_Comm MPIoffloadEngine::HorizontalComm; -int MPIoffloadEngine::HorizontalRank; -int MPIoffloadEngine::HorizontalSize; - -MPI_Comm MPIoffloadEngine::VerticalComm; -int MPIoffloadEngine::VerticalSize; -int MPIoffloadEngine::VerticalRank; -MPI_Win MPIoffloadEngine::VerticalWindow; -std::vector MPIoffloadEngine::VerticalShmBufs; -std::vector > MPIoffloadEngine::UniverseRanks; -std::vector MPIoffloadEngine::UserCommunicatorToWorldRanks; - -int CartesianCommunicator::NodeCount(void) { return HorizontalSize;}; -int MPIoffloadEngine::ShmSetup = 0; - -void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world, - MPI_Comm &ShmComm, - void * &ShmCommBuf) -{ - int flag; - assert(ShmSetup==0); - - ////////////////////////////////////////////////////////////////////// - // Universe is all nodes prior to squadron grouping - ////////////////////////////////////////////////////////////////////// - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_universe); - MPI_Comm_rank(communicator_universe,&UniverseRank); - 
MPI_Comm_size(communicator_universe,&UniverseSize); - - ///////////////////////////////////////////////////////////////////// - // Split into groups that can share memory (Verticals) - ///////////////////////////////////////////////////////////////////// -#undef MPI_SHARED_MEM_DEBUG -#ifdef MPI_SHARED_MEM_DEBUG - MPI_Comm_split(communicator_universe,(UniverseRank/4),UniverseRank,&VerticalComm); -#else - MPI_Comm_split_type(communicator_universe, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&VerticalComm); -#endif - MPI_Comm_rank(VerticalComm ,&VerticalRank); - MPI_Comm_size(VerticalComm ,&VerticalSize); - - ////////////////////////////////////////////////////////////////////// - // Split into horizontal groups by rank in squadron - ////////////////////////////////////////////////////////////////////// - MPI_Comm_split(communicator_universe,VerticalRank,UniverseRank,&HorizontalComm); - MPI_Comm_rank(HorizontalComm,&HorizontalRank); - MPI_Comm_size(HorizontalComm,&HorizontalSize); - assert(HorizontalSize*VerticalSize==UniverseSize); - - //////////////////////////////////////////////////////////////////////////////// - // What is my place in the world - //////////////////////////////////////////////////////////////////////////////// - int WorldRank=0; - if(VerticalRank==0) WorldRank = HorizontalRank; - int ierr=MPI_Allreduce(MPI_IN_PLACE,&WorldRank,1,MPI_INT,MPI_SUM,VerticalComm); - assert(ierr==0); - - //////////////////////////////////////////////////////////////////////////////// - // Where is the world in the universe? - //////////////////////////////////////////////////////////////////////////////// - UniverseRanks = std::vector >(HorizontalSize,std::vector(VerticalSize,0)); - UniverseRanks[WorldRank][VerticalRank] = UniverseRank; - for(int w=0;w0 ) size = sizeof(SlaveState); - - sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); - - shm_unlink(shm_name); - - int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); - if ( fd < 0 ) { - perror("failed shm_open"); - assert(0); - } - - ftruncate(fd, size); - - VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if ( VerticalShmBufs[r] == MAP_FAILED ) { - perror("failed mmap"); - assert(0); - } - - /* - for(uint64_t page=0;page0 ) size = sizeof(SlaveState); - - sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); - - int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); - if ( fd<0 ) { - perror("failed shm_open"); - assert(0); - } - VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - - uint64_t * check = (uint64_t *) VerticalShmBufs[r]; - assert(check[0]== WorldRank); - assert(check[1]== r); - // std::cerr<<"SHM "<"<"< cached_ranks(size); - - for(int r=0;r"<>0 )&0xFFFF)^((icomm>>16)&0xFFFF) - ^ ((icomm>>32)&0xFFFF)^((icomm>>48)&0xFFFF); - - // hashtag = (comm_hash<<15) | tag; - hashtag = tag; - -}; - -void Slave::Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank) -{ - squadron=_squadron; - universe_rank=_universe_rank; - vertical_rank=_vertical_rank; - state =_state; - // std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<head = state->tail = state->start = 0; - base = (uint64_t)MPIoffloadEngine::VerticalShmBufs[0]; - int rank; MPI_Comm_rank(_squadron,&rank); -} -#define PERI_PLUS(A) ( (A+1)%pool ) -int Slave::Event (void) { - - static int tail_last; - static int head_last; - static int start_last; - int ierr; - MPI_Status stat; - static int i=0; - - //////////////////////////////////////////////////// - // Try to advance the start pointers - 
//////////////////////////////////////////////////// - int s=state->start; - if ( s != state->head ) { - switch ( state->Descrs[s].command ) { - case COMMAND_ISEND: - ierr = MPI_Isend((void *)(state->Descrs[s].buf+base), - state->Descrs[s].bytes, - MPI_CHAR, - state->Descrs[s].rank, - state->Descrs[s].tag, - MPIoffloadEngine::communicator_universe, - (MPI_Request *)&state->Descrs[s].request); - assert(ierr==0); - state->start = PERI_PLUS(s); - return 1; - break; - - case COMMAND_IRECV: - ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base), - state->Descrs[s].bytes, - MPI_CHAR, - state->Descrs[s].rank, - state->Descrs[s].tag, - MPIoffloadEngine::communicator_universe, - (MPI_Request *)&state->Descrs[s].request); - - // std::cout<< " Request is "<Descrs[s].request<Descrs[0].request<start = PERI_PLUS(s); - return 1; - break; - - case COMMAND_SENDRECV: - - // fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10); - - ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10, - (void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10, - MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE); - - assert(ierr==0); - - // fprintf(stderr,"Sendrecv done %d %d\n",ierr,i); - // MPI_Barrier(MPIoffloadEngine::HorizontalComm); - // fprintf(stderr,"Barrier\n"); - i++; - - state->start = PERI_PLUS(s); - - return 1; - break; - - case COMMAND_WAITALL: - - for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){ - if ( state->Descrs[t].command != COMMAND_SENDRECV ) { - MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE); - } - }; - s=PERI_PLUS(s); - state->start = s; - state->tail = s; - - WakeUpCompute(); - - return 1; - break; - - default: - assert(0); - break; - } - } - return 0; -} - ////////////////////////////////////////////////////////////////////////////// - // External interaction with the queue - ////////////////////////////////////////////////////////////////////////////// - -void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) -{ - int head =state->head; - int next = PERI_PLUS(head); - - // Set up descriptor - int worldrank; - int hashtag; - MPI_Comm communicator; - MPI_Request request; - uint64_t relative; - - relative = (uint64_t)xbuf - base; - state->Descrs[head].xbuf = relative; - - relative= (uint64_t)rbuf - base; - state->Descrs[head].rbuf = relative; - - state->Descrs[head].bytes = bytes; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest); - state->Descrs[head].dest = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].xtag = hashtag; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src); - state->Descrs[head].src = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].rtag = hashtag; - - state->Descrs[head].command= COMMAND_SENDRECV; - - // Block until FIFO has space - while( state->tail==next ); - - // Msync on weak order architectures - - // Advance pointer - state->head = next; - -}; -uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank) -{ - ///////////////////////////////////////// - // Spin; if FIFO is full until not full - ///////////////////////////////////////// - int head =state->head; - int next = PERI_PLUS(head); - - // 
Set up descriptor - int worldrank; - int hashtag; - MPI_Comm communicator; - MPI_Request request; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,tag,comm,commrank); - - uint64_t relative= (uint64_t)buf - base; - state->Descrs[head].buf = relative; - state->Descrs[head].bytes = bytes; - state->Descrs[head].rank = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].tag = hashtag; - state->Descrs[head].command= command; - - /* - if ( command == COMMAND_ISEND ) { - std::cout << "QueueSend from "<< universe_rank <<" to commrank " << commrank - << " to worldrank " << worldrank <tail==next ); - - // Msync on weak order architectures - // Advance pointer - state->head = next; - - return 0; -} - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// - -MPI_Comm CartesianCommunicator::communicator_world; - -void CartesianCommunicator::Init(int *argc, char ***argv) -{ - int flag; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init(argc,argv); - } - communicator_world = MPI_COMM_WORLD; - MPI_Comm ShmComm; - MPIoffloadEngine::CommunicatorInit (communicator_world,ShmComm,ShmCommBuf); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -CartesianCommunicator::CartesianCommunicator(const std::vector &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - int Size; - MPI_Comm_size(communicator_world,&Size); - assert(Size==_Nprocessors); - - _processor_coor.resize(_ndimension); - MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); - MPI_Comm_rank (communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); -}; - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} - -// Basic Halo comms primitive -void 
CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - MPI_Request xrq; - MPI_Request rrq; - int rank = _processor; - int ierr; - ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - - assert(ierr==0); - - list.push_back(xrq); - list.push_back(rrq); -} - -void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - uint64_t xmit_i = (uint64_t) xmit; - uint64_t recv_i = (uint64_t) recv; - uint64_t shm = (uint64_t) ShmCommBuf; - // assert xmit and recv lie in shared memory region - assert( (xmit_i >= shm) && (xmit_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); - assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); - assert(from!=_processor); - assert(dest!=_processor); - - MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); - - //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); - - //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest); - //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from); -} - -void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &list) -{ - MPIoffloadEngine::WaitAll(); - //this->Barrier(); -} - -void CartesianCommunicator::StencilBarrier(void) { } - -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - -void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; } - -void *CartesianCommunicator::ShmBuffer(int rank) { - return NULL; -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { - return NULL; -} - - -}; - diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc deleted file mode 100644 index bceea0d8..00000000 --- a/lib/communicator/Communicator_mpit.cc +++ /dev/null @@ -1,273 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is 
free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include -#include - -namespace Grid { - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -MPI_Comm CartesianCommunicator::communicator_world; - -// Should error check all MPI calls. -void CartesianCommunicator::Init(int *argc, char ***argv) { - int flag; - int provided; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); - if ( provided != MPI_THREAD_MULTIPLE ) { - QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; - } - } - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - ShmInitGeneric(); -} - -CartesianCommunicator::~CartesianCommunicator() -{ - int MPI_is_finalised; - MPI_Finalized(&MPI_is_finalised); - if (communicator && !MPI_is_finalised){ - MPI_Comm_free(&communicator); - for(int i=0;i< communicator_halo.size();i++){ - MPI_Comm_free(&communicator_halo[i]); - } - } -} - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int 
ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - int myrank = _processor; - int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - MPI_Request xrq; - MPI_Request rrq; - - ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - - assert(ierr==0); - list.push_back(xrq); - list.push_back(rrq); - } else { - // Give the CPU to MPI immediately; can use threads to overlap optionally - ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, - recv,bytes,MPI_CHAR,from, from, - communicator,MPI_STATUS_IGNORE); - assert(ierr==0); - } -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); - } -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - /////////////////////////////////////////////////////// - // Should only be used prior to Grid Init finished. - // Check for this? 
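// The ShiftedRanks / RankFromProcessorCoor / ProcessorCoorFromRank wrappers above are thin
// layers over MPI's Cartesian topology calls. A minimal standalone example of the same calls
// (process-grid shape and dimension count here are illustrative, not Grid defaults): build a
// periodic Cartesian communicator, recover this rank's coordinate, and find the +/- 1
// neighbours in one dimension.
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int nprocs; MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  int dims[2] = {0, 0};                         // let MPI factorise the process grid
  MPI_Dims_create(nprocs, 2, dims);
  int periodic[2] = {1, 1};                     // periodic wrap, as in the constructors above
  MPI_Comm cart;
  MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periodic, 1 /*reorder*/, &cart);

  int rank, coor[2];
  MPI_Comm_rank(cart, &rank);
  MPI_Cart_coords(cart, rank, 2, coor);         // ProcessorCoorFromRank analogue

  int source, dest;
  MPI_Cart_shift(cart, 0 /*dim*/, 1 /*shift*/, &source, &dest);  // ShiftedRanks analogue
  std::printf("rank %d at (%d,%d): recv from %d, send to %d\n",
              rank, coor[0], coor[1], source, dest);

  MPI_Comm_free(&cart);
  MPI_Finalize();
  return 0;
}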
- /////////////////////////////////////////////////////// -int CartesianCommunicator::RankWorld(void){ - int r; - MPI_Comm_rank(communicator_world,&r); - return r; -} -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - -double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes,int dir) -{ - int myrank = _processor; - int ierr; - int ncomm =communicator_halo.size(); - int commdir=dir%ncomm; - - // std::cout << " sending on communicator "< &waitall,int dir) -{ - int nreq=waitall.size(); - MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE); -} -double CartesianCommunicator::StencilSendToRecvFrom(void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes,int dir) -{ - int myrank = _processor; - int ierr; - // std::cout << " sending on communicator "< - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include - -namespace Grid { - - // Should error check all MPI calls. -#define SHMEM_VET(addr) - -#define SHMEM_VET_DEBUG(addr) { \ - if ( ! 
shmem_addr_accessible(addr,_processor) ) {\ - std::fprintf(stderr,"%d Inaccessible shmem address %lx %s %s\n",_processor,addr,__FUNCTION__,#addr); \ - BACKTRACEFILE(); \ - }\ -} - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// - -typedef struct HandShake_t { - uint64_t seq_local; - uint64_t seq_remote; -} HandShake; - -std::array make_psync_init(void) { - std::array ret; - ret.fill(SHMEM_SYNC_VALUE); - return ret; -} -static std::array psync_init = make_psync_init(); - -static Vector< HandShake > XConnections; -static Vector< HandShake > RConnections; - -void CartesianCommunicator::Init(int *argc, char ***argv) { - shmem_init(); - XConnections.resize(shmem_n_pes()); - RConnections.resize(shmem_n_pes()); - for(int pe =0 ; pe &processors,const CartesianCommunicator &parent) - : CartesianCommunicator(processors) -{ - std::cout << "Attempts to split SHMEM communicators will fail " < &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - _processor_coor.resize(_ndimension); - - _processor = shmem_my_pe(); - - Lexicographic::CoorFromIndex(_processor_coor,_processor,_processors); - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - int Size = shmem_n_pes(); - - - assert(Size==_Nprocessors); -} - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - static long long source ; - static long long dest ; - static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - // int nreduce=1; - // int pestart=0; - // int logStride=0; - - source = u; - dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); // necessary? - u = dest; -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - static long long source ; - static long long dest ; - static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - // int nreduce=1; - // int pestart=0; - // int logStride=0; - - source = u; - dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); // necessary? 
- u = dest; -} -void CartesianCommunicator::GlobalSum(float &f){ - static float source ; - static float dest ; - static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - source = f; - dest =0.0; - shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - f = dest; -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - static float source ; - static float dest = 0 ; - static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - if ( shmem_addr_accessible(f,_processor) ){ - shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - return; - } - - for(int i=0;i psync = psync_init; - - source = d; - dest = 0; - shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - d = dest; -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - static double source ; - static double dest ; - static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - - if ( shmem_addr_accessible(d,_processor) ){ - shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - return; - } - - for(int i=0;i coor = _processor_coor; - - assert(std::abs(shift) <_processors[dim]); - - coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,source,_processors); - - coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,dest,_processors); - -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - Lexicographic::IndexFromCoor(coor,rank,_processors); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - Lexicographic::CoorFromIndex(coor,rank,_processors); -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - SHMEM_VET(xmit); - SHMEM_VET(recv); - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - static uint64_t seq; - - assert(recv!=xmit); - volatile HandShake *RecvSeq = (volatile HandShake *) & RConnections[sender]; - volatile HandShake *SendSeq = (volatile HandShake *) & XConnections[receiver]; - - if ( _processor == sender ) { - - // Check he has posted a receive - while(SendSeq->seq_remote == SendSeq->seq_local); - - // Advance our send count - seq = ++(SendSeq->seq_local); - - // Send this packet - SHMEM_VET(recv); - shmem_putmem(recv,xmit,bytes,receiver); - shmem_fence(); - - //Notify him we're done - shmem_putmem((void *)&(RecvSeq->seq_remote),&seq,sizeof(seq),receiver); - shmem_fence(); - } - if ( _processor == receiver ) { - - // Post a receive - seq = ++(RecvSeq->seq_local); - shmem_putmem((void *)&(SendSeq->seq_remote),&seq,sizeof(seq),sender); - - // Now wait until he has advanced our reception counter - while(RecvSeq->seq_remote != RecvSeq->seq_local); - - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - SHMEM_VET(xmit); - SHMEM_VET(recv); - // shmem_putmem_nb(recv,xmit,bytes,dest,NULL); - shmem_putmem(recv,xmit,bytes,dest); - - if ( 
CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all(); -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - // shmem_quiet(); // I'm done - if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too -} -void CartesianCommunicator::Barrier(void) -{ - shmem_barrier_all(); -} -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - static std::array psync = psync_init; - static uint32_t word; - uint32_t *array = (uint32_t *) data; - assert( (bytes % 4)==0); - int words = bytes/4; - - if ( shmem_addr_accessible(data,_processor) ){ - shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data()); - return; - } - - for(int w=0;w psync = psync_init; - static uint32_t word; - uint32_t *array = (uint32_t *) data; - assert( (bytes % 4)==0); - int words = bytes/4; - - for(int w=0;w + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +// static data + +uint64_t GlobalSharedMemory::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL; +int GlobalSharedMemory::Hugepages = 0; +int GlobalSharedMemory::ShmSetup; + +std::vector GlobalSharedMemory::WorldShmCommBufs; + +Grid_MPI_Comm GlobalSharedMemory::WorldShmComm; +int GlobalSharedMemory::WorldShmRank; +int GlobalSharedMemory::WorldShmSize; +std::vector GlobalSharedMemory::WorldShmRanks; + +Grid_MPI_Comm GlobalSharedMemory::WorldComm; +int GlobalSharedMemory::WorldSize; +int GlobalSharedMemory::WorldRank; + +int GlobalSharedMemory::WorldNodes; +int GlobalSharedMemory::WorldNode; + + +} diff --git a/lib/communicator/SharedMemory.h b/lib/communicator/SharedMemory.h new file mode 100644 index 00000000..2bb112e5 --- /dev/null +++ b/lib/communicator/SharedMemory.h @@ -0,0 +1,158 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/communicator/SharedMemory.cc + + Copyright (C) 2015 + +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + + +// TODO +// 1) move includes into SharedMemory.cc +// +// 2) split shared memory into a) optimal communicator creation from comm world +// +// b) shared memory buffers container +// -- static globally shared; init once +// -- per instance set of buffers. +// + +#pragma once + +#include + +#if defined (GRID_COMMS_MPI3) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_NUMAIF_H +#include +#endif + +namespace Grid { + +#if defined (GRID_COMMS_MPI3) + typedef MPI_Comm Grid_MPI_Comm; + typedef MPI_Request CommsRequest_t; +#else + typedef int CommsRequest_t; + typedef int Grid_MPI_Comm; +#endif + +class GlobalSharedMemory { + private: + // Init once lock on the buffer allocation + static int ShmSetup; + static const int MAXLOG2RANKSPERNODE = 16; + + public: + static uint64_t MAX_MPI_SHM_BYTES; + static int Hugepages; + + static std::vector WorldShmCommBufs; + + static Grid_MPI_Comm WorldComm; + static int WorldRank; + static int WorldSize; + + static Grid_MPI_Comm WorldShmComm; + static int WorldShmRank; + static int WorldShmSize; + + static int WorldNodes; + static int WorldNode; + + static std::vector WorldShmRanks; + + ////////////////////////////////////////////////////////////////////////////////////// + // Create an optimal reordered communicator that makes MPI_Cart_create get it right + ////////////////////////////////////////////////////////////////////////////////////// + static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD + static void OptimalCommunicator(const std::vector &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian + /////////////////////////////////////////////////// + // Provide shared memory facilities off comm world + /////////////////////////////////////////////////// + static void SharedMemoryAllocate(uint64_t bytes, int flags); + static void SharedMemoryFree(void); + +}; + +////////////////////////////// +// one per communicator +////////////////////////////// +class SharedMemory +{ + private: + static const int MAXLOG2RANKSPERNODE = 16; + + size_t heap_top; + size_t heap_bytes; + size_t heap_size; + + protected: + + Grid_MPI_Comm ShmComm; // for barriers + int ShmRank; + int ShmSize; + std::vector ShmCommBufs; + std::vector ShmRanks;// Mapping comm ranks to Shm ranks + + public: + SharedMemory() {}; + /////////////////////////////////////////////////////////////////////////////////////// + // set the buffers & sizes + /////////////////////////////////////////////////////////////////////////////////////// + void SetCommunicator(Grid_MPI_Comm comm); + + //////////////////////////////////////////////////////////////////////// + // For this instance ; disjoint buffer sets between splits if split grid + //////////////////////////////////////////////////////////////////////// + void ShmBarrier(void); + + /////////////////////////////////////////////////// + // Call on any instance + /////////////////////////////////////////////////// + void SharedMemoryTest(void); + void *ShmBufferSelf(void); + void *ShmBuffer (int rank); + void 
*ShmBufferTranslate(int rank,void * local_p); + void *ShmBufferMalloc(size_t bytes); + void ShmBufferFreeAll(void) ; + + ////////////////////////////////////////////////////////////////////////// + // Make info on Nodes & ranks and Shared memory available + ////////////////////////////////////////////////////////////////////////// + int NodeCount(void) { return GlobalSharedMemory::WorldNodes;}; + int RankCount(void) { return GlobalSharedMemory::WorldSize;}; + +}; + +} diff --git a/lib/communicator/SharedMemoryMPI.cc b/lib/communicator/SharedMemoryMPI.cc new file mode 100644 index 00000000..af4f9702 --- /dev/null +++ b/lib/communicator/SharedMemoryMPI.cc @@ -0,0 +1,415 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/communicator/SharedMemory.cc + + Copyright (C) 2015 + +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +/*Construct from an MPI communicator*/ +void GlobalSharedMemory::Init(Grid_MPI_Comm comm) +{ + WorldComm = comm; + MPI_Comm_rank(WorldComm,&WorldRank); + MPI_Comm_size(WorldComm,&WorldSize); + // WorldComm, WorldSize, WorldRank + + ///////////////////////////////////////////////////////////////////// + // Split into groups that can share memory + ///////////////////////////////////////////////////////////////////// + MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm); + MPI_Comm_rank(WorldShmComm ,&WorldShmRank); + MPI_Comm_size(WorldShmComm ,&WorldShmSize); + // WorldShmComm, WorldShmSize, WorldShmRank + + // WorldNodes + WorldNodes = WorldSize/WorldShmSize; + assert( (WorldNodes * WorldShmSize) == WorldSize ); + + // FIXME: Check all WorldShmSize are the same ? + + ///////////////////////////////////////////////////////////////////// + // find world ranks in our SHM group (i.e. 
which ranks are on our node) + ///////////////////////////////////////////////////////////////////// + MPI_Group WorldGroup, ShmGroup; + MPI_Comm_group (WorldComm, &WorldGroup); + MPI_Comm_group (WorldShmComm, &ShmGroup); + + std::vector world_ranks(WorldSize); for(int r=0;r MyGroup; + MyGroup.resize(WorldShmSize); + for(int rank=0;rank()); + int myleader = MyGroup[0]; + + std::vector leaders_1hot(WorldSize,0); + std::vector leaders_group(WorldNodes,0); + leaders_1hot [ myleader ] = 1; + + /////////////////////////////////////////////////////////////////// + // global sum leaders over comm world + /////////////////////////////////////////////////////////////////// + int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,WorldComm); + assert(ierr==0); + + /////////////////////////////////////////////////////////////////// + // find the group leaders world rank + /////////////////////////////////////////////////////////////////// + int group=0; + for(int l=0;l &processors,Grid_MPI_Comm & optimal_comm) +{ + //////////////////////////////////////////////////////////////// + // Assert power of two shm_size. + //////////////////////////////////////////////////////////////// + int log2size = -1; + for(int i=0;i<=MAXLOG2RANKSPERNODE;i++){ + if ( (0x1< processor_coor(ndimension); + std::vector WorldDims = processors; std::vector ShmDims (ndimension,1); std::vector NodeDims (ndimension); + std::vector ShmCoor (ndimension); std::vector NodeCoor (ndimension); std::vector WorldCoor(ndimension); + int dim = 0; + for(int l2=0;l2 ranks(size); for(int r=0;r= heap_size) { + std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +/*Construct from an MPI communicator*/ +void GlobalSharedMemory::Init(Grid_MPI_Comm comm) +{ + WorldComm = 0; + WorldRank = 0; + WorldSize = 1; + WorldShmComm = 0 ; + WorldShmRank = 0 ; + WorldShmSize = 1 ; + WorldNodes = 1 ; + WorldNode = 0 ; + WorldShmRanks.resize(WorldSize); WorldShmRanks[0] = 0; + WorldShmCommBufs.resize(1); +} + +void GlobalSharedMemory::OptimalCommunicator(const std::vector &processors,Grid_MPI_Comm & optimal_comm) +{ + optimal_comm = WorldComm; +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// Hugetlbfs mapping intended, use anonymous mmap +//////////////////////////////////////////////////////////////////////////////////////////// +void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) +{ + void * ShmCommBuf ; + MAX_MPI_SHM_BYTES=bytes; + int mmap_flag =0; +#ifdef MAP_ANONYMOUS + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; +#endif +#ifdef MAP_ANON + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; +#endif +#ifdef MAP_HUGETLB + if ( flags ) mmap_flag |= MAP_HUGETLB; +#endif + ShmCommBuf =(void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); + if (ShmCommBuf == (void *)MAP_FAILED) { + perror("mmap failed "); + exit(EXIT_FAILURE); + } +#ifdef MADV_HUGEPAGE + if (!Hugepages ) madvise(ShmCommBuf,bytes,MADV_HUGEPAGE); +#endif + bzero(ShmCommBuf,bytes); + WorldShmCommBufs[0] = ShmCommBuf; +}; + +void GlobalSharedMemory::SharedMemoryFree(void) +{ + assert(ShmSetup); + assert(0); // unimplemented +} + + //////////////////////////////////////////////////////// + // Global shared functionality finished + // Now move to per communicator functionality + //////////////////////////////////////////////////////// +void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) +{ + ShmRanks.resize(1); + ShmCommBufs.resize(1); + ShmRanks[0] = 0; + ShmRank = 0; + ShmSize = 1; + ////////////////////////////////////////////////////////////////////// + // Map ShmRank to WorldShmRank and use the right buffer + ////////////////////////////////////////////////////////////////////// + ShmCommBufs[0] = GlobalSharedMemory::WorldShmCommBufs[0]; + heap_size = GlobalSharedMemory::MAX_MPI_SHM_BYTES; + ShmBufferFreeAll(); + return; +} +////////////////////////////////////////////////////////////////// +// On node barrier +////////////////////////////////////////////////////////////////// +void SharedMemory::ShmBarrier(void){ return ; } + +////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Test the shared memory is working +////////////////////////////////////////////////////////////////////////////////////////////////////////// +void SharedMemory::SharedMemoryTest(void) { return; } + +void *SharedMemory::ShmBufferSelf(void) +{ + return ShmCommBufs[ShmRank]; +} +void *SharedMemory::ShmBuffer(int rank) +{ + return NULL; +} +void *SharedMemory::ShmBufferTranslate(int rank,void * local_p) +{ + return NULL; +} + +///////////////////////////////// +// Alloc, free shmem region ; common to MPI and none? 
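// ShmBufferMalloc / ShmBufferFreeAll below implement a simple bump (arena) allocator over the
// pre-mapped shared heap: an allocation only advances heap_top, and "free" resets the whole
// arena in one go. A minimal standalone sketch of the same pattern, using ordinary malloc'd
// memory rather than the mmap'd shared segment; the class and its names are illustrative.
#include <cstddef>
#include <cstdlib>
#include <cassert>

struct BumpArena {
  unsigned char *base;
  size_t size, used;
  explicit BumpArena(size_t bytes)
    : base(static_cast<unsigned char*>(std::malloc(bytes))), size(bytes), used(0)
  { assert(base != nullptr); }
  ~BumpArena() { std::free(base); }

  void *alloc(size_t bytes) {            // ShmBufferMalloc analogue
    assert(used + bytes <= size && "arena exhausted -- grow it (cf. the --shm flag)");
    void *p = base + used;
    used += bytes;
    return p;
  }
  void free_all() { used = 0; }          // ShmBufferFreeAll analogue: one-shot reset
};

// Typical use: grab staging buffers for one halo exchange, then reset everything at once.
//   BumpArena arena(1024 * 1024);
//   void *send = arena.alloc(4096);
//   void *recv = arena.alloc(4096);
//   /* ... exchange ... */
//   arena.free_all();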
+///////////////////////////////// +void *SharedMemory::ShmBufferMalloc(size_t bytes){ + void *ptr = (void *)heap_top; + heap_top += bytes; + heap_bytes+= bytes; + if (heap_bytes >= heap_size) { + std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < Date: Mon, 8 Jan 2018 11:36:39 +0000 Subject: [PATCH 138/145] Synthetic test of lanczos --- ..._dwf_compressed_lanczos_reorg_synthetic.cc | 330 ++++++++++++++++++ 1 file changed, 330 insertions(+) create mode 100644 tests/lanczos/Test_dwf_compressed_lanczos_reorg_synthetic.cc diff --git a/tests/lanczos/Test_dwf_compressed_lanczos_reorg_synthetic.cc b/tests/lanczos/Test_dwf_compressed_lanczos_reorg_synthetic.cc new file mode 100644 index 00000000..132dff4e --- /dev/null +++ b/tests/lanczos/Test_dwf_compressed_lanczos_reorg_synthetic.cc @@ -0,0 +1,330 @@ + /************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./tests/Test_dwf_compressed_lanczos_reorg.cc + + Copyright (C) 2017 + +Author: Leans heavily on Christoph Lehner's code +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +/* + * Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features + * in Grid that were intended to be used to support blocked Aggregates, from + */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +class ProjectedHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedHermOp(LinearOperatorBase& linop, Aggregation &aggregate) : + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + FineField fin(FineGrid); + FineField fout(FineGrid); + + _Aggregate.PromoteFromSubspace(in,fin); + _Linop.HermOp(fin,fout); + _Aggregate.ProjectToSubspace(out,fout); + } +}; + +template +class ProjectedFunctionHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, + Aggregation &aggregate) : + _poly(poly), + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + + FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; + FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; + + _Aggregate.PromoteFromSubspace(in,fin); + _poly(_Linop,fin,fout); + _Aggregate.ProjectToSubspace(out,fout); + } +}; + +// Make serializable Lanczos params + +template +class CoarseFineIRL +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice CoarseField; + typedef Lattice FineField; + +private: + GridBase *_CoarseGrid; + GridBase *_FineGrid; + int _checkerboard; + LinearOperatorBase & _FineOp; + Aggregation _Aggregate; + +public: + CoarseFineIRL(GridBase *FineGrid, + GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) : + _CoarseGrid(CoarseGrid), + _FineGrid(FineGrid), + _Aggregate(CoarseGrid,FineGrid,checkerboard), + _FineOp(FineOp), + _checkerboard(checkerboard) + {}; + + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = ::sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void testFine(void) + { + int Nk = nbasis; + _Aggregate.subspace.resize(Nk,_FineGrid); + _Aggregate.subspace[0]=1.0; + _Aggregate.subspace[0].checkerboard=_checkerboard; + normalise(_Aggregate.subspace[0]); + PlainHermOp Op(_FineOp); + for(int k=1;k Cheby(alpha,beta,Npoly); + FunctionHermOp ChebyOp(Cheby,_FineOp); + PlainHermOp Op(_FineOp); + + int Nk = nbasis; + + std::vector eval(Nm); + + FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; + + ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nk,Nk,Nm,resid,MaxIt,betastp,MinRes); + _Aggregate.subspace.resize(Nm,_FineGrid); + IRL.calc(eval,_Aggregate.subspace,src,Nk,false); + _Aggregate.subspace.resize(Nk,_FineGrid); + for(int k=0;k 
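// The coarse-level solve set up just below works entirely through the blocked
// Aggregation: ProjectedHermOp applies out = Project( H * Promote(in) ), i.e. the
// fine operator compressed to the aggregated subspace, and ProjectedFunctionHermOp
// does the same with poly(H) (the Chebyshev filter) in place of H, cf. their
// operator() bodies earlier in this file. Restated as a sketch (variable names
// hypothetical, member calls as defined above):
//
//   _Aggregate.PromoteFromSubspace(c_in, fin);    // CoarseField -> FineField
//   _Linop.HermOp(fin, fout);                     // act with the fine operator
//   _Aggregate.ProjectToSubspace(c_out, fout);    // FineField  -> CoarseField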
Cheby(alpha,beta,Npoly); + ProjectedHermOp Op(_FineOp,_Aggregate); + ProjectedFunctionHermOp ChebyOp(Cheby,_FineOp,_Aggregate); + + std::vector eval(Nm); + std::vector evec(Nm,_CoarseGrid); + + CoarseField src(_CoarseGrid); src=1.0; + + ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,Nk,Nk,Nm,resid,MaxIt,betastp,MinRes); + IRL.calc(eval,evec,src,Nk,false); + + // We got the evalues of the Cheby operator; + // Reconstruct eigenvalues of original operator via Chebyshev inverse + for (int i=0;i, blockSize, + std::string, config, + std::vector < std::complex >, omega, + RealD, mass, + RealD, M5 + ); +}; + +int main (int argc, char ** argv) { + + Grid_init(&argc,&argv); + + CompressedLanczosParams Params; + { + Params.omega.resize(10); + Params.blockSize.resize(5); + XmlWriter writer("Params_template.xml"); + write(writer,"Params",Params); + std::cout << GridLogMessage << " Written Params_template.xml" < blockSize = Params.blockSize; + + // Grids + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector fineLatt = GridDefaultLatt(); + int dims=fineLatt.size(); + assert(blockSize.size()==dims+1); + std::vector coarseLatt(dims); + std::vector coarseLatt5d ; + + for (int d=0;d seeds4({1,2,3,4}); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + SU3::HotConfiguration(RNG4, Umu); + } + std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt() << " Ls: " << Ls << std::endl; + + // ZMobius EO Operator + ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, Params.omega,1.,0.); + SchurDiagTwoOperator HermOp(Ddwf); + + // Eigenvector storage + LanczosParams fine =Params.FineParams; + LanczosParams coarse=Params.CoarseParams; + const int Nm1 = fine.Nm; + const int Nm2 = coarse.Nm; + + std::cout << GridLogMessage << "Keep " << fine.Nk << " full vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << coarse.Nk << " total vectors" << std::endl; + assert(Nm2 >= Nm1); + + const int nbasis= 70; + CoarseFineIRL IRL(FrbGrid,CoarseGrid5rb,HermOp,Odd); + + std::cout << GridLogMessage << "Constructed CoarseFine IRL" << std::endl; + + std::cout << GridLogMessage << "Performing fine grid IRL Nk "<< nbasis<<" Nm "< Date: Mon, 8 Jan 2018 14:06:53 +0000 Subject: [PATCH 139/145] Clean up --- lib/communicator/Communicator_base.h | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index a9b99c17..359846c9 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -184,15 +184,10 @@ public: template void AllToAll(int dim,std::vector &in, std::vector &out){ assert(dim>=0); assert(dim<_ndimension); - int numnode = _processors[dim]; - // std::cerr << " AllToAll in.size() "< Date: Mon, 8 Jan 2018 15:20:26 +0000 Subject: [PATCH 140/145] Allow resize of the shared memory buffers --- TODO | 28 +++++++++++---- lib/communicator/SharedMemory.cc | 40 ++++++++++++++++++++- lib/communicator/SharedMemory.h | 10 ++++-- lib/communicator/SharedMemoryMPI.cc | 52 +++++++++------------------- lib/communicator/SharedMemoryNone.cc | 40 +++++---------------- 5 files changed, 93 insertions(+), 77 deletions(-) diff --git a/TODO b/TODO 
index 95ccf1df..746302ca 100644 --- a/TODO +++ b/TODO @@ -1,16 +1,32 @@ TODO: --------------- -Large item work list: +Code item work list + +a) namespaces & indentation + GRID_BEGIN_NAMESPACE(); + GRID_END_NAMESPACE(); +-- delete QCD namespace + +b) GPU branch +- start branch +- Increase Macro use in core library support; prepare for change +- Audit volume of "device" code +- Virtual function audit +- Start port once Nvidia box is up +- Cut down volume of code for first port? How? + +Physics item work list: 1)- BG/Q port and check ; Andrew says ok. -3a)- RNG I/O in ILDG/SciDAC (minor) -3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet -4)- Physical propagator interface -6)- Multigrid Wilson and DWF, compare to other Multigrid implementations -7)- HDCR resume +2)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet +3)- Physical propagator interface +4)- Multigrid Wilson and DWF, compare to other Multigrid implementations +5)- HDCR resume + ---------------------------- Recent DONE +-- RNG I/O in ILDG/SciDAC (minor) -- Precision conversion and sort out localConvert <-- partial/easy -- Conserved currents (Andrew) -- Split grid diff --git a/lib/communicator/SharedMemory.cc b/lib/communicator/SharedMemory.cc index f9d5e5bc..4682d420 100644 --- a/lib/communicator/SharedMemory.cc +++ b/lib/communicator/SharedMemory.cc @@ -34,7 +34,9 @@ namespace Grid { uint64_t GlobalSharedMemory::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL; int GlobalSharedMemory::Hugepages = 0; -int GlobalSharedMemory::ShmSetup; +int GlobalSharedMemory::_ShmSetup; +int GlobalSharedMemory::_ShmAlloc; +uint64_t GlobalSharedMemory::_ShmAllocBytes; std::vector GlobalSharedMemory::WorldShmCommBufs; @@ -50,5 +52,41 @@ int GlobalSharedMemory::WorldRank; int GlobalSharedMemory::WorldNodes; int GlobalSharedMemory::WorldNode; +void GlobalSharedMemory::SharedMemoryFree(void) +{ + assert(_ShmAlloc); + assert(_ShmAllocBytes>0); + for(int r=0;r= heap_size) { + std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < &processors,Grid_MPI_Comm & optimal_comm) @@ -180,8 +182,8 @@ void GlobalSharedMemory::OptimalCommunicator(const std::vector &processors, #ifdef GRID_MPI3_SHMMMAP void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) { - GlobalSharedMemory::MAX_MPI_SHM_BYTES = bytes; - assert(ShmSetup==0); ShmSetup=1; + assert(_ShmSetup==1); + assert(_ShmAlloc==0); ////////////////////////////////////////////////////////////////////////////////////////////////////////// // allocate the shared windows for our group ////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -214,8 +216,11 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) perror("failed mmap"); assert(0); } assert(((uint64_t)ptr&0x3F)==0); + close(fd); WorldShmCommBufs[r] =ptr; } + _ShmAlloc=1; + _ShmAllocBytes = bytes; }; #endif // MMAP @@ -227,8 +232,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) //////////////////////////////////////////////////////////////////////////////////////////// void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) { - GlobalSharedMemory::MAX_MPI_SHM_BYTES = bytes; - assert(ShmSetup==0); ShmSetup=1; + assert(_ShmSetup==1); + assert(_ShmAlloc==0); MPI_Barrier(WorldShmComm); WorldShmCommBufs.resize(WorldShmSize); @@ -258,6 +263,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) 
assert(((uint64_t)ptr&0x3F)==0); WorldShmCommBufs[r] =ptr; + close(fd); } } @@ -277,17 +283,15 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) if ( ptr == MAP_FAILED ) { perror("failed mmap"); assert(0); } assert(((uint64_t)ptr&0x3F)==0); WorldShmCommBufs[r] =ptr; + + close(fd); } } + _ShmAlloc=1; + _ShmAllocBytes = bytes; } #endif -void GlobalSharedMemory::SharedMemoryFree(void) -{ - assert(ShmSetup); - assert(0); // unimplemented -} - //////////////////////////////////////////////////////// // Global shared functionality finished // Now move to per communicator functionality @@ -310,7 +314,8 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) ////////////////////////////////////////////////////////////////////// // Map ShmRank to WorldShmRank and use the right buffer ////////////////////////////////////////////////////////////////////// - heap_size = GlobalSharedMemory::MAX_MPI_SHM_BYTES; + assert (GlobalSharedMemory::ShmAlloc()==1); + heap_size = GlobalSharedMemory::ShmAllocBytes(); for(int r=0;r= heap_size) { - std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < &processors,Grid_MPI_Comm & optimal_comm) @@ -56,7 +58,8 @@ void GlobalSharedMemory::OptimalCommunicator(const std::vector &processors, void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) { void * ShmCommBuf ; - MAX_MPI_SHM_BYTES=bytes; + assert(_ShmSetup==1); + assert(_ShmAlloc==0); int mmap_flag =0; #ifdef MAP_ANONYMOUS mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; @@ -77,20 +80,17 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) #endif bzero(ShmCommBuf,bytes); WorldShmCommBufs[0] = ShmCommBuf; + _ShmAllocBytes=bytes; + _ShmAlloc=1; }; -void GlobalSharedMemory::SharedMemoryFree(void) -{ - assert(ShmSetup); - assert(0); // unimplemented -} - //////////////////////////////////////////////////////// // Global shared functionality finished // Now move to per communicator functionality //////////////////////////////////////////////////////// void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) { + assert(GlobalSharedMemory::ShmAlloc()==1); ShmRanks.resize(1); ShmCommBufs.resize(1); ShmRanks[0] = 0; @@ -100,7 +100,7 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) // Map ShmRank to WorldShmRank and use the right buffer ////////////////////////////////////////////////////////////////////// ShmCommBufs[0] = GlobalSharedMemory::WorldShmCommBufs[0]; - heap_size = GlobalSharedMemory::MAX_MPI_SHM_BYTES; + heap_size = GlobalSharedMemory::ShmAllocBytes(); ShmBufferFreeAll(); return; } @@ -114,10 +114,6 @@ void SharedMemory::ShmBarrier(void){ return ; } ////////////////////////////////////////////////////////////////////////////////////////////////////////// void SharedMemory::SharedMemoryTest(void) { return; } -void *SharedMemory::ShmBufferSelf(void) -{ - return ShmCommBufs[ShmRank]; -} void *SharedMemory::ShmBuffer(int rank) { return NULL; @@ -127,24 +123,4 @@ void *SharedMemory::ShmBufferTranslate(int rank,void * local_p) return NULL; } -///////////////////////////////// -// Alloc, free shmem region ; common to MPI and none? 
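// Net effect of this change: the shared-heap bump allocator is now defined once in
// SharedMemory.cc, and allocation state is published through _ShmSetup/_ShmAlloc/
// _ShmAllocBytes instead of mutating MAX_MPI_SHM_BYTES at allocation time. The
// ordering the new asserts encode is roughly (communicator names illustrative):
//
//   GlobalSharedMemory::Init(world);                        // sets _ShmSetup
//   GlobalSharedMemory::SharedMemoryAllocate(bytes, flags); // sets _ShmAlloc, _ShmAllocBytes
//   ...
//   SetCommunicator(comm);   // asserts ShmAlloc()==1, heap_size = ShmAllocBytes()
//
// which is what lets the shared buffers be sized after start-up.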
-///////////////////////////////// -void *SharedMemory::ShmBufferMalloc(size_t bytes){ - void *ptr = (void *)heap_top; - heap_top += bytes; - heap_bytes+= bytes; - if (heap_bytes >= heap_size) { - std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < Date: Wed, 10 Jan 2018 10:59:58 +0000 Subject: [PATCH 141/145] Hadrons: result file macro with trajectory number --- extras/Hadrons/Global.cc | 7 +++++++ extras/Hadrons/Global.hpp | 13 +++++++++---- extras/Hadrons/Modules/MContraction/Baryon.hpp | 2 +- extras/Hadrons/Modules/MContraction/DiscLoop.hpp | 2 +- extras/Hadrons/Modules/MContraction/Gamma3pt.hpp | 2 +- extras/Hadrons/Modules/MContraction/Meson.hpp | 2 +- .../Modules/MContraction/WeakHamiltonianEye.cc | 2 +- .../Modules/MContraction/WeakHamiltonianNonEye.cc | 2 +- .../Modules/MContraction/WeakNeutral4ptDisc.cc | 2 +- extras/Hadrons/Modules/MScalar/ChargedProp.cc | 2 +- 10 files changed, 24 insertions(+), 12 deletions(-) diff --git a/extras/Hadrons/Global.cc b/extras/Hadrons/Global.cc index 942a4243..9a90a08c 100644 --- a/extras/Hadrons/Global.cc +++ b/extras/Hadrons/Global.cc @@ -67,3 +67,10 @@ std::string Hadrons::typeName(const std::type_info *info) return name; } + +// default writers/readers ///////////////////////////////////////////////////// +#ifdef HAVE_HDF5 +const std::string Hadrons::resultFileExt = "h5"; +#else +const std::string Hadrons::resultFileExt = "xml"; +#endif diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp index 274e1934..fc069ed6 100644 --- a/extras/Hadrons/Global.hpp +++ b/extras/Hadrons/Global.hpp @@ -167,14 +167,19 @@ std::string typeName(void) } // default writers/readers +extern const std::string resultFileExt; + #ifdef HAVE_HDF5 -typedef Hdf5Reader CorrReader; -typedef Hdf5Writer CorrWriter; +typedef Hdf5Reader ResultReader; +typedef Hdf5Writer ResultWriter; #else -typedef XmlReader CorrReader; -typedef XmlWriter CorrWriter; +typedef XmlReader ResultReader; +typedef XmlWriter ResultWriter; #endif +#define RESULT_FILE_NAME(name) \ +name + "." + std::to_string(vm().getTrajectory()) + "." + resultFileExt + END_HADRONS_NAMESPACE #include diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index 625c7108..8966d95b 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -122,7 +122,7 @@ void TBaryon::execute(void) << " quarks '" << par().q1 << "', '" << par().q2 << "', and '" << par().q3 << "'" << std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q1 = envGet(PropagatorField1, par().q1); auto &q2 = envGet(PropagatorField2, par().q2); auto &q3 = envGet(PropagatorField3, par().q2); diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index 3d08f0eb..539abbbb 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -119,7 +119,7 @@ void TDiscLoop::execute(void) << "' using '" << par().q_loop << "' with " << par().gamma << " insertion." 
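// With the renaming above, these modules now open their writers through
// RESULT_FILE_NAME(par().output), which appends the trajectory number and the
// compile-time extension: e.g. (path hypothetical) output = "mesons/disc" at
// trajectory 1500 becomes "mesons/disc.1500.h5" when HDF5 is enabled and
// "mesons/disc.1500.xml" otherwise, cf. resultFileExt in Global.cc and
// vm().getTrajectory() in the macro. Unrelated to the renaming: Baryon.hpp above
// binds q3 to par().q2, which looks like a copy-paste slip for par().q3.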
<< std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q_loop = envGet(PropagatorField, par().q_loop); Gamma gamma(par().gamma); std::vector buf; diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index 68701aeb..b4327a13 100644 --- a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -153,7 +153,7 @@ void TGamma3pt::execute(void) // Initialise variables. q2 and q3 are normal propagators, q1 may be // sink smeared. - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q1 = envGet(SlicedPropagator1, par().q1); auto &q2 = envGet(PropagatorField2, par().q2); auto &q3 = envGet(PropagatorField2, par().q3); diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index 5cf504e3..0197534d 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -172,7 +172,7 @@ void TMeson::execute(void) << " quarks '" << par().q1 << "' and '" << par().q2 << "'" << std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); std::vector buf; std::vector result; Gamma g5(Gamma::Algebra::Gamma5); diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc index b79c09e7..1d257fc7 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc @@ -104,7 +104,7 @@ void TWeakHamiltonianEye::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q1 = envGet(SlicedPropagator, par().q1); auto &q2 = envGet(PropagatorField, par().q2); auto &q3 = envGet(PropagatorField, par().q3); diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc index e66b6ee7..2ad2e7dc 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc @@ -104,7 +104,7 @@ void TWeakHamiltonianNonEye::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q1 = envGet(PropagatorField, par().q1); auto &q2 = envGet(PropagatorField, par().q2); auto &q3 = envGet(PropagatorField, par().q3); diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc index e0a00472..2c94b2ba 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc @@ -104,7 +104,7 @@ void TWeakNeutral4ptDisc::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." 
<< std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); auto &q1 = envGet(PropagatorField, par().q1); auto &q2 = envGet(PropagatorField, par().q2); auto &q3 = envGet(PropagatorField, par().q3); diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.cc b/extras/Hadrons/Modules/MScalar/ChargedProp.cc index da82617f..1470f1ad 100644 --- a/extras/Hadrons/Modules/MScalar/ChargedProp.cc +++ b/extras/Hadrons/Modules/MScalar/ChargedProp.cc @@ -133,7 +133,7 @@ void TChargedProp::execute(void) LOG(Message) << "Saving zero-momentum projection to '" << filename << "'..." << std::endl; - CorrWriter writer(filename); + ResultWriter writer(RESULT_FILE_NAME(par().output)); std::vector vecBuf; std::vector result; From 29f026c3758b6e5c1cd2fcaf6f11066f015d0284 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 10 Jan 2018 11:01:03 +0000 Subject: [PATCH 142/145] Hadrons: scalar SU(N) tr(phi^n) 1-pt function --- extras/Hadrons/Modules.hpp | 30 +--- extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp | 155 ++++++++++++++++++++ extras/Hadrons/make_module_list.sh | 30 ---- extras/Hadrons/modules.inc | 1 + 4 files changed, 157 insertions(+), 59 deletions(-) create mode 100644 extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index e50d2b0b..523ac101 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -1,32 +1,3 @@ -/************************************************************************************* - -Grid physics library, www.github.com/paboyle/Grid - -Source file: extras/Hadrons/Modules.hpp - -Copyright (C) 2015-2018 - -Author: Antonin Portelli -Author: Lanny91 - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along -with this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -See the full license in the file "LICENSE" in the top level distribution directory -*************************************************************************************/ -/* END LEGAL */ - #include #include #include @@ -56,5 +27,6 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include #include +#include #include #include diff --git a/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp new file mode 100644 index 00000000..8c6bead7 --- /dev/null +++ b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp @@ -0,0 +1,155 @@ +#ifndef Hadrons_MScalarSUN_TrPhi_hpp_ +#define Hadrons_MScalarSUN_TrPhi_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +/****************************************************************************** + * TrPhi * + ******************************************************************************/ +BEGIN_MODULE_NAMESPACE(MScalarSUN) + +class TrPhiPar: Serializable +{ +public: + GRID_SERIALIZABLE_CLASS_MEMBERS(TrPhiPar, + std::string, field, + unsigned int, maxPow, + std::string, output); +}; + +template +class TTrPhi: public Module +{ +public: + typedef typename SImpl::Field Field; + typedef typename SImpl::ComplexField ComplexField; + class Result: Serializable + { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(Result, + std::string, op, + Complex, value); + }; +public: + // constructor + TTrPhi(const std::string name); + // destructor + virtual ~TTrPhi(void) = default; + // dependency relation + virtual std::vector getInput(void); + virtual std::vector getOutput(void); + // setup + virtual void setup(void); + // execution + virtual void execute(void); +private: + // output name generator + std::string outName(const unsigned int n); +}; + +MODULE_REGISTER_NS(TrPhiSU2, TTrPhi>, MScalarSUN); +MODULE_REGISTER_NS(TrPhiSU3, TTrPhi>, MScalarSUN); +MODULE_REGISTER_NS(TrPhiSU4, TTrPhi>, MScalarSUN); +MODULE_REGISTER_NS(TrPhiSU5, TTrPhi>, MScalarSUN); +MODULE_REGISTER_NS(TrPhiSU6, TTrPhi>, MScalarSUN); + +/****************************************************************************** + * TTrPhi implementation * + ******************************************************************************/ +// constructor ///////////////////////////////////////////////////////////////// +template +TTrPhi::TTrPhi(const std::string name) +: Module(name) +{} + +// dependencies/products /////////////////////////////////////////////////////// +template +std::vector TTrPhi::getInput(void) +{ + std::vector in = {par().field}; + + return in; +} + +template +std::vector TTrPhi::getOutput(void) +{ + std::vector out; + + for (unsigned int n = 2; n <= par().maxPow; n += 2) + { + out.push_back(outName(n)); + } + + return out; +} + +// setup /////////////////////////////////////////////////////////////////////// +template +void TTrPhi::setup(void) +{ + if (par().maxPow < 2) + { + HADRON_ERROR(Size, "'maxPow' should be at least equal to 2"); + } + envTmpLat(Field, "phi2"); + envTmpLat(Field, "buf"); + for (unsigned int n = 2; n <= par().maxPow; n += 2) + { + envCreateLat(ComplexField, outName(n)); + } +} + +// execution /////////////////////////////////////////////////////////////////// +template +void TTrPhi::execute(void) +{ + LOG(Message) << "Computing tr(phi^n) for n even up to " << par().maxPow + << "..." 
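// The loop that follows keeps a running product: buf starts at 1, phi2 is set to
// -phi*phi, and every pass multiplies buf by phi2, so after the step labelled n
// the lattice field registered under outName(n) holds tr[(-phi*phi)^(n/2)] site
// by site, and (when par().output is set) the Result record stores its volume
// sum. For example (module name hypothetical) maxPow = 6 on an instance called
// "trphi" produces the fields "trphi_2", "trphi_4" and "trphi_6", plus three
// entries written under "trphi" in the result file.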
<< std::endl; + + std::vector result; + auto &phi = envGet(Field, par().field); + + envGetTmp(Field, phi2); + envGetTmp(Field, buf); + buf = 1.; + phi2 = -phi*phi; + for (unsigned int n = 2; n <= par().maxPow; n += 2) + { + auto &phin = envGet(ComplexField, outName(n)); + + buf = buf*phi2; + phin = trace(buf); + if (!par().output.empty()) + { + Result r; + + r.op = "phi" + std::to_string(n); + r.value = TensorRemove(sum(phin)); + result.push_back(r); + } + } + if (result.size() > 0) + { + ResultWriter writer(RESULT_FILE_NAME(par().output)); + + write(writer, "trphi", result); + } +} + +// output name generator /////////////////////////////////////////////////////// +template +std::string TTrPhi::outName(const unsigned int n) +{ + return getName() + "_" + std::to_string(n); +} + +END_MODULE_NAMESPACE + +END_HADRONS_NAMESPACE + +#endif // Hadrons_MScalarSUN_TrPhi_hpp_ diff --git a/extras/Hadrons/make_module_list.sh b/extras/Hadrons/make_module_list.sh index 8c6fa4da..ddc56ff6 100755 --- a/extras/Hadrons/make_module_list.sh +++ b/extras/Hadrons/make_module_list.sh @@ -7,36 +7,6 @@ echo 'modules_hpp =\' >> modules.inc find Modules -name '*.hpp' -type f -print | sed 's/^/ /;$q;s/$/ \\/' >> modules.inc echo '' >> modules.inc rm -f Modules.hpp -echo "/************************************************************************************* - -Grid physics library, www.github.com/paboyle/Grid - -Source file: extras/Hadrons/Modules.hpp - -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 - -Author: Antonin Portelli - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License along -with this program; if not, write to the Free Software Foundation, Inc., -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -See the full license in the file \"LICENSE\" in the top level distribution directory -*************************************************************************************/ -/* END LEGAL */ -" > Modules.hpp for f in `find Modules -name '*.hpp'`; do echo "#include " >> Modules.hpp done diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 6e1ef6dc..00ef323f 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -39,6 +39,7 @@ modules_hpp =\ Modules/MScalar/ChargedProp.hpp \ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ + Modules/MScalarSUN/TrPhi.hpp \ Modules/MIO/LoadNersc.hpp \ Modules/MIO/LoadBinary.hpp From b7cd7213085c7050f17c81ba2757c1d8ae63c914 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 10 Jan 2018 11:25:59 +0000 Subject: [PATCH 143/145] Hadrons: scalar SU(N) tr(mag^n) --- extras/Hadrons/Modules.hpp | 1 + extras/Hadrons/Modules/MScalarSUN/TrMag.hpp | 119 ++++++++++++++++++++ extras/Hadrons/modules.inc | 1 + 3 files changed, 121 insertions(+) create mode 100644 extras/Hadrons/Modules/MScalarSUN/TrMag.hpp diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 523ac101..1d059a79 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp new file mode 100644 index 00000000..f33784fa --- /dev/null +++ b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp @@ -0,0 +1,119 @@ +#ifndef Hadrons_MScalarSUN_TrMag_hpp_ +#define Hadrons_MScalarSUN_TrMag_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +/****************************************************************************** + * TrMag * + ******************************************************************************/ +BEGIN_MODULE_NAMESPACE(MScalarSUN) + +class TrMagPar: Serializable +{ +public: + GRID_SERIALIZABLE_CLASS_MEMBERS(TrMagPar, + std::string, field, + unsigned int, maxPow, + std::string, output); +}; + +template +class TTrMag: public Module +{ +public: + typedef typename SImpl::Field Field; + typedef typename SImpl::ComplexField ComplexField; + class Result: Serializable + { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(Result, + std::string, op, + Real, value); + }; +public: + // constructor + TTrMag(const std::string name); + // destructor + virtual ~TTrMag(void) = default; + // dependency relation + virtual std::vector getInput(void); + virtual std::vector getOutput(void); + // setup + virtual void setup(void); + // execution + virtual void execute(void); +}; + +MODULE_REGISTER_NS(TrMagSU2, TTrMag>, MScalarSUN); +MODULE_REGISTER_NS(TrMagSU3, TTrMag>, MScalarSUN); +MODULE_REGISTER_NS(TrMagSU4, TTrMag>, MScalarSUN); +MODULE_REGISTER_NS(TrMagSU5, TTrMag>, MScalarSUN); +MODULE_REGISTER_NS(TrMagSU6, TTrMag>, MScalarSUN); + +/****************************************************************************** + * TTrMag implementation * + ******************************************************************************/ +// constructor ///////////////////////////////////////////////////////////////// +template +TTrMag::TTrMag(const std::string name) +: Module(name) +{} + +// dependencies/products /////////////////////////////////////////////////////// +template +std::vector TTrMag::getInput(void) +{ + std::vector in = {par().field}; + + return in; +} + +template +std::vector TTrMag::getOutput(void) +{ + std::vector out = {}; + + return out; +} + +// 
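// Contrast with TrPhi: getOutput() above is empty because no lattice fields are
// produced here. execute() volume-sums the field first, m = sum(phi), then
// iterates mn <- mn*(-m*m), so the record labelled "tr(mag^n)" holds
// Re tr[(-m^2)^(n/2)], one number per even power, written straight to the file
// named by RESULT_FILE_NAME(par().output).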
setup /////////////////////////////////////////////////////////////////////// +template +void TTrMag::setup(void) +{} + +// execution /////////////////////////////////////////////////////////////////// +template +void TTrMag::execute(void) +{ + LOG(Message) << "Computing tr(mag^n) for n even up to " << par().maxPow + << "..." << std::endl; + + std::vector result; + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &phi = envGet(Field, par().field); + + auto m2 = sum(phi), mn = m2; + + m2 = -m2*m2; + mn = 1.; + for (unsigned int n = 2; n <= par().maxPow; n += 2) + { + Result r; + + mn = mn*m2; + r.op = "tr(mag^" + std::to_string(n) + ")"; + r.value = TensorRemove(trace(mn)).real(); + result.push_back(r); + } + write(writer, "trmag", result); +} + +END_MODULE_NAMESPACE + +END_HADRONS_NAMESPACE + +#endif // Hadrons_MScalarSUN_TrMag_hpp_ diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 00ef323f..cea4dc2a 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -39,6 +39,7 @@ modules_hpp =\ Modules/MScalar/ChargedProp.hpp \ Modules/MAction/DWF.hpp \ Modules/MAction/Wilson.hpp \ + Modules/MScalarSUN/TrMag.hpp \ Modules/MScalarSUN/TrPhi.hpp \ Modules/MIO/LoadNersc.hpp \ Modules/MIO/LoadBinary.hpp From d9d1f43ba208fed87fae930e182e4b6cd550da6f Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 10 Jan 2018 11:29:49 +0000 Subject: [PATCH 144/145] Hadrons: code cleaning --- extras/Hadrons/Modules.hpp | 28 ++++++++++++++++ extras/Hadrons/Modules/MScalarSUN/TrMag.hpp | 31 +++++++++++++++-- extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp | 37 ++++++++++++++++++--- 3 files changed, 89 insertions(+), 7 deletions(-) diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index 1d059a79..eea16839 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -1,3 +1,31 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli +Author: Lanny91 + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #include #include #include diff --git a/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp index f33784fa..96eb794e 100644 --- a/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp +++ b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalarSUN/TrMag.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #ifndef Hadrons_MScalarSUN_TrMag_hpp_ #define Hadrons_MScalarSUN_TrMag_hpp_ @@ -8,7 +35,7 @@ BEGIN_HADRONS_NAMESPACE /****************************************************************************** - * TrMag * + * Module to compute tr(mag^n) * ******************************************************************************/ BEGIN_MODULE_NAMESPACE(MScalarSUN) @@ -55,7 +82,7 @@ MODULE_REGISTER_NS(TrMagSU5, TTrMag>, MScalarSUN); MODULE_REGISTER_NS(TrMagSU6, TTrMag>, MScalarSUN); /****************************************************************************** - * TTrMag implementation * + * TTrMag implementation * ******************************************************************************/ // constructor ///////////////////////////////////////////////////////////////// template diff --git a/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp index 8c6bead7..4586663d 100644 --- a/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp +++ b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp @@ -1,3 +1,30 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #ifndef Hadrons_MScalarSUN_TrPhi_hpp_ #define Hadrons_MScalarSUN_TrPhi_hpp_ @@ -8,7 +35,7 @@ BEGIN_HADRONS_NAMESPACE /****************************************************************************** - * TrPhi * + * Module to compute tr(phi^n) * ******************************************************************************/ BEGIN_MODULE_NAMESPACE(MScalarSUN) @@ -32,7 +59,7 @@ public: public: GRID_SERIALIZABLE_CLASS_MEMBERS(Result, std::string, op, - Complex, value); + Real, value); }; public: // constructor @@ -58,7 +85,7 @@ MODULE_REGISTER_NS(TrPhiSU5, TTrPhi>, MScalarSUN); MODULE_REGISTER_NS(TrPhiSU6, TTrPhi>, MScalarSUN); /****************************************************************************** - * TTrPhi implementation * + * TTrPhi implementation * ******************************************************************************/ // constructor ///////////////////////////////////////////////////////////////// template @@ -128,8 +155,8 @@ void TTrPhi::execute(void) { Result r; - r.op = "phi" + std::to_string(n); - r.value = TensorRemove(sum(phin)); + r.op = "tr(phi^" + std::to_string(n) + ")"; + r.value = TensorRemove(sum(phin)).real(); result.push_back(r); } } From ec16eacc6a70abdaf248a13dd82df2a619e8e9f9 Mon Sep 17 00:00:00 2001 From: Antonin Portelli Date: Wed, 10 Jan 2018 22:10:58 +0000 Subject: [PATCH 145/145] Hadrons: scalar SU(N) 2-pt function --- extras/Hadrons/Modules.hpp | 1 + .../Hadrons/Modules/MScalarSUN/TwoPoint.hpp | 184 ++++++++++++++++++ extras/Hadrons/modules.inc | 1 + 3 files changed, 186 insertions(+) create mode 100644 extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index eea16839..5834fb3f 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -56,6 +56,7 @@ See the full license in the file "LICENSE" in the top level distribution directo #include #include #include +#include #include #include #include diff --git a/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp b/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp new file mode 100644 index 00000000..abfbf609 --- /dev/null +++ b/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp @@ -0,0 +1,184 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef Hadrons_MScalarSUN_TwoPoint_hpp_ +#define Hadrons_MScalarSUN_TwoPoint_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +/****************************************************************************** + * 2-pt functions for a given set of operators * + ******************************************************************************/ +BEGIN_MODULE_NAMESPACE(MScalarSUN) + +class TwoPointPar: Serializable +{ +public: + GRID_SERIALIZABLE_CLASS_MEMBERS(TwoPointPar, + std::vector, op, + std::string, output); +}; + +template +class TTwoPoint: public Module +{ +public: + typedef typename SImpl::Field Field; + typedef typename SImpl::ComplexField ComplexField; + class Result: Serializable + { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(Result, + std::string, sink, + std::string, source, + std::vector, data); + }; +public: + // constructor + TTwoPoint(const std::string name); + // destructor + virtual ~TTwoPoint(void) = default; + // dependency relation + virtual std::vector getInput(void); + virtual std::vector getOutput(void); + // setup + virtual void setup(void); + // execution + virtual void execute(void); +private: + // make 2-pt function + template + std::vector makeTwoPoint(const std::vector &sink, + const std::vector &source); +}; + +MODULE_REGISTER_NS(TwoPointSU2, TTwoPoint>, MScalarSUN); +MODULE_REGISTER_NS(TwoPointSU3, TTwoPoint>, MScalarSUN); +MODULE_REGISTER_NS(TwoPointSU4, TTwoPoint>, MScalarSUN); +MODULE_REGISTER_NS(TwoPointSU5, TTwoPoint>, MScalarSUN); +MODULE_REGISTER_NS(TwoPointSU6, TTwoPoint>, MScalarSUN); + +/****************************************************************************** + * TTwoPoint implementation * + ******************************************************************************/ +// constructor ///////////////////////////////////////////////////////////////// +template +TTwoPoint::TTwoPoint(const std::string name) +: Module(name) +{} + +// dependencies/products /////////////////////////////////////////////////////// +template +std::vector TTwoPoint::getInput(void) +{ + return par().op; +} + +template +std::vector TTwoPoint::getOutput(void) +{ + std::vector out = {}; + + return out; +} + +// setup /////////////////////////////////////////////////////////////////////// +template +void TTwoPoint::setup(void) +{ + const unsigned int nt = env().getDim().back(); + envTmp(std::vector>, "slicedOp", 1, par().op.size(), + std::vector(nt)); +} + +// execution /////////////////////////////////////////////////////////////////// +template +void TTwoPoint::execute(void) +{ + LOG(Message) << "Computing 2-point functions for operators:" << std::endl; + for (auto &o: par().op) + { + LOG(Message) << " '" << o << "'" << std::endl; + } + + ResultWriter writer(RESULT_FILE_NAME(par().output)); + const unsigned int nd = env().getDim().size(); + std::vector result; + + envGetTmp(std::vector>, slicedOp); + for (unsigned int i = 0; i < par().op.size(); ++i) + { + auto &op = envGet(ComplexField, par().op[i]); + + sliceSum(op, slicedOp[i], nd - 1); + } + for (unsigned int i = 0; i < par().op.size(); ++i) + for (unsigned int j = 0; j < par().op.size(); ++j) + { + Result r; + + r.sink = par().op[i]; + r.source = par().op[j]; + r.data = makeTwoPoint(slicedOp[i], slicedOp[j]); + result.push_back(r); + } + write(writer, "twopt", result); +} + +// make 2-pt function 
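// makeTwoPoint, defined next, averages the product of two time-sliced operators
// over translations,
//
//   C_ij(dt) = (1/nt) * sum_t tr[ O_i(t+dt) O_j(t) ] ,   (t+dt) taken mod nt,
//
// using the slicedOp sums cached by execute() above; every (sink, source) pair of
// the requested operators gets one such correlator in the "twopt" record.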
//////////////////////////////////////////////////////////
+template
+template
+std::vector TTwoPoint::makeTwoPoint(
+                                   const std::vector &sink,
+                                   const std::vector &source)
+{
+    assert(sink.size() == source.size());
+
+    unsigned int nt = sink.size();
+    std::vector res(nt, 0.);
+
+    for (unsigned int dt = 0; dt < nt; ++dt)
+    {
+        for (unsigned int t = 0; t < nt; ++t)
+        {
+            res[dt] += TensorRemove(trace(sink[(t+dt)%nt]*source[t]));
+        }
+        res[dt] *= 1./static_cast(nt);
+    }
+
+    return res;
+}
+
+END_MODULE_NAMESPACE
+
+END_HADRONS_NAMESPACE
+
+#endif // Hadrons_MScalarSUN_TwoPoint_hpp_
diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc
index cea4dc2a..b1ccb8cc 100644
--- a/extras/Hadrons/modules.inc
+++ b/extras/Hadrons/modules.inc
@@ -40,6 +40,7 @@ modules_hpp =\
 Modules/MAction/DWF.hpp \
 Modules/MAction/Wilson.hpp \
 Modules/MScalarSUN/TrMag.hpp \
+ Modules/MScalarSUN/TwoPoint.hpp \
 Modules/MScalarSUN/TrPhi.hpp \
 Modules/MIO/LoadNersc.hpp \
 Modules/MIO/LoadBinary.hpp
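As a self-contained illustration of the averaging done in TTwoPoint::makeTwoPoint, the sketch below reproduces the (t+dt)%nt bookkeeping with plain complex numbers standing in for the traced operator time slices; it uses toy data and locally invented names only, and is not part of the patch series.

#include <cmath>
#include <complex>
#include <iostream>
#include <vector>

int main(void)
{
    typedef std::complex<double> Complex;

    const unsigned int   nt    = 8;
    const double         twoPi = 2.*std::acos(-1.);
    std::vector<Complex> sink(nt), source(nt), corr(nt, 0.);

    // toy time-sliced "operators": a single cosine mode
    for (unsigned int t = 0; t < nt; ++t)
    {
        sink[t] = source[t] = Complex(std::cos(twoPi*t/nt), 0.);
    }
    // C(dt) = (1/nt) sum_t sink[(t+dt)%nt]*source[t], cf. makeTwoPoint above
    for (unsigned int dt = 0; dt < nt; ++dt)
    {
        for (unsigned int t = 0; t < nt; ++t)
        {
            corr[dt] += sink[(t + dt) % nt]*source[t];
        }
        corr[dt] *= 1./static_cast<double>(nt);
        std::cout << "C(" << dt << ") = " << corr[dt].real() << std::endl;
    }

    return 0;
}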