/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid 

Source file: extras/Hadrons/Modules/MContraction/A2AMesonField.hpp

Copyright (C) 2015-2018

Author: Antonin Portelli
Author: Peter Boyle
Author: paboyle

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#ifndef Hadrons_MContraction_A2AMesonField_hpp_
#define Hadrons_MContraction_A2AMesonField_hpp_

#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Solver.hpp>
#include <Grid/Hadrons/A2AVectors.hpp>
#include <Grid/Eigen/unsupported/CXX11/Tensor>

BEGIN_HADRONS_NAMESPACE

/******************************************************************************
 *                     All-to-all meson field creation                        *
 ******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)

typedef std::pair<Gamma::Algebra, Gamma::Algebra> GammaPair;

class A2AMesonFieldPar: Serializable
{
public:
    GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonFieldPar,
                                    int, cacheBlock,
                                    int, block,
                                    std::string, v,
                                    std::string, w,
                                    std::string, output,
                                    std::string, gammas,
                                    std::vector<std::string>, mom);
};

template <typename FImpl>
class TA2AMesonField : public Module<A2AMesonFieldPar>
{
public:
    FERM_TYPE_ALIASES(FImpl,);
    SOLVER_TYPE_ALIASES(FImpl,);
    typedef Eigen::TensorMap<Eigen::Tensor<Complex, 5, Eigen::RowMajor>> MesonField;
public:
    // constructor
    TA2AMesonField(const std::string name);
    // destructor
    virtual ~TA2AMesonField(void){};
    // dependency relation
    virtual std::vector<std::string> getInput(void);
    virtual std::vector<std::string> getOutput(void);
    // setup
    virtual void setup(void);
    // execution
    virtual void execute(void);
private:
    // Arithmetic kernel. Move to Grid??
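    // makeBlock fills mat(m, g, t, i, j) with the colour- and spin-contracted,
    // spatially summed bilinear
    //     sum_{x | x_t = t} exp(2*pi*i p_m.x/L) w_i^dag(x) Gamma_g v_j(x)
    // for every momentum m, spin structure g and (i, j) pair of the current block;
    // mat is an Eigen tensor map onto a preallocated row-major buffer of shape
    // (Nmom, Ngamma, Nt, i-block size, j-block size).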
    void makeBlock(MesonField &mat, 
                   const FermionField *lhs,
                   const FermionField *rhs,
                   std::vector<Gamma::Algebra> gamma,
                   const std::vector<LatticeComplex> &mom,
                   int orthogdim,
                   double &t0, double &t1, double &t2, double &t3);
    // IO
    std::string ioname(unsigned int m, unsigned int g) const;
    std::string filename(unsigned int m, unsigned int g) const;
    void initFile(unsigned int m, unsigned int g);
    void saveBlock(const MesonField &mf, unsigned int m, unsigned int g,
                   unsigned int i, unsigned int j);
private:
    bool                           hasPhase_{false};
    std::string                    momphName_;
    std::vector<Gamma::Algebra>    gamma_;
    std::vector<std::vector<Real>> mom_;
};

MODULE_REGISTER(A2AMesonField, ARG(TA2AMesonField<FIMPL>), MContraction);
MODULE_REGISTER(ZA2AMesonField, ARG(TA2AMesonField<ZFIMPL>), MContraction);

/******************************************************************************
 *                  TA2AMesonField implementation                             *
 ******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TA2AMesonField<FImpl>::TA2AMesonField(const std::string name)
: Module<A2AMesonFieldPar>(name)
, momphName_(name + "_momph")
{
}

// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getInput(void)
{
    std::vector<std::string> in = {par().v, par().w};

    return in;
}

template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getOutput(void)
{
    std::vector<std::string> out = {};

    return out;
}

// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::setup(void)
{
    gamma_.clear();
    mom_.clear();
    if (par().gammas == "all")
    {
        gamma_ = {
            Gamma::Algebra::Gamma5,
            Gamma::Algebra::Identity,
            Gamma::Algebra::GammaX,
            Gamma::Algebra::GammaY,
            Gamma::Algebra::GammaZ,
            Gamma::Algebra::GammaT,
            Gamma::Algebra::GammaXGamma5,
            Gamma::Algebra::GammaYGamma5,
            Gamma::Algebra::GammaZGamma5,
            Gamma::Algebra::GammaTGamma5,
            Gamma::Algebra::SigmaXY,
            Gamma::Algebra::SigmaXZ,
            Gamma::Algebra::SigmaXT,
            Gamma::Algebra::SigmaYZ,
            Gamma::Algebra::SigmaYT,
            Gamma::Algebra::SigmaZT
        };
    }
    else
    {
        gamma_ = strToVec<Gamma::Algebra>(par().gammas);
    }
    for (auto &pstr: par().mom)
    {
        auto p = strToVec<Real>(pstr);

        if (p.size() != env().getNd() - 1)
        {
            HADRONS_ERROR(Size, "Momentum has " + std::to_string(p.size())
                                + " components instead of " 
                                + std::to_string(env().getNd() - 1));
        }
        mom_.push_back(p);
    }

    envCache(std::vector<LatticeComplex>, momphName_, 1, 
             par().mom.size(), env().getGrid());
    envTmpLat(LatticeComplex, "coor");
    // preallocate memory for meson field block
    auto tgp = env().getDim().back()*gamma_.size()*mom_.size();

    envTmp(Vector<Complex>, "mfBuf", 1, tgp*par().block*par().block);
    envTmp(Vector<Complex>, "mfCache", 1, tgp*par().cacheBlock*par().cacheBlock);
}

// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::execute(void)
{
    auto &v = envGet(std::vector<FermionField>, par().v);
    auto &w = envGet(std::vector<FermionField>, par().w);

    int nt         = env().getDim().back();
    int N_i        = w.size();
    int N_j        = v.size();
    int ngamma     = gamma_.size();
    int nmom       = mom_.size();
    int block      = par().block;
    int cacheBlock = par().cacheBlock;

    LOG(Message) << "Computing all-to-all meson fields" << std::endl;
    LOG(Message) << "W: '" << par().w << "' V: '" << par().v << "'" << std::endl;
    LOG(Message) << "Meson field size: " << nt << "*" << N_i << "*" << N_j 
                 << " (" << sizeString(nt*N_i*N_j*sizeof(Complex)) << ")" 
                 << std::endl;
    LOG(Message) << "Momenta:" << std::endl;
    for (auto &p: mom_)
    {
        LOG(Message) << "  " << p << std::endl;
    }
    LOG(Message) << "Spin structures:" << std::endl;
    for (auto &g: gamma_)
    {
        LOG(Message) << "  " << g << std::endl;
    }

    ///////////////////////////////////////////////
    // Momentum setup
    ///////////////////////////////////////////////
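    // Build (and cache across calls) one plane-wave field per requested momentum:
    // ph[j](x) = exp(2*pi*i sum_mu p_mu x_mu / L_mu), with the momentum
    // components p_mu given in units of 2*pi/L_mu.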
    auto &ph = envGet(std::vector<LatticeComplex>, momphName_);

    if (!hasPhase_)
    {
        MODULE_TIMER("Momentum phases");
        for (unsigned int j = 0; j < nmom; ++j)
        {
            Complex           i(0.0,1.0);
            std::vector<Real> p;

            envGetTmp(LatticeComplex, coor);
            ph[j] = zero;
            for(unsigned int mu = 0; mu < mom_[j].size(); mu++)
            {
                LatticeCoordinate(coor, mu);
                ph[j] = ph[j] + (mom_[j][mu]/env().getDim(mu))*coor;
            }
            ph[j] = exp((Real)(2*M_PI)*i*ph[j]);
        }
        hasPhase_ = true;
    }

    //////////////////////////////////////////////////////////////////////////
    // i,j     is the first loop, over SchurBlock factors reusing 5D matrices
    // ii,jj   is the second loop, over cacheBlock factors for high perf contraction
    // iii,jjj are loops within cacheBlock
    // Total index is the sum of these, i+ii+iii etc...
    //////////////////////////////////////////////////////////////////////////
    double flops   = 0.0;
    double bytes   = 0.0;
    double vol     = env().getVolume();
    double t_schur = 0.0;
    double t_contr = 0.0;
    double t_int_0 = 0.0;
    double t_int_1 = 0.0;
    double t_int_2 = 0.0;
    double t_int_3 = 0.0;

    envGetTmp(Vector<Complex>, mfBuf);
    envGetTmp(Vector<Complex>, mfCache);

    double t0    = usecond();
    int NBlock_i = N_i/block + (((N_i % block) != 0) ? 1 : 0);
    int NBlock_j = N_j/block + (((N_j % block) != 0) ? 1 : 0);

    for(int i=0;i<N_i;i+=block)
    for(int j=0;j<N_j;j+=block)
    {
        // W and V vectors for this block^2 set of terms
        int N_ii = std::min(N_i - i, block);
        int N_jj = std::min(N_j - j, block);

        LOG(Message) << "Meson field block " 
                     << j/block + NBlock_j*i/block + 1 
                     << "/" << NBlock_i*NBlock_j << " [" << i << " .. " 
                     << i+N_ii-1 << ", " << j << " .. " << j+N_jj-1 << "]" 
                     << std::endl;

        MesonField mfBlock(mfBuf.data(), nmom, ngamma, nt, N_ii, N_jj);

        // series of cache-blocked chunks of the contractions within this block
        for(int ii=0;ii<N_ii;ii+=cacheBlock)
        for(int jj=0;jj<N_jj;jj+=cacheBlock)
        {
            int N_iii = std::min(N_ii - ii, cacheBlock);
            int N_jjj = std::min(N_jj - jj, cacheBlock);

            MesonField mfCacheBlock(mfCache.data(), nmom, ngamma, nt, N_iii, N_jjj);

            t_contr -= usecond();
            makeBlock(mfCacheBlock, &w[i+ii], &v[j+jj], gamma_, ph, 
                      env().getNd() - 1, t_int_0, t_int_1, t_int_2, t_int_3);
            t_contr += usecond();

            // flop and byte count for the cache-blocked kernel call
            flops += vol*(2.*8. + 6. + 8.*nmom)*N_iii*N_jjj*ngamma;
            bytes += vol*(12.*sizeof(Complex))*N_iii*N_jjj
                     + vol*(2.*sizeof(Complex)*nmom)*N_iii*N_jjj*ngamma;

            // copy the cache block back into the full block buffer
            MODULE_TIMER("Cache copy");
            for(int iii=0;iii<N_iii;iii++)
            for(int jjj=0;jjj<N_jjj;jjj++)
            for(int m=0;m<nmom;m++)
            for(int g=0;g<ngamma;g++)
            for(int t=0;t<nt;t++)
            {
                mfBlock(m,g,t,ii+iii,jj+jjj) = mfCacheBlock(m,g,t,iii,jjj);
            }
        }

        // IO
        double blockSize, ioTime;

        MODULE_TIMER("IO");
        for(int m = 0; m < nmom; m++)
        for(int g = 0; g < ngamma; g++)
        {
            if ((i == 0) and (j == 0))
            {
                initFile(m, g);
            }
            saveBlock(mfBlock, m, g, i, j);
        }
        blockSize = static_cast<double>(nmom*ngamma*nt*N_ii*N_jj*sizeof(Complex));
        ioTime    = static_cast<double>(this->getTimer("IO").count());
        LOG(Message) << "HDF5 IO " << blockSize/ioTime*1.0e6/1024/1024 
                     << " MB/s" << std::endl;
    }

    double nodes    = env().getGrid()->NodeCount();
    double t_kernel = t_int_0 + t_int_1;

    LOG(Message) << "Perf " << flops/(t_kernel)/1.0e3/nodes 
                 << " Gflop/s/node " << std::endl;
    LOG(Message) << "Perf " << bytes/(t_kernel)/1.0e3/nodes 
                 << " GB/s/node " << std::endl;
}

//////////////////////////////////////////////////////////////////////////////////
// Cache blocked arithmetic routine
// Could move to Grid ???
//////////////////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::makeBlock(MesonField &mat, 
                                      const FermionField *lhs_wi,
                                      const FermionField *rhs_vj,
                                      std::vector<Gamma::Algebra> gamma,
                                      const std::vector<LatticeComplex> &mom,
                                      int orthogdim,
                                      double &t0, double &t1, 
                                      double &t2, double &t3)
{
    typedef typename FImpl::SiteSpinor   vobj;
    typedef typename vobj::scalar_object sobj;
    typedef typename vobj::scalar_type   scalar_type;
    typedef typename vobj::vector_type   vector_type;
    typedef iSpinMatrix<vector_type>     SpinMatrix_v;
    typedef iSpinMatrix<scalar_type>     SpinMatrix_s;

    int Lblock = mat.dimension(3);
    int Rblock = mat.dimension(4);

    GridBase *grid = lhs_wi[0]._grid;

    const int Nd    = grid->_ndimension;
    const int Nsimd = grid->Nsimd();

    int Nt     = grid->GlobalDimensions()[orthogdim];
    int Ngamma = gamma.size();
    int Nmom   = mom.size();

    int fd = grid->_fdimensions[orthogdim];
    int ld = grid->_ldimensions[orthogdim];
    int rd = grid->_rdimensions[orthogdim];

    // will locally sum vectors first
    // sum across these down to scalars
    // splitting the SIMD
    int MFrvol = rd*Lblock*Rblock*Nmom;
    int MFlvol = ld*Lblock*Rblock*Nmom;

    Vector<SpinMatrix_v> lvSum(MFrvol);
    parallel_for (int r = 0; r < MFrvol; r++)
    {
        lvSum[r] = zero;
    }

    Vector<SpinMatrix_s> lsSum(MFlvol);
    parallel_for (int r = 0; r < MFlvol; r++)
    {
        lsSum[r] = scalar_type(0.0);
    }

    int e1     = grid->_slice_nblock[orthogdim];
    int e2     = grid->_slice_block [orthogdim];
    int stride = grid->_slice_stride[orthogdim];

    t0 -= usecond();
    MODULE_TIMER("Colour trace * mom.");
    // Nested parallelism would be ok
    // Wasting cores here.
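    // The reduction is staged: (1) for each SIMD-reduced time plane r, accumulate
    // the colour-contracted spin matrix w_i^dag(x) v_j(x), multiplied by each
    // momentum phase, into the vectorised buffer lvSum; (2) extract the SIMD lanes
    // and sum them into the scalar, local-time buffer lsSum; (3) trace lsSum
    // against each Gamma, write into the global-time slot of mat and global-sum
    // over nodes.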
    // Test case r
    parallel_for(int r=0;r<rd;r++)
    {
        int so = r*grid->_ostride[orthogdim]; // base offset for start of plane

        for(int n=0;n<e1;n++)
        for(int b=0;b<e2;b++)
        {
            int ss = so + n*stride + b;

            for(int i=0;i<Lblock;i++)
            {
                auto left = conjugate(lhs_wi[i]._odata[ss]);

                for(int j=0;j<Rblock;j++)
                {
                    SpinMatrix_v vv;
                    auto right = rhs_vj[j]._odata[ss];

                    // colour contraction of w_i^dag(x) and v_j(x), open spin indices
                    for(int s1=0;s1<Ns;s1++)
                    for(int s2=0;s2<Ns;s2++)
                    {
                        vv()(s1,s2)() = left()(s2)(0)*right()(s1)(0)
                                        + left()(s2)(1)*right()(s1)(1)
                                        + left()(s2)(2)*right()(s1)(2);
                    }

                    // after the sitewise product, accumulate for each momentum phase
                    int base = Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r;

                    for(int m=0;m<Nmom;m++)
                    {
                        int idx    = m + base;
                        auto phase = mom[m]._odata[ss];

                        mac(&lvSum[idx], &vv, &phase);
                    }
                }
            }
        }
    }
    t0 += usecond();

    // sum across SIMD lanes in the plane, breaking out the orthogonal direction
    MODULE_TIMER("Local space sum");
    t1 -= usecond();
    parallel_for(int rt=0;rt<rd;rt++)
    {
        std::vector<int>          icoor(Nd);
        std::vector<SpinMatrix_s> extracted(Nsimd);

        for(int i=0;i<Lblock;i++)
        for(int j=0;j<Rblock;j++)
        for(int m=0;m<Nmom;m++)
        {
            int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;

            extract(lvSum[ij_rdx], extracted);
            for(int idx=0;idx<Nsimd;idx++)
            {
                grid->iCoorFromIindex(icoor,idx);

                int ldx    = rt+icoor[orthogdim]*rd;
                int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;

                lsSum[ij_ldx] = lsSum[ij_ldx] + extracted[idx];
            }
        }
    }
    t1 += usecond();

    assert(mat.dimension(0) == Nmom);
    assert(mat.dimension(1) == Ngamma);
    assert(mat.dimension(2) == Nt);
    t2 -= usecond();

    // ld loop and local only??
    MODULE_TIMER("Spin trace");
    int pd = grid->_processors[orthogdim];
    int pc = grid->_processor_coor[orthogdim];
    parallel_for_nest2(int lt=0;lt<ld;lt++)
    {
        for(int pt=0;pt<pd;pt++)
        {
            int t = lt + pt*ld;

            if (pt == pc)
            {
                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int m=0;m<Nmom;m++)
                {
                    int ij_dx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*lt;

                    for(int mu=0;mu<Ngamma;mu++)
                    {
                        mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gamma[mu]));
                    }
                }
            }
            else
            {
                // this time slice lives on another rank: zero it here,
                // the global sum below fills it in
                const scalar_type zz(0.0);

                for(int i=0;i<Lblock;i++)
                for(int j=0;j<Rblock;j++)
                for(int mu=0;mu<Ngamma;mu++)
                for(int m=0;m<Nmom;m++)
                {
                    mat(m,mu,t,i,j) = zz;
                }
            }
        }
    }
    t2 += usecond();

    t3 -= usecond();
    MODULE_TIMER("Global sum");
    grid->GlobalSumVector(&mat(0,0,0,0,0), Nmom*Ngamma*Nt*Lblock*Rblock);
    t3 += usecond();
}

// IO
template <typename FImpl>
std::string TA2AMesonField<FImpl>::ioname(unsigned int m, unsigned int g) const
{
    std::stringstream ss;

    ss << gamma_[g] << "_";
    for (unsigned int mu = 0; mu < mom_[m].size(); ++mu)
    {
        ss << mom_[m][mu] << ((mu == mom_[m].size() - 1) ? "" : "_");
    }

    return ss.str();
}

template <typename FImpl>
std::string TA2AMesonField<FImpl>::filename(unsigned int m, unsigned int g) const
{
    return par().output + "." + std::to_string(vm().getTrajectory()) 
           + "/" + ioname(m, g) + ".h5";
}

template <typename FImpl>
void TA2AMesonField<FImpl>::initFile(unsigned int m, unsigned int g)
{
#ifdef HAVE_HDF5
    std::string f     = filename(m, g);
    GridBase    *grid = env().getGrid();
    auto        &v    = envGet(std::vector<FermionField>, par().v);
    auto        &w    = envGet(std::vector<FermionField>, par().w);
    int         nt    = env().getDim().back();
    int         N_i   = w.size();
    int         N_j   = v.size();

    makeFileDir(f, grid);
    if (grid->IsBoss())
    {
        Hdf5Writer           writer(f);
        std::vector<hsize_t> dim = {static_cast<hsize_t>(nt), 
                                    static_cast<hsize_t>(N_i), 
                                    static_cast<hsize_t>(N_j)};
        H5NS::DataSpace      dataspace(dim.size(), dim.data());
        H5NS::DataSet        dataset;

        push(writer, ioname(m, g));
        write(writer, "momentum", mom_[m]);
        write(writer, "gamma", gamma_[g]);
        auto &group = writer.getGroup();
        dataset     = group.createDataSet("mesonField", Hdf5Type<Complex>::type(), 
                                          dataspace);
    }
#else
    HADRONS_ERROR(Implementation, "meson field I/O needs HDF5 library");
#endif
}

template <typename FImpl>
void TA2AMesonField<FImpl>::saveBlock(const MesonField &mf, unsigned int m, 
                                      unsigned int g, unsigned int i, unsigned int j)
{
#ifdef HAVE_HDF5
    std::string f     = filename(m, g);
    GridBase    *grid = env().getGrid();

    if (grid->IsBoss())
    {
        Hdf5Reader           reader(f);
        hsize_t              nt = mf.dimension(2), Ni = mf.dimension(3), 
                             Nj = mf.dimension(4);
        std::vector<hsize_t> count  = {nt, Ni, Nj},
                             offset = {0, static_cast<hsize_t>(i),
                                       static_cast<hsize_t>(j)},
                             stride = {1, 1, 1},
                             block  = {1, 1, 1};
        H5NS::DataSpace      memspace(count.size(), count.data()), dataspace;
        H5NS::DataSet        dataset;
        size_t               shift;

        push(reader, ioname(m, g));
        auto &group = reader.getGroup();
        dataset     = group.openDataSet("mesonField");
        dataspace   = dataset.getSpace();
        dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data(),
                                  stride.data(), block.data());
        shift = (m*mf.dimension(1) + g)*nt*Ni*Nj;
        dataset.write(mf.data() + shift, Hdf5Type<Complex>::type(), memspace,
                      dataspace);
    }
#else
    HADRONS_ERROR(Implementation, "meson field I/O needs HDF5 library");
#endif
}

END_MODULE_NAMESPACE

END_HADRONS_NAMESPACE

#endif // Hadrons_MContraction_A2AMesonField_hpp_