diff --git a/.gitignore b/.gitignore
index d743ee06..399f2f6b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -93,6 +93,7 @@ build*/*
 *.xcodeproj/*
 build.sh
 .vscode
+*.code-workspace
 
 # Eigen source #
 ################
@@ -122,4 +123,3 @@ make-bin-BUCK.sh
 #####################
 lib/qcd/spin/gamma-gen/*.h
 lib/qcd/spin/gamma-gen/*.cc
-
diff --git a/TODO b/TODO
index 83bfda5e..746302ca 100644
--- a/TODO
+++ b/TODO
@@ -1,20 +1,36 @@
 TODO:
 ---------------
-Large item work list:
+Code item work list
+
+a) namespaces & indentation
+   GRID_BEGIN_NAMESPACE();
+   GRID_END_NAMESPACE();
+-- delete QCD namespace
+
+b) GPU branch
+- start branch
+- Increase Macro use in core library support; prepare for change
+- Audit volume of "device" code
+- Virtual function audit
+- Start port once Nvidia box is up
+- Cut down volume of code for first port? How?
+
+Physics item work list:
 
 1)- BG/Q port and check ; Andrew says ok.
-2)- Christoph's local basis expansion Lanczos --
-3a)- RNG I/O in ILDG/SciDAC (minor)
-3b)- Precision conversion and sort out localConvert <-- partial/easy
-3c)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet
-4)- Physical propagator interface
-5)- Conserved currents
-6)- Multigrid Wilson and DWF, compare to other Multigrid implementations
-7)- HDCR resume
+2)- Consistent linear solver flop count/rate -- PARTIAL, time but no flop/s yet
+3)- Physical propagator interface
+4)- Multigrid Wilson and DWF, compare to other Multigrid implementations
+5)- HDCR resume
 
+----------------------------
 Recent DONE
+-- RNG I/O in ILDG/SciDAC (minor)
+-- Precision conversion and sort out localConvert <-- partial/easy
+-- Conserved currents (Andrew)
+-- Split grid
+-- Christoph's local basis expansion Lanczos
 -- MultiRHS with spread out extra dim
 -- Go through filesystem with SciDAC I/O ; <-- DONE ; bmark cori
 -- Lanczos Remove DenseVector, DenseMatrix; Use Eigen instead. <-- DONE
 -- GaugeFix into central location <-- DONE
diff --git a/benchmarks/Benchmark_comms.cc b/benchmarks/Benchmark_comms.cc
index a270e3fa..29ccf96c 100644
--- a/benchmarks/Benchmark_comms.cc
+++ b/benchmarks/Benchmark_comms.cc
@@ -106,7 +106,7 @@ int main (int argc, char ** argv)
     for(int i=0;i<Nloop;i++){
 
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
 
       ncomm=0;
       for(int mu=0;mu<4;mu++){
@@ -202,7 +202,7 @@ int main (int argc, char ** argv)
       int recv_from_rank;
       {
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
       Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
       Grid.SendToRecvFromBegin(requests,
                                (void *)&xbuf[mu][0],
@@ -215,7 +215,7 @@ int main (int argc, char ** argv)
       comm_proc = mpi_layout[mu]-1;
       {
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
       Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
       Grid.SendToRecvFromBegin(requests,
                                (void *)&xbuf[mu+4][0],
@@ -290,7 +290,7 @@ int main (int argc, char ** argv)
       dbytes=0;
       ncomm=0;
 
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
 
       for(int mu=0;mu<4;mu++){
@@ -383,7 +383,7 @@ int main (int argc, char ** argv)
     for(int i=0;i<Nloop;i++){
 
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
 
       dbytes=0;
       ncomm=0;
       for(int mu=0;mu<4;mu++){
@@ -481,7 +481,7 @@ int main (int argc, char ** argv)
     for(int i=0;i<Nloop;i++){
 
-      std::vector<CartesianCommunicator::CommsRequest_t> requests;
+      std::vector<CommsRequest_t> requests;
 
       dbytes=0;
       ncomm=0;
diff --git a/bootstrap.sh b/bootstrap.sh
index dfb6735d..bdf748df 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -3,9 +3,7 @@
 EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2'
 
 echo "-- deploying Eigen source..."
-wget ${EIGEN_URL} --no-check-certificate
-./scripts/update_eigen.sh `basename ${EIGEN_URL}`
-rm `basename ${EIGEN_URL}`
+wget ${EIGEN_URL} --no-check-certificate && ./scripts/update_eigen.sh `basename ${EIGEN_URL}` && rm `basename ${EIGEN_URL}`
 
 echo '-- generating Make.inc files...'
 ./scripts/filelist
diff --git a/configure.ac b/configure.ac
index b11d6b42..468d9d5f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -337,15 +337,11 @@ case ${ac_PRECISION} in
 esac
 
 ###################### Shared memory allocation technique under MPI3
-AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmget|shmopen|hugetlbfs],
+AC_ARG_ENABLE([shm],[AC_HELP_STRING([--enable-shm=shmopen|hugetlbfs],
               [Select SHM allocation technique])],[ac_SHM=${enable_shm}],[ac_SHM=shmopen])
 
 case ${ac_SHM} in
 
-     shmget)
-     AC_DEFINE([GRID_MPI3_SHMGET],[1],[GRID_MPI3_SHMGET] )
-     ;;
-
      shmopen)
      AC_DEFINE([GRID_MPI3_SHMOPEN],[1],[GRID_MPI3_SHMOPEN] )
      ;;
@@ -367,7 +363,7 @@ AC_ARG_ENABLE([shmpath],[AC_HELP_STRING([--enable-shmpath=path],
               AC_DEFINE_UNQUOTED([GRID_SHM_PATH],["$ac_SHMPATH"],[Path to a hugetlbfs filesystem for MMAPing])
 
 ############### communication type selection
-AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto|mpi3|mpi3-auto|shmem],
+AC_ARG_ENABLE([comms],[AC_HELP_STRING([--enable-comms=none|mpi|mpi-auto],
               [Select communications])],[ac_COMMS=${enable_comms}],[ac_COMMS=none])
 
 case ${ac_COMMS} in
@@ -375,22 +371,10 @@ case ${ac_COMMS} in
      none)
       AC_DEFINE([GRID_COMMS_NONE],[1],[GRID_COMMS_NONE] )
       comms_type='none'
      ;;
-     mpi3*)
+     mpi*)
       AC_DEFINE([GRID_COMMS_MPI3],[1],[GRID_COMMS_MPI3] )
       comms_type='mpi3'
      ;;
-     mpit)
-      AC_DEFINE([GRID_COMMS_MPIT],[1],[GRID_COMMS_MPIT] )
-      comms_type='mpit'
-     ;;
-     mpi*)
-      AC_DEFINE([GRID_COMMS_MPI],[1],[GRID_COMMS_MPI] )
-      comms_type='mpi'
-     ;;
-     shmem)
-      AC_DEFINE([GRID_COMMS_SHMEM],[1],[GRID_COMMS_SHMEM] )
-      comms_type='shmem'
-     ;;
      *)
       AC_MSG_ERROR([${ac_COMMS} unsupported --enable-comms option]);
      ;;
@@ -550,6 +534,7 @@ AC_CONFIG_FILES(tests/forces/Makefile)
 AC_CONFIG_FILES(tests/hadrons/Makefile)
 AC_CONFIG_FILES(tests/hmc/Makefile)
 AC_CONFIG_FILES(tests/solver/Makefile)
+AC_CONFIG_FILES(tests/lanczos/Makefile)
 AC_CONFIG_FILES(tests/smearing/Makefile)
 AC_CONFIG_FILES(tests/qdpxx/Makefile)
 AC_CONFIG_FILES(tests/testu01/Makefile)
diff --git a/extras/Hadrons/Application.cc b/extras/Hadrons/Application.cc
index 90ebcfd7..7ba98ade 100644
--- a/extras/Hadrons/Application.cc
+++ b/extras/Hadrons/Application.cc
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Application.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -43,6 +42,7 @@ using namespace Hadrons;
 // constructors ////////////////////////////////////////////////////////////////
 Application::Application(void)
 {
+    initLogger();
     LOG(Message) << "Modules available:" << std::endl;
     auto list = ModuleFactory::getInstance().getBuilderList();
     for (auto &m: list)
@@ -73,12 +73,6 @@ Application::Application(const std::string parameterFileName)
     parameterFileName_ = parameterFileName;
 }
 
-// environment shortcut ////////////////////////////////////////////////////////
-Environment & Application::env(void) const
-{
-    return Environment::getInstance();
-}
-
 // access //////////////////////////////////////////////////////////////////////
 void Application::setPar(const Application::GlobalPar &par)
 {
@@ -94,14 +88,13 @@ const Application::GlobalPar & Application::getPar(void)
 // execute /////////////////////////////////////////////////////////////////////
 void Application::run(void)
 {
-    if (!parameterFileName_.empty() and (env().getNModule() == 0))
+    if (!parameterFileName_.empty() and (vm().getNModule() == 0))
     {
         parseParameterFile(parameterFileName_);
     }
-    if (!scheduled_)
-    {
-        schedule();
-    }
+    vm().printContent();
+    env().printContent();
+    schedule();
     printSchedule();
     configLoop();
 }
@@ -124,12 +117,20 @@ void Application::parseParameterFile(const std::string parameterFileName)
     LOG(Message) << "Building application from '" << parameterFileName << "'..." << std::endl;
     read(reader, "parameters", par);
     setPar(par);
-    push(reader, "modules");
-    push(reader, "module");
+    if (!push(reader, "modules"))
+    {
+        HADRON_ERROR(Parsing, "Cannot open node 'modules' in parameter file '" 
+                     + parameterFileName + "'");
+    }
+    if (!push(reader, "module"))
+    {
+        HADRON_ERROR(Parsing, "Cannot open node 'modules/module' in parameter file '" 
+                     + parameterFileName + "'");
+    }
     do
     {
         read(reader, "id", id);
-        env().createModule(id.name, id.type, reader);
+        vm().createModule(id.name, id.type, reader);
     } while (reader.nextElement("module"));
     pop(reader);
     pop(reader);
@@ -139,7 +140,7 @@ void Application::saveParameterFile(const std::string parameterFileName)
 {
     XmlWriter          writer(parameterFileName);
     ObjectId           id;
-    const unsigned int nMod = env().getNModule();
+    const unsigned int nMod = vm().getNModule();
 
     LOG(Message) << "Saving application to '" << parameterFileName << "'..." << std::endl;
     write(writer, "parameters", getPar());
@@ -147,10 +148,10 @@ void Application::saveParameterFile(const std::string parameterFileName)
     for (unsigned int i = 0; i < nMod; ++i)
     {
         push(writer, "module");
-        id.name = env().getModuleName(i);
-        id.type = env().getModule(i)->getRegisteredName();
+        id.name = vm().getModuleName(i);
+        id.type = vm().getModule(i)->getRegisteredName();
         write(writer, "id", id);
-        env().getModule(i)->saveParameters(writer, "options");
+        vm().getModule(i)->saveParameters(writer, "options");
         pop(writer);
     }
     pop(writer);
@@ -158,95 +159,13 @@ void Application::saveParameterFile(const std::string parameterFileName)
 }
 
 // schedule computation ////////////////////////////////////////////////////////
-#define MEM_MSG(size)\
-sizeString((size)*locVol_) << " (" << sizeString(size) << "/site)"
-
-#define DEFINE_MEMPEAK \
-GeneticScheduler<unsigned int>::ObjFunc memPeak = \
-[this](const std::vector<unsigned int> &program)\
-{\
-    unsigned int memPeak;\
-    bool         msg;\
-    \
-    msg = HadronsLogMessage.isActive();\
-    HadronsLogMessage.Active(false);\
-    env().dryRun(true);\
-    memPeak = env().executeProgram(program);\
-    env().dryRun(false);\
-    env().freeAll();\
-    HadronsLogMessage.Active(true);\
-    \
-    return memPeak;\
-}
-
 void Application::schedule(void)
 {
-    DEFINE_MEMPEAK;
-
-    // build module dependency graph
-    LOG(Message) << "Building module graph..." << std::endl;
-    auto graph = env().makeModuleGraph();
-    auto con   = graph.getConnectedComponents();
-
-    // constrained topological sort using a genetic algorithm
-    LOG(Message) << "Scheduling computation..." << std::endl;
-    LOG(Message) << "               #module= " << graph.size() << std::endl;
-    LOG(Message) << "       population size= " << par_.genetic.popSize << std::endl;
-    LOG(Message) << "       max. generation= " << par_.genetic.maxGen << std::endl;
-    LOG(Message) << "  max. cst. generation= " << par_.genetic.maxCstGen << std::endl;
-    LOG(Message) << "         mutation rate= " << par_.genetic.mutationRate << std::endl;
-
-    unsigned int               k = 0, gen, prevPeak, nCstPeak = 0;
-    std::random_device         rd;
-    GeneticScheduler<unsigned int>::Parameters par;
-
-    par.popSize      = par_.genetic.popSize;
-    par.mutationRate = par_.genetic.mutationRate;
-    par.seed         = rd();
-    memPeak_         = 0;
-    CartesianCommunicator::BroadcastWorld(0, &(par.seed), sizeof(par.seed));
-    for (unsigned int i = 0; i < con.size(); ++i)
+    if (!scheduled_ and !loadedSchedule_)
     {
-        GeneticScheduler<unsigned int> scheduler(con[i], memPeak, par);
-
-        gen = 0;
-        do
-        {
-            LOG(Debug) << "Generation " << gen << ":" << std::endl;
-            scheduler.nextGeneration();
-            if (gen != 0)
-            {
-                if (prevPeak == scheduler.getMinValue())
-                {
-                    nCstPeak++;
-                }
-                else
-                {
-                    nCstPeak = 0;
-                }
-            }
-
-            prevPeak = scheduler.getMinValue();
-            if (gen % 10 == 0)
-            {
-                LOG(Iterative) << "Generation " << gen << ": "
-                               << MEM_MSG(scheduler.getMinValue()) << std::endl;
-            }
-
-            gen++;
-        } while ((gen < par_.genetic.maxGen)
-                 and (nCstPeak < par_.genetic.maxCstGen));
-        auto &t = scheduler.getMinSchedule();
-        if (scheduler.getMinValue() > memPeak_)
-        {
-            memPeak_ = scheduler.getMinValue();
-        }
-        for (unsigned int j = 0; j < t.size(); ++j)
-        {
-            program_.push_back(t[j]);
-        }
+        program_   = vm().schedule(par_.genetic);
+        scheduled_ = true;
     }
-    scheduled_ = true;
 }
 
 void Application::saveSchedule(const std::string filename)
@@ -256,21 +175,19 @@ void Application::saveSchedule(const std::string filename)
 
     if (!scheduled_)
     {
-        HADRON_ERROR("Computation not scheduled");
+        HADRON_ERROR(Definition, "Computation not scheduled");
     }
     LOG(Message) << "Saving current schedule to '" << filename << "'..."
                  << std::endl;
     for (auto address: program_)
     {
-        program.push_back(env().getModuleName(address));
+        program.push_back(vm().getModuleName(address));
     }
     write(writer, "schedule", program);
 }
 
 void Application::loadSchedule(const std::string filename)
 {
-    DEFINE_MEMPEAK;
-
     TextReader               reader(filename);
     std::vector<std::string> program;
 
@@ -280,24 +197,24 @@ void Application::loadSchedule(const std::string filename)
     program_.clear();
     for (auto &name: program)
    {
-        program_.push_back(env().getModuleAddress(name));
+        program_.push_back(vm().getModuleAddress(name));
     }
-    scheduled_ = true;
-    memPeak_   = memPeak(program_);
+    loadedSchedule_ = true;
 }
 
 void Application::printSchedule(void)
 {
     if (!scheduled_)
     {
-        HADRON_ERROR("Computation not scheduled");
+        HADRON_ERROR(Definition, "Computation not scheduled");
     }
-    LOG(Message) << "Schedule (memory peak: " << MEM_MSG(memPeak_) << "):"
+    auto peak = vm().memoryNeeded(program_);
+    LOG(Message) << "Schedule (memory needed: " << sizeString(peak) << "):"
                  << std::endl;
     for (unsigned int i = 0; i < program_.size(); ++i)
     {
         LOG(Message) << std::setw(4) << i + 1 << ": "
-                     << env().getModuleName(program_[i]) << std::endl;
+                     << vm().getModuleName(program_[i]) << std::endl;
     }
 }
 
@@ -310,8 +227,8 @@ void Application::configLoop(void)
     {
         LOG(Message) << BIG_SEP << " Starting measurement for trajectory " << t
                      << " " << BIG_SEP << std::endl;
-        env().setTrajectory(t);
-        env().executeProgram(program_);
+        vm().setTrajectory(t);
+        vm().executeProgram(program_);
     }
     LOG(Message) << BIG_SEP << " End of measurement " << BIG_SEP << std::endl;
     env().freeAll();
diff --git a/extras/Hadrons/Application.hpp b/extras/Hadrons/Application.hpp
index fce9b6eb..8cd15433 100644
--- a/extras/Hadrons/Application.hpp
+++ b/extras/Hadrons/Application.hpp
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Application.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -31,8 +30,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
 #define Hadrons_Application_hpp_
 
 #include <Grid/Hadrons/Global.hpp>
-#include <Grid/Hadrons/Environment.hpp>
-#include <Grid/Hadrons/GeneticScheduler.hpp>
+#include <Grid/Hadrons/VirtualMachine.hpp>
 #include <Grid/Hadrons/Modules.hpp>
 
 BEGIN_HADRONS_NAMESPACE
 
@@ -51,25 +49,13 @@ public:
                                         unsigned int, end,
                                         unsigned int, step);
     };
-    class GeneticPar: Serializable
-    {
-    public:
-        GeneticPar(void):
-            popSize{20}, maxGen{1000}, maxCstGen{100}, mutationRate{.1} {};
-    public:
-        GRID_SERIALIZABLE_CLASS_MEMBERS(GeneticPar,
-                                        unsigned int, popSize,
-                                        unsigned int, maxGen,
-                                        unsigned int, maxCstGen,
-                                        double      , mutationRate);
-    };
     class GlobalPar: Serializable
     {
     public:
         GRID_SERIALIZABLE_CLASS_MEMBERS(GlobalPar,
-                                        TrajRange,   trajCounter,
-                                        GeneticPar,  genetic,
-                                        std::string, seed);
+                                        TrajRange,                  trajCounter,
+                                        VirtualMachine::GeneticPar, genetic,
+                                        std::string,                seed);
     };
 public:
     // constructors
@@ -100,14 +86,15 @@ public:
     void configLoop(void);
 private:
     // environment shortcut
-    Environment & env(void) const;
+    DEFINE_ENV_ALIAS;
+    // virtual machine shortcut
+    DEFINE_VM_ALIAS;
 private:
-    long unsigned int         locVol_;
-    std::string               parameterFileName_{""};
-    GlobalPar                 par_;
-    std::vector<unsigned int> program_;
-    Environment::Size         memPeak_;
-    bool                      scheduled_{false};
+    long unsigned int       locVol_;
+    std::string             parameterFileName_{""};
+    GlobalPar               par_;
+    VirtualMachine::Program program_;
+    bool                    scheduled_{false}, loadedSchedule_{false};
 };
 
 /******************************************************************************
  *                     Application template implementation                    *
  ******************************************************************************/
@@ -117,14 +104,16 @@
 template <typename M>
 void Application::createModule(const std::string name)
 {
-    env().createModule<M>(name);
+    vm().createModule<M>(name);
+    scheduled_ = false;
 }
 
 template <typename M>
 void Application::createModule(const std::string name,
                                const typename M::Par &par)
 {
-    env().createModule<M>(name, par);
+    vm().createModule<M>(name, par);
+    scheduled_ = false;
 }
 
 END_HADRONS_NAMESPACE
diff --git a/extras/Hadrons/Environment.cc b/extras/Hadrons/Environment.cc
index 0e7a4326..82b0dda1 100644
--- a/extras/Hadrons/Environment.cc
+++ b/extras/Hadrons/Environment.cc
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Environment.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -35,6 +34,9 @@ using namespace Grid;
 using namespace QCD;
 using namespace Hadrons;
 
+#define ERROR_NO_ADDRESS(address)\
+HADRON_ERROR(Definition, "no object with address " + std::to_string(address));
+
 /******************************************************************************
  *                       Environment implementation                           *
  ******************************************************************************/
@@ -56,28 +58,6 @@ Environment::Environment(void)
     rng4d_.reset(new GridParallelRNG(grid4d_.get()));
 }
 
-// dry run /////////////////////////////////////////////////////////////////////
-void Environment::dryRun(const bool isDry)
-{
-    dryRun_ = isDry;
-}
-
-bool Environment::isDryRun(void) const
-{
-    return dryRun_;
-}
-
-// trajectory number ///////////////////////////////////////////////////////////
-void Environment::setTrajectory(const unsigned int traj)
-{
-    traj_ = traj;
-}
-
-unsigned int Environment::getTrajectory(void) const
-{
-    return traj_;
-}
-
 // grids ///////////////////////////////////////////////////////////////////////
 void Environment::createGrid(const unsigned int Ls)
 {
@@ -105,7 +85,7 @@ GridCartesian * Environment::getGrid(const unsigned int Ls) const
     }
     catch(std::out_of_range &)
     {
-        HADRON_ERROR("no grid with Ls= " << Ls);
+        HADRON_ERROR(Definition, "no grid with Ls= " + std::to_string(Ls));
     }
 }
 
@@ -124,7 +104,7 @@ GridRedBlackCartesian * Environment::getRbGrid(const unsigned int Ls) const
     }
     catch(std::out_of_range &)
     {
-        HADRON_ERROR("no red-black 5D grid with Ls= " << Ls);
+        HADRON_ERROR(Definition, "no red-black 5D grid with Ls= " + std::to_string(Ls));
     }
 }
 
@@ -143,6 +123,11 @@ int Environment::getDim(const unsigned int mu) const
     return dim_[mu];
 }
 
+unsigned long int Environment::getLocalVolume(void) const
+{
+    return locVol_;
+}
+
 // random number generator /////////////////////////////////////////////////////
 void Environment::setSeed(const std::vector<int> &seed)
 {
@@ -154,291 +139,6 @@ GridParallelRNG * Environment::get4dRng(void) const
     return rng4d_.get();
 }
 
-// module management ///////////////////////////////////////////////////////////
-void Environment::pushModule(Environment::ModPt &pt)
-{
-    std::string name = pt->getName();
-
-    if (!hasModule(name))
-    {
-        std::vector<unsigned int> inputAddress;
-        unsigned int              address;
-        ModuleInfo                m;
-
-        m.data = std::move(pt);
-        m.type = typeIdPt(*m.data.get());
-        m.name = name;
-        auto input  = m.data->getInput();
-        for (auto &in: input)
-        {
-            if (!hasObject(in))
-            {
-                addObject(in , -1);
-            }
-            m.input.push_back(objectAddress_[in]);
-        }
-        auto output = m.data->getOutput();
-        module_.push_back(std::move(m));
-        address              = static_cast<unsigned int>(module_.size() - 1);
-        moduleAddress_[name] = address;
-        for (auto &out: output)
-        {
-            if (!hasObject(out))
-            {
-                addObject(out, address);
-            }
-            else
-            {
-                if (object_[objectAddress_[out]].module < 0)
-                {
-                    object_[objectAddress_[out]].module = address;
-                }
-                else
-                {
-                    HADRON_ERROR("object '" + out
-                                 + "' is already produced by module '"
-                                 + module_[object_[getObjectAddress(out)].module].name
-                                 + "' (while pushing module '" + name + "')");
-                }
-            }
-        }
-    }
-    else
-    {
-        HADRON_ERROR("module '" + name + "' already exists");
-    }
-}
-
-unsigned int Environment::getNModule(void) const
-{
-    return module_.size();
-}
-
-void Environment::createModule(const std::string name, const std::string type,
-                               XmlReader &reader)
-{
-    auto &factory = ModuleFactory::getInstance();
-    auto pt       = factory.create(type, name);
-
-    pt->parseParameters(reader, "options");
-    pushModule(pt);
-}
-
-ModuleBase * Environment::getModule(const unsigned int address) const
-{
-    if (hasModule(address))
-    {
-        return module_[address].data.get();
-    }
-    else
-    {
-        HADRON_ERROR("no module with address " + std::to_string(address));
-    }
-}
-
-ModuleBase * Environment::getModule(const std::string name) const
-{
-    return getModule(getModuleAddress(name));
-}
-
-unsigned int Environment::getModuleAddress(const std::string name) const
-{
-    if (hasModule(name))
-    {
-        return moduleAddress_.at(name);
-    }
-    else
-    {
-        HADRON_ERROR("no module with name '" + name + "'");
-    }
-}
-
-std::string Environment::getModuleName(const unsigned int address) const
-{
-    if (hasModule(address))
-    {
-        return module_[address].name;
-    }
-    else
-    {
-        HADRON_ERROR("no module with address " + std::to_string(address));
-    }
-}
-
-std::string Environment::getModuleType(const unsigned int address) const
-{
-    if (hasModule(address))
-    {
-        return typeName(module_[address].type);
-    }
-    else
-    {
-        HADRON_ERROR("no module with address " + std::to_string(address));
-    }
-}
-
-std::string Environment::getModuleType(const std::string name) const
-{
-    return getModuleType(getModuleAddress(name));
-}
-
-std::string Environment::getModuleNamespace(const unsigned int address) const
-{
-    std::string type = getModuleType(address), ns;
-
-    auto pos2 = type.rfind("::");
-    auto pos1 = type.rfind("::", pos2 - 2);
-
-    return type.substr(pos1 + 2, pos2 - pos1 - 2);
-}
-
-std::string Environment::getModuleNamespace(const std::string name) const
-{
-    return getModuleNamespace(getModuleAddress(name));
-}
-
-bool Environment::hasModule(const unsigned int address) const
-{
-    return (address < module_.size());
-}
-
-bool Environment::hasModule(const std::string name) const
-{
-    return (moduleAddress_.find(name) != moduleAddress_.end());
-}
-
-Graph<unsigned int> Environment::makeModuleGraph(void) const
-{
-    Graph<unsigned int> moduleGraph;
-
-    for (unsigned int i = 0; i < module_.size(); ++i)
-    {
-        moduleGraph.addVertex(i);
-        for (auto &j: module_[i].input)
-        {
-            moduleGraph.addEdge(object_[j].module, i);
-        }
-    }
-
-    return moduleGraph;
-}
-
-#define BIG_SEP "==============="
-#define SEP     "---------------"
-#define MEM_MSG(size)\
-sizeString((size)*locVol_) << " (" << sizeString(size) << "/site)"
-
-Environment::Size
-Environment::executeProgram(const std::vector<unsigned int> &p)
-{
-    Size                                memPeak = 0, sizeBefore, sizeAfter;
-    std::vector<std::set<unsigned int>> freeProg;
-    bool                                continueCollect, nothingFreed;
-
-    // build garbage collection schedule
-    freeProg.resize(p.size());
-    for (unsigned int i = 0; i < object_.size(); ++i)
-    {
-        auto pred = [i, this](const unsigned int j)
-        {
-            auto &in = module_[j].input;
-            auto it  = std::find(in.begin(), in.end(), i);
-
-            return (it != in.end()) or (j == object_[i].module);
-        };
-        auto it = std::find_if(p.rbegin(), p.rend(), pred);
-        if (it != p.rend())
-        {
-            freeProg[p.rend() - it - 1].insert(i);
-        }
-    }
-
-    // program execution
-    for (unsigned int i = 0; i < p.size(); ++i)
-    {
-        // execute module
-        if (!isDryRun())
-        {
-            LOG(Message) << SEP << " Measurement step " << i+1 << "/"
-                         << p.size() << " (module '" << module_[p[i]].name
-                         << "') " << SEP << std::endl;
-        }
-        (*module_[p[i]].data)();
-        sizeBefore = getTotalSize();
-        // print used memory after execution
-        if (!isDryRun())
-        {
-            LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore)
-                         << std::endl;
-        }
-        if (sizeBefore > memPeak)
-        {
-            memPeak = sizeBefore;
-        }
-        // garbage collection for step i
-        if (!isDryRun())
-        {
-            LOG(Message) << "Garbage collection..." << std::endl;
-        }
-        nothingFreed = true;
-        do
-        {
-            continueCollect = false;
-            auto toFree = freeProg[i];
-            for (auto &j: toFree)
-            {
-                // continue garbage collection while there are still
-                // objects without owners
-                continueCollect = continueCollect or !hasOwners(j);
-                if(freeObject(j))
-                {
-                    // if an object has been freed, remove it from
-                    // the garbage collection schedule
-                    freeProg[i].erase(j);
-                    nothingFreed = false;
-                }
-            }
-        } while (continueCollect);
-        // any remaining objects in step i garbage collection schedule
-        // is scheduled for step i + 1
-        if (i + 1 < p.size())
-        {
-            for (auto &j: freeProg[i])
-            {
-                freeProg[i + 1].insert(j);
-            }
-        }
-        // print used memory after garbage collection if necessary
-        if (!isDryRun())
-        {
-            sizeAfter = getTotalSize();
-            if (sizeBefore != sizeAfter)
-            {
-                LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter)
-                             << std::endl;
-            }
-            else
-            {
-                LOG(Message) << "Nothing to free" << std::endl;
-            }
-        }
-    }
-
-    return memPeak;
-}
-
-Environment::Size Environment::executeProgram(const std::vector<std::string> &p)
-{
-    std::vector<unsigned int> pAddress;
-
-    for (auto &n: p)
-    {
-        pAddress.push_back(getModuleAddress(n));
-    }
-
-    return executeProgram(pAddress);
-}
-
 // general memory management ///////////////////////////////////////////////////
 void Environment::addObject(const std::string name, const int moduleAddress)
 {
@@ -448,46 +148,25 @@ void Environment::addObject(const std::string name, const int moduleAddress)
 
         info.name   = name;
         info.module = moduleAddress;
+        info.data   = nullptr;
         object_.push_back(std::move(info));
         objectAddress_[name] = static_cast<unsigned int>(object_.size() - 1);
     }
     else
    {
-        HADRON_ERROR("object '" + name + "' already exists");
+        HADRON_ERROR(Definition, "object '" + name + "' already exists");
     }
 }
 
-void Environment::registerObject(const unsigned int address,
-                                 const unsigned int size, const unsigned int Ls)
+void Environment::setObjectModule(const unsigned int objAddress,
+                                  const int modAddress)
 {
-    if (!hasRegisteredObject(address))
-    {
-        if (hasObject(address))
-        {
-            object_[address].size         = size;
-            object_[address].Ls           = Ls;
-            object_[address].isRegistered = true;
-        }
-        else
-        {
-            HADRON_ERROR("no object with address " + std::to_string(address));
-        }
-    }
-    else
-    {
-        HADRON_ERROR("object with address " + std::to_string(address)
-                     + " already registered");
-    }
+    object_[objAddress].module = modAddress;
 }
 
-void Environment::registerObject(const std::string name,
-                                 const unsigned int size, const unsigned int Ls)
+unsigned int Environment::getMaxAddress(void) const
 {
-    if (!hasObject(name))
-    {
-        addObject(name);
-    }
-    registerObject(getObjectAddress(name), size, Ls);
+    return object_.size();
 }
 
 unsigned int Environment::getObjectAddress(const std::string name) const
@@ -498,7 +177,7 @@ unsigned int Environment::getObjectAddress(const std::string name) const
     }
     else
     {
-        HADRON_ERROR("no object with name '" + name + "'");
+        HADRON_ERROR(Definition, "no object with name '" + name + "'");
     }
 }
 
@@ -510,13 +189,13 @@ std::string Environment::getObjectName(const unsigned int address) const
     }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        ERROR_NO_ADDRESS(address);
     }
 }
 
 std::string Environment::getObjectType(const unsigned int address) const
 {
-    if (hasRegisteredObject(address))
+    if (hasObject(address))
     {
         if (object_[address].type)
         {
@@ -527,14 +206,9 @@ std::string Environment::getObjectType(const unsigned int address) const
             return "";
         }
     }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address)
-                     + " exists but is not registered");
-    }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        ERROR_NO_ADDRESS(address);
     }
 }
 
@@ -545,18 +219,13 @@ std::string Environment::getObjectType(const std::string name) const
 
 Environment::Size Environment::getObjectSize(const unsigned int address) const
 {
-    if (hasRegisteredObject(address))
+    if (hasObject(address))
     {
         return object_[address].size;
     }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address)
-                     + " exists but is not registered");
-    }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        ERROR_NO_ADDRESS(address);
     }
 }
 
@@ -565,7 +234,24 @@ Environment::Size Environment::getObjectSize(const std::string name) const
     return getObjectSize(getObjectAddress(name));
 }
 
-unsigned int Environment::getObjectModule(const unsigned int address) const
+Environment::Storage Environment::getObjectStorage(const unsigned int address) const
+{
+    if (hasObject(address))
+    {
+        return object_[address].storage;
+    }
+    else
+    {
+        ERROR_NO_ADDRESS(address);
+    }
+}
+
+Environment::Storage Environment::getObjectStorage(const std::string name) const
+{
+    return getObjectStorage(getObjectAddress(name));
+}
+
+int Environment::getObjectModule(const unsigned int address) const
 {
     if (hasObject(address))
     {
@@ -573,29 +259,24 @@ unsigned int Environment::getObjectModule(const unsigned int address) const
     }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        ERROR_NO_ADDRESS(address);
     }
 }
 
-unsigned int Environment::getObjectModule(const std::string name) const
+int Environment::getObjectModule(const std::string name) const
 {
     return getObjectModule(getObjectAddress(name));
 }
 
 unsigned int Environment::getObjectLs(const unsigned int address) const
 {
-    if (hasRegisteredObject(address))
+    if (hasObject(address))
     {
         return object_[address].Ls;
     }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address)
-                     + " exists but is not registered");
-    }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        ERROR_NO_ADDRESS(address);
     }
 }
 
@@ -616,30 +297,6 @@ bool Environment::hasObject(const std::string name) const
     return ((it != objectAddress_.end()) and hasObject(it->second));
 }
 
-bool Environment::hasRegisteredObject(const unsigned int address) const
-{
-    if (hasObject(address))
-    {
-        return object_[address].isRegistered;
-    }
-    else
-    {
-        return false;
-    }
-}
-
-bool Environment::hasRegisteredObject(const std::string name) const
-{
-    if (hasObject(name))
-    {
-        return hasRegisteredObject(getObjectAddress(name));
-    }
-    else
-    {
-        return false;
-    }
-}
-
 bool Environment::hasCreatedObject(const unsigned int address) const
 {
     if (hasObject(address))
     {
@@ -680,92 +337,27 @@ Environment::Size Environment::getTotalSize(void) const
 
     for (auto &o: object_)
     {
-        if (o.isRegistered)
-        {
-            size += o.size;
-        }
+        size += o.size;
     }
 
     return size;
 }
 
-void Environment::addOwnership(const unsigned int owner,
-                               const unsigned int property)
+void Environment::freeObject(const unsigned int address)
 {
-    if (hasObject(property))
+    if (hasCreatedObject(address))
     {
-        object_[property].owners.insert(owner);
-    }
-    else
-    {
-        HADRON_ERROR("no object with address " + std::to_string(property));
-    }
-    if (hasObject(owner))
-    {
-        object_[owner].properties.insert(property);
-    }
-    else
-    {
-        HADRON_ERROR("no object with address " + std::to_string(owner));
+        LOG(Message) << "Destroying object '" << object_[address].name
+                     << "'" << std::endl;
     }
+    object_[address].size = 0;
+    object_[address].type = nullptr;
+    object_[address].data.reset(nullptr);
 }
 
-void Environment::addOwnership(const std::string owner,
-                               const std::string property)
+void Environment::freeObject(const std::string name)
 {
-    addOwnership(getObjectAddress(owner), getObjectAddress(property));
-}
-
-bool Environment::hasOwners(const unsigned int address) const
-{
-    
-    if (hasObject(address))
-    {
-        return (!object_[address].owners.empty());
-    }
-    else
-    {
-        HADRON_ERROR("no object with address " + std::to_string(address));
-    }
-}
-
-bool Environment::hasOwners(const std::string name) const
-{
-    return hasOwners(getObjectAddress(name));
-}
-
-bool Environment::freeObject(const unsigned int address)
-{
-    if (!hasOwners(address))
-    {
-        if (!isDryRun() and object_[address].isRegistered)
-        {
-            LOG(Message) << "Destroying object '" << object_[address].name
-                         << "'" << std::endl;
-        }
-        for (auto &p: object_[address].properties)
-        {
-            object_[p].owners.erase(address);
-        }
-        object_[address].size         = 0;
-        object_[address].Ls           = 0;
-        object_[address].isRegistered = false;
-        object_[address].type         = nullptr;
-        object_[address].owners.clear();
-        object_[address].properties.clear();
-        object_[address].data.reset(nullptr);
-
-        return true;
-    }
-    else
-    {
-        return false;
-    }
-}
-
-bool Environment::freeObject(const std::string name)
-{
-    return freeObject(getObjectAddress(name));
+    freeObject(getObjectAddress(name));
 }
 
 void Environment::freeAll(void)
@@ -776,18 +368,24 @@ void Environment::freeAll(void)
     }
 }
 
-void Environment::printContent(void)
+void Environment::protectObjects(const bool protect)
 {
-    LOG(Message) << "Modules: " << std::endl;
-    for (unsigned int i = 0; i < module_.size(); ++i)
-    {
-        LOG(Message) << std::setw(4) << i << ": "
-                     << getModuleName(i) << std::endl;
-    }
-    LOG(Message) << "Objects: " << std::endl;
+    protect_ = protect;
+}
+
+bool Environment::objectsProtected(void) const
+{
+    return protect_;
+}
+
+// print environment content ///////////////////////////////////////////////////
+void Environment::printContent(void) const
+{
+    LOG(Debug) << "Objects: " << std::endl;
     for (unsigned int i = 0; i < object_.size(); ++i)
     {
-        LOG(Message) << std::setw(4) << i << ": "
-                     << getObjectName(i) << std::endl;
+        LOG(Debug) << std::setw(4) << i << ": "
+                   << getObjectName(i) << " ("
+                   << sizeString(getObjectSize(i)) << ")" << std::endl;
     }
 }
diff --git a/extras/Hadrons/Environment.hpp b/extras/Hadrons/Environment.hpp
index 13264bd5..e9bfffe1 100644
--- a/extras/Hadrons/Environment.hpp
+++ b/extras/Hadrons/Environment.hpp
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Environment.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -31,20 +30,12 @@ See the full license in the file "LICENSE" in the top level distribution directo
 #define Hadrons_Environment_hpp_
 
 #include <Grid/Hadrons/Global.hpp>
-#include <Grid/Hadrons/Graph.hpp>
-
-#ifndef SITE_SIZE_TYPE
-#define SITE_SIZE_TYPE unsigned int
-#endif
 
 BEGIN_HADRONS_NAMESPACE
 
 /******************************************************************************
  *                         Global environment                                 *
  ******************************************************************************/
-// forward declaration of Module
-class ModuleBase;
-
 class Object
 {
 public:
     Object(void) = default;
     virtual ~Object(void) = default;
 };
 
 template <typename T>
 class Holder: public Object
 {
 public:
     Holder(void) = default;
     Holder(T *pt);
     virtual ~Holder(void) = default;
     T &  get(void) const;
     T *  getPt(void) const;
     void reset(T *pt);
 private:
     std::unique_ptr<T> objPt_{nullptr};
 };
 
+#define DEFINE_ENV_ALIAS \
+inline Environment & env(void) const\
+{\
+    return Environment::getInstance();\
+}
+
 class Environment
 {
     SINGLETON(Environment);
 public:
     typedef SITE_SIZE_TYPE Size;
-    typedef std::unique_ptr<ModuleBase>            ModPt;
     typedef std::unique_ptr<GridCartesian>         GridPt;
     typedef std::unique_ptr<GridRedBlackCartesian> GridRbPt;
     typedef std::unique_ptr<GridParallelRNG>       RngPt;
-    typedef std::unique_ptr<LatticeBase>           LatticePt;
+    enum class Storage {object, cache, temporary};
 private:
-    struct ModuleInfo
-    {
-        const std::type_info      *type{nullptr};
-        std::string               name;
-        ModPt                     data{nullptr};
-        std::vector<unsigned int> input;
-    };
     struct ObjInfo
     {
         Size                    size{0};
+        Storage                 storage{Storage::object};
         unsigned int            Ls{0};
-        bool                    isRegistered{false};
        const std::type_info    *type{nullptr};
         std::string             name;
         int                     module{-1};
-        std::set<unsigned int>  owners, properties;
         std::unique_ptr<Object> data{nullptr};
     };
 public:
-    // dry run
-    void                    dryRun(const bool isDry);
-    bool                    isDryRun(void) const;
-    // trajectory number
-    void                    setTrajectory(const unsigned int traj);
-    unsigned int            getTrajectory(void) const;
     // grids
     void                    createGrid(const unsigned int Ls);
     GridCartesian *         getGrid(const unsigned int Ls = 1) const;
     GridRedBlackCartesian * getRbGrid(const unsigned int Ls = 1) const;
     std::vector<int>        getDim(void) const;
     int                     getDim(const unsigned int mu) const;
+    unsigned long int       getLocalVolume(void) const;
     unsigned int            getNd(void) const;
     // random number generator
     void                    setSeed(const std::vector<int> &seed);
     GridParallelRNG *       get4dRng(void) const;
-    // module management
-    void                    pushModule(ModPt &pt);
-    template <typename M>
-    void                    createModule(const std::string name);
-    template <typename M>
-    void                    createModule(const std::string name,
-                                         const typename M::Par &par);
-    void                    createModule(const std::string name,
-                                         const std::string type,
-                                         XmlReader &reader);
-    unsigned int            getNModule(void) const;
-    ModuleBase *            getModule(const unsigned int address) const;
-    ModuleBase *            getModule(const std::string name) const;
-    template <typename M>
-    M *                     getModule(const unsigned int address) const;
-    template <typename M>
-    M *                     getModule(const std::string name) const;
-    unsigned int            getModuleAddress(const std::string name) const;
-    std::string             getModuleName(const unsigned int address) const;
-    std::string             getModuleType(const unsigned int address) const;
-    std::string             getModuleType(const std::string name) const;
-    std::string             getModuleNamespace(const unsigned int address) const;
-    std::string             getModuleNamespace(const std::string name) const;
-    bool                    hasModule(const unsigned int address) const;
-    bool                    hasModule(const std::string name) const;
-    Graph<unsigned int>     makeModuleGraph(void) const;
-    Size                    executeProgram(const std::vector<unsigned int> &p);
-    Size                    executeProgram(const std::vector<std::string> &p);
     // general memory management
     void                    addObject(const std::string name,
                                       const int moduleAddress = -1);
-    void                    registerObject(const unsigned int address,
-                                           const unsigned int size,
-                                           const unsigned int Ls = 1);
-    void                    registerObject(const std::string name,
-                                           const unsigned int size,
-                                           const unsigned int Ls = 1);
-    template <typename T>
-    unsigned int            lattice4dSize(void) const;
-    template <typename T>
-    void                    registerLattice(const unsigned int address,
-                                            const unsigned int Ls = 1);
-    template <typename T>
-    void                    registerLattice(const std::string name,
-                                            const unsigned int Ls = 1);
-    template <typename T>
-    void                    setObject(const unsigned int address, T *object);
-    template <typename T>
-    void                    setObject(const std::string name, T *object);
+    template <typename B, typename T, typename ... Ts>
+    void                    createDerivedObject(const std::string name,
+                                                const Environment::Storage storage,
+                                                const unsigned int Ls,
+                                                Ts && ... args);
+    template <typename T, typename ... Ts>
+    void                    createObject(const std::string name,
+                                         const Environment::Storage storage,
+                                         const unsigned int Ls,
+                                         Ts && ... args);
+    void                    setObjectModule(const unsigned int objAddress,
+                                            const int modAddress);
     template <typename T>
     T *                     getObject(const unsigned int address) const;
     template <typename T>
     T *                     getObject(const std::string name) const;
-    template <typename T>
-    T *                     createLattice(const unsigned int address);
-    template <typename T>
-    T *                     createLattice(const std::string name);
+    unsigned int            getMaxAddress(void) const;
     unsigned int            getObjectAddress(const std::string name) const;
     std::string             getObjectName(const unsigned int address) const;
     std::string             getObjectType(const unsigned int address) const;
     std::string             getObjectType(const std::string name) const;
     Size                    getObjectSize(const unsigned int address) const;
     Size                    getObjectSize(const std::string name) const;
-    unsigned int            getObjectModule(const unsigned int address) const;
-    unsigned int            getObjectModule(const std::string name) const;
+    Storage                 getObjectStorage(const unsigned int address) const;
+    Storage                 getObjectStorage(const std::string name) const;
+    int                     getObjectModule(const unsigned int address) const;
+    int                     getObjectModule(const std::string name) const;
     unsigned int            getObjectLs(const unsigned int address) const;
     unsigned int            getObjectLs(const std::string name) const;
     bool                    hasObject(const unsigned int address) const;
     bool                    hasObject(const std::string name) const;
-    bool                    hasRegisteredObject(const unsigned int address) const;
-    bool                    hasRegisteredObject(const std::string name) const;
     bool                    hasCreatedObject(const unsigned int address) const;
     bool                    hasCreatedObject(const std::string name) const;
     bool                    isObject5d(const unsigned int address) const;
@@ -192,20 +138,17 @@ public:
     template <typename T>
     bool                    isObjectOfType(const std::string name) const;
     Environment::Size       getTotalSize(void) const;
-    void                    addOwnership(const unsigned int owner,
-                                         const unsigned int property);
-    void                    addOwnership(const std::string owner,
-                                         const std::string property);
-    bool                    hasOwners(const unsigned int address) const;
-    bool                    hasOwners(const std::string name) const;
-    bool                    freeObject(const unsigned int address);
-    bool                    freeObject(const std::string name);
+    void                    freeObject(const unsigned int address);
+    void                    freeObject(const std::string name);
     void                    freeAll(void);
-    void                    printContent(void);
+    void                    protectObjects(const bool protect);
+    bool                    objectsProtected(void) const;
+    // print environment content
+    void                    printContent(void) const;
 private:
     // general
-    bool                                dryRun_{false};
-    unsigned int                        traj_, locVol_;
+    unsigned long int                   locVol_;
+    bool                                protect_{true};
     // grids
     std::vector<int>                    dim_;
     GridPt                              grid4d_;
@@ -215,11 +158,6 @@ private:
     unsigned int                        nd_;
     // random number generator
     RngPt                               rng4d_;
-    // module and related maps
-    std::vector<ModuleInfo>             module_;
-    std::map<std::string, unsigned int> moduleAddress_;
-    // lattice store
-    std::map<unsigned int, LatticePt>   lattice_;
     // object store
     std::vector<ObjInfo>                object_;
     std::map<std::string, unsigned int> objectAddress_;
@@ -256,116 +194,85 @@ void Holder<T>::reset(T *pt)
 /******************************************************************************
  *                    Environment template implementation                     *
  ******************************************************************************/
-// module management ///////////////////////////////////////////////////////////
-template <typename M>
-void Environment::createModule(const std::string name)
+// general memory management ///////////////////////////////////////////////////
+template <typename B, typename T, typename ... Ts>
+void Environment::createDerivedObject(const std::string name,
+                                      const Environment::Storage storage,
+                                      const unsigned int Ls,
+                                      Ts && ... args)
 {
-    ModPt pt(new M(name));
+    if (!hasObject(name))
+    {
+        addObject(name);
+    }
 
-    pushModule(pt);
-}
-
-template <typename M>
-void Environment::createModule(const std::string name,
-                               const typename M::Par &par)
-{
-    ModPt pt(new M(name));
+    unsigned int address = getObjectAddress(name);
 
-    static_cast<M *>(pt.get())->setPar(par);
-    pushModule(pt);
-}
-
-template <typename M>
-M * Environment::getModule(const unsigned int address) const
-{
-    if (auto *pt = dynamic_cast<M *>(getModule(address)))
+    if (!object_[address].data or !objectsProtected())
     {
-        return pt;
+        MemoryStats memStats;
+    
+        if (!MemoryProfiler::stats)
+        {
+            MemoryProfiler::stats = &memStats;
+        }
+        size_t initMem = MemoryProfiler::stats->currentlyAllocated;
+        object_[address].storage = storage;
+        object_[address].Ls      = Ls;
+        object_[address].data.reset(new Holder<B>(new T(std::forward<Ts>(args)...)));
+        object_[address].size    = MemoryProfiler::stats->maxAllocated - initMem;
+        object_[address].type    = &typeid(T);
+        if (MemoryProfiler::stats == &memStats)
+        {
+            MemoryProfiler::stats = nullptr;
+        }
    }
-    else
+    // object already exists, no error if it is a cache, error otherwise
+    else if ((object_[address].storage != Storage::cache) or 
+             (object_[address].storage != storage) or
+             (object_[address].name != name) or
+             (object_[address].type != &typeid(T)))
     {
-        HADRON_ERROR("module '" + module_[address].name
-                     + "' does not have type " + typeid(M).name()
-                     + "(object type: " + getModuleType(address) + ")");
+        HADRON_ERROR(Definition, "object '" + name + "' already allocated");
     }
 }
 
-template <typename M>
-M * Environment::getModule(const std::string name) const
+template <typename T, typename ... Ts>
+void Environment::createObject(const std::string name,
+                               const Environment::Storage storage,
+                               const unsigned int Ls,
+                               Ts && ... args)
 {
-    return getModule<M>(getModuleAddress(name));
-}
-
-template <typename T>
-unsigned int Environment::lattice4dSize(void) const
-{
-    return sizeof(typename T::vector_object)/getGrid()->Nsimd();
-}
-
-template <typename T>
-void Environment::registerLattice(const unsigned int address,
-                                  const unsigned int Ls)
-{
-    createGrid(Ls);
-    registerObject(address, Ls*lattice4dSize<T>(), Ls);
-}
-
-template <typename T>
-void Environment::registerLattice(const std::string name, const unsigned int Ls)
-{
-    createGrid(Ls);
-    registerObject(name, Ls*lattice4dSize<T>(), Ls);
-}
-
-template <typename T>
-void Environment::setObject(const unsigned int address, T *object)
-{
-    if (hasRegisteredObject(address))
-    {
-        object_[address].data.reset(new Holder<T>(object));
-        object_[address].type = &typeid(T);
-    }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address) +
-                     " exists but is not registered");
-    }
-    else
-    {
-        HADRON_ERROR("no object with address " + std::to_string(address));
-    }
-}
-
-template <typename T>
-void Environment::setObject(const std::string name, T *object)
-{
-    setObject(getObjectAddress(name), object);
+    createDerivedObject<T, T>(name, storage, Ls, std::forward<Ts>(args)...);
 }
 
 template <typename T>
 T * Environment::getObject(const unsigned int address) const
 {
-    if (hasRegisteredObject(address))
+    if (hasObject(address))
     {
-        if (auto h = dynamic_cast<Holder<T> *>(object_[address].data.get()))
+        if (hasCreatedObject(address))
         {
-            return h->getPt();
+            if (auto h = dynamic_cast<Holder<T> *>(object_[address].data.get()))
+            {
+                return h->getPt();
+            }
+            else
+            {
+                HADRON_ERROR(Definition, "object with address " + std::to_string(address) +
+                             " does not have type '" + typeName(&typeid(T)) +
+                             "' (has type '" + getObjectType(address) + "')");
            }
        }
        else
        {
-            HADRON_ERROR("object with address " + std::to_string(address) +
-                         " does not have type '" + typeName(&typeid(T)) +
-                         "' (has type '" + getObjectType(address) + "')");
+            HADRON_ERROR(Definition, "object with address " + std::to_string(address) +
+                         " is empty");
        }
     }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address) +
-                     " exists but is not registered");
-    }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        HADRON_ERROR(Definition, "no object with address " + std::to_string(address));
     }
 }
 
@@ -375,26 +282,10 @@ T * Environment::getObject(const std::string name) const
     return getObject<T>(getObjectAddress(name));
 }
 
-template <typename T>
-T * Environment::createLattice(const unsigned int address)
-{
-    GridCartesian *g = getGrid(getObjectLs(address));
-
-    setObject(address, new T(g));
-
-    return getObject<T>(address);
-}
-
-template <typename T>
-T * Environment::createLattice(const std::string name)
-{
-    return createLattice<T>(getObjectAddress(name));
-}
-
 template <typename T>
 bool Environment::isObjectOfType(const unsigned int address) const
 {
-    if (hasRegisteredObject(address))
+    if (hasObject(address))
     {
         if (auto h = dynamic_cast<Holder<T> *>(object_[address].data.get()))
         {
@@ -405,14 +296,9 @@ bool Environment::isObjectOfType(const unsigned int address) const
             return false;
         }
     }
-    else if (hasObject(address))
-    {
-        HADRON_ERROR("object with address " + std::to_string(address) +
-                     " exists but is not registered");
-    }
     else
     {
-        HADRON_ERROR("no object with address " + std::to_string(address));
+        HADRON_ERROR(Definition, "no object with address " + std::to_string(address));
     }
 }
diff --git a/extras/Hadrons/Exceptions.cc b/extras/Hadrons/Exceptions.cc
new file mode 100644
index 00000000..eedc03b1
--- /dev/null
+++ b/extras/Hadrons/Exceptions.cc
@@ -0,0 +1,57 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Exceptions.cc
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli <antonin.portelli@me.com>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+
+#include <Grid/Hadrons/Exceptions.hpp>
+
+#ifndef ERR_SUFF
+#define ERR_SUFF " (" + loc + ")"
+#endif
+
+#define CONST_EXC(name, init) \
+name::name(std::string msg, std::string loc)\
+:init\
+{}
+
+using namespace Grid;
+using namespace Hadrons;
+using namespace Exceptions;
+
+// logic errors
+CONST_EXC(Logic, logic_error(msg + ERR_SUFF))
+CONST_EXC(Definition, Logic("definition error: " + msg, loc))
+CONST_EXC(Implementation, Logic("implementation error: " + msg, loc))
+CONST_EXC(Range, Logic("range error: " + msg, loc))
+CONST_EXC(Size, Logic("size error: " + msg, loc))
+
+// runtime errors
+CONST_EXC(Runtime, runtime_error(msg + ERR_SUFF))
+CONST_EXC(Argument, Runtime("argument error: " + msg, loc))
+CONST_EXC(Io, Runtime("IO error: " + msg, loc))
+CONST_EXC(Memory, Runtime("memory error: " + msg, loc))
+CONST_EXC(Parsing, Runtime("parsing error: " + msg, loc))
+CONST_EXC(Program, Runtime("program error: " + msg, loc))
+CONST_EXC(System, Runtime("system error: " + msg, loc))
diff --git a/extras/Hadrons/Exceptions.hpp b/extras/Hadrons/Exceptions.hpp
new file mode 100644
index 00000000..ab588e5e
--- /dev/null
+++ b/extras/Hadrons/Exceptions.hpp
@@ -0,0 +1,72 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Exceptions.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli <antonin.portelli@me.com>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+
+#ifndef Hadrons_Exceptions_hpp_
+#define Hadrons_Exceptions_hpp_
+
+#include <stdexcept>
+#ifndef Hadrons_Global_hpp_
+#include <Grid/Hadrons/Global.hpp>
+#endif
+
+#define SRC_LOC std::string(__FUNCTION__) + " at " + std::string(__FILE__) + ":"\
+                + std::to_string(__LINE__)
+#define HADRON_ERROR(exc, msg)\
+LOG(Error) << msg << std::endl;\
+throw(Exceptions::exc(msg, SRC_LOC));
+
+#define DECL_EXC(name, base) \
+class name: public base\
+{\
+public:\
+    name(std::string msg, std::string loc);\
+}
+
+BEGIN_HADRONS_NAMESPACE
+
+namespace Exceptions
+{
+    // logic errors
+    DECL_EXC(Logic, std::logic_error);
+    DECL_EXC(Definition, Logic);
+    DECL_EXC(Implementation, Logic);
+    DECL_EXC(Range, Logic);
+    DECL_EXC(Size, Logic);
+
+    // runtime errors
+    DECL_EXC(Runtime, std::runtime_error);
+    DECL_EXC(Argument, Runtime);
+    DECL_EXC(Io, Runtime);
+    DECL_EXC(Memory, Runtime);
+    DECL_EXC(Parsing, Runtime);
+    DECL_EXC(Program, Runtime);
+    DECL_EXC(System, Runtime);
+}
+
+END_HADRONS_NAMESPACE
+
+#endif // Hadrons_Exceptions_hpp_
diff --git a/extras/Hadrons/Factory.hpp b/extras/Hadrons/Factory.hpp
index da86acae..705a639e 100644
--- a/extras/Hadrons/Factory.hpp
+++ b/extras/Hadrons/Factory.hpp
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Factory.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -95,7 +94,7 @@ std::unique_ptr<T> Factory<T>::create(const std::string type,
     }
     catch (std::out_of_range &)
     {
-        HADRON_ERROR("object of type '" + type + "' unknown");
+        HADRON_ERROR(Argument, "object of type '" + type + "' unknown");
     }
 
     return func(name);
diff --git a/extras/Hadrons/GeneticScheduler.hpp b/extras/Hadrons/GeneticScheduler.hpp
index d0c52596..9a6476c3 100644
--- a/extras/Hadrons/GeneticScheduler.hpp
+++ b/extras/Hadrons/GeneticScheduler.hpp
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/GeneticScheduler.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -38,13 +37,13 @@ BEGIN_HADRONS_NAMESPACE
 /******************************************************************************
  *                   Scheduler based on a genetic algorithm                   *
  ******************************************************************************/
-template <typename T>
+template <typename V, typename T>
 class GeneticScheduler
 {
 public:
-    typedef std::vector<T>                   Gene;
-    typedef std::pair<Gene *, Gene *>        GenePair;
-    typedef std::function<int(const Gene &)> ObjFunc;
+    typedef std::vector<T>                 Gene;
+    typedef std::pair<Gene *, Gene *>      GenePair;
+    typedef std::function<V(const Gene &)> ObjFunc;
     struct Parameters
     {
         double       mutationRate;
@@ -65,7 +64,7 @@ public:
     void benchmarkCrossover(const unsigned int nIt);
     // print population
     friend std::ostream & operator<<(std::ostream &out,
-                                     const GeneticScheduler<T> &s)
+                                     const GeneticScheduler<V, T> &s)
     {
         out << "[";
         for (auto &p: s.population_)
@@ -87,19 +86,19 @@ private:
     void mutation(Gene &m, const Gene &c);
 private:
-    Graph<T>                 &graph_;
-    const ObjFunc            &func_;
-    const Parameters         par_;
-    std::multimap<int, Gene> population_;
-    std::mt19937             gen_;
+    Graph<T>               &graph_;
+    const ObjFunc          &func_;
+    const Parameters       par_;
+    std::multimap<V, Gene> population_;
+    std::mt19937           gen_;
 };
 
 /******************************************************************************
  *                       template implementation                              *
  ******************************************************************************/
 // constructor /////////////////////////////////////////////////////////////////
-template <typename T>
-GeneticScheduler<T>::GeneticScheduler(Graph<T> &graph, const ObjFunc &func,
+template <typename V, typename T>
+GeneticScheduler<V, T>::GeneticScheduler(Graph<T> &graph, const ObjFunc &func,
                                       const Parameters &par)
 : graph_(graph)
 , func_(func)
@@ -109,22 +108,22 @@ GeneticScheduler<T>::GeneticScheduler(Graph<T> &graph, const ObjFunc &func,
 }
 
 // access //////////////////////////////////////////////////////////////////////
-template <typename T>
-const typename GeneticScheduler<T>::Gene &
-GeneticScheduler<T>::getMinSchedule(void)
+template <typename V, typename T>
+const typename GeneticScheduler<V, T>::Gene &
+GeneticScheduler<V, T>::getMinSchedule(void)
 {
     return population_.begin()->second;
 }
 
-template <typename T>
-int GeneticScheduler<T>::getMinValue(void)
+template <typename V, typename T>
+int GeneticScheduler<V, T>::getMinValue(void)
 {
     return population_.begin()->first;
 }
 
 // breed a new generation //////////////////////////////////////////////////////
-template <typename T>
-void GeneticScheduler<T>::nextGeneration(void)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::nextGeneration(void)
 {
     // random initialization of the population if necessary
     if (population_.size() != par_.popSize)
@@ -158,8 +157,8 @@ void GeneticScheduler<T>::nextGeneration(void)
 }
 
 // evolution steps /////////////////////////////////////////////////////////////
-template <typename T>
-void GeneticScheduler<T>::initPopulation(void)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::initPopulation(void)
 {
     population_.clear();
     for (unsigned int i = 0; i < par_.popSize; ++i)
@@ -170,8 +169,8 @@ void GeneticScheduler<T>::initPopulation(void)
     }
 }
 
-template <typename T>
-void GeneticScheduler<T>::doCrossover(void)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::doCrossover(void)
 {
     auto p = selectPair();
     Gene &p1 = *(p.first), &p2 = *(p.second);
@@ -185,8 +184,8 @@ void GeneticScheduler<T>::doCrossover(void)
     }
 }
 
-template <typename T>
-void GeneticScheduler<T>::doMutation(void)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::doMutation(void)
 {
     std::uniform_real_distribution<double>      mdis(0., 1.);
     std::uniform_int_distribution<unsigned int> pdis(0, population_.size() - 1);
@@ -206,40 +205,35 @@ void GeneticScheduler<T>::doMutation(void)
 }
 
 // genetic operators ///////////////////////////////////////////////////////////
-template <typename T>
-typename GeneticScheduler<T>::GenePair GeneticScheduler<T>::selectPair(void)
+template <typename V, typename T>
+typename GeneticScheduler<V, T>::GenePair GeneticScheduler<V, T>::selectPair(void)
 {
     std::vector<double> prob;
     unsigned int        ind;
     Gene                *p1, *p2;
 
+    const double max = population_.rbegin()->first;
+
     for (auto &c: population_)
     {
-        prob.push_back(1./c.first);
-    }
-    do
-    {
-        double probCpy;
-
-        std::discrete_distribution<unsigned int> dis1(prob.begin(), prob.end());
-        auto                                     rIt = population_.begin();
-        ind = dis1(gen_);
-        std::advance(rIt, ind);
-        p1 = &(rIt->second);
-        probCpy   = prob[ind];
-        prob[ind] = 0.;
-        std::discrete_distribution<unsigned int> dis2(prob.begin(), prob.end());
-        rIt = population_.begin();
-        std::advance(rIt, dis2(gen_));
-        p2 = &(rIt->second);
-        prob[ind] = probCpy;
-    } while (p1 == p2);
+        prob.push_back(std::exp((c.first-1.)/max));
+    }
+    std::discrete_distribution<unsigned int> dis1(prob.begin(), prob.end());
+    auto                                     rIt = population_.begin();
+    ind = dis1(gen_);
+    std::advance(rIt, ind);
+    p1 = &(rIt->second);
+    prob[ind] = 0.;
+    std::discrete_distribution<unsigned int> dis2(prob.begin(), prob.end());
+    rIt = population_.begin();
+    std::advance(rIt, dis2(gen_));
+    p2 = &(rIt->second);
 
     return std::make_pair(p1, p2);
 }
 
-template <typename T>
-void GeneticScheduler<T>::crossover(Gene &c1, Gene &c2, const Gene &p1,
+template <typename V, typename T>
+void GeneticScheduler<V, T>::crossover(Gene &c1, Gene &c2, const Gene &p1,
                                     const Gene &p2)
 {
     Gene buf;
@@ -273,8 +267,8 @@ void GeneticScheduler<T>::crossover(Gene &c1, Gene &c2, const Gene &p1,
     }
 }
 
-template <typename T>
-void GeneticScheduler<T>::mutation(Gene &m, const Gene &c)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::mutation(Gene &m, const Gene &c)
 {
     Gene                                        buf;
     std::uniform_int_distribution<unsigned int> dis(0, c.size() - 1);
@@ -303,8 +297,8 @@ void GeneticScheduler<T>::mutation(Gene &m, const Gene &c)
     }
 }
 
-template <typename T>
-void GeneticScheduler<T>::benchmarkCrossover(const unsigned int nIt)
+template <typename V, typename T>
+void GeneticScheduler<V, T>::benchmarkCrossover(const unsigned int nIt)
 {
     Gene   p1, p2, c1, c2;
     double neg = 0., eq = 0., pos = 0., total;
diff --git a/extras/Hadrons/Global.cc b/extras/Hadrons/Global.cc
index 7b0b8fb6..9a90a08c 100644
--- a/extras/Hadrons/Global.cc
+++ b/extras/Hadrons/Global.cc
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Global.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -39,31 +38,19 @@ HadronsLogger Hadrons::HadronsLogMessage(1,"Message");
 HadronsLogger Hadrons::HadronsLogIterative(1,"Iterative");
 HadronsLogger Hadrons::HadronsLogDebug(1,"Debug");
 
-// pretty size formatting //////////////////////////////////////////////////////
-std::string Hadrons::sizeString(long unsigned int bytes)
-
+void Hadrons::initLogger(void)
 {
-    constexpr unsigned int bufSize = 256;
-    const char             *suffixes[7] = {"", "K", "M", "G", "T", "P", "E"};
-    char                   buf[256];
-    long unsigned int      s     = 0;
-    double                 count = bytes;
-
-    while (count >= 1024 && s < 7)
-    {
-        s++;
-        count /= 1024;
-    }
-    if (count - floor(count) == 0.0)
-    {
-        snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]);
-    }
-    else
-    {
-        snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]);
-    }
-
-    return std::string(buf);
+    auto w = std::string("Hadrons").length();
+    GridLogError.setTopWidth(w);
+    GridLogWarning.setTopWidth(w);
+    GridLogMessage.setTopWidth(w);
+    GridLogIterative.setTopWidth(w);
+    GridLogDebug.setTopWidth(w);
+    HadronsLogError.Active(GridLogError.isActive());
+    HadronsLogWarning.Active(GridLogWarning.isActive());
+    HadronsLogMessage.Active(GridLogMessage.isActive());
+    HadronsLogIterative.Active(GridLogIterative.isActive());
+    HadronsLogDebug.Active(GridLogDebug.isActive());
 }
 
 // type utilities //////////////////////////////////////////////////////////////
@@ -80,3 +67,10 @@ std::string Hadrons::typeName(const std::type_info *info)
 
     return name;
 }
+
+// default writers/readers /////////////////////////////////////////////////////
+#ifdef HAVE_HDF5
+const std::string Hadrons::resultFileExt = "h5";
+#else
+const std::string Hadrons::resultFileExt = "xml";
+#endif
diff --git a/extras/Hadrons/Global.hpp b/extras/Hadrons/Global.hpp
index 371256e8..fc069ed6 100644
--- a/extras/Hadrons/Global.hpp
+++ b/extras/Hadrons/Global.hpp
@@ -4,10 +4,10 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Global.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
+Author: Lanny91 <andrew.lawson@gmail.com>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -35,6 +35,10 @@ See the full license in the file "LICENSE" in the top level distribution directo
 #include <Grid/Grid.h>
 #include <cxxabi.h>
 
+#ifndef SITE_SIZE_TYPE
+#define SITE_SIZE_TYPE size_t
+#endif
+
 #define BEGIN_HADRONS_NAMESPACE \
 namespace Grid {\
 using namespace QCD;\
@@ -57,6 +61,9 @@ using Grid::operator<<;
 #ifndef SIMPL
 #define SIMPL ScalarImplCR
 #endif
+#ifndef GIMPL
+#define GIMPL GimplTypesR
+#endif
 
 BEGIN_HADRONS_NAMESPACE
 
@@ -80,7 +87,8 @@ typedef std::function SolverFn##suffix;
 
 #define SINK_TYPE_ALIASES(suffix)\
-typedef std::function SinkFn##suffix;
+typedef std::function SinkFn##suffix;
 
 #define FGS_TYPE_ALIASES(FImpl, suffix)\
 FERM_TYPE_ALIASES(FImpl, suffix)\
@@ -96,11 +104,6 @@ public:
 };
 
 #define LOG(channel) std::cout << HadronsLog##channel
-#define HADRON_ERROR(msg)\
-LOG(Error) << msg << " (" << __FUNCTION__ << " at " << __FILE__ << ":"\
-           << __LINE__ << ")" << std::endl;\
-abort();
-
 #define DEBUG_VAR(var) LOG(Debug) << #var << "= " << (var) << std::endl;
 
 extern HadronsLogger HadronsLogError;
@@ -109,6 +112,8 @@ extern HadronsLogger HadronsLogMessage;
 extern HadronsLogger HadronsLogIterative;
 extern HadronsLogger HadronsLogDebug;
 
+void initLogger(void);
+
 // singleton pattern
 #define SINGLETON(name)\
 public:\
@@ -134,9 +139,6 @@ public:\
 private:\
     name(void) = default;
 
-// pretty size formating
-std::string sizeString(long unsigned int bytes);
-
 // type utilities
 template <typename T>
 const std::type_info * typeIdPt(const T &x)
@@ -165,14 +167,21 @@ std::string typeName(void)
 }
 
 // default writers/readers
+extern const std::string resultFileExt;
+
 #ifdef HAVE_HDF5
-typedef Hdf5Reader CorrReader;
-typedef Hdf5Writer CorrWriter;
+typedef Hdf5Reader ResultReader;
+typedef Hdf5Writer ResultWriter;
 #else
-typedef XmlReader CorrReader;
-typedef XmlWriter CorrWriter;
+typedef XmlReader ResultReader;
+typedef XmlWriter ResultWriter;
 #endif
 
+#define RESULT_FILE_NAME(name) \
+name + "." + std::to_string(vm().getTrajectory()) + "." + resultFileExt
+
 END_HADRONS_NAMESPACE
 
+#include <Grid/Hadrons/Exceptions.hpp>
+
 #endif // Hadrons_Global_hpp_
diff --git a/extras/Hadrons/Graph.hpp b/extras/Hadrons/Graph.hpp
index df255517..67694aa8 100644
--- a/extras/Hadrons/Graph.hpp
+++ b/extras/Hadrons/Graph.hpp
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/Graph.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -185,7 +184,7 @@ void Graph<T>::removeVertex(const T &value)
     }
     else
     {
-        HADRON_ERROR("vertex " << value << " does not exists");
+        HADRON_ERROR(Range, "vertex does not exists");
     }
 
     // remove all edges containing the vertex
@@ -214,7 +213,7 @@ void Graph<T>::removeEdge(const Edge &e)
     }
     else
     {
-        HADRON_ERROR("edge " << e << " does not exists");
+        HADRON_ERROR(Range, "edge does not exists");
     }
 }
 
@@ -260,7 +259,7 @@ void Graph<T>::mark(const T &value, const bool doMark)
     }
     else
     {
-        HADRON_ERROR("vertex " << value << " does not exists");
+        HADRON_ERROR(Range, "vertex does not exists");
     }
 }
 
@@ -298,7 +297,7 @@ bool Graph<T>::isMarked(const T &value) const
     }
     else
     {
-        HADRON_ERROR("vertex " << value << " does not exists");
+        HADRON_ERROR(Range, "vertex does not exists");
 
         return false;
     }
@@ -430,7 +429,7 @@ std::vector<T> Graph<T>::getAdjacentVertices(const T &value) const
     {
         return ((e.first == value) or (e.second == value));
     };
-    auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred);
+    auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred);
 
     while (eIt != edgeSet_.end())
     {
@@ -442,7 +441,7 @@ std::vector<T> Graph<T>::getAdjacentVertices(const T &value) const
         {
             adjacentVertex.push_back((*eIt).first);
         }
-        eIt = find_if(++eIt, edgeSet_.end(), pred);
+        eIt = std::find_if(++eIt, edgeSet_.end(), pred);
     }
 
     return adjacentVertex;
@@ -458,12 +457,12 @@ std::vector<T> Graph<T>::getChildren(const T &value) const
     {
         return (e.first == value);
     };
-    auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred);
+    auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred);
 
     while (eIt != edgeSet_.end())
     {
         child.push_back((*eIt).second);
-        eIt = find_if(++eIt, edgeSet_.end(), pred);
+        eIt = std::find_if(++eIt, edgeSet_.end(), pred);
     }
 
     return child;
@@ -479,12 +478,12 @@ std::vector<T> Graph<T>::getParents(const T &value) const
     {
         return (e.second == value);
     };
-    auto eIt = find_if(edgeSet_.begin(), edgeSet_.end(), pred);
+    auto eIt = std::find_if(edgeSet_.begin(), edgeSet_.end(), pred);
 
     while (eIt != edgeSet_.end())
     {
         parent.push_back((*eIt).first);
-        eIt = find_if(++eIt, edgeSet_.end(), pred);
+        eIt = std::find_if(++eIt, edgeSet_.end(), pred);
     }
 
     return parent;
@@ -544,7 +543,7 @@ std::vector<T> Graph<T>::topoSort(void)
         {
             if (tmpMarked.at(v))
             {
-                HADRON_ERROR("cannot topologically sort a cyclic graph");
+                HADRON_ERROR(Range, "cannot topologically sort a cyclic graph");
             }
             if (!isMarked(v))
             {
@@ -603,7 +602,7 @@ std::vector<T> Graph<T>::topoSort(Gen &gen)
         {
             if (tmpMarked.at(v))
             {
-                HADRON_ERROR("cannot topologically sort a cyclic graph");
+                HADRON_ERROR(Range, "cannot topologically sort a cyclic graph");
             }
             if (!isMarked(v))
             {
diff --git a/extras/Hadrons/HadronsXmlRun.cc b/extras/Hadrons/HadronsXmlRun.cc
index 0dff8f9a..680f234b 100644
--- a/extras/Hadrons/HadronsXmlRun.cc
+++ b/extras/Hadrons/HadronsXmlRun.cc
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/HadronsXmlRun.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -55,12 +54,6 @@ int main(int argc, char *argv[])
     // initialization
     Grid_init(&argc, &argv);
-    HadronsLogError.Active(GridLogError.isActive());
-    HadronsLogWarning.Active(GridLogWarning.isActive());
-    HadronsLogMessage.Active(GridLogMessage.isActive());
-    HadronsLogIterative.Active(GridLogIterative.isActive());
-    HadronsLogDebug.Active(GridLogDebug.isActive());
-
     LOG(Message) << "Grid initialized" << std::endl;
 
     // execution
     Application application(parameterFileName);
diff --git a/extras/Hadrons/HadronsXmlSchedule.cc b/extras/Hadrons/HadronsXmlSchedule.cc
index a8ca9a63..55f3b231 100644
--- a/extras/Hadrons/HadronsXmlSchedule.cc
+++ b/extras/Hadrons/HadronsXmlSchedule.cc
@@ -4,8 +4,7 @@
 Grid physics library, www.github.com/paboyle/Grid 
 
 Source file: extras/Hadrons/HadronsXmlSchedule.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli <antonin.portelli@me.com>
 
@@ -49,12 +48,6 @@ int main(int argc, char *argv[])
     // initialization
     Grid_init(&argc, &argv);
-    HadronsLogError.Active(GridLogError.isActive());
-    HadronsLogWarning.Active(GridLogWarning.isActive());
-    HadronsLogMessage.Active(GridLogMessage.isActive());
-    HadronsLogIterative.Active(GridLogIterative.isActive());
-    HadronsLogDebug.Active(GridLogDebug.isActive());
-
     LOG(Message) << "Grid initialized" << std::endl;
 
     // execution
     Application application;
diff --git a/extras/Hadrons/Makefile.am b/extras/Hadrons/Makefile.am
index 9cb23600..3d07679a 100644
--- a/extras/Hadrons/Makefile.am
+++ b/extras/Hadrons/Makefile.am
@@ -7,20 +7,24 @@ libHadrons_a_SOURCES = \
 	$(modules_cc)    \
 	Application.cc   \
 	Environment.cc   \
+	Exceptions.cc    \
 	Global.cc        \
-	Module.cc
+	Module.cc        \
+	VirtualMachine.cc
 libHadrons_adir = $(pkgincludedir)/Hadrons
 nobase_libHadrons_a_HEADERS = \
 	$(modules_hpp)        \
 	Application.hpp       \
 	Environment.hpp       \
+	Exceptions.hpp        \
 	Factory.hpp           \
 	GeneticScheduler.hpp  \
 	Global.hpp            \
 	Graph.hpp             \
 	Module.hpp            \
 	Modules.hpp           \
-	ModuleFactory.hpp
+	ModuleFactory.hpp     \
+	VirtualMachine.hpp
 
 HadronsXmlRun_SOURCES = HadronsXmlRun.cc
 HadronsXmlRun_LDADD   = libHadrons.a -lGrid
diff --git a/extras/Hadrons/Module.cc b/extras/Hadrons/Module.cc
index 2549a931..54978f93 100644
--- a/extras/Hadrons/Module.cc
+++ 
b/extras/Hadrons/Module.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Module.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -39,7 +38,6 @@ using namespace Hadrons; // constructor ///////////////////////////////////////////////////////////////// ModuleBase::ModuleBase(const std::string name) : name_(name) -, env_(Environment::getInstance()) {} // access ////////////////////////////////////////////////////////////////////// @@ -48,15 +46,10 @@ std::string ModuleBase::getName(void) const return name_; } -Environment & ModuleBase::env(void) const -{ - return env_; -} - // get factory registration name if available std::string ModuleBase::getRegisteredName(void) { - HADRON_ERROR("module '" + getName() + "' has a type not registered" + HADRON_ERROR(Definition, "module '" + getName() + "' has no registered type" + " in the factory"); } @@ -64,8 +57,5 @@ std::string ModuleBase::getRegisteredName(void) void ModuleBase::operator()(void) { setup(); - if (!env().isDryRun()) - { - execute(); - } + execute(); } diff --git a/extras/Hadrons/Module.hpp b/extras/Hadrons/Module.hpp index 071e254a..2ba425e4 100644 --- a/extras/Hadrons/Module.hpp +++ b/extras/Hadrons/Module.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Module.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -31,7 +30,7 @@ See the full license in the file "LICENSE" in the top level distribution directo #define Hadrons_Module_hpp_ #include -#include +#include BEGIN_HADRONS_NAMESPACE @@ -87,6 +86,56 @@ public:\ static ns##mod##ModuleRegistrar ns##mod##ModuleRegistrarInstance; #define ARG(...) __VA_ARGS__ +#define MACRO_REDIRECT(arg1, arg2, arg3, macro, ...) 
macro + +#define envGet(type, name)\ +*env().template getObject(name) + +#define envGetTmp(type, var)\ +type &var = *env().template getObject(getName() + "_tmp_" + #var) + +#define envHasType(type, name)\ +env().template isObjectOfType(name) + +#define envCreate(type, name, Ls, ...)\ +env().template createObject(name, Environment::Storage::object, Ls, __VA_ARGS__) + +#define envCreateDerived(base, type, name, Ls, ...)\ +env().template createDerivedObject(name, Environment::Storage::object, Ls, __VA_ARGS__) + +#define envCreateLat4(type, name)\ +envCreate(type, name, 1, env().getGrid()) + +#define envCreateLat5(type, name, Ls)\ +envCreate(type, name, Ls, env().getGrid(Ls)) + +#define envCreateLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envCreateLat5, envCreateLat4)(__VA_ARGS__) + +#define envCache(type, name, Ls, ...)\ +env().template createObject(name, Environment::Storage::cache, Ls, __VA_ARGS__) + +#define envCacheLat4(type, name)\ +envCache(type, name, 1, env().getGrid()) + +#define envCacheLat5(type, name, Ls)\ +envCache(type, name, Ls, env().getGrid(Ls)) + +#define envCacheLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envCacheLat5, envCacheLat4)(__VA_ARGS__) + +#define envTmp(type, name, Ls, ...)\ +env().template createObject(getName() + "_tmp_" + name, \ + Environment::Storage::temporary, Ls, __VA_ARGS__) + +#define envTmpLat4(type, name)\ +envTmp(type, name, 1, env().getGrid()) + +#define envTmpLat5(type, name, Ls)\ +envTmp(type, name, Ls, env().getGrid(Ls)) + +#define envTmpLat(...)\ +MACRO_REDIRECT(__VA_ARGS__, envTmpLat5, envTmpLat4)(__VA_ARGS__) /****************************************************************************** * Module class * @@ -101,23 +150,30 @@ public: virtual ~ModuleBase(void) = default; // access std::string getName(void) const; - Environment &env(void) const; // get factory registration name if available virtual std::string getRegisteredName(void); // dependencies/products virtual std::vector getInput(void) = 0; + virtual std::vector getReference(void) + { + return std::vector(0); + }; virtual std::vector getOutput(void) = 0; // parse parameters virtual void parseParameters(XmlReader &reader, const std::string name) = 0; virtual void saveParameters(XmlWriter &writer, const std::string name) = 0; // setup virtual void setup(void) {}; + virtual void execute(void) = 0; // execution void operator()(void); - virtual void execute(void) = 0; +protected: + // environment shortcut + DEFINE_ENV_ALIAS; + // virtual machine shortcut + DEFINE_VM_ALIAS; private: std::string name_; - Environment &env_; }; // derived class, templating the parameter class diff --git a/extras/Hadrons/ModuleFactory.hpp b/extras/Hadrons/ModuleFactory.hpp index 48ab305c..d5c703fa 100644 --- a/extras/Hadrons/ModuleFactory.hpp +++ b/extras/Hadrons/ModuleFactory.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/ModuleFactory.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli diff --git a/extras/Hadrons/Modules.hpp b/extras/Hadrons/Modules.hpp index a52b9a6e..ed6254f4 100644 --- a/extras/Hadrons/Modules.hpp +++ b/extras/Hadrons/Modules.hpp @@ -1,31 +1,55 @@ -#include -#include -#include -#include -#include -#include +/************************************************************************************* +Grid physics library, www.github.com/paboyle/Grid +Source file: extras/Hadrons/Modules.hpp +Copyright (C) 2015-2018 +Author: Antonin Portelli +Author: Lanny91 +This program is free software; you can redistribute it and/or 
modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ #include +#include #include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include +#include +#include +#include +#include +#include diff --git a/extras/Hadrons/Modules/MAction/DWF.hpp b/extras/Hadrons/Modules/MAction/DWF.hpp index 78e0916c..4dfd06cf 100644 --- a/extras/Hadrons/Modules/MAction/DWF.hpp +++ b/extras/Hadrons/Modules/MAction/DWF.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MAction/DWF.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -65,6 +65,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -103,35 +104,29 @@ std::vector TDWF::getOutput(void) template void TDWF::setup(void) { - unsigned int size; - - size = 2*env().template lattice4dSize(); - env().registerObject(getName(), size, par().Ls); + LOG(Message) << "Setting up domain wall fermion matrix with m= " + << par().mass << ", M5= " << par().M5 << " and Ls= " + << par().Ls << " using gauge field '" << par().gauge << "'" + << std::endl; + LOG(Message) << "Fermion boundary conditions: " << par().boundary + << std::endl; + + env().createGrid(par().Ls); + auto &U = envGet(LatticeGaugeField, par().gauge); + auto &g4 = *env().getGrid(); + auto &grb4 = *env().getRbGrid(); + auto &g5 = *env().getGrid(par().Ls); + auto &grb5 = *env().getRbGrid(par().Ls); + std::vector boundary = strToVec(par().boundary); + typename DomainWallFermion::ImplParams implParams(boundary); + envCreateDerived(FMat, DomainWallFermion, getName(), par().Ls, U, g5, + grb5, g4, grb4, par().mass, par().M5, implParams); } // execution /////////////////////////////////////////////////////////////////// template void TDWF::execute(void) -{ - LOG(Message) << "Setting up domain wall fermion matrix with m= " - << par().mass << ", M5= " << par().M5 << " and Ls= " - << par().Ls << " using gauge field '" << par().gauge << "'" - << std::endl; - LOG(Message) << "Fermion boundary conditions: " << par().boundary - << std::endl; - env().createGrid(par().Ls); - auto &U = *env().template getObject(par().gauge); - auto &g4 = *env().getGrid(); - auto 
&grb4 = *env().getRbGrid(); - auto &g5 = *env().getGrid(par().Ls); - auto &grb5 = *env().getRbGrid(par().Ls); - std::vector boundary = strToVec(par().boundary); - typename DomainWallFermion::ImplParams implParams(boundary); - FMat *fMatPt = new DomainWallFermion(U, g5, grb5, g4, grb4, - par().mass, par().M5, - implParams); - env().setObject(getName(), fMatPt); -} +{} END_MODULE_NAMESPACE diff --git a/extras/Hadrons/Modules/MAction/Wilson.hpp b/extras/Hadrons/Modules/MAction/Wilson.hpp index aab54245..6467b3ee 100644 --- a/extras/Hadrons/Modules/MAction/Wilson.hpp +++ b/extras/Hadrons/Modules/MAction/Wilson.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MAction/Wilson.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -63,6 +63,7 @@ public: // dependencies/products virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -101,29 +102,24 @@ std::vector TWilson::getOutput(void) template void TWilson::setup(void) { - unsigned int size; - - size = 2*env().template lattice4dSize(); - env().registerObject(getName(), size); + LOG(Message) << "Setting up TWilson fermion matrix with m= " << par().mass + << " using gauge field '" << par().gauge << "'" << std::endl; + LOG(Message) << "Fermion boundary conditions: " << par().boundary + << std::endl; + + auto &U = envGet(LatticeGaugeField, par().gauge); + auto &grid = *env().getGrid(); + auto &gridRb = *env().getRbGrid(); + std::vector boundary = strToVec(par().boundary); + typename WilsonFermion::ImplParams implParams(boundary); + envCreateDerived(FMat, WilsonFermion, getName(), 1, U, grid, gridRb, + par().mass, implParams); } // execution /////////////////////////////////////////////////////////////////// template void TWilson::execute() -{ - LOG(Message) << "Setting up TWilson fermion matrix with m= " << par().mass - << " using gauge field '" << par().gauge << "'" << std::endl; - LOG(Message) << "Fermion boundary conditions: " << par().boundary - << std::endl; - auto &U = *env().template getObject(par().gauge); - auto &grid = *env().getGrid(); - auto &gridRb = *env().getRbGrid(); - std::vector boundary = strToVec(par().boundary); - typename WilsonFermion::ImplParams implParams(boundary); - FMat *fMatPt = new WilsonFermion(U, grid, gridRb, par().mass, - implParams); - env().setObject(getName(), fMatPt); -} +{} END_MODULE_NAMESPACE diff --git a/extras/Hadrons/Modules/MContraction/Baryon.hpp b/extras/Hadrons/Modules/MContraction/Baryon.hpp index 78bde5a2..8966d95b 100644 --- a/extras/Hadrons/Modules/MContraction/Baryon.hpp +++ b/extras/Hadrons/Modules/MContraction/Baryon.hpp @@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Baryon.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -72,6 +72,9 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: + // setup + virtual void setup(void); // execution virtual void execute(void); }; @@ -99,11 +102,18 @@ std::vector TBaryon::getInput(void) template 
std::vector TBaryon::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } +// setup /////////////////////////////////////////////////////////////////////// +template +void TBaryon::setup(void) +{ + envTmpLat(LatticeComplex, "c"); +} + // execution /////////////////////////////////////////////////////////////////// template void TBaryon::execute(void) @@ -112,12 +122,12 @@ void TBaryon::execute(void) << " quarks '" << par().q1 << "', '" << par().q2 << "', and '" << par().q3 << "'" << std::endl; - CorrWriter writer(par().output); - PropagatorField1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); - PropagatorField3 &q3 = *env().template getObject(par().q2); - LatticeComplex c(env().getGrid()); - Result result; + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q1 = envGet(PropagatorField1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + auto &q3 = envGet(PropagatorField3, par().q2); + envGetTmp(LatticeComplex, c); + Result result; // FIXME: do contractions diff --git a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp index 4f782cd3..539abbbb 100644 --- a/extras/Hadrons/Modules/MContraction/DiscLoop.hpp +++ b/extras/Hadrons/Modules/MContraction/DiscLoop.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/DiscLoop.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -68,6 +69,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -97,7 +99,7 @@ std::vector TDiscLoop::getInput(void) template std::vector TDiscLoop::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -106,7 +108,7 @@ std::vector TDiscLoop::getOutput(void) template void TDiscLoop::setup(void) { - + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// @@ -117,13 +119,13 @@ void TDiscLoop::execute(void) << "' using '" << par().q_loop << "' with " << par().gamma << " insertion." 
<< std::endl; - CorrWriter writer(par().output); - PropagatorField &q_loop = *env().template getObject(par().q_loop); - LatticeComplex c(env().getGrid()); + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q_loop = envGet(PropagatorField, par().q_loop); Gamma gamma(par().gamma); std::vector buf; Result result; + envGetTmp(LatticeComplex, c); c = trace(gamma*q_loop); sliceSum(c, buf, Tp); diff --git a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp index 162ab786..b4327a13 100644 --- a/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp +++ b/extras/Hadrons/Modules/MContraction/Gamma3pt.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Gamma3pt.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -99,6 +100,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -128,7 +130,7 @@ std::vector TGamma3pt::getInput(void) template std::vector TGamma3pt::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -137,7 +139,7 @@ std::vector TGamma3pt::getOutput(void) template void TGamma3pt::setup(void) { - + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// @@ -151,11 +153,10 @@ void TGamma3pt::execute(void) // Initialise variables. q2 and q3 are normal propagators, q1 may be // sink smeared. - CorrWriter writer(par().output); - SlicedPropagator1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); - PropagatorField3 &q3 = *env().template getObject(par().q3); - LatticeComplex c(env().getGrid()); + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q1 = envGet(SlicedPropagator1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + auto &q3 = envGet(PropagatorField2, par().q3); Gamma g5(Gamma::Algebra::Gamma5); Gamma gamma(par().gamma); std::vector buf; @@ -164,6 +165,7 @@ void TGamma3pt::execute(void) // Extract relevant timeslice of sinked propagator q1, then contract & // sum over all spacial positions of gamma insertion. 
SitePropagator1 q1Snk = q1[par().tSnk]; + envGetTmp(LatticeComplex, c); c = trace(g5*q1Snk*adj(q2)*(g5*gamma)*q3); sliceSum(c, buf, Tp); diff --git a/extras/Hadrons/Modules/MContraction/Meson.hpp b/extras/Hadrons/Modules/MContraction/Meson.hpp index b71f7c08..0197534d 100644 --- a/extras/Hadrons/Modules/MContraction/Meson.hpp +++ b/extras/Hadrons/Modules/MContraction/Meson.hpp @@ -4,12 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/Meson.hpp -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli - Andrew Lawson +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -97,6 +95,9 @@ public: virtual std::vector getInput(void); virtual std::vector getOutput(void); virtual void parseGammaString(std::vector &gammaList); +protected: + // execution + virtual void setup(void); // execution virtual void execute(void); }; @@ -124,7 +125,7 @@ std::vector TMeson::getInput(void) template std::vector TMeson::getOutput(void) { - std::vector output = {getName()}; + std::vector output = {}; return output; } @@ -150,9 +151,15 @@ void TMeson::parseGammaString(std::vector &gammaList) { // Parse individual contractions from input string. gammaList = strToVec(par().gammas); - } + } } +// execution /////////////////////////////////////////////////////////////////// +template +void TMeson::setup(void) +{ + envTmpLat(LatticeComplex, "c"); +} // execution /////////////////////////////////////////////////////////////////// #define mesonConnected(q1, q2, gSnk, gSrc) \ @@ -165,7 +172,7 @@ void TMeson::execute(void) << " quarks '" << par().q1 << "' and '" << par().q2 << "'" << std::endl; - CorrWriter writer(par().output); + ResultWriter writer(RESULT_FILE_NAME(par().output)); std::vector buf; std::vector result; Gamma g5(Gamma::Algebra::Gamma5); @@ -180,11 +187,11 @@ void TMeson::execute(void) result[i].gamma_src = gammaList[i].second; result[i].corr.resize(nt); } - if (env().template isObjectOfType(par().q1) and - env().template isObjectOfType(par().q2)) + if (envHasType(SlicedPropagator1, par().q1) and + envHasType(SlicedPropagator2, par().q2)) { - SlicedPropagator1 &q1 = *env().template getObject(par().q1); - SlicedPropagator2 &q2 = *env().template getObject(par().q2); + auto &q1 = envGet(SlicedPropagator1, par().q1); + auto &q2 = envGet(SlicedPropagator2, par().q2); LOG(Message) << "(propagator already sinked)" << std::endl; for (unsigned int i = 0; i < result.size(); ++i) @@ -200,10 +207,10 @@ void TMeson::execute(void) } else { - PropagatorField1 &q1 = *env().template getObject(par().q1); - PropagatorField2 &q2 = *env().template getObject(par().q2); - LatticeComplex c(env().getGrid()); + auto &q1 = envGet(PropagatorField1, par().q1); + auto &q2 = envGet(PropagatorField2, par().q2); + envGetTmp(LatticeComplex, c); LOG(Message) << "(using sink '" << par().sink << "')" << std::endl; for (unsigned int i = 0; i < result.size(); ++i) { @@ -211,18 +218,17 @@ void TMeson::execute(void) Gamma gSrc(gammaList[i].second); std::string ns; - ns = env().getModuleNamespace(env().getObjectModule(par().sink)); + ns = vm().getModuleNamespace(env().getObjectModule(par().sink)); if (ns == "MSource") { - PropagatorField1 &sink = - *env().template getObject(par().sink); + PropagatorField1 &sink = envGet(PropagatorField1, par().sink); c = trace(mesonConnected(q1, q2, gSnk, gSrc)*sink); sliceSum(c, buf, Tp); } else if (ns == 
"MSink") { - SinkFnScalar &sink = *env().template getObject(par().sink); + SinkFnScalar &sink = envGet(SinkFnScalar, par().sink); c = trace(mesonConnected(q1, q2, gSnk, gSrc)); buf = sink(c); diff --git a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp index 8a56e0eb..2801d88c 100644 --- a/extras/Hadrons/Modules/MContraction/WardIdentity.hpp +++ b/extras/Hadrons/Modules/MContraction/WardIdentity.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WardIdentity.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -74,6 +75,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -105,7 +107,7 @@ std::vector TWardIdentity::getInput(void) template std::vector TWardIdentity::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -117,7 +119,16 @@ void TWardIdentity::setup(void) Ls_ = env().getObjectLs(par().q); if (Ls_ != env().getObjectLs(par().action)) { - HADRON_ERROR("Ls mismatch between quark action and propagator"); + HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); + } + envTmpLat(PropagatorField, "tmp"); + envTmpLat(PropagatorField, "vector_WI"); + if (par().test_axial) + { + envTmpLat(PropagatorField, "psi"); + envTmpLat(LatticeComplex, "PP"); + envTmpLat(LatticeComplex, "axial_defect"); + envTmpLat(LatticeComplex, "PJ5q"); } } @@ -128,12 +139,13 @@ void TWardIdentity::execute(void) LOG(Message) << "Performing Ward Identity checks for quark '" << par().q << "'." << std::endl; - PropagatorField tmp(env().getGrid()), vector_WI(env().getGrid()); - PropagatorField &q = *env().template getObject(par().q); - FMat &act = *(env().template getObject(par().action)); - Gamma g5(Gamma::Algebra::Gamma5); + auto &q = envGet(PropagatorField, par().q); + auto &act = envGet(FMat, par().action); + Gamma g5(Gamma::Algebra::Gamma5); // Compute D_mu V_mu, D here is backward derivative. + envGetTmp(PropagatorField, tmp); + envGetTmp(PropagatorField, vector_WI); vector_WI = zero; for (unsigned int mu = 0; mu < Nd; ++mu) { @@ -148,9 +160,10 @@ void TWardIdentity::execute(void) if (par().test_axial) { - PropagatorField psi(env().getGrid()); - LatticeComplex PP(env().getGrid()), axial_defect(env().getGrid()), - PJ5q(env().getGrid()); + envGetTmp(PropagatorField, psi); + envGetTmp(LatticeComplex, PP); + envGetTmp(LatticeComplex, axial_defect); + envGetTmp(LatticeComplex, PJ5q); std::vector axial_buf; // Compute , D is backwards derivative. 
diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp index 302b207e..9d8ada98 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -100,11 +101,13 @@ public:\ /* dependency relation */ \ virtual std::vector getInput(void);\ virtual std::vector getOutput(void);\ +public:\ + std::vector VA_label = {"V", "A"};\ +protected:\ /* setup */ \ virtual void setup(void);\ /* execution */ \ virtual void execute(void);\ - std::vector VA_label = {"V", "A"};\ };\ MODULE_REGISTER_NS(modname, T##modname, MContraction); diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc index 314b080a..1d257fc7 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -76,7 +77,7 @@ std::vector TWeakHamiltonianEye::getInput(void) std::vector TWeakHamiltonianEye::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -84,7 +85,15 @@ std::vector TWeakHamiltonianEye::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakHamiltonianEye::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp1"); + envTmpLat(LatticeComplex, "tmp2"); + envTmp(std::vector, "S_body", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "S_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "E_body", 1, ndim, LatticeComplex(env().getGrid())); + envTmp(std::vector, "E_loop", 1, ndim, LatticeComplex(env().getGrid())); } // execution /////////////////////////////////////////////////////////////////// @@ -95,23 +104,23 @@ void TWeakHamiltonianEye::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." 
<< std::endl; - CorrWriter writer(par().output); - SlicedPropagator &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q1 = envGet(SlicedPropagator, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_eye_diag); unsigned int ndim = env().getNd(); - PropagatorField tmp1(env().getGrid()); - LatticeComplex tmp2(env().getGrid()); - std::vector S_body(ndim, tmp1); - std::vector S_loop(ndim, tmp1); - std::vector E_body(ndim, tmp2); - std::vector E_loop(ndim, tmp2); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp1); + envGetTmp(LatticeComplex, tmp2); + envGetTmp(std::vector, S_body); + envGetTmp(std::vector, S_loop); + envGetTmp(std::vector, E_body); + envGetTmp(std::vector, E_loop); // Get sink timeslice of q1. SitePropagator q1Snk = q1[par().tSnk]; diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp index 3a2b9309..24f39f6c 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc index 2c4df68a..2ad2e7dc 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -76,7 +77,7 @@ std::vector TWeakHamiltonianNonEye::getInput(void) std::vector TWeakHamiltonianNonEye::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -84,7 +85,15 @@ std::vector TWeakHamiltonianNonEye::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakHamiltonianNonEye::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp1"); + envTmpLat(LatticeComplex, "tmp2"); + envTmp(std::vector, "C_i_side_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "C_f_side_loop", 1, ndim, PropagatorField(env().getGrid())); + envTmp(std::vector, "W_i_side_loop", 1, ndim, LatticeComplex(env().getGrid())); + envTmp(std::vector, "W_f_side_loop", 1, ndim, LatticeComplex(env().getGrid())); } // execution 
/////////////////////////////////////////////////////////////////// @@ -95,23 +104,23 @@ void TWeakHamiltonianNonEye::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); - PropagatorField &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q1 = envGet(PropagatorField, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_noneye_diag); - unsigned int ndim = env().getNd(); + unsigned int ndim = env().getNd(); - PropagatorField tmp1(env().getGrid()); - LatticeComplex tmp2(env().getGrid()); - std::vector C_i_side_loop(ndim, tmp1); - std::vector C_f_side_loop(ndim, tmp1); - std::vector W_i_side_loop(ndim, tmp2); - std::vector W_f_side_loop(ndim, tmp2); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp1); + envGetTmp(LatticeComplex, tmp2); + envGetTmp(std::vector, C_i_side_loop); + envGetTmp(std::vector, C_f_side_loop); + envGetTmp(std::vector, W_i_side_loop); + envGetTmp(std::vector, W_f_side_loop); // Setup for C-type contractions. for (int mu = 0; mu < ndim; ++mu) diff --git a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp index eb5abe3c..c4cd66f1 100644 --- a/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc index 6685f292..2c94b2ba 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.cc -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -78,7 +79,7 @@ std::vector TWeakNeutral4ptDisc::getInput(void) std::vector TWeakNeutral4ptDisc::getOutput(void) { - std::vector out = {getName()}; + std::vector out = {}; return out; } @@ -86,7 +87,13 @@ std::vector TWeakNeutral4ptDisc::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TWeakNeutral4ptDisc::setup(void) { + unsigned int ndim = env().getNd(); + envTmpLat(LatticeComplex, "expbuf"); + envTmpLat(PropagatorField, "tmp"); + envTmpLat(LatticeComplex, "curr"); + envTmp(std::vector, "meson", 1, ndim, 
PropagatorField(env().getGrid())); + envTmp(std::vector, "loop", 1, ndim, PropagatorField(env().getGrid())); } // execution /////////////////////////////////////////////////////////////////// @@ -97,21 +104,21 @@ void TWeakNeutral4ptDisc::execute(void) << par().q2 << ", '" << par().q3 << "' and '" << par().q4 << "'." << std::endl; - CorrWriter writer(par().output); - PropagatorField &q1 = *env().template getObject(par().q1); - PropagatorField &q2 = *env().template getObject(par().q2); - PropagatorField &q3 = *env().template getObject(par().q3); - PropagatorField &q4 = *env().template getObject(par().q4); - Gamma g5 = Gamma(Gamma::Algebra::Gamma5); - LatticeComplex expbuf(env().getGrid()); + ResultWriter writer(RESULT_FILE_NAME(par().output)); + auto &q1 = envGet(PropagatorField, par().q1); + auto &q2 = envGet(PropagatorField, par().q2); + auto &q3 = envGet(PropagatorField, par().q3); + auto &q4 = envGet(PropagatorField, par().q4); + Gamma g5 = Gamma(Gamma::Algebra::Gamma5); std::vector corrbuf; std::vector result(n_neut_disc_diag); - unsigned int ndim = env().getNd(); + unsigned int ndim = env().getNd(); - PropagatorField tmp(env().getGrid()); - std::vector meson(ndim, tmp); - std::vector loop(ndim, tmp); - LatticeComplex curr(env().getGrid()); + envGetTmp(LatticeComplex, expbuf); + envGetTmp(PropagatorField, tmp); + envGetTmp(LatticeComplex, curr); + envGetTmp(std::vector, meson); + envGetTmp(std::vector, loop); // Setup for type 1 contractions. for (int mu = 0; mu < ndim; ++mu) diff --git a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp index f26d4636..5de2a751 100644 --- a/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp +++ b/extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp index 8add9a00..33787a0b 100644 --- a/extras/Hadrons/Modules/MFermion/GaugeProp.hpp +++ b/extras/Hadrons/Modules/MFermion/GaugeProp.hpp @@ -4,12 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MFermion/GaugeProp.hpp -Copyright (C) 2015 -Copyright (C) 2016 -Copyright (C) 2017 +Copyright (C) 2015-2018 Author: Antonin Portelli - Andrew Lawson +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -85,6 +83,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -127,10 +126,13 @@ template void TGaugeProp::setup(void) { Ls_ = env().getObjectLs(par().solver); - env().template registerLattice(getName()); + envCreateLat(PropagatorField, getName()); + envTmpLat(FermionField, "source", Ls_); + envTmpLat(FermionField, "sol", Ls_); + envTmpLat(FermionField, "tmp"); if (Ls_ > 1) { - env().template registerLattice(getName() + "_5d", Ls_); + envCreateLat(PropagatorField, getName() + "_5d", Ls_); } } @@ -139,26 +141,23 @@ template void TGaugeProp::execute(void) { LOG(Message) << "Computing 
quark propagator '" << getName() << "'" - << std::endl; + << std::endl; - FermionField source(env().getGrid(Ls_)), sol(env().getGrid(Ls_)), - tmp(env().getGrid()); - std::string propName = (Ls_ == 1) ? getName() : (getName() + "_5d"); - PropagatorField &prop = *env().template createLattice(propName); - PropagatorField &fullSrc = *env().template getObject(par().source); - SolverFn &solver = *env().template getObject(par().solver); - if (Ls_ > 1) - { - env().template createLattice(getName()); - } + std::string propName = (Ls_ == 1) ? getName() : (getName() + "_5d"); + auto &prop = envGet(PropagatorField, propName); + auto &fullSrc = envGet(PropagatorField, par().source); + auto &solver = envGet(SolverFn, par().solver); + envGetTmp(FermionField, source); + envGetTmp(FermionField, sol); + envGetTmp(FermionField, tmp); LOG(Message) << "Inverting using solver '" << par().solver - << "' on source '" << par().source << "'" << std::endl; + << "' on source '" << par().source << "'" << std::endl; for (unsigned int s = 0; s < Ns; ++s) for (unsigned int c = 0; c < Nc; ++c) { LOG(Message) << "Inversion for spin= " << s << ", color= " << c - << std::endl; + << std::endl; // source conversion for 4D sources if (!env().isObject5d(par().source)) { @@ -177,7 +176,7 @@ void TGaugeProp::execute(void) { if (Ls_ != env().getObjectLs(par().source)) { - HADRON_ERROR("Ls mismatch between quark action and source"); + HADRON_ERROR(Size, "Ls mismatch between quark action and source"); } else { @@ -190,8 +189,7 @@ void TGaugeProp::execute(void) // create 4D propagators from 5D one if necessary if (Ls_ > 1) { - PropagatorField &p4d = - *env().template getObject(getName()); + PropagatorField &p4d = envGet(PropagatorField, getName()); make_4D(sol, tmp, Ls_); FermToProp(p4d, tmp, s, c); } diff --git a/extras/Hadrons/Modules/MGauge/Random.cc b/extras/Hadrons/Modules/MGauge/Random.cc index c10fdfc3..962fc243 100644 --- a/extras/Hadrons/Modules/MGauge/Random.cc +++ b/extras/Hadrons/Modules/MGauge/Random.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Random.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -44,7 +43,9 @@ TRandom::TRandom(const std::string name) // dependencies/products /////////////////////////////////////////////////////// std::vector TRandom::getInput(void) { - return std::vector(); + std::vector in; + + return in; } std::vector TRandom::getOutput(void) @@ -57,13 +58,14 @@ std::vector TRandom::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TRandom::setup(void) { - env().registerLattice(getName()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TRandom::execute(void) { LOG(Message) << "Generating random gauge configuration" << std::endl; - LatticeGaugeField &U = *env().createLattice(getName()); + + auto &U = envGet(LatticeGaugeField, getName()); SU3::HotConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MGauge/Random.hpp b/extras/Hadrons/Modules/MGauge/Random.hpp index a97d25cf..51a08dbb 100644 --- a/extras/Hadrons/Modules/MGauge/Random.hpp +++ b/extras/Hadrons/Modules/MGauge/Random.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Random.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -51,6 +50,7 @@ public: // dependency 
relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/StochEm.cc b/extras/Hadrons/Modules/MGauge/StochEm.cc index c7a9fc4f..21b7f626 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.cc +++ b/extras/Hadrons/Modules/MGauge/StochEm.cc @@ -4,9 +4,9 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/StochEm.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -57,32 +57,28 @@ std::vector TStochEm::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TStochEm::setup(void) { - if (!env().hasRegisteredObject("_" + getName() + "_weight")) + if (!env().hasCreatedObject("_" + getName() + "_weight")) { - env().registerLattice("_" + getName() + "_weight"); + envCacheLat(EmComp, "_" + getName() + "_weight"); } - env().registerLattice(getName()); + envCreateLat(EmField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TStochEm::execute(void) { + LOG(Message) << "Generating stochatic EM potential..." << std::endl; + PhotonR photon(par().gauge, par().zmScheme); - EmField &a = *env().createLattice(getName()); - EmComp *w; + auto &a = envGet(EmField, getName()); + auto &w = envGet(EmComp, "_" + getName() + "_weight"); if (!env().hasCreatedObject("_" + getName() + "_weight")) { LOG(Message) << "Caching stochatic EM potential weight (gauge: " << par().gauge << ", zero-mode scheme: " << par().zmScheme << ")..." << std::endl; - w = env().createLattice("_" + getName() + "_weight"); - photon.StochasticWeight(*w); + photon.StochasticWeight(w); } - else - { - w = env().getObject("_" + getName() + "_weight"); - } - LOG(Message) << "Generating stochatic EM potential..." 
<< std::endl; - photon.StochasticField(a, *env().get4dRng(), *w); + photon.StochasticField(a, *env().get4dRng(), w); } diff --git a/extras/Hadrons/Modules/MGauge/StochEm.hpp b/extras/Hadrons/Modules/MGauge/StochEm.hpp index 12ce9fdc..87b70880 100644 --- a/extras/Hadrons/Modules/MGauge/StochEm.hpp +++ b/extras/Hadrons/Modules/MGauge/StochEm.hpp @@ -4,9 +4,9 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/StochEm.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 +Author: Antonin Portelli This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -60,6 +60,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MGauge/Unit.cc b/extras/Hadrons/Modules/MGauge/Unit.cc index 18d75c59..38b5f3aa 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.cc +++ b/extras/Hadrons/Modules/MGauge/Unit.cc @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Unit.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -57,13 +56,14 @@ std::vector TUnit::getOutput(void) // setup /////////////////////////////////////////////////////////////////////// void TUnit::setup(void) { - env().registerLattice(getName()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// void TUnit::execute(void) { LOG(Message) << "Creating unit gauge configuration" << std::endl; - LatticeGaugeField &U = *env().createLattice(getName()); + + auto &U = envGet(LatticeGaugeField, getName()); SU3::ColdConfiguration(*env().get4dRng(), U); } diff --git a/extras/Hadrons/Modules/MGauge/Unit.hpp b/extras/Hadrons/Modules/MGauge/Unit.hpp index 7cd15ef7..d6ce5a6b 100644 --- a/extras/Hadrons/Modules/MGauge/Unit.hpp +++ b/extras/Hadrons/Modules/MGauge/Unit.hpp @@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MGauge/Unit.hpp -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -51,6 +50,7 @@ public: // dependencies/products virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution diff --git a/extras/Hadrons/Modules/MIO/LoadBinary.hpp b/extras/Hadrons/Modules/MIO/LoadBinary.hpp new file mode 100644 index 00000000..d9a8b5f8 --- /dev/null +++ b/extras/Hadrons/Modules/MIO/LoadBinary.hpp @@ -0,0 +1,140 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/Modules/MIO/LoadBinary.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ +#ifndef Hadrons_MIO_LoadBinary_hpp_ +#define Hadrons_MIO_LoadBinary_hpp_ + +#include +#include +#include + +BEGIN_HADRONS_NAMESPACE + +/****************************************************************************** + * Load a binary configurations * + ******************************************************************************/ +BEGIN_MODULE_NAMESPACE(MIO) + +class LoadBinaryPar: Serializable +{ +public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LoadBinaryPar, + std::string, file, + std::string, format); +}; + +template +class TLoadBinary: public Module +{ +public: + typedef typename Impl::Field Field; + typedef typename Impl::Simd Simd; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + typedef typename sobj::DoublePrecision sobj_double; + typedef BinarySimpleMunger Munger; +public: + // constructor + TLoadBinary(const std::string name); + // destructor + virtual ~TLoadBinary(void) = default; + // dependency relation + virtual std::vector getInput(void); + virtual std::vector getOutput(void); + // setup + virtual void setup(void); + // execution + virtual void execute(void); +}; + +MODULE_REGISTER_NS(LoadBinary, TLoadBinary, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU2, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU3, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU4, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU5, TLoadBinary>, MIO); +MODULE_REGISTER_NS(LoadBinaryScalarSU6, TLoadBinary>, MIO); + +/****************************************************************************** + * TLoadBinary implementation * + ******************************************************************************/ +// constructor ///////////////////////////////////////////////////////////////// +template +TLoadBinary::TLoadBinary(const std::string name) +: Module(name) +{} + +// dependencies/products /////////////////////////////////////////////////////// +template +std::vector TLoadBinary::getInput(void) +{ + std::vector in; + + return in; +} + +template +std::vector TLoadBinary::getOutput(void) +{ + std::vector out = {getName()}; + + return out; +} + +// setup /////////////////////////////////////////////////////////////////////// +template +void TLoadBinary::setup(void) +{ + envCreateLat(Field, getName()); +} + +// execution /////////////////////////////////////////////////////////////////// +template +void TLoadBinary::execute(void) +{ + Munger munge; + uint32_t nersc_csum, scidac_csuma, scidac_csumb; + auto &U = envGet(Field, getName()); + std::string filename = par().file + "." 
+ + std::to_string(vm().getTrajectory()); + + LOG(Message) << "Loading " << par().format + << " binary configuration from file '" << filename + << "'" << std::endl; + BinaryIO::readLatticeObject(U, filename, munge, 0, + par().format, nersc_csum, + scidac_csuma, scidac_csumb); + LOG(Message) << "Checksums:" << std::endl; + LOG(Message) << " NERSC " << nersc_csum << std::endl; + LOG(Message) << " SciDAC A " << scidac_csuma << std::endl; + LOG(Message) << " SciDAC B " << scidac_csumb << std::endl; +} + +END_MODULE_NAMESPACE + +END_HADRONS_NAMESPACE + +#endif // Hadrons_MIO_LoadBinary_hpp_ diff --git a/extras/Hadrons/Modules/MGauge/Load.cc b/extras/Hadrons/Modules/MIO/LoadNersc.cc similarity index 73% rename from extras/Hadrons/Modules/MGauge/Load.cc rename to extras/Hadrons/Modules/MIO/LoadNersc.cc index 062e7e98..f20606fc 100644 --- a/extras/Hadrons/Modules/MGauge/Load.cc +++ b/extras/Hadrons/Modules/MIO/LoadNersc.cc @@ -2,10 +2,9 @@ Grid physics library, www.github.com/paboyle/Grid -Source file: extras/Hadrons/Modules/MGauge/Load.cc +Source file: extras/Hadrons/Modules/MIO/LoadNersc.cc -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 Author: Antonin Portelli @@ -26,30 +25,29 @@ with this program; if not, write to the Free Software Foundation, Inc., See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ - -#include +#include using namespace Grid; using namespace Hadrons; -using namespace MGauge; +using namespace MIO; /****************************************************************************** -* TLoad implementation * +* TLoadNersc implementation * ******************************************************************************/ // constructor ///////////////////////////////////////////////////////////////// -TLoad::TLoad(const std::string name) -: Module(name) +TLoadNersc::TLoadNersc(const std::string name) +: Module(name) {} // dependencies/products /////////////////////////////////////////////////////// -std::vector TLoad::getInput(void) +std::vector TLoadNersc::getInput(void) { std::vector in; return in; } -std::vector TLoad::getOutput(void) +std::vector TLoadNersc::getOutput(void) { std::vector out = {getName()}; @@ -57,21 +55,21 @@ std::vector TLoad::getOutput(void) } // setup /////////////////////////////////////////////////////////////////////// -void TLoad::setup(void) +void TLoadNersc::setup(void) { - env().registerLattice(getName()); + envCreateLat(LatticeGaugeField, getName()); } // execution /////////////////////////////////////////////////////////////////// -void TLoad::execute(void) +void TLoadNersc::execute(void) { - FieldMetaData header; - std::string fileName = par().file + "." - + std::to_string(env().getTrajectory()); - + FieldMetaData header; + std::string fileName = par().file + "." 
diff --git a/extras/Hadrons/Modules/MGauge/Load.cc b/extras/Hadrons/Modules/MIO/LoadNersc.cc
similarity index 73%
rename from extras/Hadrons/Modules/MGauge/Load.cc
rename to extras/Hadrons/Modules/MIO/LoadNersc.cc
index 062e7e98..f20606fc 100644
--- a/extras/Hadrons/Modules/MGauge/Load.cc
+++ b/extras/Hadrons/Modules/MIO/LoadNersc.cc
@@ -2,10 +2,9 @@
 
 Grid physics library, www.github.com/paboyle/Grid 
 
-Source file: extras/Hadrons/Modules/MGauge/Load.cc
+Source file: extras/Hadrons/Modules/MIO/LoadNersc.cc
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
 
@@ -26,30 +25,29 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-
-#include <Grid/Hadrons/Modules/MGauge/Load.hpp>
+#include <Grid/Hadrons/Modules/MIO/LoadNersc.hpp>
 
 using namespace Grid;
 using namespace Hadrons;
-using namespace MGauge;
+using namespace MIO;
 
 /******************************************************************************
-*                          TLoad implementation                               *
+*                       TLoadNersc implementation                             *
 ******************************************************************************/
 // constructor /////////////////////////////////////////////////////////////////
-TLoad::TLoad(const std::string name)
-: Module<LoadPar>(name)
+TLoadNersc::TLoadNersc(const std::string name)
+: Module<LoadNerscPar>(name)
 {}
 
 // dependencies/products ///////////////////////////////////////////////////////
-std::vector<std::string> TLoad::getInput(void)
+std::vector<std::string> TLoadNersc::getInput(void)
 {
     std::vector<std::string> in;
     
     return in;
 }
 
-std::vector<std::string> TLoad::getOutput(void)
+std::vector<std::string> TLoadNersc::getOutput(void)
 {
     std::vector<std::string> out = {getName()};
     
@@ -57,21 +55,21 @@ std::vector<std::string> TLoad::getOutput(void)
 }
 
 // setup ///////////////////////////////////////////////////////////////////////
-void TLoad::setup(void)
+void TLoadNersc::setup(void)
 {
-    env().registerLattice<LatticeGaugeField>(getName());
+    envCreateLat(LatticeGaugeField, getName());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
-void TLoad::execute(void)
+void TLoadNersc::execute(void)
 {
-    FieldMetaData header;
-    std::string   fileName = par().file + "."
-                             + std::to_string(env().getTrajectory());
-    
+    FieldMetaData header;
+    std::string   fileName = par().file + "."
+                             + std::to_string(vm().getTrajectory());
     LOG(Message) << "Loading NERSC configuration from file '" << fileName
                  << "'" << std::endl;
-    LatticeGaugeField &U = *env().createLattice<LatticeGaugeField>(getName());
+    
+    auto &U = envGet(LatticeGaugeField, getName());
     NerscIO::readConfiguration(U, header, fileName);
 
     LOG(Message) << "NERSC header:" << std::endl;
     dump_meta_data(header, LOG(Message));
diff --git a/extras/Hadrons/Modules/MGauge/Load.hpp b/extras/Hadrons/Modules/MIO/LoadNersc.hpp
similarity index 76%
rename from extras/Hadrons/Modules/MGauge/Load.hpp
rename to extras/Hadrons/Modules/MIO/LoadNersc.hpp
index 5ff6da0f..d6742e1e 100644
--- a/extras/Hadrons/Modules/MGauge/Load.hpp
+++ b/extras/Hadrons/Modules/MIO/LoadNersc.hpp
@@ -2,10 +2,9 @@
 
 Grid physics library, www.github.com/paboyle/Grid 
 
-Source file: extras/Hadrons/Modules/MGauge/Load.hpp
+Source file: extras/Hadrons/Modules/MIO/LoadNersc.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
 
@@ -26,9 +25,8 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-
-#ifndef Hadrons_MGauge_Load_hpp_
-#define Hadrons_MGauge_Load_hpp_
+#ifndef Hadrons_MIO_LoadNersc_hpp_
+#define Hadrons_MIO_LoadNersc_hpp_
 
 #include <Grid/Hadrons/Global.hpp>
 #include <Grid/Hadrons/Module.hpp>
@@ -37,24 +35,24 @@ See the full license in the file "LICENSE" in the top level distribution directo
 BEGIN_HADRONS_NAMESPACE
 
 /******************************************************************************
- *                       Load a NERSC configuration                           *
+ *                        Load a NERSC configuration                          *
 ******************************************************************************/
-BEGIN_MODULE_NAMESPACE(MGauge)
+BEGIN_MODULE_NAMESPACE(MIO)
 
-class LoadPar: Serializable
+class LoadNerscPar: Serializable
 {
 public:
-    GRID_SERIALIZABLE_CLASS_MEMBERS(LoadPar,
+    GRID_SERIALIZABLE_CLASS_MEMBERS(LoadNerscPar,
                                     std::string, file);
 };
 
-class TLoad: public Module<LoadPar>
+class TLoadNersc: public Module<LoadNerscPar>
 {
 public:
     // constructor
-    TLoad(const std::string name);
+    TLoadNersc(const std::string name);
     // destructor
-    virtual ~TLoad(void) = default;
+    virtual ~TLoadNersc(void) = default;
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
@@ -64,10 +62,10 @@ public:
     virtual void execute(void);
 };
 
-MODULE_REGISTER_NS(Load, TLoad, MGauge);
+MODULE_REGISTER_NS(LoadNersc, TLoadNersc, MIO);
 
 END_MODULE_NAMESPACE
 
 END_HADRONS_NAMESPACE
 
-#endif // Hadrons_MGauge_Load_hpp_
+#endif // Hadrons_MIO_LoadNersc_hpp_
diff --git a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp
index 5d2c4a13..e61bf163 100644
--- a/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp
+++ b/extras/Hadrons/Modules/MLoop/NoiseLoop.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MLoop/NoiseLoop.hpp
 
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -74,6 +75,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -112,16 +114,16 @@ std::vector<std::string> TNoiseLoop<FImpl>::getOutput(void)
 template <typename FImpl>
 void TNoiseLoop<FImpl>::setup(void)
 {
-    env().template registerLattice<PropagatorField>(getName());
+    envCreateLat(PropagatorField, getName());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TNoiseLoop<FImpl>::execute(void)
 {
-    PropagatorField &loop = *env().template createLattice<PropagatorField>(getName());
-    PropagatorField &q    = *env().template getObject<PropagatorField>(par().q);
-    PropagatorField &eta  = *env().template getObject<PropagatorField>(par().eta);
+    auto &loop = envGet(PropagatorField, getName());
+    auto &q    = envGet(PropagatorField, par().q);
+    auto &eta  = envGet(PropagatorField, par().eta);
     loop = q*adj(eta);
 }
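The three diffs above are instances of the environment-macro migration that runs through this whole patch: explicit registerLattice/createLattice/getObject calls are replaced by envCreateLat/envCacheLat/envTmpLat declarations in setup() and envGet/envGetTmp accessors in execute(). A schematic summary of the idiom, with a hypothetical module TExample standing in for the real ones:

    // Sketch of the new environment idiom (TExample is hypothetical):
    void TExample::setup(void)
    {
        envCreateLat(PropagatorField, getName()); // module-owned output
        envCacheLat(LatticeComplex, phName_);     // shared object, created once
        envTmpLat(LatticeComplex, "coor");        // reusable temporary
    }

    void TExample::execute(void)
    {
        auto &out = envGet(PropagatorField, getName()); // typed reference
        envGetTmp(LatticeComplex, coor);                // binds local 'coor'
        // ... fill 'out' using 'coor' ...
    }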
diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.cc b/extras/Hadrons/Modules/MScalar/ChargedProp.cc
index cd8dc244..1470f1ad 100644
--- a/extras/Hadrons/Modules/MScalar/ChargedProp.cc
+++ b/extras/Hadrons/Modules/MScalar/ChargedProp.cc
@@ -1,3 +1,31 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalar/ChargedProp.cc
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+Author: James Harrison
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
 #include <Grid/Hadrons/Modules/MScalar/ChargedProp.hpp>
 #include <Grid/Hadrons/Modules/MScalar/Scalar.hpp>
 
@@ -37,90 +65,44 @@ void TChargedProp::setup(void)
     {
         phaseName_.push_back("_shiftphase_" + std::to_string(mu));
     }
-    GFSrcName_ = "_" + getName() + "_DinvSrc";
-    if (!env().hasRegisteredObject(freeMomPropName_))
+    GFSrcName_ = getName() + "_DinvSrc";
+    fftName_   = getName() + "_fft";
+
+    freeMomPropDone_ = env().hasCreatedObject(freeMomPropName_);
+    GFSrcDone_       = env().hasCreatedObject(GFSrcName_);
+    phasesDone_      = env().hasCreatedObject(phaseName_[0]);
+    envCacheLat(ScalarField, freeMomPropName_);
+    for (unsigned int mu = 0; mu < env().getNd(); ++mu)
     {
-        env().registerLattice<ScalarField>(freeMomPropName_);
+        envCacheLat(ScalarField, phaseName_[mu]);
     }
-    if (!env().hasRegisteredObject(phaseName_[0]))
-    {
-        for (unsigned int mu = 0; mu < env().getNd(); ++mu)
-        {
-            env().registerLattice<ScalarField>(phaseName_[mu]);
-        }
-    }
-    if (!env().hasRegisteredObject(GFSrcName_))
-    {
-        env().registerLattice<ScalarField>(GFSrcName_);
-    }
-    env().registerLattice<ScalarField>(getName());
+    envCacheLat(ScalarField, GFSrcName_);
+    envCreateLat(ScalarField, getName());
+    envTmpLat(ScalarField, "buf");
+    envTmpLat(ScalarField, "result");
+    envTmpLat(ScalarField, "Amu");
+    envCache(FFT, fftName_, 1, env().getGrid());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 void TChargedProp::execute(void)
 {
     // CACHING ANALYTIC EXPRESSIONS
-    ScalarField &source = *env().getObject<ScalarField>(par().source);
-    Complex     ci(0.0,1.0);
-    FFT         fft(env().getGrid());
-    
-    // cache free scalar propagator
-    if (!env().hasCreatedObject(freeMomPropName_))
-    {
-        LOG(Message) << "Caching momentum space free scalar propagator"
-                     << " (mass= " << par().mass << ")..." << std::endl;
-        freeMomProp_ = env().createLattice<ScalarField>(freeMomPropName_);
-        SIMPL::MomentumSpacePropagator(*freeMomProp_, par().mass);
-    }
-    else
-    {
-        freeMomProp_ = env().getObject<ScalarField>(freeMomPropName_);
-    }
-    // cache G*F*src
-    if (!env().hasCreatedObject(GFSrcName_))
-    {
-        GFSrc_ = env().createLattice<ScalarField>(GFSrcName_);
-        fft.FFT_all_dim(*GFSrc_, source, FFT::forward);
-        *GFSrc_ = (*freeMomProp_)*(*GFSrc_);
-    }
-    else
-    {
-        GFSrc_ = env().getObject<ScalarField>(GFSrcName_);
-    }
-    // cache phases
-    if (!env().hasCreatedObject(phaseName_[0]))
-    {
-        std::vector<int> &l = env().getGrid()->_fdimensions;
-        
-        LOG(Message) << "Caching shift phases..." << std::endl;
-        for (unsigned int mu = 0; mu < env().getNd(); ++mu)
-        {
-            Real twoPiL = M_PI*2./l[mu];
-            
-            phase_.push_back(env().createLattice<ScalarField>(phaseName_[mu]));
-            LatticeCoordinate(*(phase_[mu]), mu);
-            *(phase_[mu]) = exp(ci*twoPiL*(*(phase_[mu])));
-        }
-    }
-    else
-    {
-        for (unsigned int mu = 0; mu < env().getNd(); ++mu)
-        {
-            phase_.push_back(env().getObject<ScalarField>(phaseName_[mu]));
-        }
-    }
+    makeCaches();
 
     // PROPAGATOR CALCULATION
     LOG(Message) << "Computing charged scalar propagator"
                  << " (mass= " << par().mass << ", charge= " << par().charge
                  << ")..." << std::endl;
-    ScalarField &prop  = *env().createLattice<ScalarField>(getName());
-    ScalarField buf(env().getGrid());
-    ScalarField &GFSrc = *GFSrc_, &G = *freeMomProp_;
-    double      q = par().charge;
-    
+    auto   &prop  = envGet(ScalarField, getName());
+    auto   &GFSrc = envGet(ScalarField, GFSrcName_);
+    auto   &G     = envGet(ScalarField, freeMomPropName_);
+    auto   &fft   = envGet(FFT, fftName_);
+    double q = par().charge;
+    envGetTmp(ScalarField, result);
+    envGetTmp(ScalarField, buf);
+
     // G*F*Src
     prop = GFSrc;
@@ -146,12 +128,12 @@ void TChargedProp::execute(void)
     if (!par().output.empty())
     {
         std::string           filename = par().output + "." +
-                                         std::to_string(env().getTrajectory());
+                                         std::to_string(vm().getTrajectory());
         
         LOG(Message) << "Saving zero-momentum projection to '"
                      << filename << "'..." << std::endl;
         
-        CorrWriter            writer(filename);
+        ResultWriter          writer(RESULT_FILE_NAME(par().output));
         std::vector<TComplex> vecBuf;
         std::vector<Complex>  result;
@@ -166,15 +148,55 @@ void TChargedProp::execute(void)
     }
 }
 
+void TChargedProp::makeCaches(void)
+{
+    auto &freeMomProp = envGet(ScalarField, freeMomPropName_);
+    auto &GFSrc       = envGet(ScalarField, GFSrcName_);
+    auto &fft         = envGet(FFT, fftName_);
+
+    if (!freeMomPropDone_)
+    {
+        LOG(Message) << "Caching momentum space free scalar propagator"
+                     << " (mass= " << par().mass << ")..." << std::endl;
+        SIMPL::MomentumSpacePropagator(freeMomProp, par().mass);
+    }
+    if (!GFSrcDone_)
+    {
+        FFT  fft(env().getGrid());
+        auto &source = envGet(ScalarField, par().source);
+        
+        LOG(Message) << "Caching G*F*src..." << std::endl;
+        fft.FFT_all_dim(GFSrc, source, FFT::forward);
+        GFSrc = freeMomProp*GFSrc;
+    }
+    if (!phasesDone_)
+    {
+        std::vector<int> &l = env().getGrid()->_fdimensions;
+        Complex          ci(0.0,1.0);
+        
+        LOG(Message) << "Caching shift phases..." << std::endl;
+        for (unsigned int mu = 0; mu < env().getNd(); ++mu)
+        {
+            Real twoPiL = M_PI*2./l[mu];
+            auto &phmu  = envGet(ScalarField, phaseName_[mu]);
+            
+            LatticeCoordinate(phmu, mu);
+            phmu = exp(ci*twoPiL*phmu);
+            phase_.push_back(&phmu);
+        }
+    }
+}
+
 void TChargedProp::momD1(ScalarField &s, FFT &fft)
 {
-    EmField     &A = *env().getObject<EmField>(par().emField);
-    ScalarField buf(env().getGrid()), result(env().getGrid()),
-                Amu(env().getGrid());
+    auto    &A = envGet(EmField, par().emField);
     Complex ci(0.0,1.0);
 
-    result = zero;
+    envGetTmp(ScalarField, buf);
+    envGetTmp(ScalarField, result);
+    envGetTmp(ScalarField, Amu);
+    result = zero;
     for (unsigned int mu = 0; mu < env().getNd(); ++mu)
     {
         Amu = peekLorentz(A, mu);
@@ -198,12 +220,13 @@ void TChargedProp::momD1(ScalarField &s, FFT &fft)
 
 void TChargedProp::momD2(ScalarField &s, FFT &fft)
 {
-    EmField     &A = *env().getObject<EmField>(par().emField);
-    ScalarField buf(env().getGrid()), result(env().getGrid()),
-                Amu(env().getGrid());
+    auto &A = envGet(EmField, par().emField);
+
+    envGetTmp(ScalarField, buf);
+    envGetTmp(ScalarField, result);
+    envGetTmp(ScalarField, Amu);
 
     result = zero;
-    
     for (unsigned int mu = 0; mu < env().getNd(); ++mu)
     {
         Amu = peekLorentz(A, mu);
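For orientation, the refactored execute()/makeCaches() pair computes the same charge expansion as before; with G the cached free momentum-space propagator and D_1, D_2 the one- and two-photon insertions applied by momD1/momD2, the propagator is assembled, schematically and up to the module's sign and normalisation conventions, as

    G_q = G - q G D_1 G + q^2 ( G D_1 G D_1 G - G D_2 G ) + O(q^3)

Only the bookkeeping (cached objects, temporaries, result files) changes in this patch, not the expansion itself.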
diff --git a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp
index fbe75c05..4d43aec2 100644
--- a/extras/Hadrons/Modules/MScalar/ChargedProp.hpp
+++ b/extras/Hadrons/Modules/MScalar/ChargedProp.hpp
@@ -1,3 +1,30 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalar/ChargedProp.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
 #ifndef Hadrons_MScalar_ChargedProp_hpp_
 #define Hadrons_MScalar_ChargedProp_hpp_
 
@@ -37,19 +64,20 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
     virtual void execute(void);
 private:
+    void makeCaches(void);
     void momD1(ScalarField &s, FFT &fft);
     void momD2(ScalarField &s, FFT &fft);
 private:
-    std::string                freeMomPropName_, GFSrcName_;
+    bool                       freeMomPropDone_, GFSrcDone_, phasesDone_;
+    std::string                freeMomPropName_, GFSrcName_, fftName_;
     std::vector<std::string>   phaseName_;
-    ScalarField                *freeMomProp_, *GFSrc_;
     std::vector<ScalarField *> phase_;
-    EmField                    *A;
 };
 
 MODULE_REGISTER_NS(ChargedProp, TChargedProp, MScalar);
diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.cc b/extras/Hadrons/Modules/MScalar/FreeProp.cc
index 674867e3..ee86b9db 100644
--- a/extras/Hadrons/Modules/MScalar/FreeProp.cc
+++ b/extras/Hadrons/Modules/MScalar/FreeProp.cc
@@ -1,3 +1,30 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalar/FreeProp.cc
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
 #include <Grid/Hadrons/Modules/MScalar/FreeProp.hpp>
 #include <Grid/Hadrons/Modules/MScalar/Scalar.hpp>
 
@@ -33,38 +60,31 @@ void TFreeProp::setup(void)
 {
     freeMomPropName_ = FREEMOMPROP(par().mass);
     
-    if (!env().hasRegisteredObject(freeMomPropName_))
-    {
-        env().registerLattice<ScalarField>(freeMomPropName_);
-    }
-    env().registerLattice<ScalarField>(getName());
+    freePropDone_ = env().hasCreatedObject(freeMomPropName_);
+    envCacheLat(ScalarField, freeMomPropName_);
+    envCreateLat(ScalarField, getName());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 void TFreeProp::execute(void)
 {
-    ScalarField &prop   = *env().createLattice<ScalarField>(getName());
-    ScalarField &source = *env().getObject<ScalarField>(par().source);
-    ScalarField *freeMomProp;
+    auto &freeMomProp = envGet(ScalarField, freeMomPropName_);
+    auto &prop        = envGet(ScalarField, getName());
+    auto &source      = envGet(ScalarField, par().source);
     
-    if (!env().hasCreatedObject(freeMomPropName_))
+    if (!freePropDone_)
     {
         LOG(Message) << "Caching momentum space free scalar propagator"
                      << " (mass= " << par().mass << ")..." << std::endl;
-        freeMomProp = env().createLattice<ScalarField>(freeMomPropName_);
-        SIMPL::MomentumSpacePropagator(*freeMomProp, par().mass);
-    }
-    else
-    {
-        freeMomProp = env().getObject<ScalarField>(freeMomPropName_);
+        SIMPL::MomentumSpacePropagator(freeMomProp, par().mass);
     }
     LOG(Message) << "Computing free scalar propagator..." << std::endl;
-    SIMPL::FreePropagator(source, prop, *freeMomProp);
+    SIMPL::FreePropagator(source, prop, freeMomProp);
     
     if (!par().output.empty())
     {
         TextWriter            writer(par().output + "." +
-                                     std::to_string(env().getTrajectory()));
+                                     std::to_string(vm().getTrajectory()));
         std::vector<TComplex> buf;
         std::vector<Complex>  result;
diff --git a/extras/Hadrons/Modules/MScalar/FreeProp.hpp b/extras/Hadrons/Modules/MScalar/FreeProp.hpp
index 97cf288a..df17f44e 100644
--- a/extras/Hadrons/Modules/MScalar/FreeProp.hpp
+++ b/extras/Hadrons/Modules/MScalar/FreeProp.hpp
@@ -1,3 +1,30 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalar/FreeProp.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
 #ifndef Hadrons_MScalar_FreeProp_hpp_
 #define Hadrons_MScalar_FreeProp_hpp_
 
@@ -33,12 +60,14 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
     virtual void execute(void);
 private:
     std::string freeMomPropName_;
+    bool        freePropDone_;
 };
 
 MODULE_REGISTER_NS(FreeProp, TFreeProp, MScalar);
diff --git a/extras/Hadrons/Modules/MScalar/Scalar.hpp b/extras/Hadrons/Modules/MScalar/Scalar.hpp
index db702ff2..7272f1b3 100644
--- a/extras/Hadrons/Modules/MScalar/Scalar.hpp
+++ b/extras/Hadrons/Modules/MScalar/Scalar.hpp
@@ -1,3 +1,30 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalar/Scalar.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
 #ifndef Hadrons_Scalar_hpp_
 #define Hadrons_Scalar_hpp_
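Both scalar modules cache the object named by FREEMOMPROP(mass), the free momentum-space propagator filled by SIMPL::MomentumSpacePropagator. Assuming the usual nearest-neighbour boson discretisation (a convention this patch does not touch), that cached object is

    Gtilde(p) = 1 / ( m^2 + sum_mu 4 sin^2(p_mu/2) ),   p_mu = 2 pi n_mu / L_mu

so computing it once per mass value saves a lattice-sized recomputation for every propagator sharing that mass.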
diff --git a/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp
new file mode 100644
index 00000000..96eb794e
--- /dev/null
+++ b/extras/Hadrons/Modules/MScalarSUN/TrMag.hpp
@@ -0,0 +1,146 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalarSUN/TrMag.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef Hadrons_MScalarSUN_TrMag_hpp_
+#define Hadrons_MScalarSUN_TrMag_hpp_
+
+#include <Grid/Hadrons/Global.hpp>
+#include <Grid/Hadrons/Module.hpp>
+#include <Grid/Hadrons/ModuleFactory.hpp>
+
+BEGIN_HADRONS_NAMESPACE
+
+/******************************************************************************
+ *                       Module to compute tr(mag^n)                          *
+ ******************************************************************************/
+BEGIN_MODULE_NAMESPACE(MScalarSUN)
+
+class TrMagPar: Serializable
+{
+public:
+    GRID_SERIALIZABLE_CLASS_MEMBERS(TrMagPar,
+                                    std::string,  field,
+                                    unsigned int, maxPow,
+                                    std::string,  output);
+};
+
+template <typename SImpl>
+class TTrMag: public Module<TrMagPar>
+{
+public:
+    typedef typename SImpl::Field        Field;
+    typedef typename SImpl::ComplexField ComplexField;
+    class Result: Serializable
+    {
+    public:
+        GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
+                                        std::string, op,
+                                        Real,        value);
+    };
+public:
+    // constructor
+    TTrMag(const std::string name);
+    // destructor
+    virtual ~TTrMag(void) = default;
+    // dependency relation
+    virtual std::vector<std::string> getInput(void);
+    virtual std::vector<std::string> getOutput(void);
+    // setup
+    virtual void setup(void);
+    // execution
+    virtual void execute(void);
+};
+
+MODULE_REGISTER_NS(TrMagSU2, TTrMag<ScalarNxNAdjImplR<2>>, MScalarSUN);
+MODULE_REGISTER_NS(TrMagSU3, TTrMag<ScalarNxNAdjImplR<3>>, MScalarSUN);
+MODULE_REGISTER_NS(TrMagSU4, TTrMag<ScalarNxNAdjImplR<4>>, MScalarSUN);
+MODULE_REGISTER_NS(TrMagSU5, TTrMag<ScalarNxNAdjImplR<5>>, MScalarSUN);
+MODULE_REGISTER_NS(TrMagSU6, TTrMag<ScalarNxNAdjImplR<6>>, MScalarSUN);
+
+/******************************************************************************
+ *                         TTrMag implementation                              *
+ ******************************************************************************/
+// constructor /////////////////////////////////////////////////////////////////
+template <typename SImpl>
+TTrMag<SImpl>::TTrMag(const std::string name)
+: Module<TrMagPar>(name)
+{}
+
+// dependencies/products ///////////////////////////////////////////////////////
+template <typename SImpl>
+std::vector<std::string> TTrMag<SImpl>::getInput(void)
+{
+    std::vector<std::string> in = {par().field};
+    
+    return in;
+}
+
+template <typename SImpl>
+std::vector<std::string> TTrMag<SImpl>::getOutput(void)
+{
+    std::vector<std::string> out = {};
+    
+    return out;
+}
+
+// setup ///////////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTrMag<SImpl>::setup(void)
+{}
+
+// execution ///////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTrMag<SImpl>::execute(void)
+{
+    LOG(Message) << "Computing tr(mag^n) for n even up to " << par().maxPow
+                 << "..." << std::endl;
+
+    std::vector<Result> result;
+    ResultWriter        writer(RESULT_FILE_NAME(par().output));
+    auto                &phi = envGet(Field, par().field);
+
+    auto m2 = sum(phi), mn = m2;
+
+    m2 = -m2*m2;
+    mn = 1.;
+    for (unsigned int n = 2; n <= par().maxPow; n += 2)
+    {
+        Result r;
+
+        mn      = mn*m2;
+        r.op    = "tr(mag^" + std::to_string(n) + ")";
+        r.value = TensorRemove(trace(mn)).real();
+        result.push_back(r);
+    }
+    write(writer, "trmag", result);
+}
+
+END_MODULE_NAMESPACE
+
+END_HADRONS_NAMESPACE
+
+#endif // Hadrons_MScalarSUN_TrMag_hpp_
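A note on the loop in TTrMag::execute: with M = sum(phi) the volume-summed magnetisation, each iteration multiplies by m2 = -M*M, so the value recorded under the label tr(mag^n) (n even) is tr[(-M^2)^(n/2)]. If the su(N)-valued field is taken anti-Hermitian this equals tr[(M^dag M)^(n/2)], which is real and non-negative; with a Hermitian convention it instead equals (-1)^(n/2) tr(M^n), so the field convention matters when interpreting the output sign.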
diff --git a/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp
new file mode 100644
index 00000000..4586663d
--- /dev/null
+++ b/extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp
@@ -0,0 +1,182 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalarSUN/TrPhi.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef Hadrons_MScalarSUN_TrPhi_hpp_
+#define Hadrons_MScalarSUN_TrPhi_hpp_
+
+#include <Grid/Hadrons/Global.hpp>
+#include <Grid/Hadrons/Module.hpp>
+#include <Grid/Hadrons/ModuleFactory.hpp>
+
+BEGIN_HADRONS_NAMESPACE
+
+/******************************************************************************
+ *                       Module to compute tr(phi^n)                          *
+ ******************************************************************************/
+BEGIN_MODULE_NAMESPACE(MScalarSUN)
+
+class TrPhiPar: Serializable
+{
+public:
+    GRID_SERIALIZABLE_CLASS_MEMBERS(TrPhiPar,
+                                    std::string,  field,
+                                    unsigned int, maxPow,
+                                    std::string,  output);
+};
+
+template <typename SImpl>
+class TTrPhi: public Module<TrPhiPar>
+{
+public:
+    typedef typename SImpl::Field        Field;
+    typedef typename SImpl::ComplexField ComplexField;
+    class Result: Serializable
+    {
+    public:
+        GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
+                                        std::string, op,
+                                        Real,        value);
+    };
+public:
+    // constructor
+    TTrPhi(const std::string name);
+    // destructor
+    virtual ~TTrPhi(void) = default;
+    // dependency relation
+    virtual std::vector<std::string> getInput(void);
+    virtual std::vector<std::string> getOutput(void);
+    // setup
+    virtual void setup(void);
+    // execution
+    virtual void execute(void);
+private:
+    // output name generator
+    std::string outName(const unsigned int n);
+};
+
+MODULE_REGISTER_NS(TrPhiSU2, TTrPhi<ScalarNxNAdjImplR<2>>, MScalarSUN);
+MODULE_REGISTER_NS(TrPhiSU3, TTrPhi<ScalarNxNAdjImplR<3>>, MScalarSUN);
+MODULE_REGISTER_NS(TrPhiSU4, TTrPhi<ScalarNxNAdjImplR<4>>, MScalarSUN);
+MODULE_REGISTER_NS(TrPhiSU5, TTrPhi<ScalarNxNAdjImplR<5>>, MScalarSUN);
+MODULE_REGISTER_NS(TrPhiSU6, TTrPhi<ScalarNxNAdjImplR<6>>, MScalarSUN);
+
+/******************************************************************************
+ *                         TTrPhi implementation                              *
+ ******************************************************************************/
+// constructor /////////////////////////////////////////////////////////////////
+template <typename SImpl>
+TTrPhi<SImpl>::TTrPhi(const std::string name)
+: Module<TrPhiPar>(name)
+{}
+
+// dependencies/products ///////////////////////////////////////////////////////
+template <typename SImpl>
+std::vector<std::string> TTrPhi<SImpl>::getInput(void)
+{
+    std::vector<std::string> in = {par().field};
+    
+    return in;
+}
+
+template <typename SImpl>
+std::vector<std::string> TTrPhi<SImpl>::getOutput(void)
+{
+    std::vector<std::string> out;
+    
+    for (unsigned int n = 2; n <= par().maxPow; n += 2)
+    {
+        out.push_back(outName(n));
+    }
+    
+    return out;
+}
+
+// setup ///////////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTrPhi<SImpl>::setup(void)
+{
+    if (par().maxPow < 2)
+    {
+        HADRON_ERROR(Size, "'maxPow' should be at least equal to 2");
+    }
+    envTmpLat(Field, "phi2");
+    envTmpLat(Field, "buf");
+    for (unsigned int n = 2; n <= par().maxPow; n += 2)
+    {
+        envCreateLat(ComplexField, outName(n));
+    }
+}
+
+// execution ///////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTrPhi<SImpl>::execute(void)
+{
+    LOG(Message) << "Computing tr(phi^n) for n even up to " << par().maxPow
+                 << "..." << std::endl;
+
+    std::vector<Result> result;
+    auto                &phi = envGet(Field, par().field);
+
+    envGetTmp(Field, phi2);
+    envGetTmp(Field, buf);
+    buf  = 1.;
+    phi2 = -phi*phi;
+    for (unsigned int n = 2; n <= par().maxPow; n += 2)
+    {
+        auto &phin = envGet(ComplexField, outName(n));
+
+        buf  = buf*phi2;
+        phin = trace(buf);
+        if (!par().output.empty())
+        {
+            Result r;
+
+            r.op    = "tr(phi^" + std::to_string(n) + ")";
+            r.value = TensorRemove(sum(phin)).real();
+            result.push_back(r);
+        }
+    }
+    if (result.size() > 0)
+    {
+        ResultWriter writer(RESULT_FILE_NAME(par().output));
+
+        write(writer, "trphi", result);
+    }
+}
+
+// output name generator ///////////////////////////////////////////////////////
+template <typename SImpl>
+std::string TTrPhi<SImpl>::outName(const unsigned int n)
+{
+    return getName() + "_" + std::to_string(n);
+}
+
+END_MODULE_NAMESPACE
+
+END_HADRONS_NAMESPACE
+
+#endif // Hadrons_MScalarSUN_TrPhi_hpp_
diff --git a/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp b/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp
new file mode 100644
index 00000000..abfbf609
--- /dev/null
+++ b/extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp
@@ -0,0 +1,184 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid 
+
+Source file: extras/Hadrons/Modules/MScalarSUN/TwoPoint.hpp
+
+Copyright (C) 2015-2018
+
+Author: Antonin Portelli
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef Hadrons_MScalarSUN_TwoPoint_hpp_
+#define Hadrons_MScalarSUN_TwoPoint_hpp_
+
+#include <Grid/Hadrons/Global.hpp>
+#include <Grid/Hadrons/Module.hpp>
+#include <Grid/Hadrons/ModuleFactory.hpp>
+
+BEGIN_HADRONS_NAMESPACE
+
+/******************************************************************************
+ *                 2-pt functions for a given set of operators               *
+ ******************************************************************************/
+BEGIN_MODULE_NAMESPACE(MScalarSUN)
+
+class TwoPointPar: Serializable
+{
+public:
+    GRID_SERIALIZABLE_CLASS_MEMBERS(TwoPointPar,
+                                    std::vector<std::string>, op,
+                                    std::string,              output);
+};
+
+template <typename SImpl>
+class TTwoPoint: public Module<TwoPointPar>
+{
+public:
+    typedef typename SImpl::Field        Field;
+    typedef typename SImpl::ComplexField ComplexField;
+    class Result: Serializable
+    {
+    public:
+        GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
+                                        std::string,          sink,
+                                        std::string,          source,
+                                        std::vector<Complex>, data);
+    };
+public:
+    // constructor
+    TTwoPoint(const std::string name);
+    // destructor
+    virtual ~TTwoPoint(void) = default;
+    // dependency relation
+    virtual std::vector<std::string> getInput(void);
+    virtual std::vector<std::string> getOutput(void);
+    // setup
+    virtual void setup(void);
+    // execution
+    virtual void execute(void);
+private:
+    // make 2-pt function
+    template <class SinkSite, class SourceSite>
+    std::vector<Complex> makeTwoPoint(const std::vector<SinkSite>   &sink,
+                                      const std::vector<SourceSite> &source);
+};
+
+MODULE_REGISTER_NS(TwoPointSU2, TTwoPoint<ScalarNxNAdjImplR<2>>, MScalarSUN);
+MODULE_REGISTER_NS(TwoPointSU3, TTwoPoint<ScalarNxNAdjImplR<3>>, MScalarSUN);
+MODULE_REGISTER_NS(TwoPointSU4, TTwoPoint<ScalarNxNAdjImplR<4>>, MScalarSUN);
+MODULE_REGISTER_NS(TwoPointSU5, TTwoPoint<ScalarNxNAdjImplR<5>>, MScalarSUN);
+MODULE_REGISTER_NS(TwoPointSU6, TTwoPoint<ScalarNxNAdjImplR<6>>, MScalarSUN);
+
+/******************************************************************************
+ *                       TTwoPoint implementation                             *
+ ******************************************************************************/
+// constructor /////////////////////////////////////////////////////////////////
+template <typename SImpl>
+TTwoPoint<SImpl>::TTwoPoint(const std::string name)
+: Module<TwoPointPar>(name)
+{}
+
+// dependencies/products ///////////////////////////////////////////////////////
+template <typename SImpl>
+std::vector<std::string> TTwoPoint<SImpl>::getInput(void)
+{
+    return par().op;
+}
+
+template <typename SImpl>
+std::vector<std::string> TTwoPoint<SImpl>::getOutput(void)
+{
+    std::vector<std::string> out = {};
+    
+    return out;
+}
+
+// setup ///////////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTwoPoint<SImpl>::setup(void)
+{
+    const unsigned int nt = env().getDim().back();
+    envTmp(std::vector<std::vector<TComplex>>, "slicedOp", 1, par().op.size(),
+           std::vector<TComplex>(nt));
+}
+
+// execution ///////////////////////////////////////////////////////////////////
+template <typename SImpl>
+void TTwoPoint<SImpl>::execute(void)
+{
+    LOG(Message) << "Computing 2-point functions for operators:" << std::endl;
+    for (auto &o: par().op)
+    {
+        LOG(Message) << "  '" << o << "'" << std::endl;
+    }
+
+    ResultWriter       writer(RESULT_FILE_NAME(par().output));
+    const unsigned int nd = env().getDim().size();
+    std::vector<Result> result;
+
+    envGetTmp(std::vector<std::vector<TComplex>>, slicedOp);
+    for (unsigned int i = 0; i < par().op.size(); ++i)
+    {
+        auto &op = envGet(ComplexField, par().op[i]);
+
+        sliceSum(op, slicedOp[i], nd - 1);
+    }
+    for (unsigned int i = 0; i < par().op.size(); ++i)
+    for (unsigned int j = 0; j < par().op.size(); ++j)
+    {
+        Result r;
+
+        r.sink   = par().op[i];
+        r.source = par().op[j];
+        r.data   = makeTwoPoint(slicedOp[i], slicedOp[j]);
+        result.push_back(r);
+    }
+    write(writer, "twopt", result);
+}
+// make 2-pt function //////////////////////////////////////////////////////////
+template <typename SImpl>
+template <class SinkSite, class SourceSite>
+std::vector<Complex> TTwoPoint<SImpl>::makeTwoPoint(
+    const std::vector<SinkSite>   &sink,
+    const std::vector<SourceSite> &source)
+{
+    assert(sink.size() == source.size());
+    
+    unsigned int         nt = sink.size();
+    std::vector<Complex> res(nt, 0.);
+    
+    for (unsigned int dt = 0; dt < nt; ++dt)
+    {
+        for (unsigned int t = 0; t < nt; ++t)
+        {
+            res[dt] += TensorRemove(trace(sink[(t+dt)%nt]*source[t]));
+        }
+        res[dt] *= 1./static_cast<double>(nt);
+    }
+    
+    return res;
+}
+
+END_MODULE_NAMESPACE
+
+END_HADRONS_NAMESPACE
+
+#endif // Hadrons_MScalarSUN_TwoPoint_hpp_
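makeTwoPoint above is the standard source-averaged, time-sliced two-point function: for sliced operators O_i(t) it returns

    C_ij(dt) = (1/N_t) sum_t tr[ O_i(t+dt) O_j(t) ]

with times taken modulo N_t, exactly the (t+dt)%nt average implemented in the loop.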
diff --git a/extras/Hadrons/Modules/MSink/Point.hpp b/extras/Hadrons/Modules/MSink/Point.hpp
index 0761c4c4..c5f6eff0 100644
--- a/extras/Hadrons/Modules/MSink/Point.hpp
+++ b/extras/Hadrons/Modules/MSink/Point.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSink/Point.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -61,10 +62,14 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
    // execution
     virtual void execute(void);
+private:
+    bool        hasPhase_{false};
+    std::string momphName_;
 };
 
 MODULE_REGISTER_NS(Point, TPoint<FIMPL>, MSink);
@@ -77,6 +82,7 @@ MODULE_REGISTER_NS(ScalarPoint, TPoint<ScalarImplCR>, MSink);
 template <typename FImpl>
 TPoint<FImpl>::TPoint(const std::string name)
 : Module<PointPar>(name)
+, momphName_ (name + "_momph")
 {}
 
 // dependencies/products ///////////////////////////////////////////////////////
@@ -100,30 +106,37 @@ std::vector<std::string> TPoint<FImpl>::getOutput(void)
 template <typename FImpl>
 void TPoint<FImpl>::setup(void)
 {
-    unsigned int size;
-    
-    size = env().template lattice4dSize<LatticeComplex>();
-    env().registerObject(getName(), size);
+    envTmpLat(LatticeComplex, "coor");
+    envCacheLat(LatticeComplex, momphName_);
+    envCreate(SinkFn, getName(), 1, nullptr);
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TPoint<FImpl>::execute(void)
-{
-    std::vector<Real> p = strToVec<Real>(par().mom);
-    LatticeComplex    ph(env().getGrid()), coor(env().getGrid());
-    Complex           i(0.0,1.0);
-    
+{   
     LOG(Message) << "Setting up point sink function for momentum ["
                  << par().mom << "]" << std::endl;
-    ph = zero;
-    for(unsigned int mu = 0; mu < env().getNd(); mu++)
+
+    auto &ph = envGet(LatticeComplex, momphName_);
+    
+    if (!hasPhase_)
     {
-        LatticeCoordinate(coor, mu);
-        ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor;
+        Complex           i(0.0,1.0);
+        std::vector<Real> p;
+        
+        envGetTmp(LatticeComplex, coor);
+        p  = strToVec<Real>(par().mom);
+        ph = zero;
+        for(unsigned int mu = 0; mu < env().getNd(); mu++)
+        {
+            LatticeCoordinate(coor, mu);
+            ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor;
+        }
+        ph = exp((Real)(2*M_PI)*i*ph);
+        hasPhase_ = true;
     }
-    ph = exp((Real)(2*M_PI)*i*ph);
-    auto sink = [ph](const PropagatorField &field)
+    auto sink = [&ph](const PropagatorField &field)
     {
         SlicedPropagator res;
         PropagatorField  tmp = ph*field;
@@ -132,7 +145,7 @@ void TPoint<FImpl>::execute(void)
         return res;
     };
-    env().setObject(getName(), new SinkFn(sink));
+    envGet(SinkFn, getName()) = sink;
 }
 
 END_MODULE_NAMESPACE
diff --git a/extras/Hadrons/Modules/MSink/Smear.hpp b/extras/Hadrons/Modules/MSink/Smear.hpp
index c3973d2b..e72dece0 100644
--- a/extras/Hadrons/Modules/MSink/Smear.hpp
+++ b/extras/Hadrons/Modules/MSink/Smear.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSink/Smear.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -100,9 +102,7 @@ std::vector<std::string> TSmear<FImpl>::getOutput(void)
 template <typename FImpl>
 void TSmear<FImpl>::setup(void)
 {
-    unsigned int nt = env().getDim(Tp);
-    unsigned int size = nt * sizeof(SitePropagator);
-    env().registerObject(getName(), size);
+    envCreate(SlicedPropagator, getName(), 1, env().getDim(Tp));
 }
 
 // execution ///////////////////////////////////////////////////////////////////
@@ -113,11 +113,11 @@ void TSmear<FImpl>::execute(void)
                  << "' using sink function '" << par().sink << "'."
                  << std::endl;
 
-    SinkFn           &sink = *env().template getObject<SinkFn>(par().sink);
-    PropagatorField  &q    = *env().template getObject<PropagatorField>(par().q);
-    SlicedPropagator *out  = new SlicedPropagator(env().getDim(Tp));
-    *out = sink(q);
-    env().setObject(getName(), out);
+    auto &sink = envGet(SinkFn, par().sink);
+    auto &q    = envGet(PropagatorField, par().q);
+    auto &out  = envGet(SlicedPropagator, getName());
+    
+    out = sink(q);
 }
 
 END_MODULE_NAMESPACE
diff --git a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp
index b1f63a5d..54c0f2d8 100644
--- a/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp
+++ b/extras/Hadrons/Modules/MSolver/RBPrecCG.hpp
@@ -4,8 +4,7 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSolver/RBPrecCG.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
 
@@ -61,7 +60,9 @@ public:
     virtual ~TRBPrecCG(void) = default;
     // dependencies/products
     virtual std::vector<std::string> getInput(void);
+    virtual std::vector<std::string> getReference(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -83,11 +84,19 @@ TRBPrecCG<FImpl>::TRBPrecCG(const std::string name)
 template <typename FImpl>
 std::vector<std::string> TRBPrecCG<FImpl>::getInput(void)
 {
-    std::vector<std::string> in = {par().action};
+    std::vector<std::string> in = {};
     
     return in;
 }
 
+template <typename FImpl>
+std::vector<std::string> TRBPrecCG<FImpl>::getReference(void)
+{
+    std::vector<std::string> ref = {par().action};
+    
+    return ref;
+}
+
 template <typename FImpl>
 std::vector<std::string> TRBPrecCG<FImpl>::getOutput(void)
 {
@@ -100,17 +109,12 @@ std::vector<std::string> TRBPrecCG<FImpl>::getOutput(void)
 template <typename FImpl>
 void TRBPrecCG<FImpl>::setup(void)
 {
-    auto Ls = env().getObjectLs(par().action);
-    
-    env().registerObject(getName(), 0, Ls);
-    env().addOwnership(getName(), par().action);
-}
+    LOG(Message) << "setting up Schur red-black preconditioned CG for"
+                 << " action '" << par().action << "' with residual "
+                 << par().residual << std::endl;
 
-// execution ///////////////////////////////////////////////////////////////////
-template <typename FImpl>
-void TRBPrecCG<FImpl>::execute(void)
-{
-    auto &mat = *(env().template getObject<FMat>(par().action));
+    auto Ls   = env().getObjectLs(par().action);
+    auto &mat = envGet(FMat, par().action);
     auto solver = [&mat, this](FermionField &sol, const FermionField &source)
     {
        ConjugateGradient<FermionField> cg(par().residual, 10000);
@@ -118,13 +122,14 @@ void TRBPrecCG<FImpl>::execute(void)
         schurSolver(mat, source, sol);
     };
-    
-    LOG(Message) << "setting up Schur red-black preconditioned CG for"
-                 << " action '" << par().action << "' with residual "
-                 << par().residual << std::endl;
-    env().setObject(getName(), new SolverFn(solver));
+    envCreate(SolverFn, getName(), Ls, solver);
 }
 
+// execution ///////////////////////////////////////////////////////////////////
+template <typename FImpl>
+void TRBPrecCG<FImpl>::execute(void)
+{}
+
 END_MODULE_NAMESPACE
 
 END_HADRONS_NAMESPACE
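The getReference() hook introduced for RBPrecCG separates two kinds of dependencies: getInput() now lists only objects read as data during execution, while getReference() lists objects (here the action) that the module must be scheduled after without consuming them. A sketch of the pattern for some other module (TMyModule and its parameters are hypothetical):

    template <typename FImpl>
    std::vector<std::string> TMyModule<FImpl>::getInput(void)
    {
        return {par().q};      // data: read at execution time
    }

    template <typename FImpl>
    std::vector<std::string> TMyModule<FImpl>::getReference(void)
    {
        return {par().action}; // scheduling/ownership dependency only
    }

The VirtualMachine::pushModule logic further below propagates such references to the modules that consume this module's output.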
diff --git a/extras/Hadrons/Modules/MSource/Point.hpp b/extras/Hadrons/Modules/MSource/Point.hpp
index 7815e5c1..ac6df252 100644
--- a/extras/Hadrons/Modules/MSource/Point.hpp
+++ b/extras/Hadrons/Modules/MSource/Point.hpp
@@ -4,10 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSource/Point.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -72,6 +72,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -111,19 +112,20 @@ std::vector<std::string> TPoint<FImpl>::getOutput(void)
 template <typename FImpl>
 void TPoint<FImpl>::setup(void)
 {
-    env().template registerLattice<PropagatorField>(getName());
+    envCreateLat(PropagatorField, getName());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TPoint<FImpl>::execute(void)
 {
-    std::vector<int> position = strToVec<int>(par().position);
-    SitePropagator   id;
-    
     LOG(Message) << "Creating point source at position [" << par().position
-                 << "]" << std::endl;
-    PropagatorField &src = *env().template createLattice<PropagatorField>(getName());
+                 << "]" << std::endl;
+
+    std::vector<int> position = strToVec<int>(par().position);
+    auto             &src     = envGet(PropagatorField, getName());
+    SitePropagator   id;
+    
     id  = 1.;
     src = zero;
     pokeSite(id, src, position);
diff --git a/extras/Hadrons/Modules/MSource/SeqConserved.hpp b/extras/Hadrons/Modules/MSource/SeqConserved.hpp
index 86a7dfb9..ee8d8d56 100644
--- a/extras/Hadrons/Modules/MSource/SeqConserved.hpp
+++ b/extras/Hadrons/Modules/MSource/SeqConserved.hpp
@@ -2,11 +2,12 @@
 
 Grid physics library, www.github.com/paboyle/Grid 
 
-Source file: extras/Hadrons/Modules/MContraction/SeqConserved.hpp
+Source file: extras/Hadrons/Modules/MSource/SeqConserved.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -83,6 +84,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -122,7 +124,7 @@ template <typename FImpl>
 void TSeqConserved<FImpl>::setup(void)
 {
     auto Ls_ = env().getObjectLs(par().action);
-    env().template registerLattice<PropagatorField>(getName(), Ls_);
+    envCreateLat(PropagatorField, getName(), Ls_);
 }
 
 // execution ///////////////////////////////////////////////////////////////////
@@ -142,9 +144,9 @@ void TSeqConserved<FImpl>::execute(void)
                      << par().mu << ") for " << par().tA << " <= t <= " 
                      << par().tB << std::endl;
     }
-    PropagatorField &src = *env().template createLattice<PropagatorField>(getName());
-    PropagatorField &q   = *env().template getObject<PropagatorField>(par().q);
-    FMat            &mat = *(env().template getObject<FMat>(par().action));
+    auto &src = envGet(PropagatorField, getName());
+    auto &q   = envGet(PropagatorField, par().q);
+    auto &mat = envGet(FMat, par().action);
 
     std::vector<Real> mom = strToVec<Real>(par().mom);
     mat.SeqConservedCurrent(q, src, par().curr_type, par().mu, 
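The hasPhase_/momphName_ pattern used in MSink/Point above and in the SeqGamma and Wall sources below caches the plane-wave factor

    ph(x) = exp( 2 pi i sum_mu p_mu x_mu / L_mu )

in the environment (under name + "_momph") the first time execute() runs, so subsequent trajectories reuse it instead of rebuilding it from LatticeCoordinate each time.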
diff --git a/extras/Hadrons/Modules/MSource/SeqGamma.hpp b/extras/Hadrons/Modules/MSource/SeqGamma.hpp
index e2129a46..40eda29f 100644
--- a/extras/Hadrons/Modules/MSource/SeqGamma.hpp
+++ b/extras/Hadrons/Modules/MSource/SeqGamma.hpp
@@ -4,11 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSource/SeqGamma.hpp
 
-Copyright (C) 2015
-Copyright (C) 2016
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
 Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -81,10 +80,14 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
     virtual void execute(void);
+private:
+    bool        hasPhase_{false};
+    std::string momphName_, tName_;
 };
 
 MODULE_REGISTER_NS(SeqGamma, TSeqGamma<FIMPL>, MSource);
@@ -96,6 +99,8 @@ MODULE_REGISTER_NS(SeqGamma, TSeqGamma<FIMPL>, MSource);
 template <typename FImpl>
 TSeqGamma<FImpl>::TSeqGamma(const std::string name)
 : Module<SeqGammaPar>(name)
+, momphName_ (name + "_momph")
+, tName_     (name + "_t")
 {}
 
 // dependencies/products ///////////////////////////////////////////////////////
@@ -119,7 +124,10 @@ std::vector<std::string> TSeqGamma<FImpl>::getOutput(void)
 template <typename FImpl>
 void TSeqGamma<FImpl>::setup(void)
 {
-    env().template registerLattice<PropagatorField>(getName());
+    envCreateLat(PropagatorField, getName());
+    envCacheLat(Lattice<iScalar<vInteger>>, tName_);
+    envCacheLat(LatticeComplex, momphName_);
+    envTmpLat(LatticeComplex, "coor");
 }
 
 // execution ///////////////////////////////////////////////////////////////////
@@ -137,23 +145,29 @@ void TSeqGamma<FImpl>::execute(void)
                      << " sequential source for "
                      << par().tA << " <= t <= " << par().tB << std::endl;
     }
-    PropagatorField &src = *env().template createLattice<PropagatorField>(getName());
-    PropagatorField &q   = *env().template getObject<PropagatorField>(par().q);
-    Lattice<iScalar<vInteger>> t(env().getGrid());
-    LatticeComplex    ph(env().getGrid()), coor(env().getGrid());
-    Gamma             g(par().gamma);
-    std::vector<Real> p;
-    Complex           i(0.0,1.0);
+    auto  &src = envGet(PropagatorField, getName());
+    auto  &q   = envGet(PropagatorField, par().q);
+    auto  &ph  = envGet(LatticeComplex, momphName_);
+    auto  &t   = envGet(Lattice<iScalar<vInteger>>, tName_);
+    Gamma g(par().gamma);
 
-    p  = strToVec<Real>(par().mom);
-    ph = zero;
-    for(unsigned int mu = 0; mu < env().getNd(); mu++)
+    if (!hasPhase_)
     {
-        LatticeCoordinate(coor, mu);
-        ph = ph + p[mu]*coor*((1./(env().getGrid()->_fdimensions[mu])));
+        Complex           i(0.0,1.0);
+        std::vector<Real> p;
+
+        envGetTmp(LatticeComplex, coor);
+        p  = strToVec<Real>(par().mom);
+        ph = zero;
+        for(unsigned int mu = 0; mu < env().getNd(); mu++)
+        {
+            LatticeCoordinate(coor, mu);
+            ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor;
+        }
+        ph = exp((Real)(2*M_PI)*i*ph);
+        LatticeCoordinate(t, Tp);
+        hasPhase_ = true;
     }
-    ph = exp((Real)(2*M_PI)*i*ph);
-    LatticeCoordinate(t, Tp);
     src = where((t >= par().tA) and (t <= par().tB), ph*(g*q), 0.*q);
 }
diff --git a/extras/Hadrons/Modules/MSource/Wall.hpp b/extras/Hadrons/Modules/MSource/Wall.hpp
index 4de37e4d..5853b11a 100644
--- a/extras/Hadrons/Modules/MSource/Wall.hpp
+++ b/extras/Hadrons/Modules/MSource/Wall.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MSource/Wall.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -73,10 +74,14 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
     virtual void execute(void);
+private:
+    bool        hasPhase_{false};
+    std::string momphName_, tName_;
 };
 
 MODULE_REGISTER_NS(Wall, TWall<FIMPL>, MSource);
@@ -88,13 +93,15 @@ MODULE_REGISTER_NS(Wall, TWall<FIMPL>, MSource);
 template <typename FImpl>
 TWall<FImpl>::TWall(const std::string name)
 : Module<WallPar>(name)
+, momphName_ (name + "_momph")
+, tName_     (name + "_t")
 {}
 
 // dependencies/products ///////////////////////////////////////////////////////
 template <typename FImpl>
 std::vector<std::string> TWall<FImpl>::getInput(void)
 {
-    std::vector<std::string> in;
+    std::vector<std::string> in = {};
     
     return in;
 }
@@ -111,7 +118,7 @@ std::vector<std::string> TWall<FImpl>::getOutput(void)
 template <typename FImpl>
 void TWall<FImpl>::setup(void)
 {
-    env().template registerLattice<PropagatorField>(getName());
+    envCreateLat(PropagatorField, getName());
 }
 
 // execution ///////////////////////////////////////////////////////////////////
@@ -121,21 +128,28 @@ void TWall<FImpl>::execute(void)
     LOG(Message) << "Generating wall source at t = " << par().tW 
                  << " with momentum " << par().mom << std::endl;
 
-    PropagatorField &src = *env().template createLattice<PropagatorField>(getName());
-    Lattice<iScalar<vInteger>> t(env().getGrid());
-    LatticeComplex    ph(env().getGrid()), coor(env().getGrid());
-    std::vector<Real> p;
-    Complex           i(0.0,1.0);
+    auto &src = envGet(PropagatorField, getName());
+    auto &ph  = envGet(LatticeComplex, momphName_);
+    auto &t   = envGet(Lattice<iScalar<vInteger>>, tName_);
 
-    p  = strToVec<Real>(par().mom);
-    ph = zero;
-    for(unsigned int mu = 0; mu < Nd; mu++)
+    if (!hasPhase_)
     {
-        LatticeCoordinate(coor, mu);
-        ph = ph + p[mu]*coor*((1./(env().getGrid()->_fdimensions[mu])));
+        Complex           i(0.0,1.0);
+        std::vector<Real> p;
+
+        envGetTmp(LatticeComplex, coor);
+        p  = strToVec<Real>(par().mom);
+        ph = zero;
+        for(unsigned int mu = 0; mu < env().getNd(); mu++)
+        {
+            LatticeCoordinate(coor, mu);
+            ph = ph + (p[mu]/env().getGrid()->_fdimensions[mu])*coor;
+        }
+        ph = exp((Real)(2*M_PI)*i*ph);
+        LatticeCoordinate(t, Tp);
+        hasPhase_ = true;
     }
-    ph = exp((Real)(2*M_PI)*i*ph);
-    LatticeCoordinate(t, Tp);
+    
     src = 1.;
     src = where((t == par().tW), src*ph, 0.*src);
 }
"Generating Z_2 band for " << par().tA << " <= t <= " - << par().tB << std::endl; + << par().tB << std::endl; } - PropagatorField &src = *env().template createLattice(getName()); - LatticeCoordinate(t, Tp); + + auto &src = envGet(PropagatorField, getName()); + auto &t = envGet(Lattice>, tName_); + Complex shift(1., 1.); + + if (!hasT_) + { + LatticeCoordinate(t, Tp); + hasT_ = true; + } + envGetTmp(LatticeComplex, eta); bernoulli(*env().get4dRng(), eta); eta = (2.*eta - shift)*(1./::sqrt(2.)); eta = where((t >= par().tA) and (t <= par().tB), eta, 0.*eta); diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp index b085eb8c..6ee1e3c2 100644 --- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp +++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp @@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid Source file: extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp -Copyright (C) 2017 +Copyright (C) 2015-2018 -Author: Andrew Lawson +Author: Antonin Portelli +Author: Lanny91 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -80,6 +81,7 @@ public: // dependency relation virtual std::vector getInput(void); virtual std::vector getOutput(void); +protected: // setup virtual void setup(void); // execution @@ -121,38 +123,39 @@ void TTestSeqConserved::setup(void) auto Ls = env().getObjectLs(par().q); if (Ls != env().getObjectLs(par().action)) { - HADRON_ERROR("Ls mismatch between quark action and propagator"); + HADRON_ERROR(Size, "Ls mismatch between quark action and propagator"); } + envTmpLat(PropagatorField, "tmp"); + envTmpLat(LatticeComplex, "c"); } // execution /////////////////////////////////////////////////////////////////// template void TTestSeqConserved::execute(void) { - PropagatorField tmp(env().getGrid()); - PropagatorField &q = *env().template getObject(par().q); - PropagatorField &qSeq = *env().template getObject(par().qSeq); - FMat &act = *(env().template getObject(par().action)); - Gamma g5(Gamma::Algebra::Gamma5); - Gamma::Algebra gA = (par().curr == Current::Axial) ? - Gamma::Algebra::Gamma5 : - Gamma::Algebra::Identity; - Gamma g(gA); - SitePropagator qSite; - Complex test_S, test_V, check_S, check_V; - std::vector check_buf; - LatticeComplex c(env().getGrid()); - // Check sequential insertion of current gives same result as conserved // current sink upon contraction. Assume q uses a point source. - std::vector siteCoord; + + auto &q = envGet(PropagatorField, par().q); + auto &qSeq = envGet(PropagatorField, par().qSeq); + auto &act = envGet(FMat, par().action); + Gamma g5(Gamma::Algebra::Gamma5); + Gamma::Algebra gA = (par().curr == Current::Axial) ? 
diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp
index b085eb8c..6ee1e3c2 100644
--- a/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp
+++ b/extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MUtilities/TestSeqConserved.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -80,6 +81,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -121,38 +123,39 @@ void TTestSeqConserved<FImpl>::setup(void)
     auto Ls = env().getObjectLs(par().q);
     if (Ls != env().getObjectLs(par().action))
     {
-        HADRON_ERROR("Ls mismatch between quark action and propagator");
+        HADRON_ERROR(Size, "Ls mismatch between quark action and propagator");
     }
+    envTmpLat(PropagatorField, "tmp");
+    envTmpLat(LatticeComplex, "c");
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TTestSeqConserved<FImpl>::execute(void)
 {
-    PropagatorField tmp(env().getGrid());
-    PropagatorField &q    = *env().template getObject<PropagatorField>(par().q);
-    PropagatorField &qSeq = *env().template getObject<PropagatorField>(par().qSeq);
-    FMat            &act  = *(env().template getObject<FMat>(par().action));
-    Gamma           g5(Gamma::Algebra::Gamma5);
-    Gamma::Algebra  gA = (par().curr == Current::Axial) ?
-                          Gamma::Algebra::Gamma5 :
-                          Gamma::Algebra::Identity;
-    Gamma           g(gA);
-    SitePropagator  qSite;
-    Complex         test_S, test_V, check_S, check_V;
-    std::vector<TComplex> check_buf;
-    LatticeComplex  c(env().getGrid());
-
     // Check sequential insertion of current gives same result as conserved 
     // current sink upon contraction. Assume q uses a point source.
-    std::vector<int> siteCoord;
+
+    auto            &q    = envGet(PropagatorField, par().q);
+    auto            &qSeq = envGet(PropagatorField, par().qSeq);
+    auto            &act  = envGet(FMat, par().action);
+    Gamma           g5(Gamma::Algebra::Gamma5);
+    Gamma::Algebra  gA = (par().curr == Current::Axial) ?
+                          Gamma::Algebra::Gamma5 :
+                          Gamma::Algebra::Identity;
+    Gamma           g(gA);
+    SitePropagator  qSite;
+    Complex         test_S, test_V, check_S, check_V;
+    std::vector<TComplex> check_buf;
+    std::vector<int> siteCoord;
+
+    envGetTmp(PropagatorField, tmp);
+    envGetTmp(LatticeComplex, c);
     siteCoord = strToVec<int>(par().origin);
     peekSite(qSite, qSeq, siteCoord);
     test_S = trace(qSite*g);
     test_V = trace(qSite*g*Gamma::gmu[par().mu]);
-
     act.ContractConservedCurrent(q, q, tmp, par().curr, par().mu);
-
     c = trace(tmp*g);
     sliceSum(c, check_buf, Tp);
     check_S = TensorRemove(check_buf[par().t_J]);
diff --git a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp
index 2799e5d0..df35d887 100644
--- a/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp
+++ b/extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp
@@ -4,9 +4,10 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: extras/Hadrons/Modules/MUtilities/TestSeqGamma.hpp
 
-Copyright (C) 2017
+Copyright (C) 2015-2018
 
-Author: Andrew Lawson
+Author: Antonin Portelli
+Author: Lanny91
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -64,6 +65,7 @@ public:
     // dependency relation
     virtual std::vector<std::string> getInput(void);
     virtual std::vector<std::string> getOutput(void);
+protected:
     // setup
     virtual void setup(void);
     // execution
@@ -102,26 +104,27 @@ std::vector<std::string> TTestSeqGamma<FImpl>::getOutput(void)
 template <typename FImpl>
 void TTestSeqGamma<FImpl>::setup(void)
 {
-    
+    envTmpLat(LatticeComplex, "c");
 }
 
 // execution ///////////////////////////////////////////////////////////////////
 template <typename FImpl>
 void TTestSeqGamma<FImpl>::execute(void)
 {
-    PropagatorField &q    = *env().template getObject<PropagatorField>(par().q);
-    PropagatorField &qSeq = *env().template getObject<PropagatorField>(par().qSeq);
-    LatticeComplex  c(env().getGrid());
-    Gamma           g5(Gamma::Algebra::Gamma5);
-    Gamma           g(par().gamma);
-    SitePropagator  qSite;
-    Complex         test, check;
+    auto            &q    = envGet(PropagatorField, par().q);
+    auto            &qSeq = envGet(PropagatorField, par().qSeq);
+    Gamma           g5(Gamma::Algebra::Gamma5);
+    Gamma           g(par().gamma);
+    SitePropagator  qSite;
+    Complex         test, check;
     std::vector<TComplex> check_buf;
+    std::vector<int> siteCoord;
 
     // Check sequential insertion of gamma matrix gives same result as 
     // insertion of gamma at sink upon contraction. Assume q uses a point 
     // source.
-    std::vector<int> siteCoord;
+
+    envGetTmp(LatticeComplex, c);
     siteCoord = strToVec<int>(par().origin);
     peekSite(qSite, qSeq, siteCoord);
     test = trace(g*qSite);
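The new VirtualMachine singleton below takes over module bookkeeping from Environment: it owns the module list and addresses, the trajectory counter (hence the env().getTrajectory() to vm().getTrajectory() changes throughout this patch), and the module dependency graph used for scheduling. A rough usage sketch, assuming the singleton accessor follows the same getInstance() pattern as Environment:

    // Hypothetical driver fragment; pushModule takes the unique_ptr alias ModPt.
    auto &vm = VirtualMachine::getInstance();

    vm.setTrajectory(1500);           // stamps input/output file names
    vm.pushModule(pt);                // registers the module and its dependencies
    auto graph = vm.getModuleGraph(); // Graph<unsigned int> of module addresses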
+ +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include +#include +#include + +using namespace Grid; +using namespace QCD; +using namespace Hadrons; + +/****************************************************************************** + * VirtualMachine implementation * + ******************************************************************************/ +// trajectory counter ////////////////////////////////////////////////////////// +void VirtualMachine::setTrajectory(const unsigned int traj) +{ + traj_ = traj; +} + +unsigned int VirtualMachine::getTrajectory(void) const +{ + return traj_; +} + +// module management /////////////////////////////////////////////////////////// +void VirtualMachine::pushModule(VirtualMachine::ModPt &pt) +{ + std::string name = pt->getName(); + + if (!hasModule(name)) + { + std::vector inputAddress; + unsigned int address; + ModuleInfo m; + + // module registration ------------------------------------------------- + m.data = std::move(pt); + m.type = typeIdPt(*m.data.get()); + m.name = name; + // input dependencies + for (auto &in: m.data->getInput()) + { + if (!env().hasObject(in)) + { + // if object does not exist, add it with no creator module + env().addObject(in , -1); + } + m.input.push_back(env().getObjectAddress(in)); + } + // reference dependencies + for (auto &ref: m.data->getReference()) + { + if (!env().hasObject(ref)) + { + // if object does not exist, add it with no creator module + env().addObject(ref , -1); + } + m.input.push_back(env().getObjectAddress(ref)); + } + auto inCopy = m.input; + // if module has inputs with references, they need to be added as + // an input + for (auto &in: inCopy) + { + int inm = env().getObjectModule(in); + + if (inm > 0) + { + if (getModule(inm)->getReference().size() > 0) + { + for (auto &rin: getModule(inm)->getReference()) + { + m.input.push_back(env().getObjectAddress(rin)); + } + } + } + } + module_.push_back(std::move(m)); + address = static_cast(module_.size() - 1); + moduleAddress_[name] = address; + // connecting outputs to potential inputs ------------------------------ + for (auto &out: getModule(address)->getOutput()) + { + if (!env().hasObject(out)) + { + // output does not exists, add it + env().addObject(out, address); + } + else + { + if (env().getObjectModule(env().getObjectAddress(out)) < 0) + { + // output exists but without creator, correct it + env().setObjectModule(env().getObjectAddress(out), address); + } + else + { + // output already fully registered, error + HADRON_ERROR(Definition, "object '" + out + + "' is already produced by module '" + + module_[env().getObjectModule(out)].name + + "' (while pushing module '" + name + "')"); + } + if (getModule(address)->getReference().size() > 0) + { + // module has references, dependency should be propagated + // to children modules; find module with `out` as an input + // and add references to their input + auto pred = [this, out](const ModuleInfo &n) + { + auto &in = n.input; + auto it = std::find(in.begin(), in.end(), + env().getObjectAddress(out)); + + return (it != in.end()); + }; + auto it = std::find_if(module_.begin(), module_.end(), pred); + while (it != module_.end()) + { + for (auto &ref: 
getModule(address)->getReference()) + { + it->input.push_back(env().getObjectAddress(ref)); + } + it = std::find_if(++it, module_.end(), pred); + } + } + } + } + graphOutdated_ = true; + memoryProfileOutdated_ = true; + } + else + { + HADRON_ERROR(Definition, "module '" + name + "' already exists"); + } +} + +unsigned int VirtualMachine::getNModule(void) const +{ + return module_.size(); +} + +void VirtualMachine::createModule(const std::string name, const std::string type, + XmlReader &reader) +{ + auto &factory = ModuleFactory::getInstance(); + auto pt = factory.create(type, name); + + pt->parseParameters(reader, "options"); + pushModule(pt); +} + +ModuleBase * VirtualMachine::getModule(const unsigned int address) const +{ + if (hasModule(address)) + { + return module_[address].data.get(); + } + else + { + HADRON_ERROR(Definition, "no module with address " + std::to_string(address)); + } +} + +ModuleBase * VirtualMachine::getModule(const std::string name) const +{ + return getModule(getModuleAddress(name)); +} + +unsigned int VirtualMachine::getModuleAddress(const std::string name) const +{ + if (hasModule(name)) + { + return moduleAddress_.at(name); + } + else + { + HADRON_ERROR(Definition, "no module with name '" + name + "'"); + } +} + +std::string VirtualMachine::getModuleName(const unsigned int address) const +{ + if (hasModule(address)) + { + return module_[address].name; + } + else + { + HADRON_ERROR(Definition, "no module with address " + std::to_string(address)); + } +} + +std::string VirtualMachine::getModuleType(const unsigned int address) const +{ + if (hasModule(address)) + { + return typeName(module_[address].type); + } + else + { + HADRON_ERROR(Definition, "no module with address " + std::to_string(address)); + } +} + +std::string VirtualMachine::getModuleType(const std::string name) const +{ + return getModuleType(getModuleAddress(name)); +} + +std::string VirtualMachine::getModuleNamespace(const unsigned int address) const +{ + std::string type = getModuleType(address), ns; + + auto pos2 = type.rfind("::"); + auto pos1 = type.rfind("::", pos2 - 2); + + return type.substr(pos1 + 2, pos2 - pos1 - 2); +} + +std::string VirtualMachine::getModuleNamespace(const std::string name) const +{ + return getModuleNamespace(getModuleAddress(name)); +} + +bool VirtualMachine::hasModule(const unsigned int address) const +{ + return (address < module_.size()); +} + +bool VirtualMachine::hasModule(const std::string name) const +{ + return (moduleAddress_.find(name) != moduleAddress_.end()); +} + +// print VM content //////////////////////////////////////////////////////////// +void VirtualMachine::printContent(void) const +{ + LOG(Debug) << "Modules: " << std::endl; + for (unsigned int i = 0; i < module_.size(); ++i) + { + LOG(Debug) << std::setw(4) << i << ": " + << getModuleName(i) << std::endl; + } +} + +// module graph //////////////////////////////////////////////////////////////// +Graph VirtualMachine::getModuleGraph(void) +{ + if (graphOutdated_) + { + makeModuleGraph(); + graphOutdated_ = false; + } + + return graph_; +} + +void VirtualMachine::makeModuleGraph(void) +{ + Graph graph; + + // create vertices + for (unsigned int m = 0; m < module_.size(); ++m) + { + graph.addVertex(m); + } + // create edges + for (unsigned int m = 0; m < module_.size(); ++m) + { + for (auto &in: module_[m].input) + { + graph.addEdge(env().getObjectModule(in), m); + } + } + graph_ = graph; +} + +// memory profile ////////////////////////////////////////////////////////////// +const 
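+// (Lazy-cache note: the accessor below rebuilds the profile through
+// makeMemoryProfile() only when a pushModule() has marked it outdated,
+// exactly as getModuleGraph() above does for the dependency graph.)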
VirtualMachine::MemoryProfile & VirtualMachine::getMemoryProfile(void) +{ + if (memoryProfileOutdated_) + { + makeMemoryProfile(); + memoryProfileOutdated_ = false; + } + + return profile_; +} + +void VirtualMachine::makeMemoryProfile(void) +{ + bool protect = env().objectsProtected(); + bool hmsg = HadronsLogMessage.isActive(); + bool gmsg = GridLogMessage.isActive(); + bool err = HadronsLogError.isActive(); + auto program = getModuleGraph().topoSort(); + + resetProfile(); + profile_.module.resize(getNModule()); + env().protectObjects(false); + GridLogMessage.Active(false); + HadronsLogMessage.Active(false); + HadronsLogError.Active(false); + for (auto it = program.rbegin(); it != program.rend(); ++it) + { + auto a = *it; + + if (profile_.module[a].empty()) + { + LOG(Debug) << "Profiling memory for module '" << module_[a].name + << "' (" << a << ")..." << std::endl; + memoryProfile(a); + env().freeAll(); + } + } + env().protectObjects(protect); + GridLogMessage.Active(gmsg); + HadronsLogMessage.Active(hmsg); + HadronsLogError.Active(err); + LOG(Debug) << "Memory profile:" << std::endl; + LOG(Debug) << "----------------" << std::endl; + for (unsigned int a = 0; a < profile_.module.size(); ++a) + { + LOG(Debug) << getModuleName(a) << " (" << a << ")" << std::endl; + for (auto &o: profile_.module[a]) + { + LOG(Debug) << "|__ " << env().getObjectName(o.first) << " (" + << sizeString(o.second) << ")" << std::endl; + } + LOG(Debug) << std::endl; + } + LOG(Debug) << "----------------" << std::endl; +} + +void VirtualMachine::resetProfile(void) +{ + profile_.module.clear(); + profile_.object.clear(); +} + +void VirtualMachine::resizeProfile(void) +{ + if (env().getMaxAddress() > profile_.object.size()) + { + MemoryPrint empty; + + empty.size = 0; + empty.module = -1; + profile_.object.resize(env().getMaxAddress(), empty); + } +} + +void VirtualMachine::updateProfile(const unsigned int address) +{ + resizeProfile(); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile_.object[a].module == -1)) + { + profile_.object[a].size = env().getObjectSize(a); + profile_.object[a].storage = env().getObjectStorage(a); + profile_.object[a].module = address; + profile_.module[address][a] = profile_.object[a].size; + if (env().getObjectModule(a) < 0) + { + env().setObjectModule(a, address); + } + } + } +} + +void VirtualMachine::cleanEnvironment(void) +{ + resizeProfile(); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().hasCreatedObject(a) and (profile_.object[a].module == -1)) + { + env().freeObject(a); + } + } +} + +void VirtualMachine::memoryProfile(const unsigned int address) +{ + auto m = getModule(address); + + LOG(Debug) << "Setting up module '" << m->getName() + << "' (" << address << ")..." 
<< std::endl; + try + { + m->setup(); + updateProfile(address); + } + catch (Exceptions::Definition &) + { + cleanEnvironment(); + for (auto &in: m->getInput()) + { + memoryProfile(env().getObjectModule(in)); + } + for (auto &ref: m->getReference()) + { + memoryProfile(env().getObjectModule(ref)); + } + m->setup(); + updateProfile(address); + } +} + +void VirtualMachine::memoryProfile(const std::string name) +{ + memoryProfile(getModuleAddress(name)); +} + +// garbage collector /////////////////////////////////////////////////////////// +VirtualMachine::GarbageSchedule +VirtualMachine::makeGarbageSchedule(const Program &p) const +{ + GarbageSchedule freeProg; + + freeProg.resize(p.size()); + for (unsigned int a = 0; a < env().getMaxAddress(); ++a) + { + if (env().getObjectStorage(a) == Environment::Storage::temporary) + { + auto it = std::find(p.begin(), p.end(), env().getObjectModule(a)); + + if (it != p.end()) + { + freeProg[std::distance(p.begin(), it)].insert(a); + } + } + else if (env().getObjectStorage(a) == Environment::Storage::object) + { + auto pred = [a, this](const unsigned int b) + { + auto &in = module_[b].input; + auto it = std::find(in.begin(), in.end(), a); + + return (it != in.end()) or (b == env().getObjectModule(a)); + }; + auto it = std::find_if(p.rbegin(), p.rend(), pred); + if (it != p.rend()) + { + freeProg[std::distance(it, p.rend()) - 1].insert(a); + } + } + } + + return freeProg; +} + +// high-water memory function ////////////////////////////////////////////////// +VirtualMachine::Size VirtualMachine::memoryNeeded(const Program &p) +{ + const MemoryProfile &profile = getMemoryProfile(); + GarbageSchedule freep = makeGarbageSchedule(p); + Size current = 0, max = 0; + + for (unsigned int i = 0; i < p.size(); ++i) + { + for (auto &o: profile.module[p[i]]) + { + current += o.second; + } + max = std::max(current, max); + for (auto &o: freep[i]) + { + current -= profile.object[o].size; + } + } + + return max; +} + +// genetic scheduler /////////////////////////////////////////////////////////// +VirtualMachine::Program VirtualMachine::schedule(const GeneticPar &par) +{ + typedef GeneticScheduler Scheduler; + + auto graph = getModuleGraph(); + + //constrained topological sort using a genetic algorithm + LOG(Message) << "Scheduling computation..." << std::endl; + LOG(Message) << " #module= " << graph.size() << std::endl; + LOG(Message) << " population size= " << par.popSize << std::endl; + LOG(Message) << " max. generation= " << par.maxGen << std::endl; + LOG(Message) << " max. cst. 
generation= " << par.maxCstGen << std::endl; + LOG(Message) << " mutation rate= " << par.mutationRate << std::endl; + + unsigned int k = 0, gen, prevPeak, nCstPeak = 0; + std::random_device rd; + Scheduler::Parameters gpar; + + gpar.popSize = par.popSize; + gpar.mutationRate = par.mutationRate; + gpar.seed = rd(); + CartesianCommunicator::BroadcastWorld(0, &(gpar.seed), sizeof(gpar.seed)); + Scheduler::ObjFunc memPeak = [this](const Program &p)->Size + { + return memoryNeeded(p); + }; + Scheduler scheduler(graph, memPeak, gpar); + gen = 0; + do + { + LOG(Debug) << "Generation " << gen << ":" << std::endl; + scheduler.nextGeneration(); + if (gen != 0) + { + if (prevPeak == scheduler.getMinValue()) + { + nCstPeak++; + } + else + { + nCstPeak = 0; + } + } + + prevPeak = scheduler.getMinValue(); + if (gen % 10 == 0) + { + LOG(Iterative) << "Generation " << gen << ": " + << sizeString(scheduler.getMinValue()) << std::endl; + } + + gen++; + } while ((gen < par.maxGen) and (nCstPeak < par.maxCstGen)); + + return scheduler.getMinSchedule(); +} + +// general execution /////////////////////////////////////////////////////////// +#define BIG_SEP "===============" +#define SEP "---------------" +#define MEM_MSG(size) sizeString(size) + +void VirtualMachine::executeProgram(const Program &p) const +{ + Size memPeak = 0, sizeBefore, sizeAfter; + GarbageSchedule freeProg; + + // build garbage collection schedule + LOG(Debug) << "Building garbage collection schedule..." << std::endl; + freeProg = makeGarbageSchedule(p); + + // program execution + LOG(Debug) << "Executing program..." << std::endl; + for (unsigned int i = 0; i < p.size(); ++i) + { + // execute module + LOG(Message) << SEP << " Measurement step " << i + 1 << "/" + << p.size() << " (module '" << module_[p[i]].name + << "') " << SEP << std::endl; + (*module_[p[i]].data)(); + sizeBefore = env().getTotalSize(); + // print used memory after execution + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeBefore) + << std::endl; + if (sizeBefore > memPeak) + { + memPeak = sizeBefore; + } + // garbage collection for step i + LOG(Message) << "Garbage collection..." << std::endl; + for (auto &j: freeProg[i]) + { + env().freeObject(j); + } + // print used memory after garbage collection if necessary + sizeAfter = env().getTotalSize(); + if (sizeBefore != sizeAfter) + { + LOG(Message) << "Allocated objects: " << MEM_MSG(sizeAfter) + << std::endl; + } + else + { + LOG(Message) << "Nothing to free" << std::endl; + } + } +} + +void VirtualMachine::executeProgram(const std::vector &p) const +{ + Program pAddress; + + for (auto &n: p) + { + pAddress.push_back(getModuleAddress(n)); + } + executeProgram(pAddress); +} diff --git a/extras/Hadrons/VirtualMachine.hpp b/extras/Hadrons/VirtualMachine.hpp new file mode 100644 index 00000000..19a74f94 --- /dev/null +++ b/extras/Hadrons/VirtualMachine.hpp @@ -0,0 +1,207 @@ +/************************************************************************************* + +Grid physics library, www.github.com/paboyle/Grid + +Source file: extras/Hadrons/VirtualMachine.hpp + +Copyright (C) 2015-2018 + +Author: Antonin Portelli + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along +with this program; if not, write to the Free Software Foundation, Inc., +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#ifndef Hadrons_VirtualMachine_hpp_ +#define Hadrons_VirtualMachine_hpp_ + +#include <Grid/Hadrons/Global.hpp> +#include <Grid/Hadrons/Graph.hpp> +#include <Grid/Hadrons/Environment.hpp> + +BEGIN_HADRONS_NAMESPACE + +#define DEFINE_VM_ALIAS \ +inline VirtualMachine & vm(void) const\ +{\ + return VirtualMachine::getInstance();\ +} + +/****************************************************************************** + * Virtual machine for module execution * + ******************************************************************************/ +// forward declaration of Module +class ModuleBase; + +class VirtualMachine +{ + SINGLETON_DEFCTOR(VirtualMachine); +public: + typedef SITE_SIZE_TYPE Size; + typedef std::unique_ptr<ModuleBase> ModPt; + typedef std::vector<std::set<unsigned int>> GarbageSchedule; + typedef std::vector<unsigned int> Program; + struct MemoryPrint + { + Size size; + Environment::Storage storage; + int module; + }; + struct MemoryProfile + { + std::vector<std::map<unsigned int, Size>> module; + std::vector<MemoryPrint> object; + }; + class GeneticPar: Serializable + { + public: + GeneticPar(void): + popSize{20}, maxGen{1000}, maxCstGen{100}, mutationRate{.1} {}; + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(GeneticPar, + unsigned int, popSize, + unsigned int, maxGen, + unsigned int, maxCstGen, + double , mutationRate); + }; +private: + struct ModuleInfo + { + const std::type_info *type{nullptr}; + std::string name; + ModPt data{nullptr}; + std::vector<unsigned int> input; + size_t maxAllocated; + }; +public: + // trajectory counter + void setTrajectory(const unsigned int traj); + unsigned int getTrajectory(void) const; + // module management + void pushModule(ModPt &pt); + template <typename M> + void createModule(const std::string name); + template <typename M> + void createModule(const std::string name, + const typename M::Par &par); + void createModule(const std::string name, + const std::string type, + XmlReader &reader); + unsigned int getNModule(void) const; + ModuleBase * getModule(const unsigned int address) const; + ModuleBase * getModule(const std::string name) const; + template <typename M> + M * getModule(const unsigned int address) const; + template <typename M> + M * getModule(const std::string name) const; + unsigned int getModuleAddress(const std::string name) const; + std::string getModuleName(const unsigned int address) const; + std::string getModuleType(const unsigned int address) const; + std::string getModuleType(const std::string name) const; + std::string getModuleNamespace(const unsigned int address) const; + std::string getModuleNamespace(const std::string name) const; + bool hasModule(const unsigned int address) const; + bool hasModule(const std::string name) const; + // print VM content + void printContent(void) const; + // module graph (could be a const reference if topoSort was const) + Graph<unsigned int> getModuleGraph(void); + // memory profile + const MemoryProfile &getMemoryProfile(void); + // garbage collector + GarbageSchedule makeGarbageSchedule(const Program &p) const; + // high-water memory function + Size memoryNeeded(const Program &p); + // genetic scheduler +
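+// Usage sketch (hypothetical driver code; only the VirtualMachine calls
+// are from this header): schedule() below runs a genetic algorithm over
+// the module graph, minimising the memoryNeeded() high-water mark while
+// respecting the dependency (topological) order:
+//
+//   VirtualMachine &vm = VirtualMachine::getInstance();
+//   VirtualMachine::GeneticPar gpar;               // popSize=20, maxGen=1000, ...
+//   VirtualMachine::Program p = vm.schedule(gpar); // constrained topological sort
+//   vm.executeProgram(p);                          // frees objects per garbage schedule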
Program schedule(const GeneticPar &par); + // general execution + void executeProgram(const Program &p) const; + void executeProgram(const std::vector &p) const; +private: + // environment shortcut + DEFINE_ENV_ALIAS; + // module graph + void makeModuleGraph(void); + // memory profile + void makeMemoryProfile(void); + void resetProfile(void); + void resizeProfile(void); + void updateProfile(const unsigned int address); + void cleanEnvironment(void); + void memoryProfile(const std::string name); + void memoryProfile(const unsigned int address); +private: + // general + unsigned int traj_; + // module and related maps + std::vector module_; + std::map moduleAddress_; + std::string currentModule_{""}; + // module graph + bool graphOutdated_{true}; + Graph graph_; + // memory profile + bool memoryProfileOutdated_{true}; + MemoryProfile profile_; +}; + +/****************************************************************************** + * VirtualMachine template implementation * + ******************************************************************************/ +// module management /////////////////////////////////////////////////////////// +template +void VirtualMachine::createModule(const std::string name) +{ + ModPt pt(new M(name)); + + pushModule(pt); +} + +template +void VirtualMachine::createModule(const std::string name, + const typename M::Par &par) +{ + ModPt pt(new M(name)); + + static_cast(pt.get())->setPar(par); + pushModule(pt); +} + +template +M * VirtualMachine::getModule(const unsigned int address) const +{ + if (auto *pt = dynamic_cast(getModule(address))) + { + return pt; + } + else + { + HADRON_ERROR(Definition, "module '" + module_[address].name + + "' does not have type " + typeid(M).name() + + "(has type: " + getModuleType(address) + ")"); + } +} + +template +M * VirtualMachine::getModule(const std::string name) const +{ + return getModule(getModuleAddress(name)); +} + +END_HADRONS_NAMESPACE + +#endif // Hadrons_VirtualMachine_hpp_ diff --git a/extras/Hadrons/modules.inc b/extras/Hadrons/modules.inc index 1ad4750a..d005caec 100644 --- a/extras/Hadrons/modules.inc +++ b/extras/Hadrons/modules.inc @@ -2,43 +2,47 @@ modules_cc =\ Modules/MContraction/WeakHamiltonianEye.cc \ Modules/MContraction/WeakNeutral4ptDisc.cc \ Modules/MContraction/WeakHamiltonianNonEye.cc \ + Modules/MGauge/Unit.cc \ + Modules/MGauge/StochEm.cc \ + Modules/MGauge/Random.cc \ Modules/MScalar/FreeProp.cc \ Modules/MScalar/ChargedProp.cc \ - Modules/MGauge/Random.cc \ - Modules/MGauge/StochEm.cc \ - Modules/MGauge/Unit.cc \ - Modules/MGauge/Load.cc + Modules/MIO/LoadNersc.cc modules_hpp =\ - Modules/MSolver/RBPrecCG.hpp \ - Modules/MContraction/WardIdentity.hpp \ - Modules/MContraction/Meson.hpp \ - Modules/MContraction/Gamma3pt.hpp \ - Modules/MContraction/DiscLoop.hpp \ - Modules/MContraction/WeakHamiltonianEye.hpp \ Modules/MContraction/Baryon.hpp \ + Modules/MContraction/Meson.hpp \ Modules/MContraction/WeakHamiltonian.hpp \ - Modules/MContraction/WeakNeutral4ptDisc.hpp \ Modules/MContraction/WeakHamiltonianNonEye.hpp \ + Modules/MContraction/DiscLoop.hpp \ + Modules/MContraction/WeakNeutral4ptDisc.hpp \ + Modules/MContraction/Gamma3pt.hpp \ + Modules/MContraction/WardIdentity.hpp \ + Modules/MContraction/WeakHamiltonianEye.hpp \ + Modules/MFermion/GaugeProp.hpp \ + Modules/MSource/SeqConservedSummed.hpp \ + Modules/MSource/SeqGamma.hpp \ + Modules/MSource/Point.hpp \ + Modules/MSource/Wall.hpp \ + Modules/MSource/Z2.hpp \ + Modules/MSource/SeqConserved.hpp \ + Modules/MSink/Smear.hpp \ + 
Modules/MSink/Point.hpp \ + Modules/MSolver/RBPrecCG.hpp \ + Modules/MGauge/Unit.hpp \ + Modules/MGauge/Random.hpp \ + Modules/MGauge/StochEm.hpp \ Modules/MUtilities/TestSeqGamma.hpp \ Modules/MUtilities/TestSeqConserved.hpp \ Modules/MLoop/NoiseLoop.hpp \ Modules/MScalar/FreeProp.hpp \ - Modules/MScalar/ChargedProp.hpp \ Modules/MScalar/Scalar.hpp \ - Modules/MSink/Point.hpp \ - Modules/MSink/Smear.hpp \ - Modules/MFermion/GaugeProp.hpp \ - Modules/MSource/SeqConservedSummed.hpp \ - Modules/MSource/Wall.hpp \ - Modules/MSource/Point.hpp \ - Modules/MSource/SeqConserved.hpp \ - Modules/MSource/SeqGamma.hpp \ - Modules/MSource/Z2.hpp \ - Modules/MGauge/Load.hpp \ - Modules/MGauge/StochEm.hpp \ - Modules/MGauge/Random.hpp \ - Modules/MGauge/Unit.hpp \ + Modules/MScalar/ChargedProp.hpp \ Modules/MAction/DWF.hpp \ - Modules/MAction/Wilson.hpp + Modules/MAction/Wilson.hpp \ + Modules/MScalarSUN/TrMag.hpp \ + Modules/MScalarSUN/TwoPoint.hpp \ + Modules/MScalarSUN/TrPhi.hpp \ + Modules/MIO/LoadNersc.hpp \ +Modules/MIO/LoadBinary.hpp diff --git a/lib/Makefile.am b/lib/Makefile.am index 6dd7899e..dc33e7cf 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -1,28 +1,18 @@ extra_sources= extra_headers= -if BUILD_COMMS_MPI - extra_sources+=communicator/Communicator_mpi.cc - extra_sources+=communicator/Communicator_base.cc -endif if BUILD_COMMS_MPI3 extra_sources+=communicator/Communicator_mpi3.cc extra_sources+=communicator/Communicator_base.cc -endif - -if BUILD_COMMS_MPIT - extra_sources+=communicator/Communicator_mpit.cc - extra_sources+=communicator/Communicator_base.cc -endif - -if BUILD_COMMS_SHMEM - extra_sources+=communicator/Communicator_shmem.cc - extra_sources+=communicator/Communicator_base.cc + extra_sources+=communicator/SharedMemoryMPI.cc + extra_sources+=communicator/SharedMemory.cc endif if BUILD_COMMS_NONE extra_sources+=communicator/Communicator_none.cc extra_sources+=communicator/Communicator_base.cc + extra_sources+=communicator/SharedMemoryNone.cc + extra_sources+=communicator/SharedMemory.cc endif if BUILD_HDF5 diff --git a/lib/algorithms/CoarsenedMatrix.h b/lib/algorithms/CoarsenedMatrix.h index c2910151..8af8d7ac 100644 --- a/lib/algorithms/CoarsenedMatrix.h +++ b/lib/algorithms/CoarsenedMatrix.h @@ -103,29 +103,32 @@ namespace Grid { GridBase *CoarseGrid; GridBase *FineGrid; std::vector > subspace; + int checkerboard; - Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid) : - CoarseGrid(_CoarseGrid), + Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) : + CoarseGrid(_CoarseGrid), FineGrid(_FineGrid), - subspace(nbasis,_FineGrid) + subspace(nbasis,_FineGrid), + checkerboard(_checkerboard) { }; void Orthogonalise(void){ CoarseScalar InnerProd(CoarseGrid); + std::cout << GridLogMessage <<" Gramm-Schmidt pass 1"< pokey(CoarseGrid); - - for(int i=0;ioSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ eProj._odata[ss](i)=CComplex(1.0); } eProj=eProj - iProj; @@ -137,6 +140,7 @@ namespace Grid { blockProject(CoarseVec,FineVec,subspace); } void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){ + FineVec.checkerboard = subspace[0].checkerboard; blockPromote(CoarseVec,FineVec,subspace); } void CreateSubspaceRandom(GridParallelRNG &RNG){ @@ -147,6 +151,7 @@ namespace Grid { Orthogonalise(); } + /* virtual void CreateSubspaceLanczos(GridParallelRNG &RNG,LinearOperatorBase &hermop,int nn=nbasis) { // Run a Lanczos with sloppy convergence @@ -195,7 +200,7 @@ namespace Grid { std::cout << GridLogMessage <<"subspace["< &hermop,int 
nn=nbasis) { RealD scale; diff --git a/lib/algorithms/LinearOperator.h b/lib/algorithms/LinearOperator.h index 6cb77296..26746e6e 100644 --- a/lib/algorithms/LinearOperator.h +++ b/lib/algorithms/LinearOperator.h @@ -162,15 +162,10 @@ namespace Grid { _Mat.M(in,out); } void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ - ComplexD dot; - _Mat.M(in,out); - dot= innerProduct(in,out); - n1=real(dot); - - dot = innerProduct(out,out); - n2=real(dot); + ComplexD dot= innerProduct(in,out); n1=real(dot); + n2=norm2(out); } void HermOp(const Field &in, Field &out){ _Mat.M(in,out); @@ -192,10 +187,10 @@ namespace Grid { ni=Mpc(in,tmp); no=MpcDag(tmp,out); } - void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ MpcDagMpc(in,out,n1,n2); } - void HermOp(const Field &in, Field &out){ + virtual void HermOp(const Field &in, Field &out){ RealD n1,n2; HermOpAndNorm(in,out,n1,n2); } @@ -212,7 +207,6 @@ namespace Grid { void OpDir (const Field &in, Field &out,int dir,int disp) { assert(0); } - }; template class SchurDiagMooeeOperator : public SchurOperatorBase { @@ -270,7 +264,6 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; - template class SchurDiagTwoOperator : public SchurOperatorBase { protected: @@ -299,6 +292,59 @@ namespace Grid { return axpy_norm(out,-1.0,tmp,in); } }; + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Left handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) psi = eta --> ( 1 - Moo^-1 Moe Mee^-1 Meo ) psi = Moo^-1 eta + // Right handed Moo^-1 ; (Moo - Moe Mee^-1 Meo) Moo^-1 Moo psi = eta --> ( 1 - Moe Mee^-1 Meo ) Moo^-1 phi=eta ; psi = Moo^-1 phi + /////////////////////////////////////////////////////////////////////////////////////////////////// + template using SchurDiagOneRH = SchurDiagTwoOperator ; + template using SchurDiagOneLH = SchurDiagOneOperator ; + /////////////////////////////////////////////////////////////////////////////////////////////////// + // Staggered use + /////////////////////////////////////////////////////////////////////////////////////////////////// + template + class SchurStaggeredOperator : public SchurOperatorBase { + protected: + Matrix &_Mat; + public: + SchurStaggeredOperator (Matrix &Mat): _Mat(Mat){}; + virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ + GridLogIterative.TimingMode(1); + std::cout << GridLogIterative << " HermOpAndNorm "< using SchurStagOperator = SchurStaggeredOperator; ///////////////////////////////////////////////////////////// @@ -314,6 +360,14 @@ namespace Grid { virtual void operator() (const Field &in, Field &out) = 0; }; + template class IdentityLinearFunction : public LinearFunction { + public: + void operator() (const Field &in, Field &out){ + out = in; + }; + }; + + ///////////////////////////////////////////////////////////// // Base classes for Multishift solvers for operators ///////////////////////////////////////////////////////////// @@ -336,6 +390,64 @@ namespace Grid { }; */ + //////////////////////////////////////////////////////////////////////////////////////////// + // Hermitian operator Linear function and operator function + //////////////////////////////////////////////////////////////////////////////////////////// + template + class HermOpOperatorFunction : public OperatorFunction { + void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { + Linop.HermOp(in,out); + }; + }; + + template + 
class PlainHermOp : public LinearFunction { + public: + LinearOperatorBase &_Linop; + + PlainHermOp(LinearOperatorBase& linop) : _Linop(linop) + {} + + void operator()(const Field& in, Field& out) { + _Linop.HermOp(in,out); + } + }; + + template + class FunctionHermOp : public LinearFunction { + public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + + FunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop) + : _poly(poly), _Linop(linop) {}; + + void operator()(const Field& in, Field& out) { + _poly(_Linop,in,out); + } + }; + + template + class Polynomial : public OperatorFunction { + private: + std::vector Coeffs; + public: + Polynomial(std::vector &_Coeffs) : Coeffs(_Coeffs) { }; + + // Implement the required interface + void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { + + Field AtoN(in._grid); + Field Mtmp(in._grid); + AtoN = in; + out = AtoN*Coeffs[0]; + for(int n=1;n Author: paboyle +Author: Christoph Lehner This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -33,41 +34,12 @@ Author: paboyle namespace Grid { - //////////////////////////////////////////////////////////////////////////////////////////// - // Simple general polynomial with user supplied coefficients - //////////////////////////////////////////////////////////////////////////////////////////// - template - class HermOpOperatorFunction : public OperatorFunction { - void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { - Linop.HermOp(in,out); - }; - }; - - template - class Polynomial : public OperatorFunction { - private: - std::vector Coeffs; - public: - Polynomial(std::vector &_Coeffs) : Coeffs(_Coeffs) { }; - - // Implement the required interface - void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { - - Field AtoN(in._grid); - Field Mtmp(in._grid); - AtoN = in; - out = AtoN*Coeffs[0]; -// std::cout <<"Poly in " <::quiet_NaN(); + } + // Implement the required interface void operator() (LinearOperatorBase &Linop, const Field &in, Field &out) { diff --git a/lib/algorithms/iterative/ConjugateGradient.h b/lib/algorithms/iterative/ConjugateGradient.h index 5c968e04..0d4e51c7 100644 --- a/lib/algorithms/iterative/ConjugateGradient.h +++ b/lib/algorithms/iterative/ConjugateGradient.h @@ -78,12 +78,12 @@ class ConjugateGradient : public OperatorFunction { cp = a; ssq = norm2(src); - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: guess " << guess << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: src " << ssq << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: mp " << d << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: mmp " << b << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: cp,r " << cp << std::endl; - std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradient: p " << a << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: guess " << guess << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: src " << ssq << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mp " << d << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: mmp " << b << std::endl; + std::cout << GridLogIterative << 
std::setprecision(8) << "ConjugateGradient: cp,r " << cp << std::endl; + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: p " << a << std::endl; RealD rsq = Tolerance * Tolerance * ssq; @@ -92,7 +92,7 @@ class ConjugateGradient : public OperatorFunction { return; } - std::cout << GridLogIterative << std::setprecision(4) + std::cout << GridLogIterative << std::setprecision(8) << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl; GridStopWatch LinalgTimer; diff --git a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h index a8723f32..7b85c095 100644 --- a/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h +++ b/lib/algorithms/iterative/ImplicitlyRestartedLanczos.h @@ -7,8 +7,9 @@ Copyright (C) 2015 Author: Peter Boyle -Author: Chulwoo Jung -Author: Guido Cossu +Author: paboyle +Author: Chulwoo Jung +Author: Christoph Lehner This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -27,125 +28,288 @@ Author: Guido Cossu See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#ifndef GRID_IRL_H -#define GRID_IRL_H +#ifndef GRID_BIRL_H +#define GRID_BIRL_H #include //memset +//#include +#include -namespace Grid { +namespace Grid { - enum IRLdiagonalisation { - IRLdiagonaliseWithDSTEGR, - IRLdiagonaliseWithQR, - IRLdiagonaliseWithEigen - }; - -//////////////////////////////////////////////////////////////////////////////// -// Helper class for sorting the evalues AND evectors by Field -// Use pointer swizzle on vectors -//////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////// + // Move following 100 LOC to lattice/Lattice_basis.h + //////////////////////////////////////////////////////// template -class SortEigen { - private: - static bool less_lmd(RealD left,RealD right){ - return left > right; - } - static bool less_pair(std::pair& left, - std::pair& right){ - return left.first > (right.first); - } - - public: - void push(std::vector& lmd,std::vector& evec,int N) { - - //////////////////////////////////////////////////////////////////////// - // PAB: FIXME: VERY VERY VERY wasteful: takes a copy of the entire vector set. 
- // : The vector reorder should be done by pointer swizzle somehow - //////////////////////////////////////////////////////////////////////// - std::vector cpy(lmd.size(),evec[0]._grid); - for(int i=0;i > emod(lmd.size()); +void basisOrthogonalize(std::vector &basis,Field &w,int k) +{ + for(int j=0; j(lmd[i],&cpy[i]); - - partial_sort(emod.begin(),emod.begin()+N,emod.end(),less_pair); - - typename std::vector >::iterator it = emod.begin(); - for(int i=0;ifirst; - evec[i]=*(it->second); - ++it; +template +void basisRotate(std::vector &basis,Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) +{ + typedef typename Field::vector_object vobj; + GridBase* grid = basis[0]._grid; + + parallel_region + { + std::vector < vobj > B(Nm); // Thread private + + parallel_for_internal(int ss=0;ss < grid->oSites();ss++){ + for(int j=j0; j& lmd,int N) { - std::partial_sort(lmd.begin(),lmd.begin()+N,lmd.end(),less_lmd); +} + +// Extract a single rotated vector +template +void basisRotateJ(Field &result,std::vector &basis,Eigen::MatrixXd& Qt,int j, int k0,int k1,int Nm) +{ + typedef typename Field::vector_object vobj; + GridBase* grid = basis[0]._grid; + + result.checkerboard = basis[0].checkerboard; + parallel_for(int ss=0;ss < grid->oSites();ss++){ + vobj B = zero; + for(int k=k0; k fabs(thrs); +} + +template +void basisReorderInPlace(std::vector &_v,std::vector& sort_vals, std::vector& idx) +{ + int vlen = idx.size(); + + assert(vlen>=1); + assert(vlen<=sort_vals.size()); + assert(vlen<=_v.size()); + + for (size_t i=0;ii for which _vnew[j] = _vold[i], + // track the move idx[j] => idx[i] + // track the move idx[i] => i + ////////////////////////////////////// + size_t j; + for (j=i;j i); assert(j!=idx.size()); assert(idx[j]==i); + + std::swap(_v[i]._odata,_v[idx[i]]._odata); // should use vector move constructor, no data copy + std::swap(sort_vals[i],sort_vals[idx[i]]); + + idx[j] = idx[i]; + idx[i] = i; + } } -}; +} + +inline std::vector basisSortGetIndex(std::vector& sort_vals) +{ + std::vector idx(sort_vals.size()); + std::iota(idx.begin(), idx.end(), 0); + + // sort indexes based on comparing values in v + std::sort(idx.begin(), idx.end(), [&sort_vals](int i1, int i2) { + return ::fabs(sort_vals[i1]) < ::fabs(sort_vals[i2]); + }); + return idx; +} + +template +void basisSortInPlace(std::vector & _v,std::vector& sort_vals, bool reverse) +{ + std::vector idx = basisSortGetIndex(sort_vals); + if (reverse) + std::reverse(idx.begin(), idx.end()); + + basisReorderInPlace(_v,sort_vals,idx); +} + +// PAB: faster to compute the inner products first then fuse loops. +// If performance critical can improve. 
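+// Sketch of the fusion suggested above (hypothetical helper, name and
+// placement assumed; basisDeflate below is the code actually used):
+// gather all projections <v_i,src>/lambda_i first, then accumulate in a
+// single pass over the basis.
+template<class Field>
+void basisDeflateFused(const std::vector<Field> &_v,const std::vector<RealD>& eval,
+                       const Field& src_orig,Field& result)
+{
+  assert(_v.size()==eval.size());
+  std::vector<ComplexD> proj(_v.size());
+  for (int i=0;i<(int)_v.size();i++) {
+    proj[i] = innerProduct(_v[i],src_orig) / eval[i]; // <v_i,src>/lambda_i
+  }
+  result = zero;
+  for (int i=0;i<(int)_v.size();i++) {
+    axpy(result,proj[i],_v[i],result); // result += proj_i * v_i
+  }
+}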
+template +void basisDeflate(const std::vector &_v,const std::vector& eval,const Field& src_orig,Field& result) { + result = zero; + assert(_v.size()==eval.size()); + int N = (int)_v.size(); + for (int i=0;i class ImplicitlyRestartedLanczosTester +{ + public: + virtual int TestConvergence(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0; + virtual int ReconstructEval(int j,RealD resid,Field &evec, RealD &eval,RealD evalMaxApprox)=0; +}; + +enum IRLdiagonalisation { + IRLdiagonaliseWithDSTEGR, + IRLdiagonaliseWithQR, + IRLdiagonaliseWithEigen +}; + +template class ImplicitlyRestartedLanczosHermOpTester : public ImplicitlyRestartedLanczosTester +{ + public: + LinearFunction &_HermOp; + ImplicitlyRestartedLanczosHermOpTester(LinearFunction &HermOp) : _HermOp(HermOp) { }; + int ReconstructEval(int j,RealD resid,Field &B, RealD &eval,RealD evalMaxApprox) + { + return TestConvergence(j,resid,B,eval,evalMaxApprox); + } + int TestConvergence(int j,RealD eresid,Field &B, RealD &eval,RealD evalMaxApprox) + { + Field v(B); + RealD eval_poly = eval; + // Apply operator + _HermOp(B,v); + + RealD vnum = real(innerProduct(B,v)); // HermOp. + RealD vden = norm2(B); + RealD vv0 = norm2(v); + eval = vnum/vden; + v -= eval*B; + + RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); + + std::cout.precision(13); + std::cout< class ImplicitlyRestartedLanczos { - -private: - - int MaxIter; // Max iterations - int Nstop; // Number of evecs checked for convergence - int Nk; // Number of converged sought - int Nm; // Nm -- total number of vectors - RealD eresid; + private: + const RealD small = 1.0e-8; + int MaxIter; + int MinRestart; // Minimum number of restarts; only check for convergence after + int Nstop; // Number of evecs checked for convergence + int Nk; // Number of converged sought + // int Np; // Np -- Number of spare vecs in krylov space // == Nm - Nk + int Nm; // Nm -- total number of vectors IRLdiagonalisation diagonalisation; - //////////////////////////////////// + int orth_period; + + RealD OrthoTime; + RealD eresid, betastp; + //////////////////////////////// // Embedded objects - //////////////////////////////////// - SortEigen _sort; - LinearOperatorBase &_Linop; - OperatorFunction &_poly; - + //////////////////////////////// + LinearFunction &_PolyOp; + LinearFunction &_HermOp; + ImplicitlyRestartedLanczosTester &_Tester; + // Default tester provided (we need a ref to something in default case) + ImplicitlyRestartedLanczosHermOpTester SimpleTester; ///////////////////////// // Constructor ///////////////////////// + public: - ImplicitlyRestartedLanczos(LinearOperatorBase &Linop, // op - OperatorFunction & poly, // polynomial - int _Nstop, // really sought vecs - int _Nk, // sought vecs - int _Nm, // total vecs - RealD _eresid, // resid in lmd deficit - int _MaxIter, // Max iterations - IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen ) : - _Linop(Linop), _poly(poly), - Nstop(_Nstop), Nk(_Nk), Nm(_Nm), - eresid(_eresid), MaxIter(_MaxIter), - diagonalisation(_diagonalisation) - { }; + ////////////////////////////////////////////////////////////////// + // PAB: + ////////////////////////////////////////////////////////////////// + // Too many options & knobs. + // Eliminate: + // orth_period + // betastp + // MinRestart + // + // Do we really need orth_period + // What is the theoretical basis & guarantees of betastp ? + // Nstop=Nk viable? + // MinRestart avoidable with new convergence test? 
+ // Could cut to PolyOp, HermOp, Tester, Nk, Nm, resid, maxiter (+diagonalisation) + // HermOp could be eliminated if we dropped the Power method for max eval. + // -- also: The eval, eval2, eval2_copy stuff is still unnecessarily unclear + ////////////////////////////////////////////////////////////////// + ImplicitlyRestartedLanczos(LinearFunction & PolyOp, + LinearFunction & HermOp, + ImplicitlyRestartedLanczosTester & Tester, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + int _MaxIter, // Max iterations + RealD _betastp=0.0, // if beta(k) < betastp: converged + int _MinRestart=1, int _orth_period = 1, + IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : + SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(Tester), + Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), + eresid(_eresid), betastp(_betastp), + MaxIter(_MaxIter) , MinRestart(_MinRestart), + orth_period(_orth_period), diagonalisation(_diagonalisation) { }; + + ImplicitlyRestartedLanczos(LinearFunction & PolyOp, + LinearFunction & HermOp, + int _Nstop, // sought vecs + int _Nk, // sought vecs + int _Nm, // spare vecs + RealD _eresid, // resid in lmdue deficit + int _MaxIter, // Max iterations + RealD _betastp=0.0, // if beta(k) < betastp: converged + int _MinRestart=1, int _orth_period = 1, + IRLdiagonalisation _diagonalisation= IRLdiagonaliseWithEigen) : + SimpleTester(HermOp), _PolyOp(PolyOp), _HermOp(HermOp), _Tester(SimpleTester), + Nstop(_Nstop) , Nk(_Nk), Nm(_Nm), + eresid(_eresid), betastp(_betastp), + MaxIter(_MaxIter) , MinRestart(_MinRestart), + orth_period(_orth_period), diagonalisation(_diagonalisation) { }; //////////////////////////////// // Helpers //////////////////////////////// - static RealD normalise(Field& v) + template static RealD normalise(T& v) { RealD nn = norm2(v); nn = sqrt(nn); v = v * (1.0/nn); return nn; } - - void orthogonalize(Field& w, std::vector& evec, int k) + + void orthogonalize(Field& w, std::vector& evec,int k) { - typedef typename Field::scalar_type MyComplex; - MyComplex ip; - - for(int j=0; j& eval, std::vector& evec, const Field& src, int& Nconv) + void calc(std::vector& eval, std::vector& evec, const Field& src, int& Nconv, bool reverse=false) { + GridBase *grid = src._grid; + assert(grid == evec[0]._grid); - GridBase *grid = evec[0]._grid; - assert(grid == src._grid); - - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout << GridLogMessage <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl; - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout << GridLogMessage <<" -- seek Nk = " << Nk <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- total Nm = " << Nm <<" vectors"<< std::endl; - std::cout << GridLogMessage <<" -- size of eval = " << eval.size() << std::endl; - std::cout << GridLogMessage <<" -- size of evec = " << evec.size() << std::endl; + GridLogIRL.TimingMode(1); + std::cout << GridLogIRL <<"**************************************************************************"<< std::endl; + std::cout << GridLogIRL <<" ImplicitlyRestartedLanczos::calc() starting iteration 0 / "<< MaxIter<< std::endl; + std::cout << GridLogIRL <<"**************************************************************************"<< 
std::endl; + std::cout << GridLogIRL <<" -- seek Nk = " << Nk <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- accept Nstop = " << Nstop <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- total Nm = " << Nm <<" vectors"<< std::endl; + std::cout << GridLogIRL <<" -- size of eval = " << eval.size() << std::endl; + std::cout << GridLogIRL <<" -- size of evec = " << evec.size() << std::endl; if ( diagonalisation == IRLdiagonaliseWithDSTEGR ) { - std::cout << GridLogMessage << "Diagonalisation is DSTEGR "< lme(Nm); std::vector lme2(Nm); std::vector eval2(Nm); + std::vector eval2_copy(Nm); + Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); - Eigen::MatrixXd Qt = Eigen::MatrixXd::Zero(Nm,Nm); - - std::vector Iconv(Nm); - std::vector B(Nm,grid); // waste of space replicating - Field f(grid); Field v(grid); - int k1 = 1; int k2 = Nk; - - Nconv = 0; - RealD beta_k; + + Nconv = 0; // Set initial vector evec[0] = src; - std::cout << GridLogMessage <<"norm2(src)= " << norm2(src)<()); + std::cout<0); + + basisRotate(evec,Qt,k1-1,k2+1,0,Nm,Nm); /// big constraint on the basis + std::cout<= MinRestart) { - if( Nconv>=Nstop ){ - goto converged; - } - } // end of iter loop - - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; - std::cout<< GridLogError <<" ImplicitlyRestartedLanczos::calc() NOT converged."; - std::cout << GridLogMessage <<"**************************************************************************"<< std::endl; + std::cout << GridLogIRL << "Test convergence: rotate subset of vectors to test convergence " << std::endl; + + Field B(grid); B.checkerboard = evec[0].checkerboard; + + // power of two search pattern; not every evalue in eval2 is assessed. + for(int jj = 1; jj<=Nstop; jj*=2){ + int j = Nstop-jj; + RealD e = eval2_copy[j]; // Discard the evalue + basisRotateJ(B,evec,Qt,j,0,Nk,Nm); + if( _Tester.TestConvergence(j,eresid,B,e,evalMaxApprox) ) { + if ( j > Nconv ) { + Nconv=j+1; + jj=Nstop; // Terminate the scan + } + } + } + // Do evec[0] for good measure + { + int j=0; + RealD e = eval2_copy[0]; + basisRotateJ(B,evec,Qt,j,0,Nk,Nm); + _Tester.TestConvergence(j,eresid,B,e,evalMaxApprox); + } + // test if we converged, if so, terminate + std::cout<= "<=Nstop || beta_k < betastp){ + if( Nconv>=Nstop){ + goto converged; + } + + } else { + std::cout << GridLogIRL << "iter < MinRestart: do not yet test for convergence\n"; + } // end of iter loop + } + + std::cout<0) w -= lme[k-1] * evec[k-1]; - - ComplexD zalph = innerProduct(evec[k],w); // 4. αk:=(wk,vk) + + ComplexD zalph = innerProduct(evec_k,w); // 4. αk:=(wk,vk) RealD alph = real(zalph); - - w = w - alph * evec[k];// 5. wk:=wk−αkvk - + + w = w - alph * evec_k;// 5. wk:=wk−αkvk + RealD beta = normalise(w); // 6. βk+1 := ∥wk∥2. If βk+1 = 0 then Stop // 7. vk+1 := wk/βk+1 - + lmd[k] = alph; lme[k] = beta; - - if ( k > 0 ) orthogonalize(w,evec,k); // orthonormalise - if ( k < Nm-1) evec[k+1] = w; - - if ( beta < tiny ) std::cout << GridLogMessage << " beta is tiny "<0 && k % orth_period == 0) { + orthogonalize(w,evec,k); // orthonormalise + std::cout<& lmd, std::vector& lme, int Nk, int Nm, Eigen::MatrixXd & Qt, // Nm x Nm @@ -404,11 +632,11 @@ private: } } } - /////////////////////////////////////////////////////////////////////////// - // File could end here if settle on Eigen ??? 
- /////////////////////////////////////////////////////////////////////////// - void qr_decomp(std::vector& lmd, // Nm + /////////////////////////////////////////////////////////////////////////// + // File could end here if settle on Eigen ??? !!! + /////////////////////////////////////////////////////////////////////////// + void QR_decomp(std::vector& lmd, // Nm std::vector& lme, // Nm int Nk, int Nm, // Nk, Nm Eigen::MatrixXd& Qt, // Nm x Nm matrix @@ -575,51 +803,50 @@ void diagonalize_lapack(std::vector& lmd, #endif } - void diagonalize_QR(std::vector& lmd, std::vector& lme, - int Nk, int Nm, - Eigen::MatrixXd & Qt, - GridBase *grid) - { - int Niter = 100*Nm; - int kmin = 1; - int kmax = Nk; - - // (this should be more sophisticated) - for(int iter=0; iter= kmin; --j){ - RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); - if(fabs(lme[j-1])+dds > dds){ - kmax = j+1; - goto continued; - } - } - Niter = iter; - return; - - continued: - for(int j=0; j dds){ - kmin = j+1; - break; - } +void diagonalize_QR(std::vector& lmd, std::vector& lme, + int Nk, int Nm, + Eigen::MatrixXd & Qt, + GridBase *grid) +{ + int QRiter = 100*Nm; + int kmin = 1; + int kmax = Nk; + + // (this should be more sophisticated) + for(int iter=0; iter= kmin; --j){ + RealD dds = fabs(lmd[j-1])+fabs(lmd[j]); + if(fabs(lme[j-1])+dds > dds){ + kmax = j+1; + goto continued; + } + } + QRiter = iter; + return; + + continued: + for(int j=0; j dds){ + kmin = j+1; + break; } } - std::cout << GridLogError << "[QL method] Error - Too many iteration: "< +Author: paboyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#ifndef GRID_LOCAL_COHERENCE_IRL_H +#define GRID_LOCAL_COHERENCE_IRL_H +namespace Grid { +struct LanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LanczosParams, + ChebyParams, Cheby,/*Chebyshev*/ + int, Nstop, /*Vecs in Lanczos must converge Nstop < Nk < Nm*/ + int, Nk, /*Vecs in Lanczos seek converge*/ + int, Nm, /*Total vecs in Lanczos include restart*/ + RealD, resid, /*residual*/ + int, MaxIt, + RealD, betastp, /* ? 
*/ + int, MinRes); // Must restart +}; + +struct LocalCoherenceLanczosParams : Serializable { + public: + GRID_SERIALIZABLE_CLASS_MEMBERS(LocalCoherenceLanczosParams, + bool, doFine, + bool, doFineRead, + bool, doCoarse, + bool, doCoarseRead, + LanczosParams, FineParams, + LanczosParams, CoarseParams, + ChebyParams, Smoother, + RealD , coarse_relax_tol, + std::vector, blockSize, + std::string, config, + std::vector < std::complex >, omega, + RealD, mass, + RealD, M5); +}; + +// Duplicate functionality; ProjectedFunctionHermOp could be used with the trivial function +template +class ProjectedHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedHermOp(LinearOperatorBase& linop, Aggregation &aggregate) : + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + FineField fin(FineGrid); + FineField fout(FineGrid); + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ProjectedFunctionHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, + Aggregation &aggregate) : + _poly(poly), + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + + FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; + FineField fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; + + _Aggregate.PromoteFromSubspace(in,fin); std::cout< +class ImplicitlyRestartedLanczosSmoothedTester : public ImplicitlyRestartedLanczosTester > > +{ + public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearFunction & _Poly; + OperatorFunction & _smoother; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + RealD _coarse_relax_tol; + ImplicitlyRestartedLanczosSmoothedTester(LinearFunction &Poly, + OperatorFunction &smoother, + LinearOperatorBase &Linop, + Aggregation &Aggregate, + RealD coarse_relax_tol=5.0e3) + : _smoother(smoother), _Linop(Linop),_Aggregate(Aggregate), _Poly(Poly), _coarse_relax_tol(coarse_relax_tol) { }; + + int TestConvergence(int j,RealD eresid,CoarseField &B, RealD &eval,RealD evalMaxApprox) + { + CoarseField v(B); + RealD eval_poly = eval; + // Apply operator + _Poly(B,v); + + RealD vnum = real(innerProduct(B,v)); // HermOp. 
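+// (Rayleigh-quotient convergence check, mirroring the fine-grid tester:
+// eval = <B,Op B>/<B,B>, then vv = ||Op B - eval*B||^2 / evalMaxApprox^2
+// is compared against eresid^2; for coarse vectors with j > nbasis the
+// tolerance is relaxed by _coarse_relax_tol.)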
+ RealD vden = norm2(B); + RealD vv0 = norm2(v); + eval = vnum/vden; + v -= eval*B; + + RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0); + + std::cout.precision(13); + std::cout< nbasis ) eresid = eresid*_coarse_relax_tol; + if( (vv +class LocalCoherenceLanczos +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice CoarseField; + typedef Lattice FineField; + +protected: + GridBase *_CoarseGrid; + GridBase *_FineGrid; + int _checkerboard; + LinearOperatorBase & _FineOp; + + // FIXME replace Aggregation with vector of fine; the code reuse is too small for + // the hassle and complexity of cross coupling. + Aggregation _Aggregate; + std::vector evals_fine; + std::vector evals_coarse; + std::vector evec_coarse; +public: + LocalCoherenceLanczos(GridBase *FineGrid, + GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) : + _CoarseGrid(CoarseGrid), + _FineGrid(FineGrid), + _Aggregate(CoarseGrid,FineGrid,checkerboard), + _FineOp(FineOp), + _checkerboard(checkerboard) + { + evals_fine.resize(0); + evals_coarse.resize(0); + }; + void Orthogonalise(void ) { _Aggregate.Orthogonalise(); } + + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = ::sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void fakeFine(void) + { + int Nk = nbasis; + _Aggregate.subspace.resize(Nk,_FineGrid); + _Aggregate.subspace[0]=1.0; + _Aggregate.subspace[0].checkerboard=_checkerboard; + normalise(_Aggregate.subspace[0]); + PlainHermOp Op(_FineOp); + for(int k=1;k Op(_FineOp); + ImplicitlyRestartedLanczosHermOpTester SimpleTester(Op); + for(int k=0;k ChebySmooth(cheby_smooth); + ProjectedFunctionHermOp ChebyOp (ChebySmooth,_FineOp,_Aggregate); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); + + for(int k=0;k Cheby(cheby_parms); + FunctionHermOp ChebyOp(Cheby,_FineOp); + PlainHermOp Op(_FineOp); + + evals_fine.resize(Nm); + _Aggregate.subspace.resize(Nm,_FineGrid); + + ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + + FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; + + int Nconv; + IRL.calc(evals_fine,_Aggregate.subspace,src,Nconv,false); + + // Shrink down to number saved + assert(Nstop>=nbasis); + assert(Nconv>=nbasis); + evals_fine.resize(nbasis); + _Aggregate.subspace.resize(nbasis,_FineGrid); + } + void calcCoarse(ChebyParams cheby_op,ChebyParams cheby_smooth,RealD relax, + int Nstop, int Nk, int Nm,RealD resid, + RealD MaxIt, RealD betastp, int MinRes) + { + Chebyshev Cheby(cheby_op); + ProjectedHermOp Op(_FineOp,_Aggregate); + ProjectedFunctionHermOp ChebyOp (Cheby,_FineOp,_Aggregate); + ////////////////////////////////////////////////////////////////////////////////////////////////// + // create a smoother and see if we can get a cheap convergence test and smooth inside the IRL + ////////////////////////////////////////////////////////////////////////////////////////////////// + + Chebyshev ChebySmooth(cheby_smooth); + ImplicitlyRestartedLanczosSmoothedTester ChebySmoothTester(ChebyOp,ChebySmooth,_FineOp,_Aggregate,relax); + + evals_coarse.resize(Nm); + evec_coarse.resize(Nm,_CoarseGrid); + + CoarseField src(_CoarseGrid); src=1.0; + + ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,ChebySmoothTester,Nstop,Nk,Nm,resid,MaxIt,betastp,MinRes); + int Nconv=0; + IRL.calc(evals_coarse,evec_coarse,src,Nconv,false); + assert(Nconv>=Nstop); + evals_coarse.resize(Nstop); + evec_coarse.resize 
(Nstop,_CoarseGrid); + for (int i=0;i using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve; + /////////////////////////////////////////////////////////////////////////////////////////////////////// // Take a matrix and form a Red Black solver calling a Herm solver // Use of RB info prevents making SchurRedBlackSolve conform to standard interface @@ -76,12 +184,10 @@ namespace Grid { ///////////////////////////////////////////////////// // Wrap the usual normal equations Schur trick ///////////////////////////////////////////////////// - SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver) : - _HermitianRBSolver(HermitianRBSolver) - { - CBfactorise=0; - }; - + SchurRedBlackDiagMooeeSolve(OperatorFunction &HermitianRBSolver,int cb=0) : _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=cb; + }; template void operator() (Matrix & _Matrix,const Field &in, Field &out){ @@ -141,5 +247,166 @@ namespace Grid { } }; + + /////////////////////////////////////////////////////////////////////////////////////////////////////// + // Take a matrix and form a Red Black solver calling a Herm solver + // Use of RB info prevents making SchurRedBlackSolve conform to standard interface + /////////////////////////////////////////////////////////////////////////////////////////////////////// + template class SchurRedBlackDiagTwoSolve { + private: + OperatorFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoSolve(OperatorFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout< class SchurRedBlackDiagTwoMixed { + private: + LinearFunction & _HermitianRBSolver; + int CBfactorise; + public: + + ///////////////////////////////////////////////////// + // Wrap the usual normal equations Schur trick + ///////////////////////////////////////////////////// + SchurRedBlackDiagTwoMixed(LinearFunction &HermitianRBSolver) : + _HermitianRBSolver(HermitianRBSolver) + { + CBfactorise=0; + }; + + template + void operator() (Matrix & _Matrix,const Field &in, Field &out){ + + // FIXME CGdiagonalMee not implemented virtual function + // FIXME use CBfactorise to control schur decomp + 
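/* Annotation, not part of the patch: all three solver wrappers above implement
   the same Schur (red-black) decomposition. Writing the Dirac matrix in
   even/odd checkerboard blocks,

       M = [ Mee  Meo ]
           [ Moe  Moo ]

   the solve factorises into three steps:

       src_o' = src_o - Moe Mee^{-1} src_e       (prepare the odd source)
       S sol_o = src_o'                          (Krylov solve on one checkerboard,
                                                  with S = Moo - Moe Mee^{-1} Meo)
       sol_e = Mee^{-1} ( src_e - Meo sol_o )    (cheap even reconstruction)

   Since S itself is not Hermitian, the code applies MpcDag to the prepared
   source and hands the inner solver the normal equations MpcDag Mpc. The
   DiagTwo variants fold both diagonal blocks into the Schur operator, which
   changes the conditioning but not this three-step structure. */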
GridBase *grid = _Matrix.RedBlackGrid(); + GridBase *fgrid= _Matrix.Grid(); + + SchurDiagTwoOperator _HermOpEO(_Matrix); + + Field src_e(grid); + Field src_o(grid); + Field sol_e(grid); + Field sol_o(grid); + Field tmp(grid); + Field Mtmp(grid); + Field resid(fgrid); + + pickCheckerboard(Even,src_e,in); + pickCheckerboard(Odd ,src_o,in); + pickCheckerboard(Even,sol_e,out); + pickCheckerboard(Odd ,sol_o,out); + + ///////////////////////////////////////////////////// + // src_o = Mdag * (source_o - Moe MeeInv source_e) + ///////////////////////////////////////////////////// + _Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even); + _Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd); + tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd); + + // get the right MpcDag + _HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd); + + ////////////////////////////////////////////////////////////// + // Call the red-black solver + ////////////////////////////////////////////////////////////// + std::cout<= 1024 && s < 7) + { + s++; + count /= 1024; + } + if (count - floor(count) == 0.0) + { + snprintf(buf, bufSize, "%d %sB", (int)count, suffixes[s]); + } + else + { + snprintf(buf, bufSize, "%.1f %sB", count, suffixes[s]); + } + + return std::string(buf); +} + } diff --git a/lib/allocator/AlignedAllocator.h b/lib/allocator/AlignedAllocator.h index 62579587..3b27aec9 100644 --- a/lib/allocator/AlignedAllocator.h +++ b/lib/allocator/AlignedAllocator.h @@ -63,6 +63,64 @@ namespace Grid { static void *Lookup(size_t bytes) ; }; + + std::string sizeString(size_t bytes); + + struct MemoryStats + { + size_t totalAllocated{0}, maxAllocated{0}, + currentlyAllocated{0}, totalFreed{0}; + }; + + class MemoryProfiler + { + public: + static MemoryStats *stats; + static bool debug; + }; + + #define memString(bytes) std::to_string(bytes) + " (" + sizeString(bytes) + ")" + #define profilerDebugPrint \ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + std::cout << GridLogDebug << "[Memory debug] Stats " << MemoryProfiler::stats << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] total : " << memString(s->totalAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] max : " << memString(s->maxAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] current: " << memString(s->currentlyAllocated) \ + << std::endl;\ + std::cout << GridLogDebug << "[Memory debug] freed : " << memString(s->totalFreed) \ + << std::endl;\ + } + + #define profilerAllocate(bytes)\ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + s->totalAllocated += (bytes);\ + s->currentlyAllocated += (bytes);\ + s->maxAllocated = std::max(s->maxAllocated, s->currentlyAllocated);\ + }\ + if (MemoryProfiler::debug)\ + {\ + std::cout << GridLogDebug << "[Memory debug] allocating " << memString(bytes) << std::endl;\ + profilerDebugPrint;\ + } + + #define profilerFree(bytes)\ + if (MemoryProfiler::stats)\ + {\ + auto s = MemoryProfiler::stats;\ + s->totalFreed += (bytes);\ + s->currentlyAllocated -= (bytes);\ + }\ + if (MemoryProfiler::debug)\ + {\ + std::cout << GridLogDebug << "[Memory debug] freeing " << memString(bytes) << std::endl;\ + profilerDebugPrint;\ + } void check_huge_pages(void *Buf,uint64_t BYTES); @@ -92,6 +150,7 @@ public: pointer allocate(size_type __n, const void* _p= 0) { size_type bytes = __n*sizeof(_Tp); + profilerAllocate(bytes); _Tp *ptr = (_Tp *) PointerCache::Lookup(bytes); // if ( ptr != NULL ) @@ -122,6 +181,8 @@ public: void 
deallocate(pointer __p, size_type __n) { size_type bytes = __n * sizeof(_Tp); + profilerFree(bytes); + pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes); #ifdef HAVE_MM_MALLOC_H @@ -172,10 +233,13 @@ public: #ifdef GRID_COMMS_SHMEM pointer allocate(size_type __n, const void* _p= 0) { + size_type bytes = __n*sizeof(_Tp); + + profilerAllocate(bytes); #ifdef CRAY - _Tp *ptr = (_Tp *) shmem_align(__n*sizeof(_Tp),64); + _Tp *ptr = (_Tp *) shmem_align(bytes,64); #else - _Tp *ptr = (_Tp *) shmem_align(64,__n*sizeof(_Tp)); + _Tp *ptr = (_Tp *) shmem_align(64,bytes); #endif #ifdef PARANOID_SYMMETRIC_HEAP static void * bcast; @@ -193,18 +257,23 @@ public: #endif return ptr; } - void deallocate(pointer __p, size_type) { + void deallocate(pointer __p, size_type __n) { + size_type bytes = __n*sizeof(_Tp); + + profilerFree(bytes); shmem_free((void *)__p); } #else pointer allocate(size_type __n, const void* _p= 0) { -#ifdef HAVE_MM_MALLOC_H - _Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN); -#else - _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp)); -#endif size_type bytes = __n*sizeof(_Tp); + + profilerAllocate(bytes); +#ifdef HAVE_MM_MALLOC_H + _Tp * ptr = (_Tp *) _mm_malloc(bytes, GRID_ALLOC_ALIGN); +#else + _Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN, bytes); +#endif uint8_t *cp = (uint8_t *)ptr; if ( ptr ) { // One touch per 4k page, static OMP loop to catch same loop order @@ -215,7 +284,10 @@ public: } return ptr; } - void deallocate(pointer __p, size_type) { + void deallocate(pointer __p, size_type __n) { + size_type bytes = __n*sizeof(_Tp); + + profilerFree(bytes); #ifdef HAVE_MM_MALLOC_H _mm_free((void *)__p); #else diff --git a/lib/cartesian/Cartesian_base.h b/lib/cartesian/Cartesian_base.h index 324772c5..c49dac84 100644 --- a/lib/cartesian/Cartesian_base.h +++ b/lib/cartesian/Cartesian_base.h @@ -44,13 +44,21 @@ namespace Grid{ class GridBase : public CartesianCommunicator , public GridThread { public: - + int dummy; // Give Lattice access template friend class Lattice; GridBase(const std::vector & processor_grid) : CartesianCommunicator(processor_grid) {}; GridBase(const std::vector & processor_grid, - const CartesianCommunicator &parent) : CartesianCommunicator(processor_grid,parent) {}; + const CartesianCommunicator &parent, + int &split_rank) + : CartesianCommunicator(processor_grid,parent,split_rank) {}; + GridBase(const std::vector & processor_grid, + const CartesianCommunicator &parent) + : CartesianCommunicator(processor_grid,parent,dummy) {}; + + virtual ~GridBase() = default; + // Physics Grid information. std::vector _simd_layout;// Which dimensions get relayed out over simd lanes. 
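(Annotation, not part of the patch.) The "one touch per 4k page" loop kept in
the allocators above is the classic NUMA first-touch idiom: Linux places a page
on the memory node of the thread that first writes it, so touching pages from a
statically scheduled OpenMP loop matches their placement to the thread layout
of later compute loops. A minimal stand-alone sketch of the idiom, with an
invented helper name (first_touch_alloc) and a hard-coded 4 KiB page size:

    #include <malloc.h>
    #include <stddef.h>

    void *first_touch_alloc(size_t bytes)
    {
      char *ptr = (char *)memalign(4096, bytes);
      if (ptr != NULL) {
    #pragma omp parallel for schedule(static)
        for (size_t page = 0; page < bytes; page += 4096) {
          ptr[page] = 0; // first write places this page near the touching thread
        }
      }
      return (void *)ptr;
    }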
diff --git a/lib/cartesian/Cartesian_full.h b/lib/cartesian/Cartesian_full.h index a6a85ab7..b2372575 100644 --- a/lib/cartesian/Cartesian_full.h +++ b/lib/cartesian/Cartesian_full.h @@ -38,7 +38,7 @@ namespace Grid{ class GridCartesian: public GridBase { public: - + int dummy; virtual int CheckerBoardFromOindexTable (int Oindex) { return 0; } @@ -67,7 +67,14 @@ public: GridCartesian(const std::vector &dimensions, const std::vector &simd_layout, const std::vector &processor_grid, - const GridCartesian &parent) : GridBase(processor_grid,parent) + const GridCartesian &parent) : GridBase(processor_grid,parent,dummy) + { + Init(dimensions,simd_layout,processor_grid); + } + GridCartesian(const std::vector &dimensions, + const std::vector &simd_layout, + const std::vector &processor_grid, + const GridCartesian &parent,int &split_rank) : GridBase(processor_grid,parent,split_rank) { Init(dimensions,simd_layout,processor_grid); } @@ -81,6 +88,8 @@ public: Init(dimensions,simd_layout,processor_grid); } + virtual ~GridCartesian() = default; + void Init(const std::vector &dimensions, const std::vector &simd_layout, const std::vector &processor_grid) @@ -113,6 +122,7 @@ public: // Use a reduced simd grid _ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions + //std::cout << _ldimensions[d] << " " << _gdimensions[d] << " " << _processors[d] << std::endl; assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); _rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition @@ -157,6 +167,7 @@ public: block = block * _rdimensions[d]; } }; + }; } #endif diff --git a/lib/cartesian/Cartesian_red_black.h b/lib/cartesian/Cartesian_red_black.h index f89cacc5..ee424385 100644 --- a/lib/cartesian/Cartesian_red_black.h +++ b/lib/cartesian/Cartesian_red_black.h @@ -133,6 +133,8 @@ public: { Init(base->_fdimensions,base->_simd_layout,base->_processors,checker_dim_mask,checker_dim) ; } + + virtual ~GridRedBlackCartesian() = default; #if 0 //////////////////////////////////////////////////////////// // Create redblack grid ;; deprecate these. 
Should not @@ -205,6 +207,7 @@ public: { assert((_gdimensions[d] & 0x1) == 0); _gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard + _gsites /= 2; } _ldimensions[d] = _gdimensions[d] / _processors[d]; assert(_ldimensions[d] * _processors[d] == _gdimensions[d]); diff --git a/lib/communicator/Communicator.h b/lib/communicator/Communicator.h index 09ce50dc..d4ec5a13 100644 --- a/lib/communicator/Communicator.h +++ b/lib/communicator/Communicator.h @@ -28,6 +28,7 @@ Author: Peter Boyle #ifndef GRID_COMMUNICATOR_H #define GRID_COMMUNICATOR_H +#include #include #endif diff --git a/lib/communicator/Communicator_base.cc b/lib/communicator/Communicator_base.cc index bcf429ab..edbf26af 100644 --- a/lib/communicator/Communicator_base.cc +++ b/lib/communicator/Communicator_base.cc @@ -36,33 +36,9 @@ namespace Grid { /////////////////////////////////////////////////////////////// // Info that is setup once and indept of cartesian layout /////////////////////////////////////////////////////////////// -void * CartesianCommunicator::ShmCommBuf; -uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL; CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent; int CartesianCommunicator::nCommThreads = -1; -int CartesianCommunicator::Hugepages = 0; - -///////////////////////////////// -// Alloc, free shmem region -///////////////////////////////// -void *CartesianCommunicator::ShmBufferMalloc(size_t bytes){ - // bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes - void *ptr = (void *)heap_top; - heap_top += bytes; - heap_bytes+= bytes; - if (heap_bytes >= MAX_MPI_SHM_BYTES) { - std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < &processors,const CartesianCommunicator &parent) -{ - _ndimension = processors.size(); - assert(_ndimension = parent._ndimension); - - ////////////////////////////////////////////////////////////////////////////////////////////////////// - // split the communicator - ////////////////////////////////////////////////////////////////////////////////////////////////////// - int Nparent; - MPI_Comm_size(parent.communicator,&Nparent); - - int childsize=1; - for(int d=0;d 1 ) { - - std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors, MPI_Comm communicator_base) -{ - // if ( communicator_base != communicator_world ) { - // std::cout << "Cartesian communicator created with a non-world communicator"< periodic(_ndimension,1); - MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],1,&communicator); - MPI_Comm_rank(communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); - - int Size; - MPI_Comm_size(communicator,&Size); - -#ifdef GRID_COMMS_MPIT - communicator_halo.resize (2*_ndimension); - for(int i=0;i<_ndimension*2;i++){ - MPI_Comm_dup(communicator,&communicator_halo[i]); - } -#endif - - assert(Size==_Nprocessors); -} - -CartesianCommunicator::CartesianCommunicator(const std::vector &processors) -{ - InitFromMPICommunicator(processors,communicator_world); -} - -#endif - -#if !defined( GRID_COMMS_MPI3) - -int CartesianCommunicator::NodeCount(void) { return ProcessorCount();}; -int CartesianCommunicator::RankCount(void) { return ProcessorCount();}; -#endif -#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT) -double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, - int 
xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes, int dir) -{ - std::vector list; - // Discard the "dir" - SendToRecvFromBegin (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); - SendToRecvFromComplete(list); - return 2.0*bytes; -} -double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes, int dir) -{ - // Discard the "dir" - SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); - return 2.0*bytes; -} -void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall,int dir) -{ - SendToRecvFromComplete(waitall); -} -#endif - -#if !defined( GRID_COMMS_MPI3) - -void CartesianCommunicator::StencilBarrier(void){}; - -commVector CartesianCommunicator::ShmBufStorageVector; - -void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; } - -void *CartesianCommunicator::ShmBuffer(int rank) { - return NULL; -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { - return NULL; -} -void CartesianCommunicator::ShmInitGeneric(void){ -#if 1 - int mmap_flag =0; -#ifdef MAP_ANONYMOUS - mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; -#endif -#ifdef MAP_ANON - mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; -#endif -#ifdef MAP_HUGETLB - if ( Hugepages ) mmap_flag |= MAP_HUGETLB; -#endif - ShmCommBuf =(void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); - if (ShmCommBuf == (void *)MAP_FAILED) { - perror("mmap failed "); - exit(EXIT_FAILURE); - } -#ifdef MADV_HUGEPAGE - if (!Hugepages ) madvise(ShmCommBuf,MAX_MPI_SHM_BYTES,MADV_HUGEPAGE); -#endif -#else - ShmBufStorageVector.resize(MAX_MPI_SHM_BYTES); - ShmCommBuf=(void *)&ShmBufStorageVector[0]; -#endif - bzero(ShmCommBuf,MAX_MPI_SHM_BYTES); -} - -#endif } diff --git a/lib/communicator/Communicator_base.h b/lib/communicator/Communicator_base.h index bfdb0da1..7d6911d3 100644 --- a/lib/communicator/Communicator_base.h +++ b/lib/communicator/Communicator_base.h @@ -32,117 +32,33 @@ Author: Peter Boyle /////////////////////////////////// // Processor layout information /////////////////////////////////// -#ifdef GRID_COMMS_MPI -#include -#endif -#ifdef GRID_COMMS_MPI3 -#include -#endif -#ifdef GRID_COMMS_MPIT -#include -#endif -#ifdef GRID_COMMS_SHMEM -#include -#endif +#include namespace Grid { -class CartesianCommunicator { - public: +class CartesianCommunicator : public SharedMemory { +public: //////////////////////////////////////////// - // Isend/Irecv/Wait, or Sendrecv blocking + // Policies //////////////////////////////////////////// enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential }; static CommunicatorPolicy_t CommunicatorPolicy; static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; } - - /////////////////////////////////////////// - // Up to 65536 ranks per node adequate for now - // 128MB shared memory for comms enought for 48^4 local vol comms - // Give external control (command line override?) of this - /////////////////////////////////////////// - static const int MAXLOG2RANKSPERNODE = 16; - static uint64_t MAX_MPI_SHM_BYTES; static int nCommThreads; - // use explicit huge pages - static int Hugepages; + //////////////////////////////////////////// // Communicator should know nothing of the physics grid, only processor grid. 
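// Annotation, not part of the patch: the ShmInitGeneric() deleted above used a
// self-contained recipe for backing the comms heap with anonymous shared
// memory, optionally on huge pages. A minimal sketch of the same recipe
// (helper name invented):
//
//   #include <sys/mman.h>
//   #include <stdio.h>
//   #include <stdlib.h>
//   #include <strings.h>
//
//   void *shm_heap(size_t bytes, int hugepages) {
//     int flags = MAP_SHARED | MAP_ANONYMOUS;
//   #ifdef MAP_HUGETLB
//     if (hugepages)  flags |= MAP_HUGETLB;             // explicit huge pages
//   #endif
//     void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE, flags, -1, 0);
//     if (p == MAP_FAILED) { perror("mmap failed"); exit(EXIT_FAILURE); }
//   #ifdef MADV_HUGEPAGE
//     if (!hugepages) madvise(p, bytes, MADV_HUGEPAGE); // else ask for THP
//   #endif
//     bzero(p, bytes);
//     return p;
//   }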
+ //////////////////////////////////////////// int _Nprocessors; // How many in all std::vector _processors; // Which dimensions get relayed out over processors lanes. int _processor; // linear processor rank std::vector _processor_coor; // linear processor coordinate - unsigned long _ndimension; - -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT) - static MPI_Comm communicator_world; - - MPI_Comm communicator; - std::vector communicator_halo; - - typedef MPI_Request CommsRequest_t; - -#else - typedef int CommsRequest_t; -#endif - - - //////////////////////////////////////////////////////////////////// - // Helper functionality for SHM Windows common to all other impls - //////////////////////////////////////////////////////////////////// - // Longer term; drop this in favour of a master / slave model with - // cartesian communicator on a subset of ranks, slave ranks controlled - // by group leader with data xfer via shared memory - //////////////////////////////////////////////////////////////////// -#ifdef GRID_COMMS_MPI3 - - static int ShmRank; - static int ShmSize; - static int GroupRank; - static int GroupSize; - static int WorldRank; - static int WorldSize; - - std::vector WorldDims; - std::vector GroupDims; - std::vector ShmDims; - - std::vector GroupCoor; - std::vector ShmCoor; - std::vector WorldCoor; - - static std::vector GroupRanks; - static std::vector MyGroup; - static int ShmSetup; - static MPI_Win ShmWindow; - static MPI_Comm ShmComm; - - std::vector LexicographicToWorldRank; - - static std::vector ShmCommBufs; - -#else - static void ShmInitGeneric(void); - static commVector ShmBufStorageVector; -#endif - - ///////////////////////////////// - // Grid information and queries - // Implemented in Communicator_base.C - ///////////////////////////////// - static void * ShmCommBuf; - - - size_t heap_top; - size_t heap_bytes; - - void *ShmBufferSelf(void); - void *ShmBuffer(int rank); - void *ShmBufferTranslate(int rank,void * local_p); - void *ShmBufferMalloc(size_t bytes); - void ShmBufferFreeAll(void) ; + unsigned long _ndimension; + static Grid_MPI_Comm communicator_world; + Grid_MPI_Comm communicator; + std::vector communicator_halo; //////////////////////////////////////////////// // Must call in Grid startup @@ -153,18 +69,20 @@ class CartesianCommunicator { // Constructors to sub-divide a parent communicator // and default to comm world //////////////////////////////////////////////// - CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent); + CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank); CartesianCommunicator(const std::vector &pdimensions_in); + virtual ~CartesianCommunicator(); private: -#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPIT) + //////////////////////////////////////////////// // Private initialise from an MPI communicator // Can use after an MPI_Comm_split, but hidden from user so private //////////////////////////////////////////////// - void InitFromMPICommunicator(const std::vector &processors, MPI_Comm communicator_base); -#endif + void InitFromMPICommunicator(const std::vector &processors, Grid_MPI_Comm communicator_base); + public: + //////////////////////////////////////////////////////////////////////////////////////// // Wraps MPI_Cart routines, or implements equivalent on other impls @@ -180,8 +98,6 @@ class CartesianCommunicator { const std::vector & ThisProcessorCoor(void) ; const std::vector & ProcessorGrid(void) 
; int ProcessorCount(void) ; - int NodeCount(void) ; - int RankCount(void) ; //////////////////////////////////////////////////////////////////////////////// // very VERY rarely (Log, serial RNG) we need world without a grid @@ -262,6 +178,23 @@ class CartesianCommunicator { // Broadcast a buffer and composite larger //////////////////////////////////////////////////////////// void Broadcast(int root,void* data, int bytes); + + //////////////////////////////////////////////////////////// + // All2All down one dimension + //////////////////////////////////////////////////////////// + template void AllToAll(int dim,std::vector &in, std::vector &out){ + assert(dim>=0); + assert(dim<_ndimension); + assert(in.size()==out.size()); + int numnode = _processors[dim]; + uint64_t bytes=sizeof(T); + uint64_t words=in.size()/numnode; + assert(numnode * words == in.size()); + assert(words < (1ULL<<31)); + AllToAll(dim,(void *)&in[0],(void *)&out[0],words,bytes); + } + void AllToAll(int dim ,void *in,void *out,uint64_t words,uint64_t bytes); + void AllToAll(void *in,void *out,uint64_t words ,uint64_t bytes); template void Broadcast(int root,obj &data) { diff --git a/lib/communicator/Communicator_mpi.cc b/lib/communicator/Communicator_mpi.cc deleted file mode 100644 index a55c0164..00000000 --- a/lib/communicator/Communicator_mpi.cc +++ /dev/null @@ -1,211 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include -#include - -namespace Grid { - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -MPI_Comm CartesianCommunicator::communicator_world; - -// Should error check all MPI calls. 
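// Annotation, not part of the patch: the AllToAll<T> helper added to
// Communicator_base.h above redistributes a std::vector along one dimension of
// the processor grid. A hypothetical call site, assuming a communicator 'Grid'
// with 4 ranks along dimension 0:
//
//   std::vector<double> in(4096), out(4096);
//   Grid.AllToAll(0, in, out);  // each rank exchanges 4096/4 = 1024 words
//                               // with every peer in dimension 0
//
// The vector length must divide evenly by the rank count in that dimension
// (asserted above), and the per-peer word count must stay below 2^31 because
// the MPI interface takes int counts.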
-void CartesianCommunicator::Init(int *argc, char ***argv) { - int flag; - int provided; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); - if ( provided != MPI_THREAD_MULTIPLE ) { - QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; - } - } - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - ShmInitGeneric(); -} -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - int myrank = _processor; - int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - MPI_Request xrq; - MPI_Request rrq; - - ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - - assert(ierr==0); - list.push_back(xrq); - list.push_back(rrq); - } else { - // Give the CPU to MPI immediately; can use threads to overlap optionally - 
ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, - recv,bytes,MPI_CHAR,from, from, - communicator,MPI_STATUS_IGNORE); - assert(ierr==0); - } -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); - } -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - /////////////////////////////////////////////////////// - // Should only be used prior to Grid Init finished. - // Check for this? - /////////////////////////////////////////////////////// -int CartesianCommunicator::RankWorld(void){ - int r; - MPI_Comm_rank(communicator_world,&r); - return r; -} -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - -} - diff --git a/lib/communicator/Communicator_mpi3.cc b/lib/communicator/Communicator_mpi3.cc index dce9588a..ef47d617 100644 --- a/lib/communicator/Communicator_mpi3.cc +++ b/lib/communicator/Communicator_mpi3.cc @@ -26,89 +26,20 @@ Author: Peter Boyle *************************************************************************************/ /* END LEGAL */ #include - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef HAVE_NUMAIF_H -#include -#endif - +#include namespace Grid { -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -int CartesianCommunicator::ShmSetup = 0; +Grid_MPI_Comm CartesianCommunicator::communicator_world; -int CartesianCommunicator::ShmRank; -int CartesianCommunicator::ShmSize; -int CartesianCommunicator::GroupRank; -int CartesianCommunicator::GroupSize; -int CartesianCommunicator::WorldRank; -int CartesianCommunicator::WorldSize; - -MPI_Comm CartesianCommunicator::communicator_world; -MPI_Comm CartesianCommunicator::ShmComm; -MPI_Win CartesianCommunicator::ShmWindow; - -std::vector CartesianCommunicator::GroupRanks; -std::vector CartesianCommunicator::MyGroup; -std::vector CartesianCommunicator::ShmCommBufs; - -int CartesianCommunicator::NodeCount(void) { return GroupSize;}; -int CartesianCommunicator::RankCount(void) { return WorldSize;}; - - -#undef FORCE_COMMS -void *CartesianCommunicator::ShmBufferSelf(void) +//////////////////////////////////////////// +// First initialise of comms system +//////////////////////////////////////////// +void CartesianCommunicator::Init(int *argc, char ***argv) { - return ShmCommBufs[ShmRank]; -} -void *CartesianCommunicator::ShmBuffer(int rank) -{ - int gpeer = GroupRanks[rank]; -#ifdef FORCE_COMMS - return NULL; -#endif - if (gpeer == MPI_UNDEFINED){ - return NULL; - } else { - return ShmCommBufs[gpeer]; - } -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) -{ - static int count =0; - int gpeer = GroupRanks[rank]; - assert(gpeer!=ShmRank); // never send to self - assert(rank!=WorldRank);// never send to self -#ifdef FORCE_COMMS - return NULL; -#endif - if (gpeer == MPI_UNDEFINED){ - 
return NULL; - } else { - uint64_t offset = (uint64_t)local_p - (uint64_t)ShmCommBufs[ShmRank]; - uint64_t remote = (uint64_t)ShmCommBufs[gpeer]+offset; - return (void *) remote; - } -} - -void CartesianCommunicator::Init(int *argc, char ***argv) { int flag; int provided; - // mtrace(); MPI_Initialized(&flag); // needed to coexist with other libs apparently if ( !flag ) { @@ -119,483 +50,213 @@ void CartesianCommunicator::Init(int *argc, char ***argv) { Grid_quiesce_nodes(); MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - MPI_Comm_rank(communicator_world,&WorldRank); - MPI_Comm_size(communicator_world,&WorldSize); - if ( WorldRank == 0 ) { - std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"< world_ranks(WorldSize); - GroupRanks.resize(WorldSize); - for(int r=0;r()); - int myleader = MyGroup[0]; - - std::vector leaders_1hot(WorldSize,0); - std::vector leaders_group(GroupSize,0); - leaders_1hot [ myleader ] = 1; - - /////////////////////////////////////////////////////////////////// - // global sum leaders over comm world - /////////////////////////////////////////////////////////////////// - int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world); - assert(ierr==0); - /////////////////////////////////////////////////////////////////// - // find the group leaders world rank - /////////////////////////////////////////////////////////////////// - int group=0; - for(int l=0;l shmids(ShmSize); - - if ( ShmRank == 0 ) { - for(int r=0;r coor = _processor_coor; // my coord - assert(std::abs(shift) <_processors[dim]); - - coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,source,_processors); - source = LexicographicToWorldRank[source]; - - coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,dest,_processors); - dest = LexicographicToWorldRank[dest]; - -}// rank is world rank. - + int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); + assert(ierr==0); +} int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) { int rank; - Lexicographic::IndexFromCoor(coor,rank,_processors); - rank = LexicographicToWorldRank[rank]; + int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); + assert(ierr==0); return rank; -}// rank is world rank - +} void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) { - int lr=-1; - for(int r=0;r &processors) +{ + MPI_Comm optimal_comm; + GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm); // Remap using the shared memory optimising routine + InitFromMPICommunicator(processors,optimal_comm); + SetCommunicator(optimal_comm); } ////////////////////////////////// // Try to subdivide communicator ////////////////////////////////// -CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent) - : CartesianCommunicator(processors) +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) { - std::cout << "Attempts to split MPI3 communicators will fail until implemented" < &processors) -{ - int ierr; - communicator=communicator_world; - _ndimension = processors.size(); + int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension); + std::vector parent_processor_coor(_ndimension,0); + std::vector parent_processors (_ndimension,1); + + // Can make 5d grid from 4d etc... 
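// Annotation, not part of the patch: a worked example of the padding and
// coordinate arithmetic below. The pad loop aligns a lower-dimensional parent
// with this communicator (e.g. a 4d parent under a 5d split gets a leading
// dimension of extent 1). Take a 4d parent with _processors = {2,2,4,4}
// (64 ranks) split into children of shape processors = {2,2,2,2}:
//
//   ssize = {2/2, 2/2, 4/2, 4/2} = {1,1,2,2}  ->  Nchild = 4 split grids
//
// and the parent rank at coordinate {1,0,3,2} gets
//
//   ccoor = {1%2, 0%2, 3%2, 2%2} = {1,0,1,0}  (its coordinate within a child)
//   scoor = {1/2, 0/2, 3/2, 2/2} = {0,0,1,1}  (which child it belongs to)
//
// crank and srank are then the reversed-lexicographic flattenings of ccoor and
// scoor, matching MPI's Cartesian rank ordering.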
+ int pad = _ndimension-parent_ndimension; + for(int d=0;d ccoor(_ndimension); // coor within subcommunicator + std::vector scoor(_ndimension); // coor of split within parent + std::vector ssize(_ndimension); // coor of split within parent + + for(int d=0;d<_ndimension;d++){ + ccoor[d] = parent_processor_coor[d] % processors[d]; + scoor[d] = parent_processor_coor[d] / processors[d]; + ssize[d] = parent_processors[d] / processors[d]; + } + + // rank within subcomm ; srank is rank of subcomm within blocks of subcomms + int crank; + // Mpi uses the reverse Lexico convention to us; so reversed routines called + Lexicographic::IndexFromCoorReversed(ccoor,crank,processors); // processors is the split grid dimensions + Lexicographic::IndexFromCoorReversed(scoor,srank,ssize); // ssize is the number of split grids + + MPI_Comm comm_split; + if ( Nchild > 1 ) { + + if(0){ + std::cout << GridLogMessage<<"Child communicator of "<< std::hex << parent.communicator << std::dec< &processors, MPI_Comm communicator_base) +{ + _ndimension = processors.size(); + _processor_coor.resize(_ndimension); + + ///////////////////////////////// + // Count the requested nodes + ///////////////////////////////// + _Nprocessors=1; + _processors = processors; + for(int i=0;i<_ndimension;i++){ + _Nprocessors*=_processors[i]; + } + + std::vector periodic(_ndimension,1); + MPI_Cart_create(communicator_base, _ndimension,&_processors[0],&periodic[0],0,&communicator); + MPI_Comm_rank(communicator,&_processor); + MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); + + if ( 0 && (communicator_base != communicator_world) ) { + std::cout << "InitFromMPICommunicator Cartesian communicator created with a non-world communicator"< WorldDims = processors; - - ShmDims.resize (_ndimension,1); - GroupDims.resize(_ndimension); - ShmCoor.resize (_ndimension); - GroupCoor.resize(_ndimension); - WorldCoor.resize(_ndimension); - - int dim = 0; - for(int l2=0;l2 coor(_ndimension); - ProcessorCoorFromRank(wr,coor); // from world rank - int ck = RankFromProcessorCoor(coor); - assert(ck==wr); - - if ( wr == WorldRank ) { - for(int j=0;j mcoor = coor; - this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int)); - for(int d = 0 ; d< _ndimension; d++) { - assert(coor[d] == mcoor[d]); - } - } -}; + } +} void CartesianCommunicator::GlobalSum(uint32_t &u){ int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); assert(ierr==0); @@ -712,34 +373,31 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector row(_ndimension,1); + assert(dim>=0 && dim<_ndimension); + + // Split the communicator + row[dim] = _processors[dim]; + + int me; + CartesianCommunicator Comm(row,*this,me); + Comm.AllToAll(in,out,words,bytes); +} +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) +{ + // MPI is a pain and uses "int" arguments + // 64*64*64*128*16 == 500Million elements of data. + // When 24*4 bytes multiples get 50x 10^9 >>> 2x10^9 Y2K bug. + // (Turns up on 32^3 x 64 Gparity too) + MPI_Datatype object; + int iwords; + int ibytes; + iwords = words; + ibytes = bytes; + assert(words == iwords); // safe to cast to int ? + assert(bytes == ibytes); // safe to cast to int ? 
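// Annotation, not part of the patch: the "safe to cast to int ?" worry above
// is worth spelling out. A 64^3 x 128 x 16 payload is 64*64*64*128*16 =
// 536,870,912 elements; at 24*4 = 96 bytes per element that is ~5.2e10 bytes,
// far beyond the 2^31-1 ceiling of MPI's int count arguments. Wrapping the
// per-element byte count in an MPI_Type_contiguous datatype (below) keeps the
// element count itself small, so only words -- not words*bytes -- must fit in
// an int.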
+ MPI_Type_contiguous(ibytes,MPI_BYTE,&object); + MPI_Type_commit(&object); + MPI_Alltoall(in,iwords,object,out,iwords,object,communicator); + MPI_Type_free(&object); +} + + + } diff --git a/lib/communicator/Communicator_mpi3_leader.cc b/lib/communicator/Communicator_mpi3_leader.cc deleted file mode 100644 index 6e26bd3e..00000000 --- a/lib/communicator/Communicator_mpi3_leader.cc +++ /dev/null @@ -1,988 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include "Grid.h" -#include -//#include - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////// -/// Workarounds: -/// i) bloody mac os doesn't implement unnamed semaphores since it is "optional" posix. -/// darwin dispatch semaphores don't seem to be multiprocess. -/// -/// ii) openmpi under --mca shmem posix works with two squadrons per node; -/// openmpi under default mca settings (I think --mca shmem mmap) on MacOS makes two squadrons map the SAME -/// memory as each other, despite their living on different communicators. This appears to be a bug in OpenMPI. 
-/// -//////////////////////////////////////////////////////////////////////////////////////////////////////////////// -#include -#include -#include -#include -typedef sem_t *Grid_semaphore; - - -#error /*THis is deprecated*/ - -#if 0 -#define SEM_INIT(S) S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED ); -#define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED ); -#define SEM_POST(S) assert ( sem_post(S) == 0 ); -#define SEM_WAIT(S) assert ( sem_wait(S) == 0 ); -#else -#define SEM_INIT(S) ; -#define SEM_INIT_EXCL(S) ; -#define SEM_POST(S) ; -#define SEM_WAIT(S) ; -#endif -#include - -namespace Grid { - -enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV }; - -struct Descriptor { - uint64_t buf; - size_t bytes; - int rank; - int tag; - int command; - uint64_t xbuf; - uint64_t rbuf; - int xtag; - int rtag; - int src; - int dest; - MPI_Request request; -}; - -const int pool = 48; - -class SlaveState { -public: - volatile int head; - volatile int start; - volatile int tail; - volatile Descriptor Descrs[pool]; -}; - -class Slave { -public: - Grid_semaphore sem_head; - Grid_semaphore sem_tail; - SlaveState *state; - MPI_Comm squadron; - uint64_t base; - int universe_rank; - int vertical_rank; - char sem_name [NAME_MAX]; - //////////////////////////////////////////////////////////// - // Descriptor circular pointers - //////////////////////////////////////////////////////////// - Slave() {}; - - void Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank); - - void SemInit(void) { - sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - SEM_INIT(sem_head); - sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - SEM_INIT(sem_tail); - } - void SemInitExcl(void) { - sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank); - SEM_INIT_EXCL(sem_head); - sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank); - SEM_INIT_EXCL(sem_tail); - } - void WakeUpDMA(void) { - SEM_POST(sem_head); - }; - void WakeUpCompute(void) { - SEM_POST(sem_tail); - }; - void WaitForCommand(void) { - SEM_WAIT(sem_head); - }; - void WaitForComplete(void) { - SEM_WAIT(sem_tail); - }; - void EventLoop (void) { - // std::cout<< " Entering event loop "<head,0,0); - int s=state->start; - if ( s != state->head ) { - _mm_mwait(0,0); - } -#endif - Event(); - } - } - - int Event (void) ; - - uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ; - void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ; - - void WaitAll() { - // std::cout << "Queueing WAIT command "<tail != state->head ); - } -}; - -//////////////////////////////////////////////////////////////////////// -// One instance of a data mover. 
-// Master and Slave must agree on location in shared memory -//////////////////////////////////////////////////////////////////////// - -class MPIoffloadEngine { -public: - - static std::vector Slaves; - - static int ShmSetup; - - static int UniverseRank; - static int UniverseSize; - - static MPI_Comm communicator_universe; - static MPI_Comm communicator_cached; - - static MPI_Comm HorizontalComm; - static int HorizontalRank; - static int HorizontalSize; - - static MPI_Comm VerticalComm; - static MPI_Win VerticalWindow; - static int VerticalSize; - static int VerticalRank; - - static std::vector VerticalShmBufs; - static std::vector > UniverseRanks; - static std::vector UserCommunicatorToWorldRanks; - - static MPI_Group WorldGroup, CachedGroup; - - static void CommunicatorInit (MPI_Comm &communicator_world, - MPI_Comm &ShmComm, - void * &ShmCommBuf); - - static void MapCommRankToWorldRank(int &hashtag, int & comm_world_peer,int tag, MPI_Comm comm,int commrank); - - ///////////////////////////////////////////////////////// - // routines for master proc must handle any communicator - ///////////////////////////////////////////////////////// - - static void QueueSend(int slave,void *buf, int bytes, int tag, MPI_Comm comm,int rank) { - // std::cout<< " Queueing send "<< bytes<< " slave "<< slave << " to comm "<= units ) { - mywork = myoff = 0; - } else { - mywork = (nwork+me)/units; - myoff = basework * me; - if ( me > backfill ) - myoff+= (me-backfill); - } - return; - }; - - static void QueueRoundRobinSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) { - uint8_t * cxbuf = (uint8_t *) xbuf; - uint8_t * crbuf = (uint8_t *) rbuf; - static int rrp=0; - int procs = VerticalSize-1; - int myoff=0; - int mywork=bytes; - QueueSendRecv(rrp+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src); - rrp = rrp+1; - if ( rrp == (VerticalSize-1) ) rrp = 0; - } - - static void QueueMultiplexedSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) { - uint8_t * cxbuf = (uint8_t *) xbuf; - uint8_t * crbuf = (uint8_t *) rbuf; - int mywork, myoff, procs; - procs = VerticalSize-1; - for(int s=0;s MPIoffloadEngine::Slaves; - -int MPIoffloadEngine::UniverseRank; -int MPIoffloadEngine::UniverseSize; - -MPI_Comm MPIoffloadEngine::communicator_universe; -MPI_Comm MPIoffloadEngine::communicator_cached; -MPI_Group MPIoffloadEngine::WorldGroup; -MPI_Group MPIoffloadEngine::CachedGroup; - -MPI_Comm MPIoffloadEngine::HorizontalComm; -int MPIoffloadEngine::HorizontalRank; -int MPIoffloadEngine::HorizontalSize; - -MPI_Comm MPIoffloadEngine::VerticalComm; -int MPIoffloadEngine::VerticalSize; -int MPIoffloadEngine::VerticalRank; -MPI_Win MPIoffloadEngine::VerticalWindow; -std::vector MPIoffloadEngine::VerticalShmBufs; -std::vector > MPIoffloadEngine::UniverseRanks; -std::vector MPIoffloadEngine::UserCommunicatorToWorldRanks; - -int CartesianCommunicator::NodeCount(void) { return HorizontalSize;}; -int MPIoffloadEngine::ShmSetup = 0; - -void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world, - MPI_Comm &ShmComm, - void * &ShmCommBuf) -{ - int flag; - assert(ShmSetup==0); - - ////////////////////////////////////////////////////////////////////// - // Universe is all nodes prior to squadron grouping - ////////////////////////////////////////////////////////////////////// - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_universe); - MPI_Comm_rank(communicator_universe,&UniverseRank); - 
MPI_Comm_size(communicator_universe,&UniverseSize); - - ///////////////////////////////////////////////////////////////////// - // Split into groups that can share memory (Verticals) - ///////////////////////////////////////////////////////////////////// -#undef MPI_SHARED_MEM_DEBUG -#ifdef MPI_SHARED_MEM_DEBUG - MPI_Comm_split(communicator_universe,(UniverseRank/4),UniverseRank,&VerticalComm); -#else - MPI_Comm_split_type(communicator_universe, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&VerticalComm); -#endif - MPI_Comm_rank(VerticalComm ,&VerticalRank); - MPI_Comm_size(VerticalComm ,&VerticalSize); - - ////////////////////////////////////////////////////////////////////// - // Split into horizontal groups by rank in squadron - ////////////////////////////////////////////////////////////////////// - MPI_Comm_split(communicator_universe,VerticalRank,UniverseRank,&HorizontalComm); - MPI_Comm_rank(HorizontalComm,&HorizontalRank); - MPI_Comm_size(HorizontalComm,&HorizontalSize); - assert(HorizontalSize*VerticalSize==UniverseSize); - - //////////////////////////////////////////////////////////////////////////////// - // What is my place in the world - //////////////////////////////////////////////////////////////////////////////// - int WorldRank=0; - if(VerticalRank==0) WorldRank = HorizontalRank; - int ierr=MPI_Allreduce(MPI_IN_PLACE,&WorldRank,1,MPI_INT,MPI_SUM,VerticalComm); - assert(ierr==0); - - //////////////////////////////////////////////////////////////////////////////// - // Where is the world in the universe? - //////////////////////////////////////////////////////////////////////////////// - UniverseRanks = std::vector >(HorizontalSize,std::vector(VerticalSize,0)); - UniverseRanks[WorldRank][VerticalRank] = UniverseRank; - for(int w=0;w0 ) size = sizeof(SlaveState); - - sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); - - shm_unlink(shm_name); - - int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); - if ( fd < 0 ) { - perror("failed shm_open"); - assert(0); - } - - ftruncate(fd, size); - - VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - if ( VerticalShmBufs[r] == MAP_FAILED ) { - perror("failed mmap"); - assert(0); - } - - /* - for(uint64_t page=0;page0 ) size = sizeof(SlaveState); - - sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",WorldRank,r); - - int fd=shm_open(shm_name,O_RDWR|O_CREAT,0600); - if ( fd<0 ) { - perror("failed shm_open"); - assert(0); - } - VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); - - uint64_t * check = (uint64_t *) VerticalShmBufs[r]; - assert(check[0]== WorldRank); - assert(check[1]== r); - // std::cerr<<"SHM "<"<"< cached_ranks(size); - - for(int r=0;r"<>0 )&0xFFFF)^((icomm>>16)&0xFFFF) - ^ ((icomm>>32)&0xFFFF)^((icomm>>48)&0xFFFF); - - // hashtag = (comm_hash<<15) | tag; - hashtag = tag; - -}; - -void Slave::Init(SlaveState * _state,MPI_Comm _squadron,int _universe_rank,int _vertical_rank) -{ - squadron=_squadron; - universe_rank=_universe_rank; - vertical_rank=_vertical_rank; - state =_state; - // std::cout << "state "<<_state<<" comm "<<_squadron<<" universe_rank"<head = state->tail = state->start = 0; - base = (uint64_t)MPIoffloadEngine::VerticalShmBufs[0]; - int rank; MPI_Comm_rank(_squadron,&rank); -} -#define PERI_PLUS(A) ( (A+1)%pool ) -int Slave::Event (void) { - - static int tail_last; - static int head_last; - static int start_last; - int ierr; - MPI_Status stat; - static int i=0; - - //////////////////////////////////////////////////// - // Try to advance the start pointers - 
//////////////////////////////////////////////////// - int s=state->start; - if ( s != state->head ) { - switch ( state->Descrs[s].command ) { - case COMMAND_ISEND: - ierr = MPI_Isend((void *)(state->Descrs[s].buf+base), - state->Descrs[s].bytes, - MPI_CHAR, - state->Descrs[s].rank, - state->Descrs[s].tag, - MPIoffloadEngine::communicator_universe, - (MPI_Request *)&state->Descrs[s].request); - assert(ierr==0); - state->start = PERI_PLUS(s); - return 1; - break; - - case COMMAND_IRECV: - ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base), - state->Descrs[s].bytes, - MPI_CHAR, - state->Descrs[s].rank, - state->Descrs[s].tag, - MPIoffloadEngine::communicator_universe, - (MPI_Request *)&state->Descrs[s].request); - - // std::cout<< " Request is "<Descrs[s].request<Descrs[0].request<start = PERI_PLUS(s); - return 1; - break; - - case COMMAND_SENDRECV: - - // fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10); - - ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10, - (void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10, - MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE); - - assert(ierr==0); - - // fprintf(stderr,"Sendrecv done %d %d\n",ierr,i); - // MPI_Barrier(MPIoffloadEngine::HorizontalComm); - // fprintf(stderr,"Barrier\n"); - i++; - - state->start = PERI_PLUS(s); - - return 1; - break; - - case COMMAND_WAITALL: - - for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){ - if ( state->Descrs[t].command != COMMAND_SENDRECV ) { - MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE); - } - }; - s=PERI_PLUS(s); - state->start = s; - state->tail = s; - - WakeUpCompute(); - - return 1; - break; - - default: - assert(0); - break; - } - } - return 0; -} - ////////////////////////////////////////////////////////////////////////////// - // External interaction with the queue - ////////////////////////////////////////////////////////////////////////////// - -void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) -{ - int head =state->head; - int next = PERI_PLUS(head); - - // Set up descriptor - int worldrank; - int hashtag; - MPI_Comm communicator; - MPI_Request request; - uint64_t relative; - - relative = (uint64_t)xbuf - base; - state->Descrs[head].xbuf = relative; - - relative= (uint64_t)rbuf - base; - state->Descrs[head].rbuf = relative; - - state->Descrs[head].bytes = bytes; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest); - state->Descrs[head].dest = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].xtag = hashtag; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src); - state->Descrs[head].src = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].rtag = hashtag; - - state->Descrs[head].command= COMMAND_SENDRECV; - - // Block until FIFO has space - while( state->tail==next ); - - // Msync on weak order architectures - - // Advance pointer - state->head = next; - -}; -uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank) -{ - ///////////////////////////////////////// - // Spin; if FIFO is full until not full - ///////////////////////////////////////// - int head =state->head; - int next = PERI_PLUS(head); - - // 
Set up descriptor - int worldrank; - int hashtag; - MPI_Comm communicator; - MPI_Request request; - - MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,tag,comm,commrank); - - uint64_t relative= (uint64_t)buf - base; - state->Descrs[head].buf = relative; - state->Descrs[head].bytes = bytes; - state->Descrs[head].rank = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank]; - state->Descrs[head].tag = hashtag; - state->Descrs[head].command= command; - - /* - if ( command == COMMAND_ISEND ) { - std::cout << "QueueSend from "<< universe_rank <<" to commrank " << commrank - << " to worldrank " << worldrank <tail==next ); - - // Msync on weak order architectures - // Advance pointer - state->head = next; - - return 0; -} - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// - -MPI_Comm CartesianCommunicator::communicator_world; - -void CartesianCommunicator::Init(int *argc, char ***argv) -{ - int flag; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init(argc,argv); - } - communicator_world = MPI_COMM_WORLD; - MPI_Comm ShmComm; - MPIoffloadEngine::CommunicatorInit (communicator_world,ShmComm,ShmCommBuf); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -CartesianCommunicator::CartesianCommunicator(const std::vector &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - int Size; - MPI_Comm_size(communicator_world,&Size); - assert(Size==_Nprocessors); - - _processor_coor.resize(_ndimension); - MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator); - MPI_Comm_rank (communicator,&_processor); - MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]); -}; - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} - -// Basic Halo comms primitive -void 
CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - MPI_Request xrq; - MPI_Request rrq; - int rank = _processor; - int ierr; - ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - - assert(ierr==0); - - list.push_back(xrq); - list.push_back(rrq); -} - -void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - uint64_t xmit_i = (uint64_t) xmit; - uint64_t recv_i = (uint64_t) recv; - uint64_t shm = (uint64_t) ShmCommBuf; - // assert xmit and recv lie in shared memory region - assert( (xmit_i >= shm) && (xmit_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); - assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) ); - assert(from!=_processor); - assert(dest!=_processor); - - MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); - - //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from); - - //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest); - //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from); -} - -void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &list) -{ - MPIoffloadEngine::WaitAll(); - //this->Barrier(); -} - -void CartesianCommunicator::StencilBarrier(void) { } - -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - -void *CartesianCommunicator::ShmBufferSelf(void) { return ShmCommBuf; } - -void *CartesianCommunicator::ShmBuffer(int rank) { - return NULL; -} -void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) { - return NULL; -} - - -}; - diff --git a/lib/communicator/Communicator_mpit.cc b/lib/communicator/Communicator_mpit.cc deleted file mode 100644 index 5137c27b..00000000 --- a/lib/communicator/Communicator_mpit.cc +++ /dev/null @@ -1,259 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - Source file: ./lib/communicator/Communicator_mpi.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is 
free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include -#include - -namespace Grid { - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// -MPI_Comm CartesianCommunicator::communicator_world; - -// Should error check all MPI calls. -void CartesianCommunicator::Init(int *argc, char ***argv) { - int flag; - int provided; - MPI_Initialized(&flag); // needed to coexist with other libs apparently - if ( !flag ) { - MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided); - if ( provided != MPI_THREAD_MULTIPLE ) { - QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute; - } - } - MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world); - ShmInitGeneric(); -} - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint32_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalXOR(uint64_t &u){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(float &f){ - int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSum(double &d) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator); - assert(ierr==0); -} -void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest) -{ - int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest); - assert(ierr==0); -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - int ierr=MPI_Cart_rank (communicator, &coor[0], &rank); - assert(ierr==0); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - coor.resize(_ndimension); - int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]); - assert(ierr==0); -} - -// 
Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - MPI_Status stat; - assert(sender != receiver); - int tag = sender; - if ( _processor == sender ) { - MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator); - } - if ( _processor == receiver ) { - MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat); - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - int myrank = _processor; - int ierr; - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - MPI_Request xrq; - MPI_Request rrq; - - ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq); - ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq); - - assert(ierr==0); - list.push_back(xrq); - list.push_back(rrq); - } else { - // Give the CPU to MPI immediately; can use threads to overlap optionally - ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank, - recv,bytes,MPI_CHAR,from, from, - communicator,MPI_STATUS_IGNORE); - assert(ierr==0); - } -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) { - int nreq=list.size(); - std::vector status(nreq); - int ierr = MPI_Waitall(nreq,&list[0],&status[0]); - assert(ierr==0); - } -} - -void CartesianCommunicator::Barrier(void) -{ - int ierr = MPI_Barrier(communicator); - assert(ierr==0); -} - -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - int ierr=MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator); - assert(ierr==0); -} - /////////////////////////////////////////////////////// - // Should only be used prior to Grid Init finished. - // Check for this? 
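The SendToRecvFromBegin logic deleted above captures Grid's two communication policies: a concurrent mode that posts a non-blocking Isend/Irecv pair for later completion, and a sequential mode that yields the CPU to MPI at once via a blocking Sendrecv. A minimal standalone sketch of the same dispatch (the Policy enum and function names here are illustrative, not Grid's API):

#include <mpi.h>
#include <vector>
#include <cassert>

enum Policy { PolicyConcurrent, PolicySequential };

void HaloBegin(Policy p, std::vector<MPI_Request> &list, MPI_Comm comm, int myrank,
               void *xmit, int dest, void *recv, int from, int bytes)
{
  int ierr;
  if (p == PolicyConcurrent) {
    // Post both sides non-blocking; the caller completes them with MPI_Waitall.
    MPI_Request xrq, rrq;
    ierr  = MPI_Irecv(recv, bytes, MPI_CHAR, from, from,   comm, &rrq);
    ierr |= MPI_Isend(xmit, bytes, MPI_CHAR, dest, myrank, comm, &xrq);
    assert(ierr == 0);
    list.push_back(xrq);
    list.push_back(rrq);
  } else {
    // Blocking exchange: hands MPI the CPU immediately, no request tracking.
    ierr = MPI_Sendrecv(xmit, bytes, MPI_CHAR, dest, myrank,
                        recv, bytes, MPI_CHAR, from, from,
                        comm, MPI_STATUS_IGNORE);
    assert(ierr == 0);
  }
}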
- /////////////////////////////////////////////////////// -int CartesianCommunicator::RankWorld(void){ - int r; - MPI_Comm_rank(communicator_world,&r); - return r; -} -void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) -{ - int ierr= MPI_Bcast(data, - bytes, - MPI_BYTE, - root, - communicator_world); - assert(ierr==0); -} - -double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, - void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes,int dir) -{ - int myrank = _processor; - int ierr; - assert(dir < communicator_halo.size()); - - // std::cout << " sending on communicator "< &waitall,int dir) -{ - int nreq=waitall.size(); - MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE); -}; -double CartesianCommunicator::StencilSendToRecvFrom(void *xmit, - int xmit_to_rank, - void *recv, - int recv_from_rank, - int bytes,int dir) -{ - int myrank = _processor; - int ierr; - assert(dir < communicator_halo.size()); - - // std::cout << " sending on communicator "< &processors,const CartesianCommunicator &parent) - : CartesianCommunicator(processors) {} +CartesianCommunicator::CartesianCommunicator(const std::vector &processors,const CartesianCommunicator &parent,int &srank) + : CartesianCommunicator(processors) +{ + srank=0; + SetCommunicator(communicator_world); +} CartesianCommunicator::CartesianCommunicator(const std::vector &processors) { @@ -54,8 +62,11 @@ CartesianCommunicator::CartesianCommunicator(const std::vector &processors) assert(_processors[d]==1); _processor_coor[d] = 0; } + SetCommunicator(communicator_world); } +CartesianCommunicator::~CartesianCommunicator(){} + void CartesianCommunicator::GlobalSum(float &){} void CartesianCommunicator::GlobalSumVector(float *,int N){} void CartesianCommunicator::GlobalSum(double &){} @@ -98,6 +109,14 @@ void CartesianCommunicator::SendToRecvFromComplete(std::vector & { assert(0); } +void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes) +{ + bcopy(in,out,bytes*words); +} +void CartesianCommunicator::AllToAll(void *in,void *out,uint64_t words,uint64_t bytes) +{ + bcopy(in,out,bytes*words); +} int CartesianCommunicator::RankWorld(void){return 0;} void CartesianCommunicator::Barrier(void){} @@ -111,6 +130,36 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest dest=0; } +double CartesianCommunicator::StencilSendToRecvFrom( void *xmit, + int xmit_to_rank, + void *recv, + int recv_from_rank, + int bytes, int dir) +{ + std::vector list; + // Discard the "dir" + SendToRecvFromBegin (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); + SendToRecvFromComplete(list); + return 2.0*bytes; +} +double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector &list, + void *xmit, + int xmit_to_rank, + void *recv, + int recv_from_rank, + int bytes, int dir) +{ + // Discard the "dir" + SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes); + return 2.0*bytes; +} +void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector &waitall,int dir) +{ + SendToRecvFromComplete(waitall); +} + +void CartesianCommunicator::StencilBarrier(void){}; + } diff --git a/lib/communicator/Communicator_shmem.cc b/lib/communicator/Communicator_shmem.cc deleted file mode 100644 index ed49285d..00000000 --- a/lib/communicator/Communicator_shmem.cc +++ /dev/null @@ -1,355 +0,0 @@ - /************************************************************************************* - - Grid physics library, www.github.com/paboyle/Grid - - 
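The deleted MPIT stencil path above keyed each direction's traffic to its own duplicated communicator (communicator_halo[dir]), so a threaded MPI library can progress several directions at once without tag collisions. A sketch of that idea, assuming MPI_THREAD_MULTIPLE is available; the names here are illustrative:

#include <mpi.h>
#include <vector>
#include <cassert>

double StencilBegin(std::vector<MPI_Comm> &halo_comms, int dir, int myrank,
                    std::vector<MPI_Request> &list,
                    void *xmit, int to, void *recv, int from, int bytes)
{
  assert(dir < (int)halo_comms.size());   // one duplicated communicator per direction
  MPI_Request rrq, xrq;
  int ierr  = MPI_Irecv(recv, bytes, MPI_CHAR, from, from,   halo_comms[dir], &rrq);
  ierr     |= MPI_Isend(xmit, bytes, MPI_CHAR, to,   myrank, halo_comms[dir], &xrq);
  assert(ierr == 0);
  list.push_back(rrq);
  list.push_back(xrq);
  return 2.0 * bytes;   // data moved, matching the convention of Grid's wrappers
}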
Source file: ./lib/communicator/Communicator_shmem.cc - - Copyright (C) 2015 - -Author: Peter Boyle - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - - See the full license in the file "LICENSE" in the top level distribution directory - *************************************************************************************/ - /* END LEGAL */ -#include -#include -#include - -namespace Grid { - - // Should error check all MPI calls. -#define SHMEM_VET(addr) - -#define SHMEM_VET_DEBUG(addr) { \ - if ( ! shmem_addr_accessible(addr,_processor) ) {\ - std::fprintf(stderr,"%d Inaccessible shmem address %lx %s %s\n",_processor,addr,__FUNCTION__,#addr); \ - BACKTRACEFILE(); \ - }\ -} - - -/////////////////////////////////////////////////////////////////////////////////////////////////// -// Info that is setup once and indept of cartesian layout -/////////////////////////////////////////////////////////////////////////////////////////////////// - -typedef struct HandShake_t { - uint64_t seq_local; - uint64_t seq_remote; -} HandShake; - -std::array make_psync_init(void) { - std::array ret; - ret.fill(SHMEM_SYNC_VALUE); - return ret; -} -static std::array psync_init = make_psync_init(); - -static Vector< HandShake > XConnections; -static Vector< HandShake > RConnections; - -void CartesianCommunicator::Init(int *argc, char ***argv) { - shmem_init(); - XConnections.resize(shmem_n_pes()); - RConnections.resize(shmem_n_pes()); - for(int pe =0 ; pe &processors,const CartesianCommunicator &parent) - : CartesianCommunicator(processors) -{ - std::cout << "Attempts to split SHMEM communicators will fail " < &processors) -{ - _ndimension = processors.size(); - std::vector periodic(_ndimension,1); - - _Nprocessors=1; - _processors = processors; - _processor_coor.resize(_ndimension); - - _processor = shmem_my_pe(); - - Lexicographic::CoorFromIndex(_processor_coor,_processor,_processors); - - for(int i=0;i<_ndimension;i++){ - _Nprocessors*=_processors[i]; - } - - int Size = shmem_n_pes(); - - - assert(Size==_Nprocessors); -} - -void CartesianCommunicator::GlobalSum(uint32_t &u){ - static long long source ; - static long long dest ; - static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - // int nreduce=1; - // int pestart=0; - // int logStride=0; - - source = u; - dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); // necessary? - u = dest; -} -void CartesianCommunicator::GlobalSum(uint64_t &u){ - static long long source ; - static long long dest ; - static long long llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - // int nreduce=1; - // int pestart=0; - // int logStride=0; - - source = u; - dest = 0; - shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); // necessary? 
- u = dest; -} -void CartesianCommunicator::GlobalSum(float &f){ - static float source ; - static float dest ; - static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - source = f; - dest =0.0; - shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - f = dest; -} -void CartesianCommunicator::GlobalSumVector(float *f,int N) -{ - static float source ; - static float dest = 0 ; - static float llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - if ( shmem_addr_accessible(f,_processor) ){ - shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - return; - } - - for(int i=0;i psync = psync_init; - - source = d; - dest = 0; - shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - d = dest; -} -void CartesianCommunicator::GlobalSumVector(double *d,int N) -{ - static double source ; - static double dest ; - static double llwrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE]; - static std::array psync = psync_init; - - - if ( shmem_addr_accessible(d,_processor) ){ - shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync.data()); - shmem_barrier_all(); - return; - } - - for(int i=0;i coor = _processor_coor; - - assert(std::abs(shift) <_processors[dim]); - - coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,source,_processors); - - coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim]; - Lexicographic::IndexFromCoor(coor,dest,_processors); - -} -int CartesianCommunicator::RankFromProcessorCoor(std::vector &coor) -{ - int rank; - Lexicographic::IndexFromCoor(coor,rank,_processors); - return rank; -} -void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector &coor) -{ - Lexicographic::CoorFromIndex(coor,rank,_processors); -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFrom(void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - SHMEM_VET(xmit); - SHMEM_VET(recv); - std::vector reqs(0); - SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes); - SendToRecvFromComplete(reqs); -} - -void CartesianCommunicator::SendRecvPacket(void *xmit, - void *recv, - int sender, - int receiver, - int bytes) -{ - static uint64_t seq; - - assert(recv!=xmit); - volatile HandShake *RecvSeq = (volatile HandShake *) & RConnections[sender]; - volatile HandShake *SendSeq = (volatile HandShake *) & XConnections[receiver]; - - if ( _processor == sender ) { - - // Check he has posted a receive - while(SendSeq->seq_remote == SendSeq->seq_local); - - // Advance our send count - seq = ++(SendSeq->seq_local); - - // Send this packet - SHMEM_VET(recv); - shmem_putmem(recv,xmit,bytes,receiver); - shmem_fence(); - - //Notify him we're done - shmem_putmem((void *)&(RecvSeq->seq_remote),&seq,sizeof(seq),receiver); - shmem_fence(); - } - if ( _processor == receiver ) { - - // Post a receive - seq = ++(RecvSeq->seq_local); - shmem_putmem((void *)&(SendSeq->seq_remote),&seq,sizeof(seq),sender); - - // Now wait until he has advanced our reception counter - while(RecvSeq->seq_remote != RecvSeq->seq_local); - - } -} - -// Basic Halo comms primitive -void CartesianCommunicator::SendToRecvFromBegin(std::vector &list, - void *xmit, - int dest, - void *recv, - int from, - int bytes) -{ - SHMEM_VET(xmit); - SHMEM_VET(recv); - // shmem_putmem_nb(recv,xmit,bytes,dest,NULL); - shmem_putmem(recv,xmit,bytes,dest); - - if ( 
CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all(); -} -void CartesianCommunicator::SendToRecvFromComplete(std::vector &list) -{ - // shmem_quiet(); // I'm done - if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too -} -void CartesianCommunicator::Barrier(void) -{ - shmem_barrier_all(); -} -void CartesianCommunicator::Broadcast(int root,void* data, int bytes) -{ - static std::array psync = psync_init; - static uint32_t word; - uint32_t *array = (uint32_t *) data; - assert( (bytes % 4)==0); - int words = bytes/4; - - if ( shmem_addr_accessible(data,_processor) ){ - shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data()); - return; - } - - for(int w=0;w psync = psync_init; - static uint32_t word; - uint32_t *array = (uint32_t *) data; - assert( (bytes % 4)==0); - int words = bytes/4; - - for(int w=0;w + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +// static data + +uint64_t GlobalSharedMemory::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL; +int GlobalSharedMemory::Hugepages = 0; +int GlobalSharedMemory::_ShmSetup; +int GlobalSharedMemory::_ShmAlloc; +uint64_t GlobalSharedMemory::_ShmAllocBytes; + +std::vector GlobalSharedMemory::WorldShmCommBufs; + +Grid_MPI_Comm GlobalSharedMemory::WorldShmComm; +int GlobalSharedMemory::WorldShmRank; +int GlobalSharedMemory::WorldShmSize; +std::vector GlobalSharedMemory::WorldShmRanks; + +Grid_MPI_Comm GlobalSharedMemory::WorldComm; +int GlobalSharedMemory::WorldSize; +int GlobalSharedMemory::WorldRank; + +int GlobalSharedMemory::WorldNodes; +int GlobalSharedMemory::WorldNode; + +void GlobalSharedMemory::SharedMemoryFree(void) +{ + assert(_ShmAlloc); + assert(_ShmAllocBytes>0); + for(int r=0;r= heap_size) { + std::cout<< " ShmBufferMalloc exceeded shared heap size -- try increasing with --shm flag" < + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
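The ShmBufferMalloc/ShmBufferFreeAll pair added in SharedMemory.cc above is a bump allocator over the pre-mapped shared heap: allocation only advances a cursor, and the sole "free" is a wholesale reset between uses. A compact sketch of the pattern; the 128-byte alignment is an assumption for illustration:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct BumpHeap {
  char  *base;   // start of the pre-mapped shared region
  size_t top;    // allocation cursor, in bytes from base
  size_t size;   // total capacity

  void *Malloc(size_t bytes) {
    bytes = (bytes + 127) & ~size_t(127);   // round up to a 128B multiple (assumed alignment)
    if (top + bytes > size) {
      fprintf(stderr, "shared heap exhausted -- increase the region size (cf. --shm)\n");
      exit(EXIT_FAILURE);
    }
    void *p = base + top;
    top += bytes;   // bump: no per-buffer bookkeeping, so no per-buffer free
    return p;
  }
  void FreeAll(void) { top = 0; }   // bulk reset, as in ShmBufferFreeAll
};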
+ + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + + +// TODO +// 1) move includes into SharedMemory.cc +// +// 2) split shared memory into a) optimal communicator creation from comm world +// +// b) shared memory buffers container +// -- static globally shared; init once +// -- per instance set of buffers. +// + +#pragma once + +#include + +#if defined (GRID_COMMS_MPI3) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_NUMAIF_H +#include +#endif + +namespace Grid { + +#if defined (GRID_COMMS_MPI3) + typedef MPI_Comm Grid_MPI_Comm; + typedef MPI_Request CommsRequest_t; +#else + typedef int CommsRequest_t; + typedef int Grid_MPI_Comm; +#endif + +class GlobalSharedMemory { + private: + static const int MAXLOG2RANKSPERNODE = 16; + + // Init once lock on the buffer allocation + static int _ShmSetup; + static int _ShmAlloc; + static uint64_t _ShmAllocBytes; + + public: + static int ShmSetup(void) { return _ShmSetup; } + static int ShmAlloc(void) { return _ShmAlloc; } + static uint64_t ShmAllocBytes(void) { return _ShmAllocBytes; } + static uint64_t MAX_MPI_SHM_BYTES; + static int Hugepages; + + static std::vector WorldShmCommBufs; + + static Grid_MPI_Comm WorldComm; + static int WorldRank; + static int WorldSize; + + static Grid_MPI_Comm WorldShmComm; + static int WorldShmRank; + static int WorldShmSize; + + static int WorldNodes; + static int WorldNode; + + static std::vector WorldShmRanks; + + ////////////////////////////////////////////////////////////////////////////////////// + // Create an optimal reordered communicator that makes MPI_Cart_create get it right + ////////////////////////////////////////////////////////////////////////////////////// + static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD + static void OptimalCommunicator(const std::vector &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian + /////////////////////////////////////////////////// + // Provide shared memory facilities off comm world + /////////////////////////////////////////////////// + static void SharedMemoryAllocate(uint64_t bytes, int flags); + static void SharedMemoryFree(void); + +}; + +////////////////////////////// +// one per communicator +////////////////////////////// +class SharedMemory +{ + private: + static const int MAXLOG2RANKSPERNODE = 16; + + size_t heap_top; + size_t heap_bytes; + size_t heap_size; + + protected: + + Grid_MPI_Comm ShmComm; // for barriers + int ShmRank; + int ShmSize; + std::vector ShmCommBufs; + std::vector ShmRanks;// Mapping comm ranks to Shm ranks + + public: + SharedMemory() {}; + /////////////////////////////////////////////////////////////////////////////////////// + // set the buffers & sizes + /////////////////////////////////////////////////////////////////////////////////////// + void SetCommunicator(Grid_MPI_Comm comm); + + //////////////////////////////////////////////////////////////////////// + // For this instance ; disjoint buffer sets between splits if split grid + //////////////////////////////////////////////////////////////////////// + void ShmBarrier(void); + + /////////////////////////////////////////////////// + // Call on any instance + /////////////////////////////////////////////////// + void SharedMemoryTest(void); + void *ShmBufferSelf(void); + void *ShmBuffer (int rank); + void 
*ShmBufferTranslate(int rank,void * local_p); + void *ShmBufferMalloc(size_t bytes); + void ShmBufferFreeAll(void) ; + + ////////////////////////////////////////////////////////////////////////// + // Make info on Nodes & ranks and Shared memory available + ////////////////////////////////////////////////////////////////////////// + int NodeCount(void) { return GlobalSharedMemory::WorldNodes;}; + int RankCount(void) { return GlobalSharedMemory::WorldSize;}; + +}; + +} diff --git a/lib/communicator/SharedMemoryMPI.cc b/lib/communicator/SharedMemoryMPI.cc new file mode 100644 index 00000000..d7bd7c65 --- /dev/null +++ b/lib/communicator/SharedMemoryMPI.cc @@ -0,0 +1,395 @@ +/************************************************************************************* + + Grid physics library, www.github.com/paboyle/Grid + + Source file: ./lib/communicator/SharedMemory.cc + + Copyright (C) 2015 + +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +/*Construct from an MPI communicator*/ +void GlobalSharedMemory::Init(Grid_MPI_Comm comm) +{ + assert(_ShmSetup==0); + WorldComm = comm; + MPI_Comm_rank(WorldComm,&WorldRank); + MPI_Comm_size(WorldComm,&WorldSize); + // WorldComm, WorldSize, WorldRank + + ///////////////////////////////////////////////////////////////////// + // Split into groups that can share memory + ///////////////////////////////////////////////////////////////////// + MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL,&WorldShmComm); + MPI_Comm_rank(WorldShmComm ,&WorldShmRank); + MPI_Comm_size(WorldShmComm ,&WorldShmSize); + // WorldShmComm, WorldShmSize, WorldShmRank + + // WorldNodes + WorldNodes = WorldSize/WorldShmSize; + assert( (WorldNodes * WorldShmSize) == WorldSize ); + + // FIXME: Check all WorldShmSize are the same ? + + ///////////////////////////////////////////////////////////////////// + // find world ranks in our SHM group (i.e. 
which ranks are on our node) + ///////////////////////////////////////////////////////////////////// + MPI_Group WorldGroup, ShmGroup; + MPI_Comm_group (WorldComm, &WorldGroup); + MPI_Comm_group (WorldShmComm, &ShmGroup); + + std::vector world_ranks(WorldSize); for(int r=0;r MyGroup; + MyGroup.resize(WorldShmSize); + for(int rank=0;rank()); + int myleader = MyGroup[0]; + + std::vector leaders_1hot(WorldSize,0); + std::vector leaders_group(WorldNodes,0); + leaders_1hot [ myleader ] = 1; + + /////////////////////////////////////////////////////////////////// + // global sum leaders over comm world + /////////////////////////////////////////////////////////////////// + int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,WorldComm); + assert(ierr==0); + + /////////////////////////////////////////////////////////////////// + // find the group leaders world rank + /////////////////////////////////////////////////////////////////// + int group=0; + for(int l=0;l &processors,Grid_MPI_Comm & optimal_comm) +{ + //////////////////////////////////////////////////////////////// + // Assert power of two shm_size. + //////////////////////////////////////////////////////////////// + int log2size = -1; + for(int i=0;i<=MAXLOG2RANKSPERNODE;i++){ + if ( (0x1< processor_coor(ndimension); + std::vector WorldDims = processors; std::vector ShmDims (ndimension,1); std::vector NodeDims (ndimension); + std::vector ShmCoor (ndimension); std::vector NodeCoor (ndimension); std::vector WorldCoor(ndimension); + int dim = 0; + for(int l2=0;l2 ranks(size); for(int r=0;r + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
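GlobalSharedMemory::Init above discovers the node topology by splitting the world communicator into shared-memory islands and then locating each island's leader with a one-hot Allreduce. A simplified standalone sketch (Grid additionally translates ranks through MPI groups and sorts them; this version just broadcasts the island leader):

#include <mpi.h>
#include <vector>
#include <cassert>

void NodeDiscovery(MPI_Comm world)
{
  int WorldRank, WorldSize;
  MPI_Comm_rank(world, &WorldRank);
  MPI_Comm_size(world, &WorldSize);

  // Ranks that can share memory end up in the same sub-communicator.
  MPI_Comm shm;
  MPI_Comm_split_type(world, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shm);
  int ShmRank, ShmSize;
  MPI_Comm_rank(shm, &ShmRank);
  MPI_Comm_size(shm, &ShmSize);

  // Leader = world rank of the island's rank 0; count nodes via one-hot sum.
  int leader = WorldRank;
  MPI_Bcast(&leader, 1, MPI_INT, 0, shm);
  std::vector<int> onehot(WorldSize, 0);
  if (ShmRank == 0) onehot[WorldRank] = 1;
  MPI_Allreduce(MPI_IN_PLACE, &onehot[0], WorldSize, MPI_INT, MPI_SUM, world);

  int nodes = 0;
  for (int r = 0; r < WorldSize; r++) nodes += onehot[r];
  assert(nodes * ShmSize == WorldSize);   // assumes the same rank count on every node
}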
+ + See the full license in the file "LICENSE" in the top level distribution directory +*************************************************************************************/ +/* END LEGAL */ + +#include + +namespace Grid { + +/*Construct from an MPI communicator*/ +void GlobalSharedMemory::Init(Grid_MPI_Comm comm) +{ + assert(_ShmSetup==0); + WorldComm = 0; + WorldRank = 0; + WorldSize = 1; + WorldShmComm = 0 ; + WorldShmRank = 0 ; + WorldShmSize = 1 ; + WorldNodes = 1 ; + WorldNode = 0 ; + WorldShmRanks.resize(WorldSize); WorldShmRanks[0] = 0; + WorldShmCommBufs.resize(1); + _ShmSetup=1; +} + +void GlobalSharedMemory::OptimalCommunicator(const std::vector &processors,Grid_MPI_Comm & optimal_comm) +{ + optimal_comm = WorldComm; +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// Hugetlbfs mapping intended, use anonymous mmap +//////////////////////////////////////////////////////////////////////////////////////////// +void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags) +{ + void * ShmCommBuf ; + assert(_ShmSetup==1); + assert(_ShmAlloc==0); + int mmap_flag =0; +#ifdef MAP_ANONYMOUS + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANONYMOUS; +#endif +#ifdef MAP_ANON + mmap_flag = mmap_flag| MAP_SHARED | MAP_ANON; +#endif +#ifdef MAP_HUGETLB + if ( flags ) mmap_flag |= MAP_HUGETLB; +#endif + ShmCommBuf =(void *) mmap(NULL, bytes, PROT_READ | PROT_WRITE, mmap_flag, -1, 0); + if (ShmCommBuf == (void *)MAP_FAILED) { + perror("mmap failed "); + exit(EXIT_FAILURE); + } +#ifdef MADV_HUGEPAGE + if (!Hugepages ) madvise(ShmCommBuf,bytes,MADV_HUGEPAGE); +#endif + bzero(ShmCommBuf,bytes); + WorldShmCommBufs[0] = ShmCommBuf; + _ShmAllocBytes=bytes; + _ShmAlloc=1; +}; + + //////////////////////////////////////////////////////// + // Global shared functionality finished + // Now move to per communicator functionality + //////////////////////////////////////////////////////// +void SharedMemory::SetCommunicator(Grid_MPI_Comm comm) +{ + assert(GlobalSharedMemory::ShmAlloc()==1); + ShmRanks.resize(1); + ShmCommBufs.resize(1); + ShmRanks[0] = 0; + ShmRank = 0; + ShmSize = 1; + ////////////////////////////////////////////////////////////////////// + // Map ShmRank to WorldShmRank and use the right buffer + ////////////////////////////////////////////////////////////////////// + ShmCommBufs[0] = GlobalSharedMemory::WorldShmCommBufs[0]; + heap_size = GlobalSharedMemory::ShmAllocBytes(); + ShmBufferFreeAll(); + return; +} +////////////////////////////////////////////////////////////////// +// On node barrier +////////////////////////////////////////////////////////////////// +void SharedMemory::ShmBarrier(void){ return ; } + +////////////////////////////////////////////////////////////////////////////////////////////////////////// +// Test the shared memory is working +////////////////////////////////////////////////////////////////////////////////////////////////////////// +void SharedMemory::SharedMemoryTest(void) { return; } + +void *SharedMemory::ShmBuffer(int rank) +{ + return NULL; +} +void *SharedMemory::ShmBufferTranslate(int rank,void * local_p) +{ + return NULL; +} + +} diff --git a/lib/lattice/Lattice_rng.h b/lib/lattice/Lattice_rng.h index 6dc50fd2..edf9dd23 100644 --- a/lib/lattice/Lattice_rng.h +++ b/lib/lattice/Lattice_rng.h @@ -77,9 +77,6 @@ namespace Grid { // merge of April 11 2017 -//<<<<<<< HEAD - - // this function is necessary for the LS vectorised field inline int RNGfillable_general(GridBase *coarse,GridBase *fine) { @@ 
-91,7 +88,6 @@ namespace Grid { // all further divisions are local for(int d=0;d_processors[d]==1); for(int d=0;d_processors[d] == fine->_processors[d+lowerdims]); - // then divide the number of local sites // check that the total number of sims agree, meanse the iSites are the same @@ -102,27 +98,6 @@ namespace Grid { return fine->lSites() / coarse->lSites(); } - - /* - // Wrap seed_seq to give common interface with random_device - class fixedSeed { - public: - typedef std::seed_seq::result_type result_type; - std::seed_seq src; - - fixedSeed(const std::vector &seeds) : src(seeds.begin(),seeds.end()) {}; - - result_type operator () (void){ - std::vector list(1); - src.generate(list.begin(),list.end()); - return list[0]; - } - - }; - -======= ->>>>>>> develop - */ // real scalars are one component template @@ -171,7 +146,7 @@ namespace Grid { // support for parallel init /////////////////////// #ifdef RNG_FAST_DISCARD - static void Skip(RngEngine &eng) + static void Skip(RngEngine &eng,uint64_t site) { ///////////////////////////////////////////////////////////////////////////////////// // Skip by 2^40 elements between successive lattice sites @@ -184,8 +159,11 @@ namespace Grid { // and margin of safety is orders of magnitude. // We could hack Sitmo to skip in the higher order words of state if necessary ///////////////////////////////////////////////////////////////////////////////////// - uint64_t skip = 0x1; skip = skip<<40; + // uint64_t skip = site+1; // Old init Skipped then drew. Checked compat with faster init + uint64_t skip = site; + skip = skip<<40; eng.discard(skip); + // std::cout << " Engine " < gcoor; - int rank,o_idx,i_idx; // Everybody loops over global volume. - for(int gidx=0;gidx<_grid->_gsites;gidx++){ - - Skip(master_engine); // Skip to next RNG sequence + parallel_for(int gidx=0;gidx<_grid->_gsites;gidx++){ // Where is it? 
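The RNG change above replaces "skip as you go" with direct skip-ahead: every global site clones the master engine and discards site*2^40 draws, which parallelises because each site's offset depends only on its own index. A sketch of the per-site step; it presumes an engine with cheap discard (the diff notes Sitmo-style O(1) skipping), since a linear-time discard such as std::mt19937's would make 2^40 prohibitive:

#include <cstdint>

template <class RngEngine>
void InitSiteEngine(RngEngine &site_eng, const RngEngine &master, uint64_t site)
{
  site_eng = master;        // copy the full master state
  uint64_t skip = site;
  skip = skip << 40;        // 2^40 draws separate successive lattice sites
  site_eng.discard(skip);   // O(1) for counter-based engines like Sitmo
}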
+ int rank,o_idx,i_idx; + std::vector gcoor; + _grid->GlobalIndexToGlobalCoor(gidx,gcoor); _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor); @@ -423,6 +400,7 @@ namespace Grid { if( rank == _grid->ThisRank() ){ int l_idx=generator_idx(o_idx,i_idx); _generators[l_idx] = master_engine; + Skip(_generators[l_idx],gidx); // Skip to next RNG sequence } } diff --git a/lib/lattice/Lattice_transfer.h b/lib/lattice/Lattice_transfer.h index cbf31f86..32c15d22 100644 --- a/lib/lattice/Lattice_transfer.h +++ b/lib/lattice/Lattice_transfer.h @@ -50,26 +50,22 @@ inline void subdivides(GridBase *coarse,GridBase *fine) //////////////////////////////////////////////////////////////////////////////////////////// template inline void pickCheckerboard(int cb,Lattice &half,const Lattice &full){ half.checkerboard = cb; - int ssh=0; - //parallel_for - for(int ss=0;ssoSites();ss++){ - std::vector coor; + + parallel_for(int ss=0;ssoSites();ss++){ int cbos; - + std::vector coor; full._grid->oCoorFromOindex(coor,ss); cbos=half._grid->CheckerBoard(coor); if (cbos==cb) { + int ssh=half._grid->oIndex(coor); half._odata[ssh] = full._odata[ss]; - ssh++; } } } template inline void setCheckerboard(Lattice &full,const Lattice &half){ int cb = half.checkerboard; - int ssh=0; - //parallel_for - for(int ss=0;ssoSites();ss++){ + parallel_for(int ss=0;ssoSites();ss++){ std::vector coor; int cbos; @@ -77,8 +73,8 @@ inline void subdivides(GridBase *coarse,GridBase *fine) cbos=half._grid->CheckerBoard(coor); if (cbos==cb) { + int ssh=half._grid->oIndex(coor); full._odata[ss]=half._odata[ssh]; - ssh++; } } } @@ -109,8 +105,8 @@ inline void blockProject(Lattice > &coarseData, coarseData=zero; - // Loop with a cache friendly loop ordering - for(int sf=0;sfoSites();sf++){ + // Loop over coars parallel, and then loop over fine associated with coarse. + parallel_for(int sf=0;sfoSites();sf++){ int sc; std::vector coor_c(_ndimension); @@ -119,8 +115,9 @@ inline void blockProject(Lattice > &coarseData, for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); +PARALLEL_CRITICAL for(int i=0;i &fineZ, GridBase * coarse= coarseA._grid; fineZ.checkerboard=fineX.checkerboard; + assert(fineX.checkerboard==fineY.checkerboard); subdivides(coarse,fine); // require they map conformable(fineX,fineY); conformable(fineX,fineZ); @@ -180,9 +178,10 @@ template GridBase *coarse(CoarseInner._grid); GridBase *fine (fineX._grid); - Lattice fine_inner(fine); + Lattice fine_inner(fine); fine_inner.checkerboard = fineX.checkerboard; Lattice coarse_inner(coarse); + // Precision promotion? 
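The pickCheckerboard/setCheckerboard rewrite earlier in this hunk removes a loop-carried dependence: the old running counter ssh++ forced serial execution, while computing the half-grid offset from the site coordinate (oIndex) makes every iteration independent. The same transformation on a toy 1-D parity pick, where the destination index is simply ss/2:

#include <vector>

// Serial: the shared counter ssh orders the iterations.
void pickParitySerial(int cb, std::vector<double> &half, const std::vector<double> &full)
{
  int ssh = 0;   // half must be sized full.size()/2
  for (std::size_t ss = 0; ss < full.size(); ss++) {
    if ((int)(ss & 1) == cb) half[ssh++] = full[ss];
  }
}

// Parallel: the destination is a pure function of ss, so iterations commute.
void pickParityParallel(int cb, std::vector<double> &half, const std::vector<double> &full)
{
#pragma omp parallel for
  for (long ss = 0; ss < (long)full.size(); ss++) {
    if ((int)(ss & 1) == cb) half[ss / 2] = full[ss];
  }
}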
fine_inner = localInnerProduct(fineX,fineY); blockSum(coarse_inner,fine_inner); parallel_for(int ss=0;ssoSites();ss++){ @@ -193,7 +192,7 @@ template inline void blockNormalise(Lattice &ip,Lattice &fineX) { GridBase *coarse = ip._grid; - Lattice zz(fineX._grid); zz=zero; + Lattice zz(fineX._grid); zz=zero; zz.checkerboard=fineX.checkerboard; blockInnerProduct(ip,fineX,fineX); ip = pow(ip,-0.5); blockZAXPY(fineX,ip,fineX,zz); @@ -216,19 +215,25 @@ inline void blockSum(Lattice &coarseData,const Lattice &fineData) block_r[d] = fine->_rdimensions[d] / coarse->_rdimensions[d]; } + // Turn this around to loop threaded over sc and interior loop + // over sf would thread better coarseData=zero; - for(int sf=0;sfoSites();sf++){ - + parallel_region { + int sc; std::vector coor_c(_ndimension); std::vector coor_f(_ndimension); - Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); - for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; - Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); - - coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf]; + parallel_for_internal(int sf=0;sfoSites();sf++){ + + Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); + for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; + Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); + +PARALLEL_CRITICAL + coarseData._odata[sc]=coarseData._odata[sc]+fineData._odata[sf]; + } } return; } @@ -238,7 +243,7 @@ inline void blockPick(GridBase *coarse,const Lattice &unpicked,Lattice zz(fine); + Lattice zz(fine); zz.checkerboard = unpicked.checkerboard; Lattice > fcoor(fine); zz = zero; @@ -303,20 +308,21 @@ inline void blockPromote(const Lattice > &coarseData, } // Loop with a cache friendly loop ordering - for(int sf=0;sfoSites();sf++){ - + parallel_region { int sc; std::vector coor_c(_ndimension); std::vector coor_f(_ndimension); - Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); - for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; - Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); - - for(int i=0;ioSites();sf++){ + Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions); + for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d]; + Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions); + + for(int i=0;i &out, const Lattice &in){ merge(out._odata[out_oidx], ptrs, 0); } } - + +//////////////////////////////////////////////////////////////////////////////// +// Communicate between grids +//////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SIMPLE CASE: +/////////////////////////////////////////////////////////////////////////////////////////////////////////// +// +// Mesh of nodes (2x2) ; subdivide to 1x1 subdivisions +// +// Lex ord: +// N0 va0 vb0 vc0 vd0 N1 va1 vb1 vc1 vd1 +// N2 va2 vb2 vc2 vd2 N3 va3 vb3 vc3 vd3 +// +// Ratio = full[dim] / split[dim] +// +// For each dimension do an all to all; get Nvec -> Nvec / ratio +// Ldim -> Ldim * ratio +// LocalVol -> LocalVol * ratio +// full AllToAll(0) +// N0 va0 vb0 va1 vb1 N1 vc0 vd0 vc1 vd1 +// N2 va2 vb2 va3 vb3 N3 vc2 vd2 vc3 vd3 +// +// REARRANGE +// N0 va01 vb01 N1 vc01 vd01 +// N2 va23 vb23 N3 vc23 vd23 +// +// full AllToAll(1) // Not what is wanted. 
FIXME +// N0 va01 va23 N1 vc01 vc23 +// N2 vb01 vb23 N3 vd01 vd23 +// +// REARRANGE +// N0 va0123 N1 vc0123 +// N2 vb0123 N3 vd0123 +// +// Must also rearrange data to get into the NEW lex order of grid at each stage. Some kind of "insert/extract". +// NB: Easiest to programme if keep in lex order. +/* + * Let chunk = (fvol*nvec)/sP be size of a chunk. ( Divide lexico vol * nvec into fP/sP = M chunks ) + * + * 2nd A2A (over sP nodes; subdivide the fP into sP chunks of M) + * + * node 0 1st chunk of node 0M..(1M-1); 2nd chunk of node 0M..(1M-1).. data chunk x M x sP = fL / sP * M * sP = fL * M growth + * node 1 1st chunk of node 1M..(2M-1); 2nd chunk of node 1M..(2M-1).. + * node 2 1st chunk of node 2M..(3M-1); 2nd chunk of node 2M..(3M-1).. + * node 3 1st chunk of node 3M..(3M-1); 2nd chunk of node 2M..(3M-1).. + * etc... + */ +template +void Grid_split(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc =split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + uint64_t lsites = full_grid->lSites(); + uint64_t sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + + for(int v=0;v ldims = full_grid->_ldimensions; + + for(int d=ndim-1;d>=0;d--){ + + if ( ratio[d] != 1 ) { + + full_grid ->AllToAll(d,alldata,tmpdata); + if ( split_grid->_processors[d] > 1 ) { + alldata=tmpdata; + split_grid->AllToAll(d,alldata,tmpdata); + } + + auto rdims = ldims; + auto M = ratio[d]; + auto rsites= lsites*M;// increases rsites by M + nvec /= M; // Reduce nvec by subdivision factor + rdims[d] *= M; // increase local dim by same factor + + int sP = split_grid->_processors[d]; + int fP = full_grid->_processors[d]; + + int fvol = lsites; + + int chunk = (nvec*fvol)/sP; assert(chunk*sP == nvec*fvol); + + // Loop over reordered data post A2A + parallel_for(int c=0;c coor(ndim); + for(int m=0;m +void Grid_split(Lattice &full,Lattice & split) +{ + int nvector = full._grid->_Nprocessors / split._grid->_Nprocessors; + std::vector > full_v(nvector,full._grid); + for(int n=0;n +void Grid_unsplit(std::vector > & full,Lattice & split) +{ + typedef typename Vobj::scalar_object Sobj; + + int full_vecs = full.size(); + + assert(full_vecs>=1); + + GridBase * full_grid = full[0]._grid; + GridBase *split_grid = split._grid; + + int ndim = full_grid->_ndimension; + int full_nproc = full_grid->_Nprocessors; + int split_nproc =split_grid->_Nprocessors; + + //////////////////////////////// + // Checkerboard management + //////////////////////////////// + int cb = full[0].checkerboard; + split.checkerboard = cb; + + ////////////////////////////// + // Checks + ////////////////////////////// + 
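The chunk bookkeeping in Grid_split above maintains two invariants at every AllToAll step: the chunks must tile the send buffer exactly, and the product of vector count and local volume is conserved as one shrinks by M while the other grows by M. A worked numeric check with illustrative sizes:

#include <cassert>
#include <cstdint>

int main(void)
{
  uint64_t fP = 4, sP = 2;          // full and split processor counts in one dimension
  uint64_t M  = fP / sP;            // subdivision ratio
  uint64_t nvec = 4;                // RHS vectors before the step
  uint64_t fvol = 16;               // local sites before the step

  uint64_t chunk = (nvec * fvol) / sP;
  assert(chunk * sP == nvec * fvol);      // chunks must tile the send buffer exactly

  uint64_t rsites = fvol * M;             // local volume grows by M
  uint64_t rnvec  = nvec / M;             // vector count shrinks by M
  assert(rnvec * rsites == nvec * fvol);  // total data per rank is invariant
  return 0;
}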
assert(full_grid->_ndimension==split_grid->_ndimension); + for(int n=0;n_gdimensions[d]==split._grid->_gdimensions[d]); + assert(full[n]._grid->_fdimensions[d]==split._grid->_fdimensions[d]); + } + } + + int nvector =full_nproc/split_nproc; + assert(nvector*split_nproc==full_nproc); + assert(nvector == full_vecs); + + std::vector ratio(ndim); + for(int d=0;d_processors[d]/ split_grid->_processors[d]; + } + + uint64_t lsites = full_grid->lSites(); + uint64_t sz = lsites * nvector; + std::vector tmpdata(sz); + std::vector alldata(sz); + std::vector scalardata(lsites); + + unvectorizeToLexOrdArray(alldata,split); + + ///////////////////////////////////////////////////////////////// + // Start from split grid and work towards full grid + ///////////////////////////////////////////////////////////////// + + int nvec = 1; + uint64_t rsites = split_grid->lSites(); + std::vector rdims = split_grid->_ldimensions; + + for(int d=0;d_processors[d]; + int fP = full_grid->_processors[d]; + + auto ldims = rdims; ldims[d] /= M; // Decrease local dims by same factor + auto lsites= rsites/M; // Decreases rsites by M + + int fvol = lsites; + int chunk = (nvec*fvol)/sP; assert(chunk*sP == nvec*fvol); + + { + // Loop over reordered data post A2A + parallel_for(int c=0;c coor(ndim); + for(int m=0;m_processors[d] > 1 ) { + split_grid->AllToAll(d,tmpdata,alldata); + tmpdata=alldata; + } + full_grid ->AllToAll(d,tmpdata,alldata); + rdims[d]/= M; + rsites /= M; + nvec *= M; // Increase nvec by subdivision factor + } + } + + lsites = full_grid->lSites(); + for(int v=0;v &logstreams) { GridLogError.Active(0); diff --git a/lib/log/Log.h b/lib/log/Log.h index 74d080bb..011a7250 100644 --- a/lib/log/Log.h +++ b/lib/log/Log.h @@ -85,12 +85,16 @@ class Logger { protected: Colours &Painter; int active; + int timing_mode; + int topWidth{-1}; static int timestamp; std::string name, topName; std::string COLOUR; public: - static GridStopWatch StopWatch; + static GridStopWatch GlobalStopWatch; + GridStopWatch LocalStopWatch; + GridStopWatch *StopWatch; static std::ostream devnull; std::string background() {return Painter.colour["NORMAL"];} @@ -101,22 +105,44 @@ public: name(nm), topName(topNm), Painter(col_class), - COLOUR(col) {} ; + timing_mode(0), + COLOUR(col) + { + StopWatch = & GlobalStopWatch; + }; void Active(int on) {active = on;}; int isActive(void) {return active;}; static void Timestamp(int on) {timestamp = on;}; - + void Reset(void) { + StopWatch->Reset(); + StopWatch->Start(); + } + void TimingMode(int on) { + timing_mode = on; + if(on) { + StopWatch = &LocalStopWatch; + Reset(); + } + } + void setTopWidth(const int w) {topWidth = w;} + friend std::ostream& operator<< (std::ostream& stream, Logger& log){ if ( log.active ) { - stream << log.background()<< std::setw(8) << std::left << log.topName << log.background()<< " : "; - stream << log.colour() << std::setw(10) << std::left << log.name << log.background() << " : "; + stream << log.background()<< std::left; + if (log.topWidth > 0) + { + stream << std::setw(log.topWidth); + } + stream << log.topName << log.background()<< " : "; + stream << log.colour() << std::left << log.name << log.background() << " : "; if ( log.timestamp ) { - StopWatch.Stop(); - GridTime now = StopWatch.Elapsed(); - StopWatch.Start(); - stream << log.evidence()<< now << log.background() << " : " ; + log.StopWatch->Stop(); + GridTime now = log.StopWatch->Elapsed(); + if ( log.timing_mode==1 ) log.StopWatch->Reset(); + log.StopWatch->Start(); + stream << log.evidence()<< std::setw(6)< 
&logstreams); +extern GridLogger GridLogIRL; +extern GridLogger GridLogSolver; extern GridLogger GridLogError; extern GridLogger GridLogWarning; extern GridLogger GridLogMessage; diff --git a/lib/parallelIO/BinaryIO.h b/lib/parallelIO/BinaryIO.h index d14f3fe2..b40a75af 100644 --- a/lib/parallelIO/BinaryIO.h +++ b/lib/parallelIO/BinaryIO.h @@ -261,7 +261,7 @@ class BinaryIO { GridBase *grid, std::vector &iodata, std::string file, - int offset, + Integer offset, const std::string &format, int control, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -356,7 +356,7 @@ class BinaryIO { if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) { #ifdef USE_MPI_IO - std::cout<< GridLogMessage<< "MPI read I/O "<< file<< std::endl; + std::cout<< GridLogMessage<<"IOobject: MPI read I/O "<< file<< std::endl; ierr=MPI_File_open(grid->communicator,(char *) file.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &fh); assert(ierr==0); ierr=MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL); assert(ierr==0); ierr=MPI_File_read_all(fh, &iodata[0], 1, localArray, &status); assert(ierr==0); @@ -367,7 +367,7 @@ class BinaryIO { assert(0); #endif } else { - std::cout << GridLogMessage << "C++ read I/O " << file << " : " + std::cout << GridLogMessage <<"IOobject: C++ read I/O " << file << " : " << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ifstream fin; fin.open(file, std::ios::binary | std::ios::in); @@ -413,9 +413,9 @@ class BinaryIO { timer.Start(); if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) { #ifdef USE_MPI_IO - std::cout << GridLogMessage << "MPI write I/O " << file << std::endl; + std::cout << GridLogMessage <<"IOobject: MPI write I/O " << file << std::endl; ierr = MPI_File_open(grid->communicator, (char *)file.c_str(), MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh); - std::cout << GridLogMessage << "Checking for errors" << std::endl; + // std::cout << GridLogMessage << "Checking for errors" << std::endl; if (ierr != MPI_SUCCESS) { char error_string[BUFSIZ]; @@ -444,48 +444,56 @@ class BinaryIO { assert(0); #endif } else { + + std::cout << GridLogMessage << "IOobject: C++ write I/O " << file << " : " + << iodata.size() * sizeof(fobj) << " bytes" << std::endl; std::ofstream fout; - fout.exceptions ( std::fstream::failbit | std::fstream::badbit ); - try { - fout.open(file,std::ios::binary|std::ios::out|std::ios::in); - } catch (const std::fstream::failure& exc) { - std::cout << GridLogError << "Error in opening the file " << file << " for output" < &Umu, std::string file, munger munge, - int offset, + Integer offset, const std::string &format, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -552,7 +560,7 @@ class BinaryIO { static inline void writeLatticeObject(Lattice &Umu, std::string file, munger munge, - int offset, + Integer offset, const std::string &format, uint32_t &nersc_csum, uint32_t &scidac_csuma, @@ -589,7 +597,7 @@ class BinaryIO { static inline void readRNG(GridSerialRNG &serial, GridParallelRNG ¶llel, std::string file, - int offset, + Integer offset, uint32_t &nersc_csum, uint32_t &scidac_csuma, uint32_t &scidac_csumb) @@ -651,7 +659,7 @@ class BinaryIO { static inline void writeRNG(GridSerialRNG &serial, GridParallelRNG ¶llel, std::string file, - int offset, + Integer offset, uint32_t &nersc_csum, uint32_t &scidac_csuma, uint32_t &scidac_csumb) diff --git a/lib/parallelIO/IldgIO.h b/lib/parallelIO/IldgIO.h index ba71153d..b86e250f 100644 --- a/lib/parallelIO/IldgIO.h +++ b/lib/parallelIO/IldgIO.h @@ -147,7 +147,7 @@ namespace QCD { 
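The C++ write path reworked in BinaryIO.h above must cope with a file that does not yet exist: opening in update mode (out|in) fails on a missing file, so the code falls back to creating it, then seeks to the record offset -- a 64-bit Integer now, since int offsets overflow on multi-GB configurations. A sketch of that open-or-create step under those assumptions:

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

void OpenForUpdate(std::ofstream &fout, const std::string &file, uint64_t offset)
{
  fout.exceptions(std::fstream::failbit | std::fstream::badbit);
  try {
    // Update an existing file in place; fails if it does not exist.
    fout.open(file, std::ios::binary | std::ios::out | std::ios::in);
  } catch (const std::fstream::failure &) {
    std::cout << "open-for-update failed; creating " << file << std::endl;
    fout.clear();   // reset failbit before retrying
    fout.open(file, std::ios::binary | std::ios::out);   // create afresh
  }
  fout.seekp(offset);   // 64-bit seek: the reason offsets moved from int to Integer
}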
_scidacRecord = sr; - std::cout << GridLogMessage << "Build SciDAC datatype " < munge; BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); @@ -237,7 +237,7 @@ class GridLimeReader : public BinaryIO { ///////////////////////////////////////////// // Verify checksums ///////////////////////////////////////////// - scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb); + assert(scidacChecksumVerify(scidacChecksum_,scidac_csuma,scidac_csumb)==1); return; } } @@ -253,16 +253,13 @@ class GridLimeReader : public BinaryIO { while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) { // std::cout << GridLogMessage<< " readLimeObject seeking "<< record_name <<" found record :" < xmlc(nbytes+1,'\0'); limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR); - // std::cout << GridLogMessage<< " readLimeObject matches XML " << &xmlc[0] <=0); err=limeWriterCloseRecord(LimeW); assert(err>=0); limeDestroyHeader(h); - // std::cout << " File offset is now"<_gsites; createLimeRecordHeader(record_name, 0, 0, PayloadSize); - // std::cout << "W sizeof(sobj)" <_gsites<(); BinarySimpleMunger munge; BinaryIO::writeLatticeObject(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); + // fseek(File,0,SEEK_END); offset = ftello(File);std::cout << " offset now "<=0); + //////////////////////////////////////// // Write checksum element, propagaing forward from the BinaryIO // Always pair a checksum with a binary object, and close message @@ -382,7 +380,7 @@ class GridLimeWriter : public BinaryIO { std::stringstream streamb; streamb << std::hex << scidac_csumb; checksum.suma= streama.str(); checksum.sumb= streamb.str(); - std::cout << GridLogMessage<<" writing scidac checksums "< xmlc(nbytes+1,'\0'); limeReaderReadData((void *)&xmlc[0], &nbytes, LimeR); - std::cout << GridLogMessage<< "Non binary record :" < munge; BinaryIO::readLatticeObject< vobj, dobj >(Umu, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb); diff --git a/lib/qcd/action/fermion/DomainWallEOFAFermion.cc b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc index dd8a500d..37ab5fa6 100644 --- a/lib/qcd/action/fermion/DomainWallEOFAFermion.cc +++ b/lib/qcd/action/fermion/DomainWallEOFAFermion.cc @@ -61,10 +61,10 @@ namespace QCD { } /*************************************************************** - /* Additional EOFA operators only called outside the inverter. - /* Since speed is not essential, simple axpby-style - /* implementations should be fine. - /***************************************************************/ + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. 
+ ***************************************************************/ template void DomainWallEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) { @@ -116,8 +116,8 @@ namespace QCD { } /******************************************************************** - /* Performance critical fermion operators called inside the inverter - /********************************************************************/ + * Performance critical fermion operators called inside the inverter + ********************************************************************/ template void DomainWallEOFAFermion::M5D(const FermionField& psi, FermionField& chi) diff --git a/lib/qcd/action/fermion/FermionOperator.h b/lib/qcd/action/fermion/FermionOperator.h index 144b70f6..1d395d53 100644 --- a/lib/qcd/action/fermion/FermionOperator.h +++ b/lib/qcd/action/fermion/FermionOperator.h @@ -47,6 +47,7 @@ namespace Grid { INHERIT_IMPL_TYPES(Impl); FermionOperator(const ImplParams &p= ImplParams()) : Impl(p) {}; + virtual ~FermionOperator(void) = default; virtual FermionField &tmp(void) = 0; diff --git a/lib/qcd/action/fermion/MobiusEOFAFermion.cc b/lib/qcd/action/fermion/MobiusEOFAFermion.cc index 085fa988..0344afbf 100644 --- a/lib/qcd/action/fermion/MobiusEOFAFermion.cc +++ b/lib/qcd/action/fermion/MobiusEOFAFermion.cc @@ -77,11 +77,11 @@ namespace QCD { } } - /*************************************************************** - /* Additional EOFA operators only called outside the inverter. - /* Since speed is not essential, simple axpby-style - /* implementations should be fine. - /***************************************************************/ + /**************************************************************** + * Additional EOFA operators only called outside the inverter. + * Since speed is not essential, simple axpby-style + * implementations should be fine. 
+ ***************************************************************/ template void MobiusEOFAFermion::Omega(const FermionField& psi, FermionField& Din, int sign, int dag) { @@ -194,8 +194,8 @@ namespace QCD { } /******************************************************************** - /* Performance critical fermion operators called inside the inverter - /********************************************************************/ + * Performance critical fermion operators called inside the inverter + ********************************************************************/ template void MobiusEOFAFermion::M5D(const FermionField& psi, FermionField& chi) diff --git a/lib/qcd/action/fermion/WilsonCompressor.h b/lib/qcd/action/fermion/WilsonCompressor.h index cc5c3c63..b47700ac 100644 --- a/lib/qcd/action/fermion/WilsonCompressor.h +++ b/lib/qcd/action/fermion/WilsonCompressor.h @@ -265,7 +265,6 @@ public: if ( timer3 ) std::cout << GridLogMessage << " timer3 (commsMergeShm) " < same_node; std::vector surface_list; diff --git a/lib/qcd/action/scalar/ScalarImpl.h b/lib/qcd/action/scalar/ScalarImpl.h index f85ab840..55f5049d 100644 --- a/lib/qcd/action/scalar/ScalarImpl.h +++ b/lib/qcd/action/scalar/ScalarImpl.h @@ -16,12 +16,12 @@ class ScalarImplTypes { typedef iImplField SiteField; typedef SiteField SitePropagator; typedef SiteField SiteComplex; - + typedef Lattice Field; typedef Field ComplexField; typedef Field FermionField; typedef Field PropagatorField; - + static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){ gaussian(pRNG, P); } @@ -47,54 +47,60 @@ class ScalarImplTypes { static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) { U = 1.0; } - + static void MomentumSpacePropagator(Field &out, RealD m) { GridBase *grid = out._grid; Field kmu(grid), one(grid); const unsigned int nd = grid->_ndimension; std::vector &l = grid->_fdimensions; - + one = Complex(1.0,0.0); out = m*m; for(int mu = 0; mu < nd; mu++) { Real twoPiL = M_PI*2./l[mu]; - + LatticeCoordinate(kmu,mu); kmu = 2.*sin(.5*twoPiL*kmu); out = out + kmu*kmu; } out = one/out; } - + static void FreePropagator(const Field &in, Field &out, const Field &momKernel) { FFT fft((GridCartesian *)in._grid); Field inFT(in._grid); - + fft.FFT_all_dim(inFT, in, FFT::forward); inFT = inFT*momKernel; fft.FFT_all_dim(out, inFT, FFT::backward); } - + static void FreePropagator(const Field &in, Field &out, RealD m) { Field momKernel(in._grid); - + MomentumSpacePropagator(momKernel, m); FreePropagator(in, out, momKernel); } - + }; + #ifdef USE_FFT_ACCELERATION + #ifndef FFT_MASS + #error "USE_FFT_ACCELERATION is defined but not FFT_MASS" + #endif + #endif + template class ScalarAdjMatrixImplTypes { public: typedef S Simd; typedef QCD::SU Group; - + template using iImplField = iScalar>>; template @@ -103,24 +109,119 @@ class ScalarImplTypes { typedef iImplField SiteField; typedef SiteField SitePropagator; typedef iImplComplex SiteComplex; - + typedef Lattice Field; typedef Lattice ComplexField; typedef Field FermionField; typedef Field PropagatorField; - static inline void generate_momenta(Field& P, GridParallelRNG& pRNG) { + static void MomentaSquare(ComplexField &out) + { + GridBase *grid = out._grid; + const std::vector &l = grid->FullDimensions(); + ComplexField kmu(grid); + + for (int mu = 0; mu < grid->Nd(); mu++) + { + Real twoPiL = M_PI * 2.0 / l[mu]; + LatticeCoordinate(kmu, mu); + kmu = 2.0 * sin(0.5 * twoPiL * kmu); + out += kmu * kmu; + } + } + + static void MomentumSpacePropagator(ComplexField &out, RealD m) + { + GridBase 
*grid = out._grid; + ComplexField one(grid); + one = Complex(1.0, 0.0); + out = m * m; + MomentaSquare(out); + out = one / out; + } + + static inline void generate_momenta(Field &P, GridParallelRNG &pRNG) + { +#ifndef USE_FFT_ACCELERATION Group::GaussianFundamentalLieAlgebraMatrix(pRNG, P); +#else + + Field Pgaussian(P._grid), Pp(P._grid); + ComplexField p2(P._grid); p2 = zero; + RealD M = FFT_MASS; + + Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Pgaussian); + + FFT theFFT((GridCartesian*)P._grid); + theFFT.FFT_all_dim(Pp, Pgaussian, FFT::forward); + MomentaSquare(p2); + p2 += M * M; + p2 = sqrt(p2); + Pp *= p2; + theFFT.FFT_all_dim(P, Pp, FFT::backward); + +#endif //USE_FFT_ACCELERATION } static inline Field projectForce(Field& P) {return P;} - static inline void update_field(Field& P, Field& U, double ep) { - U += P*ep; + static inline void update_field(Field &P, Field &U, double ep) + { +#ifndef USE_FFT_ACCELERATION + double t0=usecond(); + U += P * ep; + double t1=usecond(); + double total_time = (t1-t0)/1e6; + std::cout << GridLogIntegrator << "Total time for updating field (s) : " << total_time << std::endl; +#else + // FFT transform P(x) -> P(p) + // divide by (M^2+p^2) M external parameter (how to pass?) + // P'(p) = P(p)/(M^2+p^2) + // Transform back -> P'(x) + // U += P'(x)*ep + + Field Pp(U._grid), P_FFT(U._grid); + static ComplexField p2(U._grid); + RealD M = FFT_MASS; + + FFT theFFT((GridCartesian*)U._grid); + theFFT.FFT_all_dim(Pp, P, FFT::forward); + + static bool first_call = true; + if (first_call) + { + // avoid recomputing + MomentumSpacePropagator(p2, M); + first_call = false; + } + Pp *= p2; + theFFT.FFT_all_dim(P_FFT, Pp, FFT::backward); + U += P_FFT * ep; + +#endif //USE_FFT_ACCELERATION } - static inline RealD FieldSquareNorm(Field& U) { - return (TensorRemove(sum(trace(U*U))).real()); + static inline RealD FieldSquareNorm(Field &U) + { +#ifndef USE_FFT_ACCELERATION + return (TensorRemove(sum(trace(U * U))).real()); +#else + // In case of Fourier acceleration we have to: + // compute U(p)*U(p)/(M^2+p^2)) Parseval theorem + // 1 FFT needed U(x) -> U(p) + // M to be passed + + FFT theFFT((GridCartesian*)U._grid); + Field Up(U._grid); + + theFFT.FFT_all_dim(Up, U, FFT::forward); + RealD M = FFT_MASS; + ComplexField p2(U._grid); + MomentumSpacePropagator(p2, M); + Field Up2 = Up * p2; + // from the definition of the DFT we need to divide by the volume + return (-TensorRemove(sum(trace(adj(Up) * Up2))).real() / U._grid->gSites()); +#endif //USE_FFT_ACCELERATION } static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) { @@ -146,7 +247,7 @@ class ScalarImplTypes { typedef ScalarImplTypes ScalarImplCR; typedef ScalarImplTypes ScalarImplCF; typedef ScalarImplTypes ScalarImplCD; - + // Hardcoding here the size of the matrices typedef ScalarAdjMatrixImplTypes ScalarAdjImplR; typedef ScalarAdjMatrixImplTypes ScalarAdjImplF; @@ -155,7 +256,7 @@ class ScalarImplTypes { template using ScalarNxNAdjImplR = ScalarAdjMatrixImplTypes; template using ScalarNxNAdjImplF = ScalarAdjMatrixImplTypes; template using ScalarNxNAdjImplD = ScalarAdjMatrixImplTypes; - + //} } diff --git a/lib/qcd/action/scalar/ScalarInteractionAction.h b/lib/qcd/action/scalar/ScalarInteractionAction.h index 4d189352..8738b647 100644 --- a/lib/qcd/action/scalar/ScalarInteractionAction.h +++ b/lib/qcd/action/scalar/ScalarInteractionAction.h @@ -30,119 +30,179 @@ directory #ifndef SCALAR_INT_ACTION_H #define SCALAR_INT_ACTION_H - // Note: this action can completely absorb the ScalarAction for real 
float fields // use the scalarObjs to generalise the structure -namespace Grid { - // FIXME drop the QCD namespace everywhere here +namespace Grid +{ +// FIXME drop the QCD namespace everywhere here - template - class ScalarInteractionAction : public QCD::Action { - public: - INHERIT_FIELD_TYPES(Impl); - private: - RealD mass_square; - RealD lambda; +template +class ScalarInteractionAction : public QCD::Action +{ +public: + INHERIT_FIELD_TYPES(Impl); +private: + RealD mass_square; + RealD lambda; + RealD g; + const unsigned int N = Impl::Group::Dimension; - typedef typename Field::vector_object vobj; - typedef CartesianStencil Stencil; + typedef typename Field::vector_object vobj; + typedef CartesianStencil Stencil; - SimpleCompressor compressor; - int npoint = 2*Ndim; - std::vector directions;// = {0,1,2,3,0,1,2,3}; // forcing 4 dimensions - std::vector displacements;// = {1,1,1,1, -1,-1,-1,-1}; + SimpleCompressor compressor; + int npoint = 2 * Ndim; + std::vector directions; // + std::vector displacements; // - - public: - - ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2*Ndim,0), directions(2*Ndim,0){ - for (int mu = 0 ; mu < Ndim; mu++){ - directions[mu] = mu; directions[mu+Ndim] = mu; - displacements[mu] = 1; displacements[mu+Ndim] = -1; - } +public: + ScalarInteractionAction(RealD ms, RealD l, RealD gval) : mass_square(ms), lambda(l), g(gval), displacements(2 * Ndim, 0), directions(2 * Ndim, 0) + { + for (int mu = 0; mu < Ndim; mu++) + { + directions[mu] = mu; + directions[mu + Ndim] = mu; + displacements[mu] = 1; + displacements[mu + Ndim] = -1; } + } - virtual std::string LogParameters() { - std::stringstream sstream; - sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl; - sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl; - return sstream.str(); - } + virtual std::string LogParameters() + { + std::stringstream sstream; + sstream << GridLogMessage << "[ScalarAction] lambda : " << lambda << std::endl; + sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl; + sstream << GridLogMessage << "[ScalarAction] g : " << g << std::endl; + return sstream.str(); + } - virtual std::string action_name() {return "ScalarAction";} + virtual std::string action_name() { return "ScalarAction"; } - virtual void refresh(const Field &U, GridParallelRNG &pRNG) {} + virtual void refresh(const Field &U, GridParallelRNG &pRNG) {} - virtual RealD S(const Field &p) { - assert(p._grid->Nd() == Ndim); - static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); - phiStencil.HaloExchange(p, compressor); - Field action(p._grid), pshift(p._grid), phisquared(p._grid); - phisquared = p*p; - action = (2.0*Ndim + mass_square)*phisquared - lambda/24.*phisquared*phisquared; - for (int mu = 0; mu < Ndim; mu++) { - // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils - parallel_for (int i = 0; i < p._grid->oSites(); i++) { - int permute_type; - StencilEntry *SE; - vobj temp2; - const vobj *temp, *t_p; - - SE = phiStencil.GetEntry(permute_type, mu, i); - t_p = &p._odata[i]; - if ( SE->_is_local ) { - temp = &p._odata[SE->_offset]; - if ( SE->_permute ) { - permute(temp2, *temp, permute_type); - action._odata[i] -= temp2*(*t_p) + (*t_p)*temp2; - } else { - action._odata[i] -= (*temp)*(*t_p) + (*t_p)*(*temp); - } - } else { - action._odata[i] -= phiStencil.CommBuf()[SE->_offset]*(*t_p) + (*t_p)*phiStencil.CommBuf()[SE->_offset]; - } - } - // action -= 
pshift*p + p*pshift; - } - // NB the trace in the algebra is normalised to 1/2 - // minus sign coming from the antihermitian fields - return -(TensorRemove(sum(trace(action)))).real(); - }; - - virtual void deriv(const Field &p, Field &force) { - assert(p._grid->Nd() == Ndim); - force = (2.0*Ndim + mass_square)*p - lambda/12.*p*p*p; - // move this outside - static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); - phiStencil.HaloExchange(p, compressor); - - //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1); - for (int point = 0; point < npoint; point++) { - parallel_for (int i = 0; i < p._grid->oSites(); i++) { - const vobj *temp; - vobj temp2; - int permute_type; - StencilEntry *SE; - SE = phiStencil.GetEntry(permute_type, point, i); - - if ( SE->_is_local ) { - temp = &p._odata[SE->_offset]; - if ( SE->_permute ) { - permute(temp2, *temp, permute_type); - force._odata[i] -= temp2; - } else { - force._odata[i] -= *temp; - } - } else { - force._odata[i] -= phiStencil.CommBuf()[SE->_offset]; - } - } + virtual RealD S(const Field &p) + { + assert(p._grid->Nd() == Ndim); + static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); + phiStencil.HaloExchange(p, compressor); + Field action(p._grid), pshift(p._grid), phisquared(p._grid); + phisquared = p * p; + action = (2.0 * Ndim + mass_square) * phisquared - lambda * phisquared * phisquared; + for (int mu = 0; mu < Ndim; mu++) + { + // pshift = Cshift(p, mu, +1); // not efficient, implement with stencils + parallel_for(int i = 0; i < p._grid->oSites(); i++) + { + int permute_type; + StencilEntry *SE; + vobj temp2; + const vobj *temp, *t_p; + + SE = phiStencil.GetEntry(permute_type, mu, i); + t_p = &p._odata[i]; + if (SE->_is_local) + { + temp = &p._odata[SE->_offset]; + if (SE->_permute) + { + permute(temp2, *temp, permute_type); + action._odata[i] -= temp2 * (*t_p) + (*t_p) * temp2; + } + else + { + action._odata[i] -= (*temp) * (*t_p) + (*t_p) * (*temp); + } + } + else + { + action._odata[i] -= phiStencil.CommBuf()[SE->_offset] * (*t_p) + (*t_p) * phiStencil.CommBuf()[SE->_offset]; + } } + // action -= pshift*p + p*pshift; } + // NB the trace in the algebra is normalised to 1/2 + // minus sign coming from the antihermitian fields + return -(TensorRemove(sum(trace(action)))).real() * N / g; }; - -} // namespace Grid -#endif // SCALAR_INT_ACTION_H + virtual void deriv(const Field &p, Field &force) + { + double t0 = usecond(); + assert(p._grid->Nd() == Ndim); + force = (2. * Ndim + mass_square) * p - 2. * lambda * p * p * p; + double interm_t = usecond(); + + // move this outside + static Stencil phiStencil(p._grid, npoint, 0, directions, displacements); + + phiStencil.HaloExchange(p, compressor); + double halo_t = usecond(); + int chunk = 128; + //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1); + + // inverting the order of the loops slows down the code(! g++ 7) + // cannot try to reduce the number of force writes by factor npoint... + // use cache blocking + for (int point = 0; point < npoint; point++) + { + +#pragma omp parallel +{ + int permute_type; + StencilEntry *SE; + const vobj *temp; + +#pragma omp for schedule(static, chunk) + for (int i = 0; i < p._grid->oSites(); i++) + { + SE = phiStencil.GetEntry(permute_type, point, i); + // prefetch next p? 
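// (Orientation for the branch that follows: each stencil entry either
// points at a local neighbour in p._odata, which may still need a SIMD
// permute to realign vector lanes across the sub-lattice boundary, or
// at halo data already gathered into phiStencil.CommBuf() by the
// HaloExchange call above.)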
+ + if (SE->_is_local) + { + temp = &p._odata[SE->_offset]; + + if (SE->_permute) + { + vobj temp2; + permute(temp2, *temp, permute_type); + force._odata[i] -= temp2; + } + else + { + force._odata[i] -= *temp; // slow part. Dominated by this read/write (BW) + } + } + else + { + force._odata[i] -= phiStencil.CommBuf()[SE->_offset]; + } + } + + } + } + force *= N / g; + + double t1 = usecond(); + double total_time = (t1 - t0) / 1e6; + double interm_time = (interm_t - t0) / 1e6; + double halo_time = (halo_t - interm_t) / 1e6; + double stencil_time = (t1 - halo_t) / 1e6; + std::cout << GridLogIntegrator << "Total time for force computation (s) : " << total_time << std::endl; + std::cout << GridLogIntegrator << "Intermediate time for force computation (s): " << interm_time << std::endl; + std::cout << GridLogIntegrator << "Halo time in force computation (s) : " << halo_time << std::endl; + std::cout << GridLogIntegrator << "Stencil time in force computation (s) : " << stencil_time << std::endl; + double flops = p._grid->gSites() * (14 * N * N * N + 18 * N * N + 2); + double flops_no_stencil = p._grid->gSites() * (14 * N * N * N + 6 * N * N + 2); + double Gflops = flops / (total_time * 1e9); + double Gflops_no_stencil = flops_no_stencil / (interm_time * 1e9); + std::cout << GridLogIntegrator << "Flops: " << flops << " - Gflop/s : " << Gflops << std::endl; + std::cout << GridLogIntegrator << "Flops NS: " << flops_no_stencil << " - Gflop/s NS: " << Gflops_no_stencil << std::endl; +} +}; + +} // namespace Grid + +#endif // SCALAR_INT_ACTION_H diff --git a/lib/qcd/hmc/GenericHMCrunner.h b/lib/qcd/hmc/GenericHMCrunner.h index 4f6c1af0..26fec3d5 100644 --- a/lib/qcd/hmc/GenericHMCrunner.h +++ b/lib/qcd/hmc/GenericHMCrunner.h @@ -211,7 +211,7 @@ typedef HMCWrapperTemplate ScalarAdjGenericHMCRunner; template -using ScalarNxNAdjGenericHMCRunner = HMCWrapperTemplate < ScalarNxNAdjImplR, MinimumNorm2, ScalarNxNMatrixFields >; +using ScalarNxNAdjGenericHMCRunner = HMCWrapperTemplate < ScalarNxNAdjImplR, ForceGradient, ScalarNxNMatrixFields >; } // namespace QCD } // namespace Grid diff --git a/lib/qcd/hmc/integrators/Integrator_algorithm.h b/lib/qcd/hmc/integrators/Integrator_algorithm.h index ecc125ef..13a37aeb 100644 --- a/lib/qcd/hmc/integrators/Integrator_algorithm.h +++ b/lib/qcd/hmc/integrators/Integrator_algorithm.h @@ -231,7 +231,7 @@ class ForceGradient : public Integrator } } template - static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + static void ColdConfiguration(GaugeField &out){ typedef typename GaugeField::vector_type vector_type; typedef iSUnMatrix vMatrixType; typedef Lattice LatticeMatrixType; @@ -757,6 +757,10 @@ template PokeIndex(out,Umu,mu); } } + template + static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){ + ColdConfiguration(out); + } template static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){ diff --git a/lib/serialisation/JSON_IO.cc b/lib/serialisation/JSON_IO.cc index 99a9cdd6..6a01aa84 100644 --- a/lib/serialisation/JSON_IO.cc +++ b/lib/serialisation/JSON_IO.cc @@ -25,7 +25,7 @@ See the full license in the file "LICENSE" in the top level distribution directory *************************************************************************************/ /* END LEGAL */ -#include +#include using namespace Grid; using namespace std; diff --git a/lib/serialisation/MacroMagic.h b/lib/serialisation/MacroMagic.h index 774c947f..5df2c780 100644 --- a/lib/serialisation/MacroMagic.h +++ b/lib/serialisation/MacroMagic.h @@ -125,7 
+125,11 @@ static inline void write(Writer &WR,const std::string &s, const cname &obj){ }\ template \ static inline void read(Reader &RD,const std::string &s, cname &obj){ \ - push(RD,s);\ + if (!push(RD,s))\ + {\ + std::cout << Grid::GridLogWarning << "IO: Cannot open node '" << s << "'" << std::endl;\ + return;\ + };\ GRID_MACRO_EVAL(GRID_MACRO_MAP(GRID_MACRO_READ_MEMBER,__VA_ARGS__)) \ pop(RD);\ }\ diff --git a/lib/serialisation/XmlIO.cc b/lib/serialisation/XmlIO.cc index a132a2f0..8ac7422c 100644 --- a/lib/serialisation/XmlIO.cc +++ b/lib/serialisation/XmlIO.cc @@ -70,8 +70,8 @@ XmlReader::XmlReader(const char *xmlstring,string toplev) : fileName_("") pugi::xml_parse_result result; result = doc_.load_string(xmlstring); if ( !result ) { - cerr << "XML error description: " << result.description() << "\n"; - cerr << "XML error offset : " << result.offset << "\n"; + cerr << "XML error description (from char *): " << result.description() << "\nXML\n"<< xmlstring << "\n"; + cerr << "XML error offset (from char *) " << result.offset << "\nXML\n"<< xmlstring <<"\n"; abort(); } if ( toplev == std::string("") ) { @@ -87,8 +87,8 @@ XmlReader::XmlReader(const string &fileName,string toplev) : fileName_(fileName) pugi::xml_parse_result result; result = doc_.load_file(fileName_.c_str()); if ( !result ) { - cerr << "XML error description: " << result.description() << "\n"; - cerr << "XML error offset : " << result.offset << "\n"; + cerr << "XML error description: " << result.description() <<" "<< fileName_ <<"\n"; + cerr << "XML error offset : " << result.offset <<" "<< fileName_ <<"\n"; abort(); } if ( toplev == std::string("") ) { @@ -100,13 +100,16 @@ XmlReader::XmlReader(const string &fileName,string toplev) : fileName_(fileName) bool XmlReader::push(const string &s) { + if (node_.child(s.c_str())) + { + node_ = node_.child(s.c_str()); - if (node_.child(s.c_str()) == NULL ) + return true; + } + else + { return false; - - node_ = node_.child(s.c_str()); - return true; - + } } void XmlReader::pop(void) @@ -117,20 +120,30 @@ void XmlReader::pop(void) bool XmlReader::nextElement(const std::string &s) { if (node_.next_sibling(s.c_str())) - { - node_ = node_.next_sibling(s.c_str()); - - return true; - } + { + node_ = node_.next_sibling(s.c_str()); + + return true; + } else - { - return false; - } + { + return false; + } } template <> void XmlReader::readDefault(const string &s, string &output) { - output = node_.child(s.c_str()).first_child().value(); + if (node_.child(s.c_str())) + { + output = node_.child(s.c_str()).first_child().value(); + } + else + { + std::cout << GridLogWarning << "XML: cannot open node '" << s << "'"; + std::cout << std::endl; + + output = ""; + } } diff --git a/lib/serialisation/XmlIO.h b/lib/serialisation/XmlIO.h index fcdbf1e4..e37eb8d9 100644 --- a/lib/serialisation/XmlIO.h +++ b/lib/serialisation/XmlIO.h @@ -39,6 +39,7 @@ Author: paboyle #include #include +#include namespace Grid { @@ -119,7 +120,6 @@ namespace Grid std::string buf; readDefault(s, buf); - // std::cout << s << " " << buf << std::endl; fromString(output, buf); } @@ -132,7 +132,13 @@ namespace Grid std::string buf; unsigned int i = 0; - push(s); + if (!push(s)) + { + std::cout << GridLogWarning << "XML: cannot open node '" << s << "'"; + std::cout << std::endl; + + return; + } while (node_.child("elem")) { output.resize(i + 1); diff --git a/lib/stencil/Stencil.h b/lib/stencil/Stencil.h index 887d8a7c..69c010f4 100644 --- a/lib/stencil/Stencil.h +++ b/lib/stencil/Stencil.h @@ -105,7 +105,6 @@ template class 
CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal fill in. public: - typedef CartesianCommunicator::CommsRequest_t CommsRequest_t; typedef typename cobj::vector_type vector_type; typedef typename cobj::scalar_type scalar_type; typedef typename cobj::scalar_object scalar_object; diff --git a/lib/threads/Threads.h b/lib/threads/Threads.h index d15f15ce..36daf2af 100644 --- a/lib/threads/Threads.h +++ b/lib/threads/Threads.h @@ -51,7 +51,9 @@ Author: paboyle #define PARALLEL_CRITICAL #endif +#define parallel_region PARALLEL_REGION #define parallel_for PARALLEL_FOR_LOOP for +#define parallel_for_internal PARALLEL_FOR_LOOP_INTERN for #define parallel_for_nest2 PARALLEL_NESTED_LOOP2 for namespace Grid { diff --git a/lib/util/Init.cc b/lib/util/Init.cc index 3232d32f..fb3d7a1e 100644 --- a/lib/util/Init.cc +++ b/lib/util/Init.cc @@ -204,11 +204,11 @@ std::string GridCmdVectorIntToString(const std::vector & vec){ // Reinit guard ///////////////////////////////////////////////////////// static int Grid_is_initialised = 0; - +static MemoryStats dbgMemStats; void Grid_init(int *argc,char ***argv) { - GridLogger::StopWatch.Start(); + GridLogger::GlobalStopWatch.Start(); std::string arg; @@ -220,11 +220,11 @@ void Grid_init(int *argc,char ***argv) arg= GridCmdOptionPayload(*argv,*argv+*argc,"--shm"); GridCmdOptionInt(arg,MB); uint64_t MB64 = MB; - CartesianCommunicator::MAX_MPI_SHM_BYTES = MB64*1024LL*1024LL; + GlobalSharedMemory::MAX_MPI_SHM_BYTES = MB64*1024LL*1024LL; } if( GridCmdOptionExists(*argv,*argv+*argc,"--shm-hugepages") ){ - CartesianCommunicator::Hugepages = 1; + GlobalSharedMemory::Hugepages = 1; } @@ -243,6 +243,17 @@ void Grid_init(int *argc,char ***argv) fname<& coor,int &index,const std::vector &dims){ + int nd=dims.size(); + int stride=1; + index=0; + for(int d=nd-1;d>=0;d--){ + index = index+stride*coor[d]; + stride=stride*dims[d]; + } + } + static inline void CoorFromIndexReversed (std::vector& coor,int index,const std::vector &dims){ + int nd= dims.size(); + coor.resize(nd); + for(int d=nd-1;d>=0;d--){ + coor[d] = index % dims[d]; + index = index / dims[d]; + } + } + + }; } diff --git a/scripts/copyright b/scripts/copyright index cc9ed6e5..a461b54c 100755 --- a/scripts/copyright +++ b/scripts/copyright @@ -11,8 +11,7 @@ Grid physics library, www.github.com/paboyle/Grid Source file: $1 -Copyright (C) 2015 -Copyright (C) 2016 +Copyright (C) 2015-2018 EOF @@ -60,4 +59,4 @@ shift done - +rm message tmp.fil diff --git a/scripts/filelist b/scripts/filelist index 8d4b8e1a..74f8e334 100755 --- a/scripts/filelist +++ b/scripts/filelist @@ -6,7 +6,7 @@ home=`pwd` cd $home/lib HFILES=`find . -type f -name '*.h' -not -name '*Hdf5*' -not -path '*/gamma-gen/*' -not -path '*/Old/*' -not -path '*/Eigen/*'` HFILES="$HFILES" -CCFILES=`find . -type f -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*Hdf5*'` +CCFILES=`find . -type f -name '*.cc' -not -path '*/gamma-gen/*' -not -name '*Communicator*.cc' -not -name '*SharedMemory*.cc' -not -name '*Hdf5*'` HPPFILES=`find . -type f -name '*.hpp'` echo HFILES=$HFILES $HPPFILES > Make.inc echo >> Make.inc diff --git a/tests/Makefile.am b/tests/Makefile.am index a8935268..7928a7fe 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,4 +1,4 @@ -SUBDIRS = . core forces hmc solver debug smearing IO +SUBDIRS = . 
core forces hmc solver debug smearing IO lanczos if BUILD_CHROMA_REGRESSION SUBDIRS+= qdpxx diff --git a/tests/debug/Test_cheby.cc b/tests/debug/Test_cheby.cc index 40544c56..72d07885 100644 --- a/tests/debug/Test_cheby.cc +++ b/tests/debug/Test_cheby.cc @@ -37,8 +37,15 @@ RealD InverseApproximation(RealD x){ RealD SqrtApproximation(RealD x){ return std::sqrt(x); } +RealD Approximation32(RealD x){ + return std::pow(x,-1.0/32.0); +} +RealD Approximation2(RealD x){ + return std::pow(x,-1.0/2.0); +} + RealD StepFunction(RealD x){ - if ( x<0.1 ) return 1.0; + if ( x<10.0 ) return 1.0; else return 0.0; } @@ -56,7 +63,6 @@ int main (int argc, char ** argv) Chebyshev ChebyInv(lo,hi,2000,InverseApproximation); - { std::ofstream of("chebyinv"); ChebyInv.csv(of); @@ -78,7 +84,6 @@ int main (int argc, char ** argv) ChebyStep.JacksonSmooth(); - { std::ofstream of("chebystepjack"); ChebyStep.csv(of); @@ -100,5 +105,30 @@ int main (int argc, char ** argv) ChebyNE.csv(of); } + lo=0.0; + hi=4.0; + Chebyshev Cheby32(lo,hi,2000,Approximation32); + { + std::ofstream of("cheby32"); + Cheby32.csv(of); + } + Cheby32.JacksonSmooth(); + { + std::ofstream of("cheby32jack"); + Cheby32.csv(of); + } + + Chebyshev ChebySqrt(lo,hi,2000,Approximation2); + { + std::ofstream of("chebysqrt"); + ChebySqrt.csv(of); + } + ChebySqrt.JacksonSmooth(); + { + std::ofstream of("chebysqrtjack"); + ChebySqrt.csv(of); + } + + Grid_finalize(); } diff --git a/tests/hadrons/Test_hadrons.hpp b/tests/hadrons/Test_hadrons.hpp index 9bd3ee0a..0265f5a6 100644 --- a/tests/hadrons/Test_hadrons.hpp +++ b/tests/hadrons/Test_hadrons.hpp @@ -118,7 +118,7 @@ inline void makeWilsonAction(Application &application, std::string actionName, std::string &gaugeField, double mass, std::string boundary = "1 1 1 -1") { - if (!(Environment::getInstance().hasModule(actionName))) + if (!(VirtualMachine::getInstance().hasModule(actionName))) { MAction::Wilson::Par actionPar; actionPar.gauge = gaugeField; @@ -144,7 +144,7 @@ inline void makeDWFAction(Application &application, std::string actionName, std::string &gaugeField, double mass, double M5, unsigned int Ls, std::string boundary = "1 1 1 -1") { - if (!(Environment::getInstance().hasModule(actionName))) + if (!(VirtualMachine::getInstance().hasModule(actionName))) { MAction::DWF::Par actionPar; actionPar.gauge = gaugeField; @@ -173,7 +173,7 @@ inline void makeDWFAction(Application &application, std::string actionName, inline void makeRBPrecCGSolver(Application &application, std::string &solverName, std::string &actionName, double residual = 1e-8) { - if (!(Environment::getInstance().hasModule(solverName))) + if (!(VirtualMachine::getInstance().hasModule(solverName))) { MSolver::RBPrecCG::Par solverPar; solverPar.action = actionName; @@ -195,7 +195,7 @@ inline void makePointSource(Application &application, std::string srcName, std::string pos) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Point::Par pointPar; pointPar.position = pos; @@ -219,7 +219,7 @@ inline void makeSequentialSource(Application &application, std::string srcName, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. 
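// (Every maker helper in this header now follows the same idempotent
// pattern, keyed on the VirtualMachine singleton rather than
// Environment: look the module name up first, and only build and
// register the module when the lookup misses, so repeated helper calls
// are safe.)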
- if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::SeqGamma::Par seqPar; seqPar.q = qSrc; @@ -255,7 +255,7 @@ inline void makeConservedSequentialSource(Application &application, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::SeqConserved::Par seqPar; seqPar.q = qSrc; @@ -280,7 +280,7 @@ inline void makeConservedSequentialSource(Application &application, inline void makeNoiseSource(Application &application, std::string &srcName, unsigned int tA, unsigned int tB) { - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Z2::Par noisePar; noisePar.tA = tA; @@ -302,7 +302,7 @@ inline void makeWallSource(Application &application, std::string &srcName, unsigned int tW, std::string mom = ZERO_MOM) { // If the source already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(srcName))) + if (!(VirtualMachine::getInstance().hasModule(srcName))) { MSource::Wall::Par wallPar; wallPar.tW = tW; @@ -324,7 +324,7 @@ inline void makePointSink(Application &application, std::string &sinkFnct, std::string mom = ZERO_MOM) { // If the sink function already exists, don't make it again. - if (!(Environment::getInstance().hasModule(sinkFnct))) + if (!(VirtualMachine::getInstance().hasModule(sinkFnct))) { MSink::Point::Par pointPar; pointPar.mom = mom; @@ -345,7 +345,7 @@ inline void sinkSmear(Application &application, std::string &sinkFnct, std::string &propName, std::string &smearedProp) { // If the propagator has already been smeared, don't smear it again. - if (!(Environment::getInstance().hasModule(smearedProp))) + if (!(VirtualMachine::getInstance().hasModule(smearedProp))) { MSink::Smear::Par smearPar; smearPar.q = propName; @@ -367,7 +367,7 @@ inline void makePropagator(Application &application, std::string &propName, std::string &srcName, std::string &solver) { // If the propagator already exists, don't make the module again. - if (!(Environment::getInstance().hasModule(propName))) + if (!(VirtualMachine::getInstance().hasModule(propName))) { MFermion::GaugeProp::Par quarkPar; quarkPar.source = srcName; @@ -390,7 +390,7 @@ inline void makeLoop(Application &application, std::string &propName, std::string &srcName, std::string &resName) { // If the loop propagator already exists, don't make the module again. 
- if (!(Environment::getInstance().hasModule(propName))) + if (!(VirtualMachine::getInstance().hasModule(propName))) { MLoop::NoiseLoop::Par loopPar; loopPar.q = resName; @@ -421,7 +421,7 @@ inline void mesonContraction(Application &application, std::string &sink, std::string gammas = "") { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::Meson::Par mesPar; mesPar.output = output; @@ -453,7 +453,7 @@ inline void gamma3ptContraction(Application &application, unsigned int npt, Gamma::Algebra gamma = Gamma::Algebra::Identity) { std::string modName = std::to_string(npt) + "pt_" + label; - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::Gamma3pt::Par gamma3ptPar; gamma3ptPar.output = std::to_string(npt) + "pt/" + label; @@ -487,7 +487,7 @@ inline void weakContraction##top(Application &application, unsigned int npt,\ std::string &label, unsigned int tSnk = 0)\ {\ std::string modName = std::to_string(npt) + "pt_" + label;\ - if (!(Environment::getInstance().hasModule(modName)))\ + if (!(VirtualMachine::getInstance().hasModule(modName)))\ {\ MContraction::WeakHamiltonian##top::Par weakPar;\ weakPar.output = std::to_string(npt) + "pt/" + label;\ @@ -521,7 +521,7 @@ inline void disc0Contraction(Application &application, std::string &label) { std::string modName = "4pt_" + label; - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::WeakNeutral4ptDisc::Par disc0Par; disc0Par.output = "4pt/" + label; @@ -547,7 +547,7 @@ inline void discLoopContraction(Application &application, std::string &q_loop, std::string &modName, Gamma::Algebra gamma = Gamma::Algebra::Identity) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::DiscLoop::Par discPar; discPar.output = "disc/" + modName; @@ -574,7 +574,7 @@ inline void makeWITest(Application &application, std::string &modName, std::string &propName, std::string &actionName, double mass, unsigned int Ls = 1, bool test_axial = false) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MContraction::WardIdentity::Par wiPar; if (Ls > 1) @@ -613,7 +613,7 @@ inline void makeSeqCurrComparison(Application &application, std::string &modName std::string &actionName, std::string &origin, unsigned int t_J, unsigned int mu, Current curr) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MUtilities::TestSeqConserved::Par seqPar; seqPar.q = propName; @@ -646,7 +646,7 @@ inline void makeSeqGamComparison(Application &application, std::string &modName, std::string &origin, Gamma::Algebra gamma, unsigned int t_g) { - if (!(Environment::getInstance().hasModule(modName))) + if (!(VirtualMachine::getInstance().hasModule(modName))) { MUtilities::TestSeqGamma::Par seqPar; seqPar.q = propName; diff --git a/tests/hmc/Test_hmc_ScalarActionNxN.cc b/tests/hmc/Test_hmc_ScalarActionNxN.cc index a4dad1a3..9e40beac 100644 --- a/tests/hmc/Test_hmc_ScalarActionNxN.cc +++ b/tests/hmc/Test_hmc_ScalarActionNxN.cc @@ -31,7 +31,8 @@ class ScalarActionParameters : Serializable { public: GRID_SERIALIZABLE_CLASS_MEMBERS(ScalarActionParameters, double, mass_squared, - double, lambda); + double, lambda, + double, g); template ScalarActionParameters(Reader& 
Reader){ @@ -140,7 +141,7 @@ int main(int argc, char **argv) { // Scalar action in adjoint representation ScalarActionParameters SPar(Reader); - ScalarAction Saction(SPar.mass_squared, SPar.lambda); + ScalarAction Saction(SPar.mass_squared, SPar.lambda, SPar.g); // Collect actions ActionLevel> Level1(1); diff --git a/tests/hmc/Test_remez.cc b/tests/hmc/Test_remez.cc index bc851173..5f4b0a25 100644 --- a/tests/hmc/Test_remez.cc +++ b/tests/hmc/Test_remez.cc @@ -38,11 +38,11 @@ int main (int argc, char ** argv) std::cout< +class BlockProjector { +public: + + BasisFieldVector& _evec; + BlockedGrid& _bgrid; + + BlockProjector(BasisFieldVector& evec, BlockedGrid& bgrid) : _evec(evec), _bgrid(bgrid) { + } + + void createOrthonormalBasis(RealD thres = 0.0) { + + GridStopWatch sw; + sw.Start(); + + int cnt = 0; + +#pragma omp parallel shared(cnt) + { + int lcnt = 0; + +#pragma omp for + for (int b=0;b<_bgrid._o_blocks;b++) { + + for (int i=0;i<_evec._Nm;i++) { + + auto nrm0 = _bgrid.block_sp(b,_evec._v[i],_evec._v[i]); + + // |i> -= |j> + for (int j=0;j + void coarseToFine(const CoarseField& in, Field& out) { + + out = zero; + out.checkerboard = _evec._v[0].checkerboard; + + int Nbasis = sizeof(in._odata[0]._internal._internal) / sizeof(in._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + _bgrid.block_caxpy(b,out,in._odata[b]._internal._internal[j],_evec._v[j],out); + } + } + + } + + template + void fineToCoarse(const Field& in, CoarseField& out) { + + out = zero; + + int Nbasis = sizeof(out._odata[0]._internal._internal) / sizeof(out._odata[0]._internal._internal[0]); + assert(Nbasis == _evec._Nm); + + + Field tmp(_bgrid._grid); + tmp = in; + +#pragma omp parallel for + for (int b=0;b<_bgrid._o_blocks;b++) { + for (int j=0;j<_evec._Nm;j++) { + // |rhs> -= |j> + auto c = _bgrid.block_sp(b,_evec._v[j],tmp); + _bgrid.block_caxpy(b,tmp,-c,_evec._v[j],tmp); // may make this more numerically stable + out._odata[b]._internal._internal[j] = c; + } + } + + } + + template + void deflateFine(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + result = zero; + for (int i=0;i + void deflateCoarse(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + CoarseField src_coarse(_coef._v[0]._grid); + CoarseField result_coarse = src_coarse; + result_coarse = zero; + fineToCoarse(src_orig,src_coarse); + for (int i=0;i + void deflate(BasisFieldVector& _coef,const std::vector& eval,int N,const Field& src_orig,Field& result) { + // Deflation on coarse Grid is much faster, so use it by default. Deflation on fine Grid is kept for legacy reasons for now. 
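// (Both paths evaluate the same spectral projection,
//   result = sum_i v_i <v_i, src> / eval_i ;
// the coarse variant forms the inner products from blocked
// coefficients, so the cost scales with the number of blocks rather
// than with the number of fine-grid sites.)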
+ deflateCoarse(_coef,eval,N,src_orig,result); + } + +}; +} diff --git a/tests/lanczos/BlockedGrid.h b/tests/lanczos/BlockedGrid.h new file mode 100644 index 00000000..821272de --- /dev/null +++ b/tests/lanczos/BlockedGrid.h @@ -0,0 +1,401 @@ +namespace Grid { + +template +class BlockedGrid { +public: + GridBase* _grid; + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + + std::vector _bs; // block size + std::vector _nb; // number of blocks + std::vector _l; // local dimensions irrespective of cb + std::vector _l_cb; // local dimensions of checkerboarded vector + std::vector _l_cb_o; // local dimensions of inner checkerboarded vector + std::vector _bs_cb; // block size in checkerboarded vector + std::vector _nb_o; // number of blocks of simd o-sites + + int _nd, _blocks, _cf_size, _cf_block_size, _cf_o_block_size, _o_blocks, _block_sites; + + BlockedGrid(GridBase* grid, const std::vector& block_size) : + _grid(grid), _bs(block_size), _nd((int)_bs.size()), + _nb(block_size), _l(block_size), _l_cb(block_size), _nb_o(block_size), + _l_cb_o(block_size), _bs_cb(block_size) { + + _blocks = 1; + _o_blocks = 1; + _l = grid->FullDimensions(); + _l_cb = grid->LocalDimensions(); + _l_cb_o = grid->_rdimensions; + + _cf_size = 1; + _block_sites = 1; + for (int i=0;i<_nd;i++) { + _l[i] /= grid->_processors[i]; + + assert(!(_l[i] % _bs[i])); // lattice must accommodate choice of blocksize + + int r = _l[i] / _l_cb[i]; + assert(!(_bs[i] % r)); // checkerboarding must accommodate choice of blocksize + _bs_cb[i] = _bs[i] / r; + _block_sites *= _bs_cb[i]; + _nb[i] = _l[i] / _bs[i]; + _nb_o[i] = _nb[i] / _grid->_simd_layout[i]; + if (_nb[i] % _grid->_simd_layout[i]) { // simd must accommodate choice of blocksize + std::cout << GridLogMessage << "Problem: _nb[" << i << "] = " << _nb[i] << " _grid->_simd_layout[" << i << "] = " << _grid->_simd_layout[i] << std::endl; + assert(0); + } + _blocks *= _nb[i]; + _o_blocks *= _nb_o[i]; + _cf_size *= _l[i]; + } + + _cf_size *= 12 / 2; + _cf_block_size = _cf_size / _blocks; + _cf_o_block_size = _cf_size / _o_blocks; + + std::cout << GridLogMessage << "BlockedGrid:" << std::endl; + std::cout << GridLogMessage << " _l = " << _l << std::endl; + std::cout << GridLogMessage << " _l_cb = " << _l_cb << std::endl; + std::cout << GridLogMessage << " _l_cb_o = " << _l_cb_o << std::endl; + std::cout << GridLogMessage << " _bs = " << _bs << std::endl; + std::cout << GridLogMessage << " _bs_cb = " << _bs_cb << std::endl; + + std::cout << GridLogMessage << " _nb = " << _nb << std::endl; + std::cout << GridLogMessage << " _nb_o = " << _nb_o << std::endl; + std::cout << GridLogMessage << " _blocks = " << _blocks << std::endl; + std::cout << GridLogMessage << " _o_blocks = " << _o_blocks << std::endl; + std::cout << GridLogMessage << " sizeof(vCoeff_t) = " << sizeof(vCoeff_t) << std::endl; + std::cout << GridLogMessage << " _cf_size = " << _cf_size << std::endl; + std::cout << GridLogMessage << " _cf_block_size = " << _cf_block_size << std::endl; + std::cout << GridLogMessage << " _block_sites = " << _block_sites << std::endl; + std::cout << GridLogMessage << " _grid->oSites() = " << _grid->oSites() << std::endl; + + // _grid->Barrier(); + //abort(); + } + + void block_to_coor(int b, std::vector& x0) { + + std::vector bcoor; + bcoor.resize(_nd); + x0.resize(_nd); + assert(b < _o_blocks); + Lexicographic::CoorFromIndex(bcoor,b,_nb_o); + int i; + + for (i=0;i<_nd;i++) { + x0[i] = bcoor[i]*_bs_cb[i]; + } + + //std::cout << GridLogMessage << 
"Map block b -> " << x0 << std::endl; + + } + + void block_site_to_o_coor(const std::vector& x0, std::vector& coor, int i) { + Lexicographic::CoorFromIndex(coor,i,_bs_cb); + for (int j=0;j<_nd;j++) + coor[j] += x0[j]; + } + + int block_site_to_o_site(const std::vector& x0, int i) { + std::vector coor; coor.resize(_nd); + block_site_to_o_coor(x0,coor,i); + Lexicographic::IndexFromCoor(coor,i,_l_cb_o); + return i; + } + + vCoeff_t block_sp(int b, const Field& x, const Field& y) { + + std::vector x0; + block_to_coor(b,x0); + + vCoeff_t ret = 0.0; + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + ret += TensorRemove(innerProduct(x._odata[ss],y._odata[ss])); + } + + return ret; + + } + + vCoeff_t block_sp(int b, const Field& x, const std::vector< ComplexD >& y) { + + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + std::vector< ComplexD > ret(nsimd); + for (int i=0;i + void vcaxpy(iScalar& r,const vCoeff_t& a,const iScalar& x,const iScalar& y) { + vcaxpy(r._internal,a,x._internal,y._internal); + } + + template + void vcaxpy(iVector& r,const vCoeff_t& a,const iVector& x,const iVector& y) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcaxpy(ret._odata[ss],a,x._odata[ss],y._odata[ss]); + } + + } + + void block_caxpy(int b, std::vector< ComplexD >& ret, const vCoeff_t& a, const Field& x, const std::vector< ComplexD >& y) { + std::vector x0; + block_to_coor(b,x0); + + constexpr int nsimd = sizeof(vCoeff_t) / sizeof(Coeff_t); + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + int n = lsize / nsimd; + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l& x) { + std::vector x0; + block_to_coor(b,x0); + + int lsize = _cf_o_block_size / _block_sites; + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + + for (int l=0;l + void vcscale(iScalar& r,const vCoeff_t& a,const iScalar& x) { + vcscale(r._internal,a,x._internal); + } + + template + void vcscale(iVector& r,const vCoeff_t& a,const iVector& x) { + for (int i=0;i x0; + block_to_coor(b,x0); + + for (int i=0;i<_block_sites;i++) { // only odd sites + int ss = block_site_to_o_site(x0,i); + vcscale(ret._odata[ss],a,ret._odata[ss]); + } + } + + void getCanonicalBlockOffset(int cb, std::vector& x0) { + const int ndim = 5; + assert(_nb.size() == ndim); + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + x0.resize(ndim); + + assert(cb >= 0); + assert(cb < _nbc[0]*_nbc[1]*_nbc[2]*_nbc[3]*_nbc[4]); + + Lexicographic::CoorFromIndex(x0,cb,_nbc); + int i; + + for (i=0;i& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma 
omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + ld = Coeff_t(buf[2*ti+0], buf[2*ti+1]); + } + } + } + } + + void peekBlockOfVectorCanonical(int cb,const Field& v,std::vector& buf) { + std::vector _bsc = { _bs[1], _bs[2], _bs[3], _bs[4], _bs[0] }; + std::vector ldim = v._grid->LocalDimensions(); + std::vector cldim = { ldim[1], ldim[2], ldim[3], ldim[4], ldim[0] }; + const int _nbsc = _bs_cb[0]*_bs_cb[1]*_bs_cb[2]*_bs_cb[3]*_bs_cb[4]; + // take canonical block cb of v and put it in canonical ordering in buf + std::vector cx0; + getCanonicalBlockOffset(cb,cx0); + + buf.resize(_cf_block_size * 2); + +#pragma omp parallel + { + std::vector co0,cl0; + co0=cx0; cl0=cx0; + +#pragma omp for + for (int i=0;i<_nbsc;i++) { + Lexicographic::CoorFromIndex(co0,2*i,_bsc); // 2* for eo + for (int j=0;j<(int)_bsc.size();j++) + cl0[j] = cx0[j] + co0[j]; + + std::vector l0 = { cl0[4], cl0[0], cl0[1], cl0[2], cl0[3] }; + int oi = v._grid->oIndex(l0); + int ii = v._grid->iIndex(l0); + int lti = i; + + //if (cb < 2 && i<2) + // std::cout << GridLogMessage << "Map: " << cb << ", " << i << " To: " << cl0 << ", " << cx0 << ", " << oi << ", " << ii << std::endl; + + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) { + Coeff_t& ld = ((Coeff_t*)&v._odata[oi]._internal._internal[s]._internal[c])[ii]; + int ti = 12*lti + 3*s + c; + buf[2*ti+0] = ld.real(); + buf[2*ti+1] = ld.imag(); + } + } + } + } + + int globalToLocalCanonicalBlock(int slot,const std::vector& src_nodes,int nb) { + // processor coordinate + int _nd = (int)src_nodes.size(); + std::vector _src_nodes = src_nodes; + std::vector pco(_nd); + Lexicographic::CoorFromIndex(pco,slot,_src_nodes); + std::vector cpco = { pco[1], pco[2], pco[3], pco[4], pco[0] }; + + // get local block + std::vector _nbc = { _nb[1], _nb[2], _nb[3], _nb[4], _nb[0] }; + assert(_nd == 5); + std::vector c_src_local_blocks(_nd); + for (int i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % (src_nodes[i] * _bs[i]) == 0); + c_src_local_blocks[(i+4) % 5] = _grid->_fdimensions[i] / src_nodes[i] / _bs[i]; + } + std::vector cbcoor(_nd); // coordinate of block in slot in canonical form + Lexicographic::CoorFromIndex(cbcoor,nb,c_src_local_blocks); + + // cpco, cbcoor + std::vector clbcoor(_nd); + for (int i=0;i<_nd;i++) { + int cgcoor = cpco[i] * c_src_local_blocks[i] + cbcoor[i]; // global block coordinate + int pcoor = cgcoor / _nbc[i]; // processor coordinate in my Grid + int tpcoor = _grid->_processor_coor[(i+1)%5]; + if (pcoor != tpcoor) + return -1; + clbcoor[i] = cgcoor - tpcoor * _nbc[i]; // canonical local block coordinate for canonical dimension i + } + + int lnb; + Lexicographic::IndexFromCoor(clbcoor,lnb,_nbc); + //std::cout << "Mapped slot = " << slot << " nb = " << nb << " to " << lnb << std::endl; + return lnb; + } + + + }; + +} diff --git a/tests/lanczos/FieldBasisVector.h b/tests/lanczos/FieldBasisVector.h new file mode 100644 index 00000000..9a21aa46 --- /dev/null +++ b/tests/lanczos/FieldBasisVector.h 
@@ -0,0 +1,81 @@ +namespace Grid { + +template +class BasisFieldVector { + public: + int _Nm; + + typedef typename Field::scalar_type Coeff_t; + typedef typename Field::vector_type vCoeff_t; + typedef typename Field::vector_object vobj; + typedef typename vobj::scalar_object sobj; + + std::vector _v; // _Nfull vectors + + void report(int n,GridBase* value) { + + std::cout << GridLogMessage << "BasisFieldVector allocated:\n"; + std::cout << GridLogMessage << " Delta N = " << n << "\n"; + std::cout << GridLogMessage << " Size of full vectors (size) = " << + ((double)n*sizeof(vobj)*value->oSites() / 1024./1024./1024.) << " GB\n"; + std::cout << GridLogMessage << " Size = " << _v.size() << " Capacity = " << _v.capacity() << std::endl; + + value->Barrier(); + +#ifdef __linux + if (value->IsBoss()) { + system("cat /proc/meminfo"); + } +#endif + + value->Barrier(); + + } + + BasisFieldVector(int Nm,GridBase* value) : _Nm(Nm), _v(Nm,value) { + report(Nm,value); + } + + ~BasisFieldVector() { + } + + Field& operator[](int i) { + return _v[i]; + } + + void orthogonalize(Field& w, int k) { + basisOrthogonalize(_v,w,k); + } + + void rotate(Eigen::MatrixXd& Qt,int j0, int j1, int k0,int k1,int Nm) { + basisRotate(_v,Qt,j0,j1,k0,k1,Nm); + } + + size_t size() const { + return _Nm; + } + + void resize(int n) { + if (n > _Nm) + _v.reserve(n); + + _v.resize(n,_v[0]._grid); + + if (n < _Nm) + _v.shrink_to_fit(); + + report(n - _Nm,_v[0]._grid); + + _Nm = n; + } + + void sortInPlace(std::vector& sort_vals, bool reverse) { + basisSortInPlace(_v,sort_vals,reverse); + } + + void deflate(const std::vector& eval,const Field& src_orig,Field& result) { + basisDeflate(_v,eval,src_orig,result); + } + + }; +} diff --git a/tests/lanczos/FieldVectorIO.h b/tests/lanczos/FieldVectorIO.h new file mode 100644 index 00000000..344e48fe --- /dev/null +++ b/tests/lanczos/FieldVectorIO.h @@ -0,0 +1,1085 @@ +namespace Grid { + + namespace FieldVectorIO { + + // zlib's crc32 gets 0.4 GB/s on KNL single thread + // below gets 4.8 GB/s + static uint32_t crc32_threaded(unsigned char* data, int64_t len, uint32_t previousCrc32 = 0) { + + // crc32 of zlib was incorrect for very large sizes, so do it block-wise + uint32_t crc = previousCrc32; + off_t blk = 0; + off_t step = 1024*1024*1024; + while (len > step) { + crc = crc32(crc,&data[blk],step); + blk += step; + len -= step; + } + + crc = crc32(crc,&data[blk],len); + return crc; + + } + + static int get_bfm_index( int* pos, int co, int* s ) { + + int ls = s[0]; + int NtHalf = s[4] / 2; + int simd_coor = pos[4] / NtHalf; + int regu_coor = (pos[1] + s[1] * (pos[2] + s[2] * ( pos[3] + s[3] * (pos[4] % NtHalf) ) )) / 2; + + return regu_coor * ls * 48 + pos[0] * 48 + co * 4 + simd_coor * 2; + } + + static void get_read_geometry(const GridBase* _grid,const std::vector& cnodes, + std::map >& slots, + std::vector& slot_lvol, + std::vector& lvol, + int64_t& slot_lsites,int& ntotal) { + + int _nd = (int)cnodes.size(); + std::vector nodes = cnodes; + + slots.clear(); + slot_lvol.clear(); + lvol.clear(); + + int i; + ntotal = 1; + int64_t lsites = 1; + slot_lsites = 1; + for (i=0;i<_nd;i++) { + assert(_grid->_fdimensions[i] % nodes[i] == 0); + slot_lvol.push_back(_grid->_fdimensions[i] / nodes[i]); + lvol.push_back(_grid->_fdimensions[i] / _grid->_processors[i]); + lsites *= lvol.back(); + slot_lsites *= slot_lvol.back(); + ntotal *= nodes[i]; + } + + std::vector lcoor, gcoor, scoor; + lcoor.resize(_nd); gcoor.resize(_nd); scoor.resize(_nd); + + // create mapping of indices to slots + for (int lidx 
= 0; lidx < lsites; lidx++) { + Lexicographic::CoorFromIndex(lcoor,lidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + } + int slot; + Lexicographic::IndexFromCoor(scoor,slot,nodes); + auto sl = slots.find(slot); + if (sl == slots.end()) + slots[slot] = std::vector(); + slots[slot].push_back(lidx); + } + } + + static void canonical_block_to_coarse_coordinates(GridBase* _coarsegrid,int nb,int& ii,int& oi) { + // canonical nb needs to be mapped in a coordinate on my coarsegrid (ii,io) + std::vector _l = _coarsegrid->LocalDimensions(); + std::vector _cl = { _l[1], _l[2], _l[3], _l[4], _l[0] }; + std::vector _cc(_l.size()); + Lexicographic::CoorFromIndex(_cc,nb,_cl); + std::vector _c = { _cc[4], _cc[0], _cc[1], _cc[2], _cc[3] }; + ii = _coarsegrid->iIndex(_c); + oi = _coarsegrid->oIndex(_c); + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir, const std::vector& cnodes) { + + GridBase* _grid = ret._v[0]._grid; + + std::map > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + get_read_geometry(_grid,cnodes, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // this is slow code to read the argonne file format for debugging purposes + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Read " << dir << " nodes = " << cnodes << std::endl; + std::cout << GridLogMessage << " lvol = " << lvol << std::endl; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + // now load one slot at a time and fill the vector + for (auto sl=slots.begin();sl!=slots.end();sl++) { + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + sprintf(buf,"%s/checksums.txt",dir); printf("read_argonne: Reading from %s\n",buf); + FILE* f = fopen(buf,"rt"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + for (int l=0;l<3+slot;l++) + fgets(buf,sizeof(buf),f); + uint32_t crc_exp = strtol(buf, NULL, 16); + fclose(f); + + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + + fseeko(f,0,SEEK_END); + off_t total_size = ftello(f); + fseeko(f,0,SEEK_SET); + + int64_t size = slot_lsites / 2 * 24*4; + rdata.resize(size); + + assert(total_size % size == 0); + + int _Nfull = total_size / size; + ret._v.resize(_Nfull,ret._v[0]); + ret._Nm = _Nfull; + + uint32_t crc = 0x0; + GridStopWatch gsw,gsw2; + for (int nev = 0;nev < _Nfull;nev++) { + + gsw.Start(); + assert(fread(&rdata[0],size,1,f) == 1); + gsw.Stop(); + + gsw2.Start(); + crc = crc32_threaded((unsigned char*)&rdata[0],size,crc); + gsw2.Stop(); + + for (int i=0;i lcoor, gcoor, scoor, slcoor; + lcoor.resize(_nd); gcoor.resize(_nd); + slcoor.resize(_nd); scoor.resize(_nd); + +#pragma omp for + for (int64_t lidx = 0; lidx < idx.size(); lidx++) { + int llidx = idx[lidx]; + Lexicographic::CoorFromIndex(lcoor,llidx,lvol); + for (int i=0;i<_nd;i++) { + gcoor[i] = lcoor[i] + _grid->_processor_coor[i]*lvol[i]; + scoor[i] = gcoor[i] / slot_lvol[i]; + slcoor[i] = gcoor[i] - scoor[i]*slot_lvol[i]; + } + + if ((lcoor[1]+lcoor[2]+lcoor[3]+lcoor[4]) % 2 == 1) { + // poke + iScalar, 4> > sc; + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + sc()(s)(c) = 
*(std::complex*)&rdata[get_bfm_index(&slcoor[0],c+s*3, &slot_lvol[0] )]; + + pokeLocalSite(sc,ret._v[nev],lcoor); + } + + } + } + } + + fclose(f); + std::cout << GridLogMessage << "Loading slot " << slot << " with " << idx.size() << " points and " + << _Nfull << " vectors in " + << gsw.Elapsed() << " at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw.useconds()*1000.*1000. ) + << " GB/s " << " crc32 = " << std::hex << crc << " crc32_expected = " << crc_exp << std::dec + << " computed at " + << ( (double)size * _Nfull / 1024./1024./1024. / gsw2.useconds()*1000.*1000. ) + << " GB/s " + << std::endl; + + assert(crc == crc_exp); + } + + _grid->Barrier(); + std::cout << GridLogMessage << "Loading complete" << std::endl; + + return true; + } + + template + static bool read_argonne(BasisFieldVector& ret,const char* dir) { + + + GridBase* _grid = ret._v[0]._grid; + + char buf[4096]; + sprintf(buf,"%s/nodes.txt",dir); + FILE* f = fopen(buf,"rt"); + if (!f) { + if (_grid->IsBoss()) { + fprintf(stderr,"Attempting to load eigenvectors without secifying node layout failed due to absence of nodes.txt\n"); + fflush(stderr); + } + return false; + } + + + std::vector nodes((int)_grid->_processors.size()); + for (int i =0;i<(int)_grid->_processors.size();i++) + assert(fscanf(f,"%d\n",&nodes[i])==1); + fclose(f); + + return read_argonne(ret,dir,nodes); + } + + static void flush_bytes(FILE* f, std::vector& fbuf) { + if (fbuf.size()) { + + if (fwrite(&fbuf[0],fbuf.size(),1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)fbuf.size() / 1024./1024./1024.); + exit(2); + } + + fbuf.resize(0); + + } + } + + static void write_bytes(void* buf, int64_t s, FILE* f, std::vector& fbuf, uint32_t& crc) { + static double data_counter = 0.0; + static GridStopWatch gsw_crc, gsw_flush1,gsw_flush2,gsw_write,gsw_memcpy; + if (s == 0) + return; + + // checksum + gsw_crc.Start(); + crc = crc32_threaded((unsigned char*)buf,s,crc); + gsw_crc.Stop(); + + if (s > fbuf.capacity()) { + // cannot buffer this, so first flush current buffer contents and then write this directly to file + gsw_flush1.Start(); + flush_bytes(f,fbuf); + gsw_flush1.Stop(); + + gsw_write.Start(); + if (fwrite(buf,s,1,f) != 1) { + fprintf(stderr,"Write failed of %g GB!\n",(double)s / 1024./1024./1024.); + exit(2); + } + gsw_write.Stop(); + + } + + // no room left in buffer, flush to disk + if (fbuf.size() + s > fbuf.capacity()) { + gsw_flush2.Start(); + flush_bytes(f,fbuf); + gsw_flush2.Stop(); + } + + // then fill buffer again + { + gsw_memcpy.Start(); + size_t t = fbuf.size(); + fbuf.resize(t + s); + memcpy(&fbuf[t],buf,s); + gsw_memcpy.Stop(); + } + + data_counter += (double)s; + if (data_counter > 1024.*1024.*20.) { + std::cout << GridLogMessage << "Writing " << ((double)data_counter / 1024./1024./1024.) 
<< " GB at" + " crc = " << gsw_crc.Elapsed() << " flush1 = " << gsw_flush1.Elapsed() << " flush2 = " << gsw_flush2.Elapsed() << + " write = " << gsw_write.Elapsed() << " memcpy = " << gsw_memcpy.Elapsed() << std::endl; + data_counter = 0.0; + gsw_crc.Reset(); + gsw_write.Reset(); + gsw_memcpy.Reset(); + gsw_flush1.Reset(); + gsw_flush2.Reset(); + } + } + + static void write_floats(FILE* f, std::vector& fbuf, uint32_t& crc, float* buf, int64_t n) { + write_bytes(buf,n*sizeof(float),f,fbuf,crc); + } + + static void read_floats(char* & ptr, float* out, int64_t n) { + float* in = (float*)ptr; + ptr += 4*n; + + for (int64_t i=0;i 0, [0,6] -> 1; reconstruct 0 -> -3, 1-> 3 + // + // N=2 + // [-6,-2] -> 0, [-2,2] -> 1, [2,6] -> 2; reconstruct 0 -> -4, 1->0, 2->4 + int ret = (int) ( (float)(N+1) * ( (in - min) / (max - min) ) ); + if (ret == N+1) { + ret = N; + } + return ret; + } + + static float fp_unmap(int val, float min, float max, int N) { + return min + (float)(val + 0.5) * (max - min) / (float)( N + 1 ); + } + +#define SHRT_UMAX 65535 +#define FP16_BASE 1.4142135623730950488 +#define FP16_COEF_EXP_SHARE_FLOATS 10 + static float unmap_fp16_exp(unsigned short e) { + float de = (float)((int)e - SHRT_UMAX / 2); + return ::pow( FP16_BASE, de ); + } + + // can assume that v >=0 and need to guarantee that unmap_fp16_exp(map_fp16_exp(v)) >= v + static unsigned short map_fp16_exp(float v) { + // float has exponents 10^{-44.85} .. 10^{38.53} + int exp = (int)ceil(::log(v) / ::log(FP16_BASE)) + SHRT_UMAX / 2; + if (exp < 0 || exp > SHRT_UMAX) { + fprintf(stderr,"Error in map_fp16_exp(%g,%d)\n",v,exp); + exit(3); + } + + return (unsigned short)exp; + } + + template + static void read_floats_fp16(char* & ptr, OPT* out, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* in = (unsigned short*)ptr; + ptr += 2*(n+nsites); + + // do for each site + for (int64_t site = 0;site + static void write_floats_fp16(FILE* f, std::vector& fbuf, uint32_t& crc, OPT* in, int64_t n, int nsc) { + + int64_t nsites = n / nsc; + if (n % nsc) { + fprintf(stderr,"Invalid size in write_floats_fp16\n"); + exit(4); + } + + unsigned short* buf = (unsigned short*)malloc( sizeof(short) * (n + nsites) ); + if (!buf) { + fprintf(stderr,"Out of mem\n"); + exit(1); + } + + // do for each site +#pragma omp parallel for + for (int64_t site = 0;site max) + max = fabs(ev[i]); + } + + unsigned short exp = map_fp16_exp(max); + max = unmap_fp16_exp(exp); + min = -max; + + *bptr++ = exp; + + for (int i=0;i SHRT_UMAX) { + fprintf(stderr,"Assert failed: val = %d (%d), ev[i] = %.15g, max = %.15g, exp = %d\n",val,SHRT_UMAX,ev[i],max,(int)exp); + exit(48); + } + *bptr++ = (unsigned short)val; + } + + } + + write_bytes(buf,sizeof(short)*(n + nsites),f,fbuf,crc); + + free(buf); + } + + template + static bool read_compressed_vectors(const char* dir,BlockProjector& pr,BasisFieldVector& coef, int ngroups = 1) { + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + + // for error messages + char hostname[1024]; + gethostname(hostname, 1024); + + std::cout << GridLogMessage << "Ready on host " << hostname << " with " << ngroups << " reader groups" << std::endl; + + // first read metadata + char buf[4096]; + sprintf(buf,"%s/metadata.txt",dir); + + std::vector s,b,nb,nn,crc32; + s.resize(5); b.resize(5); nb.resize(5); nn.resize(5); + uint32_t neig, nkeep, nkeep_single, blocks, _FP16_COEF_EXP_SHARE_FLOATS; + uint32_t 
nprocessors = 1; + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(buf,"rb"); + status=f ? 1 : 0; + } + _grid->GlobalSum(status); + std::cout << GridLogMessage << "Read params status " << status << std::endl; + + if (!status) { + return false; + } + +#define _IRL_READ_INT(buf,p) if (f) { assert(fscanf(f,buf,p)==1); } else { *(p) = 0; } _grid->GlobalSum(*(p)); + + for (int i=0;i<5;i++) { + sprintf(buf,"s[%d] = %%d\n",i); + _IRL_READ_INT(buf,&s[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"b[%d] = %%d\n",i); + _IRL_READ_INT(buf,&b[(i+1)%5]); + } + for (int i=0;i<5;i++) { + sprintf(buf,"nb[%d] = %%d\n",i); + _IRL_READ_INT(buf,&nb[(i+1)%5]); + } + _IRL_READ_INT("neig = %d\n",&neig); + _IRL_READ_INT("nkeep = %d\n",&nkeep); + _IRL_READ_INT("nkeep_single = %d\n",&nkeep_single); + _IRL_READ_INT("blocks = %d\n",&blocks); + _IRL_READ_INT("FP16_COEF_EXP_SHARE_FLOATS = %d\n",&_FP16_COEF_EXP_SHARE_FLOATS); + + for (int i=0;i<5;i++) { + assert(_grid->FullDimensions()[i] % s[i] == 0); + nn[i] = _grid->FullDimensions()[i] / s[i]; + nprocessors *= nn[i]; + } + + std::cout << GridLogMessage << "Reading data that was generated on node-layout " << nn << std::endl; + + crc32.resize(nprocessors); + for (int i =0;i > slots; + std::vector slot_lvol, lvol; + int64_t slot_lsites; + int ntotal; + std::vector _nn(nn.begin(),nn.end()); + get_read_geometry(_grid,_nn, + slots,slot_lvol,lvol,slot_lsites, + ntotal); + int _nd = (int)lvol.size(); + + // types + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + // slot layout + int nperdir = ntotal / 32; + if (nperdir < 1) + nperdir=1; + + // add read groups + for (int ngroup=0;ngroupThisRank() % ngroups == ngroup; + + std::cout << GridLogMessage << "Reading in group " << ngroup << " / " << ngroups << std::endl; + + // load all necessary slots and store them appropriately + for (auto sl=slots.begin();sl!=slots.end();sl++) { + + std::vector& idx = sl->second; + int slot = sl->first; + std::vector rdata; + + char buf[4096]; + + if (action) { + // load one slot vector + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + f = fopen(buf,"rb"); + if (!f) { + fprintf(stderr,"Node %s cannot read %s\n",hostname,buf); fflush(stderr); + return false; + } + } + + uint32_t crc = 0x0; + off_t size; + + GridStopWatch gsw; + _grid->Barrier(); + gsw.Start(); + + std::vector raw_in(0); + if (action) { + fseeko(f,0,SEEK_END); + size = ftello(f); + fseeko(f,0,SEEK_SET); + + raw_in.resize(size); + assert(fread(&raw_in[0],size,1,f) == 1); + } + + _grid->Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)size / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + + if (action) { + std::cout << GridLogMessage << "[" << slot << "] Read " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s" << std::endl; + + uint32_t crc_comp = crc32_threaded((unsigned char*)&raw_in[0],size,0); + + if (crc_comp != crc32[slot]) { + std::cout << "Node " << hostname << " found crc mismatch for file " << buf << " (" << std::hex << crc_comp << " vs " << crc32[slot] << std::dec << ")" << std::endl; + std::cout << "Byte size: " << size << std::endl; + } + + assert(crc_comp == crc32[slot]); + } + + _grid->Barrier(); + + if (action) { + fclose(f); + } + + char* ptr = &raw_in[0]; + + GridStopWatch gsw2; + gsw2.Start(); + if (action) { + int nsingleCap = nkeep_single; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + int _cf_block_size = 
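+	  // complex entries per block and per basis vector: (slot_lsites/2 odd
+	  // sites) x 12 spin-colour components, spread evenly over the blocks;
+	  // the factor 2 in the buffers below covers the re/im floats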
slot_lsites * 12 / 2 / blocks; + +#define FP_16_SIZE(a,b) (( (a) + (a/b) )*2) + + // first read single precision basis vectors +#pragma omp parallel + { + std::vector buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf(_cf_block_size * 2); +#pragma omp for + for (int nb=0;nb buf1(nkeep_single*2); + std::vector buf2((nkeep - nkeep_single)*2); + +#pragma omp for + for (int j=0;j<(int)coef.size();j++) + for (int nb=0;nb + static void write_compressed_vectors(const char* dir,const BlockProjector& pr, + const BasisFieldVector& coef, + int nsingle,int writer_nodes = 0) { + + GridStopWatch gsw; + + const BasisFieldVector& basis = pr._evec; + GridBase* _grid = basis._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + _grid->Barrier(); + gsw.Start(); + + char buf[4096]; + + // Making the directories is somewhat tricky. + // If we run on a joint filesystem we would just + // have the boss create the directories and then + // have a barrier. We also want to be able to run + // on local /scratch, so potentially all nodes need + // to create their own directories. So do the following + // for now. + for (int j=0;j<_grid->_Nprocessors;j++) { + if (j == _grid->ThisRank()) { + conditionalMkDir(dir); + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + conditionalMkDir(buf); + } + _grid->Barrier(); // make sure directories are ready + } + } + + + typedef typename Field::scalar_type Coeff_t; + typedef typename CoarseField::scalar_type CoeffCoarse_t; + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + + int64_t off = 0x0; + uint32_t crc = 0x0; + if (writer_nodes < 1) + writer_nodes = _grid->_Nprocessors; + int groups = _grid->_Nprocessors / writer_nodes; + if (groups<1) + groups = 1; + + std::cout << GridLogMessage << " Write " << dir << " nodes = " << writer_nodes << std::endl; + + for (int group=0;groupBarrier(); + if (_grid->ThisRank() % groups == group) { + + sprintf(buf,"%s/%2.2d/%10.10d.compressed",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + //buffer does not seem to help + //assert(!setvbuf ( f , NULL , _IOFBF , 1024*1024*2 )); + + int nsingleCap = nsingle; + if (pr._evec.size() < nsingleCap) + nsingleCap = pr._evec.size(); + + GridStopWatch gsw1,gsw2,gsw3,gsw4,gsw5; + + gsw1.Start(); + + std::vector fbuf; + fbuf.reserve( 1024 * 1024 * 8 ); + + // first write single precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + +#if 0 + { + RealD nrm = 0.0; + for (int j=0;j<(int)buf.size();j++) + nrm += buf[j]*buf[j]; + std::cout << GridLogMessage << "Norm: " << nrm << std::endl; + } +#endif + write_floats(f,fbuf,crc, &buf[0], buf.size() ); + } + } + + gsw1.Stop(); + gsw2.Start(); + + // then write fixed precision basis vectors + for (int nb=0;nb buf; + pr._bgrid.peekBlockOfVectorCanonical(nb,pr._evec._v[i],buf); + write_floats_fp16(f,fbuf,crc, &buf[0], buf.size(), 24); + } + } + + gsw2.Stop(); + assert(coef._v[0]._grid->_isites*coef._v[0]._grid->_osites == pr._bgrid._blocks); + + gsw3.Start(); + for (int j=0;j<(int)coef.size();j++) { + + int64_t size1 = nsingleCap*2; + int64_t size2 = 2*(pr._evec.size()-nsingleCap); + int64_t size = size1; + if (size2>size) + size=size2; + std::vector buf(size); + + //RealD nrmTest = 0.0; + for (int nb=0;nbGlobalSum(nrmTest); + //std::cout << GridLogMessage << "Test norm: " 
<< nrmTest << std::endl; + } + gsw3.Stop(); + + flush_bytes(f,fbuf); + + off = ftello(f); + fclose(f); + + std::cout<Barrier(); + gsw.Stop(); + + RealD totalGB = (RealD)off / 1024./1024./1024 * _grid->_Nprocessors; + RealD seconds = gsw.useconds() / 1e6; + std::cout << GridLogMessage << "Write " << totalGB << " GB of compressed data at " << totalGB/seconds << " GB/s in " << seconds << " s" << std::endl; + + // gather crcs + std::vector crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/metadata.txt",dir); + FILE* f = fopen(buf,"wb"); + assert(f); + for (int i=0;i<5;i++) + fprintf(f,"s[%d] = %d\n",i,_grid->FullDimensions()[(i+1)%5] / _grid->_processors[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"b[%d] = %d\n",i,pr._bgrid._bs[(i+1)%5]); + for (int i=0;i<5;i++) + fprintf(f,"nb[%d] = %d\n",i,pr._bgrid._nb[(i+1)%5]); + fprintf(f,"neig = %d\n",(int)coef.size()); + fprintf(f,"nkeep = %d\n",(int)pr._evec.size()); + fprintf(f,"nkeep_single = %d\n",nsingle); + fprintf(f,"blocks = %d\n",pr._bgrid._blocks); + fprintf(f,"FP16_COEF_EXP_SHARE_FLOATS = %d\n",FP16_COEF_EXP_SHARE_FLOATS); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"crc32[%d] = %X\n",i,crcs[i]); + fclose(f); + } + + } + + template + static void write_argonne(const BasisFieldVector& ret,const char* dir) { + + GridBase* _grid = ret._v[0]._grid; + std::vector _l = _grid->FullDimensions(); + for (int i=0;i<(int)_l.size();i++) + _l[i] /= _grid->_processors[i]; + + char buf[4096]; + + if (_grid->IsBoss()) { + mkdir(dir,ACCESSPERMS); + + for (int i=0;i<32;i++) { + sprintf(buf,"%s/%2.2d",dir,i); + mkdir(buf,ACCESSPERMS); + } + } + + _grid->Barrier(); // make sure directories are ready + + + int nperdir = _grid->_Nprocessors / 32; + if (nperdir < 1) + nperdir=1; + std::cout << GridLogMessage << " Write " << dir << " nodes = " << _grid->_Nprocessors << std::endl; + + int slot; + Lexicographic::IndexFromCoor(_grid->_processor_coor,slot,_grid->_processors); + //printf("Slot: %d <> %d\n",slot, _grid->ThisRank()); + + sprintf(buf,"%s/%2.2d/%10.10d",dir,slot/nperdir,slot); + FILE* f = fopen(buf,"wb"); + assert(f); + + int N = (int)ret._v.size(); + uint32_t crc = 0x0; + int64_t cf_size = _grid->oSites()*_grid->iSites()*12; + std::vector< float > rdata(cf_size*2); + + GridStopWatch gsw1,gsw2; + + for (int i=0;i coor(_l.size()); + for (coor[1] = 0;coor[1]<_l[1];coor[1]++) { + for (coor[2] = 0;coor[2]<_l[2];coor[2]++) { + for (coor[3] = 0;coor[3]<_l[3];coor[3]++) { + for (coor[4] = 0;coor[4]<_l[4];coor[4]++) { + for (coor[0] = 0;coor[0]<_l[0];coor[0]++) { + + if ((coor[1]+coor[2]+coor[3]+coor[4]) % 2 == 1) { + // peek + iScalar, 4> > sc; + peekLocalSite(sc,ret._v[i],coor); + for (int s=0;s<4;s++) + for (int c=0;c<3;c++) + *(std::complex*)&rdata[get_bfm_index(&coor[0],c+s*3, &_l[0] )] = sc()(s)(c); + } + } + } + } + } + } + + // endian flip + for (int i=0;i crcs(_grid->_Nprocessors); + for (int i=0;i<_grid->_Nprocessors;i++) { + crcs[i] = 0x0; + } + crcs[slot] = crc; + for (int i=0;i<_grid->_Nprocessors;i++) { + _grid->GlobalSum(crcs[i]); + } + + if (_grid->IsBoss()) { + sprintf(buf,"%s/checksums.txt",dir); + FILE* f = fopen(buf,"wt"); + assert(f); + fprintf(f,"00000000\n\n"); + for (int i =0;i<_grid->_Nprocessors;i++) + fprintf(f,"%X\n",crcs[i]); + fclose(f); + + sprintf(buf,"%s/nodes.txt",dir); + f = fopen(buf,"wt"); + assert(f); + for (int i 
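+      // checksums.txt format, matched by the reader above: a "00000000" header
+      // line plus a blank line, then one hexadecimal crc32 per slot in rank
+      // order; nodes.txt records the processor grid so that a later
+      // read_argonne can reconstruct the slot geometry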
=0;i<(int)_grid->_processors.size();i++) + fprintf(f,"%d\n",_grid->_processors[i]); + fclose(f); + } + + + std::cout << GridLogMessage << "Writing slot " << slot << " with " + << N << " vectors in " + << gsw2.Elapsed() << " at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw2.useconds()*1000.*1000. ) + << " GB/s with crc computed at " + << ( (double)cf_size*2*4 * N / 1024./1024./1024. / gsw1.useconds()*1000.*1000. ) + << " GB/s " + << std::endl; + + _grid->Barrier(); + std::cout << GridLogMessage << "Writing complete" << std::endl; + + } + } + +} diff --git a/tests/lanczos/Makefile.am b/tests/lanczos/Makefile.am new file mode 100644 index 00000000..60b82dd7 --- /dev/null +++ b/tests/lanczos/Makefile.am @@ -0,0 +1 @@ +include Make.inc diff --git a/tests/lanczos/Params.h b/tests/lanczos/Params.h new file mode 100644 index 00000000..d9a6d3b3 --- /dev/null +++ b/tests/lanczos/Params.h @@ -0,0 +1,136 @@ +/* + Params IO + + Author: Christoph Lehner + Date: 2017 +*/ + +#define PADD(p,X) p.get(#X,X); + +class Params { + protected: + + std::string trim(const std::string& sc) { + std::string s = sc; + s.erase(s.begin(), std::find_if(s.begin(), s.end(), + std::not1(std::ptr_fun(std::isspace)))); + s.erase(std::find_if(s.rbegin(), s.rend(), + std::not1(std::ptr_fun(std::isspace))).base(), s.end()); + return s; + } + + public: + + std::map< std::string, std::string > lines; + std::string _fn; + + Params(const char* fn) : _fn(fn) { + FILE* f = fopen(fn,"rt"); + assert(f); + while (!feof(f)) { + char buf[4096]; + if (fgets(buf,sizeof(buf),f)) { + if (buf[0] != '#' && buf[0] != '\r' && buf[0] != '\n') { + char* sep = strchr(buf,'='); + assert(sep); + *sep = '\0'; + lines[trim(buf)] = trim(sep+1); + } + } + } + fclose(f); + } + + ~Params() { + } + + std::string loghead() { + return _fn + ": "; + } + + bool has(const char* name) { + auto f = lines.find(name); + return (f != lines.end()); + } + + const std::string& get(const char* name) { + auto f = lines.find(name); + if (f == lines.end()) { + std::cout << Grid::GridLogMessage << loghead() << "Could not find value for " << name << std::endl; + abort(); + } + return f->second; + } + + void parse(std::string& s, const std::string& cval) { + std::stringstream trimmer; + trimmer << cval; + s.clear(); + trimmer >> s; + } + + void parse(int& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%d",&i)==1); + } + + void parse(long long& i, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lld",&i)==1); + } + + void parse(double& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%lf",&f)==1); + } + + void parse(float& f, const std::string& cval) { + assert(sscanf(cval.c_str(),"%f",&f)==1); + } + + void parse(bool& b, const std::string& cval) { + std::string lcval = cval; + std::transform(lcval.begin(), lcval.end(), lcval.begin(), ::tolower); + if (lcval == "true" || lcval == "yes") { + b = true; + } else if (lcval == "false" || lcval == "no") { + b = false; + } else { + std::cout << "Invalid value for boolean: " << b << std::endl; + assert(0); + } + } + + void parse(std::complex& f, const std::string& cval) { + double r,i; + assert(sscanf(cval.c_str(),"%lf %lf",&r,&i)==2); + f = std::complex(r,i); + } + + void parse(std::complex& f, const std::string& cval) { + float r,i; + assert(sscanf(cval.c_str(),"%f %f",&r,&i)==2); + f = std::complex(r,i); + } + + template + void get(const char* name, std::vector& v) { + int i = 0; + v.resize(0); + while (true) { + char buf[4096]; + sprintf(buf,"%s[%d]",name,i++); + if (!has(buf)) + break; + 
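+      // vector-valued keys use indexed entries in the parameter file, e.g.
+      //   b[0] = 2
+      //   b[1] = 2
+      // and are read from index 0 until the first missing entry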
T val; + parse(val,get(buf)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << buf << " to " << val << std::endl; + v.push_back(val); + } + } + + template + void get(const char* name, T& f) { + parse(f,get(name)); + std::cout << Grid::GridLogMessage << loghead() << "Set " << name << " to " << f << std::endl; + } + + +}; diff --git a/tests/lanczos/Test_dwf_compressed_lanczos.cc b/tests/lanczos/Test_dwf_compressed_lanczos.cc new file mode 100644 index 00000000..45690f05 --- /dev/null +++ b/tests/lanczos/Test_dwf_compressed_lanczos.cc @@ -0,0 +1,712 @@ +/* + Authors: Christoph Lehner + Date: 2017 + + Multigrid Lanczos + + + + TODO: + + High priority: + - Explore filtering of starting vector again, should really work: If cheby has 4 for low mode region and 1 for high mode, applying 15 iterations has 1e9 suppression + of high modes, which should create the desired invariant subspace already? Missing something here??? Maybe dynamic range dangerous, i.e., could also kill interesting + eigenrange if not careful. + + Better: Use all Cheby up to order N in order to approximate a step function; try this! Problem: width of step function. Can kill eigenspace > 1e-3 and have < 1e-5 equal + to 1 + + Low priority: + - Given that I seem to need many restarts and high degree poly to create the base and this takes about 1 day, seriously consider a simple method to create a basis + (ortho krylov low poly); and then fix up lowest say 200 eigenvalues by 1 run with high-degree poly (600 could be enough) +*/ +#include +#include +///////////////////////////////////////////////////////////////////////////// +// The following are now decoupled from the Lanczos and deal with grids. +// Safe to replace functionality +///////////////////////////////////////////////////////////////////////////// +#include "BlockedGrid.h" +#include "FieldBasisVector.h" +#include "BlockProjector.h" +#include "FieldVectorIO.h" +#include "Params.h" + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +bool read_evals(GridBase* _grid, char* fn, std::vector& evals) { + + FILE* f = 0; + uint32_t status = 0; + if (_grid->IsBoss()) { + f = fopen(fn,"rt"); + status = f ? 
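+  // idiom used throughout this file: only the boss rank touches the file, and
+  // GlobalSum of the 0/1 status (and later of N and the eigenvalues, which are
+  // zero on the other ranks) doubles as a broadcast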
1 : 0; + } + _grid->GlobalSum(status); + + if (!status) + return false; + + uint32_t N; + if (f) + assert(fscanf(f,"%d\n",&N)==1); + else + N = 0; + _grid->GlobalSum(N); + + std::cout << "Reading " << N << " eigenvalues" << std::endl; + + evals.resize(N); + + for (int i=0;iGlobalSumVector(&evals[0],evals.size()); + + if (f) + fclose(f); + return true; +} + +void write_evals(char* fn, std::vector& evals) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)evals.size(); + fprintf(f,"%d\n",N); + + for (int i=0;i& hist) { + FILE* f = fopen(fn,"wt"); + assert(f); + + int N = (int)hist.size(); + for (int i=0;i +class CheckpointedLinearFunction : public LinearFunction { +public: + LinearFunction& _op; + std::string _dir; + int _max_apply; + int _apply, _apply_actual; + GridBase* _grid; + FILE* _f; + + CheckpointedLinearFunction(GridBase* grid, LinearFunction& op, const char* dir,int max_apply) : _op(op), _dir(dir), _grid(grid), _f(0), + _max_apply(max_apply), _apply(0), _apply_actual(0) { + + FieldVectorIO::conditionalMkDir(dir); + + char fn[4096]; + sprintf(fn,"%s/ckpt_op.%4.4d",_dir.c_str(),_grid->ThisRank()); + printf("CheckpointLinearFunction:: file %s\n",fn); + _f = fopen(fn,"r+b"); + if (!_f) + _f = fopen(fn,"w+b"); + assert(_f); + fseek(_f,0,SEEK_CUR); + + } + + ~CheckpointedLinearFunction() { + if (_f) { + fclose(_f); + _f = 0; + } + } + + bool load_ckpt(const Field& in, Field& out) { + + off_t cur = ftello(_f); + fseeko(_f,0,SEEK_END); + if (cur == ftello(_f)) + return false; + fseeko(_f,cur,SEEK_SET); + + size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc_exp; + assert(fread(&crc_exp,4,1,_f)==1); + assert(fread(&out._odata[0],sz,1,_f)==1); + assert(FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0)==crc_exp); + gsw.Stop(); + + printf("CheckpointLinearFunction:: reading %lld\n",(long long)sz); + std::cout << GridLogMessage << "Loading " << ((RealD)sz/1024./1024./1024.) << " GB in " << gsw.Elapsed() << std::endl; + return true; + } + + void save_ckpt(const Field& in, Field& out) { + + fseek(_f,0,SEEK_CUR); // switch to write + + size_t sz = sizeof(out._odata[0]) * out._odata.size(); + + GridStopWatch gsw; + gsw.Start(); + uint32_t crc = FieldVectorIO::crc32_threaded((unsigned char*)&out._odata[0],sz,0x0); + assert(fwrite(&crc,4,1,_f)==1); + assert(fwrite(&out._odata[0],sz,1,_f)==1); + fflush(_f); // try this on the GPFS to suppress OPA usage for disk during dslash; this is not needed at Lustre/JLAB + gsw.Stop(); + + printf("CheckpointLinearFunction:: writing %lld\n",(long long)sz); + std::cout << GridLogMessage << "Saving " << ((RealD)sz/1024./1024./1024.) 
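+    // checkpoint record format: a 4-byte crc32 followed by the raw field data,
+    // one record per operator application; load_ckpt replays records from the
+    // current offset and save_ckpt appends, so an interrupted Lanczos resumes
+    // where it stopped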
<< " GB in " << gsw.Elapsed() << std::endl; + } + + void operator()(const Field& in, Field& out) { + + _apply++; + + if (load_ckpt(in,out)) + return; + + _op(in,out); + + save_ckpt(in,out); + + if (_apply_actual++ >= _max_apply) { + std::cout << GridLogMessage << "Maximum application of operator reached, checkpoint and finish in future job" << std::endl; + if (_f) { fclose(_f); _f=0; } + in._grid->Barrier(); + Grid_finalize(); + exit(3); + } + } +}; + +template +class ProjectedFunctionHermOp : public LinearFunction { +public: + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedFunctionHermOp(BlockProjector& pr,OperatorFunction & poly,LinearOperatorBase& linop) : _poly(poly), _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + + GridStopWatch gsw1,gsw2,gsw3; + // fill fin + gsw1.Start(); + _pr.coarseToFine(in,fin); + gsw1.Stop(); + + // apply poly + gsw2.Start(); + _poly(_Linop,fin,fout); + gsw2.Stop(); + + // fill out + gsw3.Start(); + _pr.fineToCoarse(fout,out); + gsw3.Stop(); + + auto eps = innerProduct(in,out); + std::cout << GridLogMessage << "Operator timing details: c2f = " << gsw1.Elapsed() << " poly = " << gsw2.Elapsed() << " f2c = " << gsw3.Elapsed() << + " Complimentary Hermiticity check: " << eps.imag() / std::abs(eps) << std::endl; + + } +}; + +template +class ProjectedHermOp : public LinearFunction { +public: + LinearOperatorBase &_Linop; + BlockProjector& _pr; + + ProjectedHermOp(BlockProjector& pr,LinearOperatorBase& linop) : _Linop(linop), _pr(pr) { + } + + void operator()(const CoarseField& in, CoarseField& out) { + assert(_pr._bgrid._o_blocks == in._grid->oSites()); + Field fin(_pr._bgrid._grid); + Field fout(_pr._bgrid._grid); + _pr.coarseToFine(in,fin); + _Linop.HermOp(fin,fout); + _pr.fineToCoarse(fout,out); + + } +}; + +template using CoarseSiteFieldGeneral = iScalar< iVector >; +template using CoarseSiteFieldD = CoarseSiteFieldGeneral< vComplexD, N >; +template using CoarseSiteFieldF = CoarseSiteFieldGeneral< vComplexF, N >; +template using CoarseSiteField = CoarseSiteFieldGeneral< vComplex, N >; +template using CoarseLatticeFermion = Lattice< CoarseSiteField >; +template using CoarseLatticeFermionD = Lattice< CoarseSiteFieldD >; + +template +void CoarseGridLanczos(BlockProjector& pr,RealD alpha2,RealD beta,int Npoly2, + int Nstop2,int Nk2,int Nm2,RealD resid2,RealD betastp2,int MaxIt,int MinRes2, + LinearOperatorBase& HermOp, std::vector& eval1, bool cg_test_enabled, + int cg_test_maxiter,int nsingle,int SkipTest2, int MaxApply2,bool smoothed_eval_enabled, + int smoothed_eval_inner,int smoothed_eval_outer,int smoothed_eval_begin, + int smoothed_eval_end,RealD smoothed_eval_inner_resid) { + + BlockedGrid& bgrid = pr._bgrid; + BasisFieldVector& basis = pr._evec; + + + std::vector coarseFourDimLatt; + for (int i=0;i<4;i++) + coarseFourDimLatt.push_back(bgrid._nb[1+i] * bgrid._grid->_processors[1+i]); + assert(bgrid._grid->_processors[0] == 1); + + std::cout << GridLogMessage << "CoarseGrid = " << coarseFourDimLatt << " with basis = " << Nstop1 << std::endl; + GridCartesian * UCoarseGrid = SpaceTimeGrid::makeFourDimGrid(coarseFourDimLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FCoarseGrid = SpaceTimeGrid::makeFiveDimGrid(bgrid._nb[0],UCoarseGrid); + + Chebyshev Cheb2(alpha2,beta,Npoly2); + CoarseLatticeFermion src_coarse(FCoarseGrid); 
+ + // Second round of Lanczos in blocked space + std::vector eval2(Nm2); + std::vector eval3(Nm2); + BasisFieldVector > coef(Nm2,FCoarseGrid); + + ProjectedFunctionHermOp,LatticeFermion> Op2plain(pr,Cheb2,HermOp); + CheckpointedLinearFunction > Op2ckpt(src_coarse._grid,Op2plain,"checkpoint",MaxApply2); + LinearFunction< CoarseLatticeFermion >* Op2; + if (MaxApply2) { + Op2 = &Op2ckpt; + } else { + Op2 = &Op2plain; + } + ProjectedHermOp,LatticeFermion> Op2nopoly(pr,HermOp); + ImplicitlyRestartedLanczos > IRL2(*Op2,*Op2,Nstop2,Nk2,Nm2,resid2,MaxIt,betastp2,MinRes2); + + + src_coarse = 1.0; + + // Precision test + { + Field tmp(bgrid._grid); + CoarseLatticeFermion tmp2(FCoarseGrid); + CoarseLatticeFermion tmp3(FCoarseGrid); + tmp2 = 1.0; + tmp3 = 1.0; + + pr.coarseToFine(tmp2,tmp); + pr.fineToCoarse(tmp,tmp2); + + tmp2 -= tmp3; + std::cout << GridLogMessage << "Precision Test c->f->c: " << norm2(tmp2) / norm2(tmp3) << std::endl; + + //bgrid._grid->Barrier(); + //return; + } + + int Nconv; + if (!FieldVectorIO::read_compressed_vectors("lanczos.output",pr,coef) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt",eval3) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.linear",eval1) || + !read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.poly",eval2) + ) { + + + IRL2.calc(eval2,coef._v,src_coarse,Nconv,true); + + coef.resize(Nstop2); + eval2.resize(Nstop2); + eval3.resize(Nstop2); + + std::vector step3_cache; + + // reconstruct eigenvalues of original operator + for (int i=0;iIsBoss()) { + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt.linear",eval1); + write_evals((char *)"lanczos.output/eigen-values.txt.poly",eval2); + } + + } + + // fix up eigenvalues + if (!read_evals(UCoarseGrid,(char *)"lanczos.output/eigen-values.txt.smoothed",eval3) && smoothed_eval_enabled) { + + ConjugateGradient CG(smoothed_eval_inner_resid, smoothed_eval_inner, false); + + LatticeFermion v_i(basis[0]._grid); + auto tmp = v_i; + auto tmp2 = v_i; + + for (int i=smoothed_eval_begin;iIsBoss()) { + write_evals((char *)"lanczos.output/eigen-values.txt.smoothed",eval3); + write_evals((char *)"lanczos.output/eigen-values.txt",eval3); // also reset this to the best ones we have available + } + } + + // do CG test with and without deflation + if (cg_test_enabled) { + ConjugateGradient CG(1.0e-8, cg_test_maxiter, false); + LatticeFermion src_orig(bgrid._grid); + src_orig.checkerboard = Odd; + src_orig = 1.0; + src_orig = src_orig * (1.0 / ::sqrt(norm2(src_orig)) ); + auto result = src_orig; + + // undeflated solve + std::cout << GridLogMessage << " Undeflated solve "<IsBoss()) + // write_history("cg_test.undefl",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors + std::cout << GridLogMessage << " Deflated solve with all evectors"<IsBoss()) + // write_history("cg_test.defl_all",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with non-blocked eigenvectors + std::cout << GridLogMessage << " Deflated solve with non-blocked evectors"<IsBoss()) + // write_history("cg_test.defl_full",CG.ResHistory); + // CG.ResHistory.clear(); + + // deflated solve with all eigenvectors and original eigenvalues from proj + std::cout << GridLogMessage << " Deflated solve with all eigenvectors and original eigenvalues from proj"<IsBoss()) + // write_history("cg_test.defl_all_ev3",CG.ResHistory); + // CG.ResHistory.clear(); + + } + +} + + +template +void 
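+// Fallback basis construction for simple_krylov_basis: repeatedly apply the
+// Chebyshev-filtered operator to the source and orthogonalise, on the
+// assumption that the filter already suppresses the unwanted part of the
+// spectrum well enough to span the low-mode subspace without a full IRL run.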
quick_krylov_basis(BasisFieldVector& evec,Field& src,LinearFunction& Op,int Nstop) { + Field tmp = src; + Field tmp2 = tmp; + + for (int i=0;i HermOp(Ddwf); + + // Eigenvector storage + const int Nm1 = Np1 + Nk1; + const int Nm2 = Np2 + Nk2; // maximum number of vectors we need to keep + std::cout << GridLogMessage << "Keep " << Nm1 << " full vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << Nm2 << " total vectors" << std::endl; + assert(Nm2 >= Nm1); + BasisFieldVector evec(Nm1,FrbGrid); // start off with keeping full vectors + + // First and second cheby + Chebyshev Cheb1(alpha1,beta,Npoly1); + FunctionHermOp Op1(Cheb1,HermOp); + PlainHermOp Op1test(HermOp); + + // Eigenvalue storage + std::vector eval1(evec.size()); + + // Construct source vector + LatticeFermion src(FrbGrid); + { + src=1.0; + src.checkerboard = Odd; + + // normalize + RealD nn = norm2(src); + nn = Grid::sqrt(nn); + src = src * (1.0/nn); + } + + // Do a benchmark and a quick exit if performance is too little (ugly but needed due to performance fluctuations) + if (max_cheb_time_ms) { + // one round of warmup + auto tmp = src; + GridStopWatch gsw1,gsw2; + gsw1.Start(); + Cheb1(HermOp,src,tmp); + gsw1.Stop(); + Ddwf.ZeroCounters(); + gsw2.Start(); + Cheb1(HermOp,src,tmp); + gsw2.Stop(); + Ddwf.Report(); + std::cout << GridLogMessage << "Performance check; warmup = " << gsw1.Elapsed() << " test = " << gsw2.Elapsed() << std::endl; + int ms = (int)(gsw2.useconds()/1e3); + if (ms > max_cheb_time_ms) { + std::cout << GridLogMessage << "Performance too poor: " << ms << " ms, cutoff = " << max_cheb_time_ms << " ms" << std::endl; + Grid_finalize(); + return 2; + } + + } + + // First round of Lanczos to get low mode basis + ImplicitlyRestartedLanczos IRL1(Op1,Op1test,Nstop1,Nk1,Nm1,resid1,MaxIt,betastp1,MinRes1); + int Nconv; + + char tag[1024]; + if (!FieldVectorIO::read_argonne(evec,(char *)"checkpoint") || !read_evals(UGrid,(char *)"checkpoint/eigen-values.txt",eval1)) { + + if (simple_krylov_basis) { + quick_krylov_basis(evec,src,Op1,Nstop1); + } else { + IRL1.calc(eval1,evec._v,src,Nconv,false); + } + evec.resize(Nstop1); // and throw away superfluous + eval1.resize(Nstop1); + if (checkpoint_basis) + FieldVectorIO::write_argonne(evec,(char *)"checkpoint"); + if (UGrid->IsBoss() && checkpoint_basis) + write_evals((char *)"checkpoint/eigen-values.txt",eval1); + + Ddwf.Report(); + + if (exit_after_basis_calculation) { + Grid_finalize(); + return 0; + } + } + + // now test eigenvectors + if (!simple_krylov_basis) { + for (int i=0;i + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +/* + * Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features + * in Grid that were intended to be used to support blocked Aggregates, from + */ +#include +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +class LocalCoherenceLanczosScidac : public LocalCoherenceLanczos +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LocalCoherenceLanczosScidac(GridBase *FineGrid,GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) + // Base constructor + : LocalCoherenceLanczos(FineGrid,CoarseGrid,FineOp,checkerboard) + {}; + + void checkpointFine(std::string evecs_file,std::string evals_file) + { + assert(this->_Aggregate.subspace.size()==nbasis); + emptyUserRecord record; + Grid::QCD::ScidacWriter WR; + WR.open(evecs_file); + for(int k=0;k_Aggregate.subspace[k],record); + } + WR.close(); + + XmlWriter WRx(evals_file); + write(WRx,"evals",this->evals_fine); + } + + void checkpointFineRestore(std::string evecs_file,std::string evals_file) + { + this->evals_fine.resize(nbasis); + this->_Aggregate.subspace.resize(nbasis,this->_FineGrid); + + std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "<evals_fine); + + assert(this->evals_fine.size()==nbasis); + + std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<_Aggregate.subspace[k].checkerboard=this->_checkerboard; + RD.readScidacFieldRecord(this->_Aggregate.subspace[k],record); + + } + RD.close(); + } + + void checkpointCoarse(std::string evecs_file,std::string evals_file) + { + int n = this->evec_coarse.size(); + emptyUserRecord record; + Grid::QCD::ScidacWriter WR; + WR.open(evecs_file); + for(int k=0;kevec_coarse[k],record); + } + WR.close(); + + XmlWriter WRx(evals_file); + write(WRx,"evals",this->evals_coarse); + } + + void checkpointCoarseRestore(std::string evecs_file,std::string evals_file,int nvec) + { + std::cout << "resizing coarse vecs to " << nvec<< std::endl; + this->evals_coarse.resize(nvec); + this->evec_coarse.resize(nvec,this->_CoarseGrid); + std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<evals_coarse); + + assert(this->evals_coarse.size()==nvec); + emptyUserRecord record; + std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<evec_coarse[k],record); + } + RD.close(); + } +}; + +int main (int argc, char ** argv) { + + Grid_init(&argc,&argv); + GridLogIRL.TimingMode(1); + + LocalCoherenceLanczosParams Params; + { + Params.omega.resize(10); + Params.blockSize.resize(5); + XmlWriter writer("Params_template.xml"); + write(writer,"Params",Params); + std::cout << GridLogMessage << " Written Params_template.xml" < blockSize = Params.blockSize; + + // Grids + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector fineLatt = GridDefaultLatt(); + int dims=fineLatt.size(); + assert(blockSize.size()==dims+1); + 
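+  // blockSize carries dims+1 entries because the fifth (s) direction is
+  // blocked as well; each coarse extent below is the fine extent divided by
+  // the corresponding block size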
std::vector coarseLatt(dims); + std::vector coarseLatt5d ; + + for (int d=0;d HermOp(Ddwf); + + // Eigenvector storage + LanczosParams fine =Params.FineParams; + LanczosParams coarse=Params.CoarseParams; + + const int Ns1 = fine.Nstop; const int Ns2 = coarse.Nstop; + const int Nk1 = fine.Nk; const int Nk2 = coarse.Nk; + const int Nm1 = fine.Nm; const int Nm2 = coarse.Nm; + + std::cout << GridLogMessage << "Keep " << fine.Nstop << " fine vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << coarse.Nstop << " coarse vectors" << std::endl; + assert(Nm2 >= Nm1); + + const int nbasis= 60; + assert(nbasis==Ns1); + LocalCoherenceLanczosScidac _LocalCoherenceLanczos(FrbGrid,CoarseGrid5rb,HermOp,Odd); + std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl; + + assert( (Params.doFine)||(Params.doFineRead)); + + if ( Params.doFine ) { + std::cout << GridLogMessage << "Performing fine grid IRL Nstop "<< Ns1 << " Nk "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +/* + * Reimplement the badly named "multigrid" lanczos as compressed Lanczos using the features + * in Grid that were intended to be used to support blocked Aggregates, from + */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +template +class ProjectedHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedHermOp(LinearOperatorBase& linop, Aggregation &aggregate) : + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + FineField fin(FineGrid); + FineField fout(FineGrid); + + _Aggregate.PromoteFromSubspace(in,fin); + _Linop.HermOp(fin,fout); + _Aggregate.ProjectToSubspace(out,fout); + } +}; + +template +class ProjectedFunctionHermOp : public LinearFunction > > { +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseField; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice FineField; + + + OperatorFunction & _poly; + LinearOperatorBase &_Linop; + Aggregation &_Aggregate; + + ProjectedFunctionHermOp(OperatorFunction & poly,LinearOperatorBase& linop, + Aggregation &aggregate) : + _poly(poly), + _Linop(linop), + _Aggregate(aggregate) { }; + + void operator()(const CoarseField& in, CoarseField& out) { + + GridBase *FineGrid = _Aggregate.FineGrid; + + FineField fin(FineGrid) ;fin.checkerboard =_Aggregate.checkerboard; + FineField 
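+    // PromoteFromSubspace / ProjectToSubspace effect the change of basis
+    // between coarse coefficient vectors and fine fields; both work fields
+    // must carry the checkerboard of the aggregate subspace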
fout(FineGrid);fout.checkerboard =_Aggregate.checkerboard; + + _Aggregate.PromoteFromSubspace(in,fin); + _poly(_Linop,fin,fout); + _Aggregate.ProjectToSubspace(out,fout); + } +}; + +// Make serializable Lanczos params + +template +class CoarseFineIRL +{ +public: + typedef iVector CoarseSiteVector; + typedef Lattice CoarseScalar; // used for inner products on fine field + typedef Lattice CoarseField; + typedef Lattice FineField; + +private: + GridBase *_CoarseGrid; + GridBase *_FineGrid; + int _checkerboard; + LinearOperatorBase & _FineOp; + Aggregation _Aggregate; + +public: + CoarseFineIRL(GridBase *FineGrid, + GridBase *CoarseGrid, + LinearOperatorBase &FineOp, + int checkerboard) : + _CoarseGrid(CoarseGrid), + _FineGrid(FineGrid), + _Aggregate(CoarseGrid,FineGrid,checkerboard), + _FineOp(FineOp), + _checkerboard(checkerboard) + {}; + + template static RealD normalise(T& v) + { + RealD nn = norm2(v); + nn = ::sqrt(nn); + v = v * (1.0/nn); + return nn; + } + + void testFine(void) + { + int Nk = nbasis; + _Aggregate.subspace.resize(Nk,_FineGrid); + _Aggregate.subspace[0]=1.0; + _Aggregate.subspace[0].checkerboard=_checkerboard; + normalise(_Aggregate.subspace[0]); + PlainHermOp Op(_FineOp); + for(int k=1;k Cheby(alpha,beta,Npoly); + FunctionHermOp ChebyOp(Cheby,_FineOp); + PlainHermOp Op(_FineOp); + + int Nk = nbasis; + + std::vector eval(Nm); + + FineField src(_FineGrid); src=1.0; src.checkerboard = _checkerboard; + + ImplicitlyRestartedLanczos IRL(ChebyOp,Op,Nk,Nk,Nm,resid,MaxIt,betastp,MinRes); + _Aggregate.subspace.resize(Nm,_FineGrid); + IRL.calc(eval,_Aggregate.subspace,src,Nk,false); + _Aggregate.subspace.resize(Nk,_FineGrid); + for(int k=0;k Cheby(alpha,beta,Npoly); + ProjectedHermOp Op(_FineOp,_Aggregate); + ProjectedFunctionHermOp ChebyOp(Cheby,_FineOp,_Aggregate); + + std::vector eval(Nm); + std::vector evec(Nm,_CoarseGrid); + + CoarseField src(_CoarseGrid); src=1.0; + + ImplicitlyRestartedLanczos IRL(ChebyOp,ChebyOp,Nk,Nk,Nm,resid,MaxIt,betastp,MinRes); + IRL.calc(eval,evec,src,Nk,false); + + // We got the evalues of the Cheby operator; + // Reconstruct eigenvalues of original operator via Chebyshev inverse + for (int i=0;i, blockSize, + std::string, config, + std::vector < std::complex >, omega, + RealD, mass, + RealD, M5 + ); +}; + +int main (int argc, char ** argv) { + + Grid_init(&argc,&argv); + + CompressedLanczosParams Params; + { + Params.omega.resize(10); + Params.blockSize.resize(5); + XmlWriter writer("Params_template.xml"); + write(writer,"Params",Params); + std::cout << GridLogMessage << " Written Params_template.xml" < blockSize = Params.blockSize; + + // Grids + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + std::vector fineLatt = GridDefaultLatt(); + int dims=fineLatt.size(); + assert(blockSize.size()==dims+1); + std::vector coarseLatt(dims); + std::vector coarseLatt5d ; + + for (int d=0;d seeds4({1,2,3,4}); + GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4); + SU3::HotConfiguration(RNG4, Umu); + } + std::cout << GridLogMessage << "Lattice dimensions: " << GridDefaultLatt() << " Ls: " << Ls << std::endl; + + // ZMobius EO Operator + ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, Params.omega,1.,0.); + 
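+  // Lanczos acts on the odd checkerboard of the red-black preconditioned
+  // system; SchurDiagTwoOperator supplies the Hermitian Schur-complement
+  // operator used as HermOp below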
SchurDiagTwoOperator HermOp(Ddwf); + + // Eigenvector storage + LanczosParams fine =Params.FineParams; + LanczosParams coarse=Params.CoarseParams; + const int Nm1 = fine.Nm; + const int Nm2 = coarse.Nm; + + std::cout << GridLogMessage << "Keep " << fine.Nk << " full vectors" << std::endl; + std::cout << GridLogMessage << "Keep " << coarse.Nk << " total vectors" << std::endl; + assert(Nm2 >= Nm1); + + const int nbasis= 70; + CoarseFineIRL IRL(FrbGrid,CoarseGrid5rb,HermOp,Odd); + + std::cout << GridLogMessage << "Constructed CoarseFine IRL" << std::endl; + + std::cout << GridLogMessage << "Performing fine grid IRL Nk "<< nbasis<<" Nm "< Coeffs { 0.,-1.}; Polynomial PolyX(Coeffs); - Chebyshev Cheb(0.2,5.,11); -// ChebyshevLanczos Cheb(9.,1.,0.,20); -// Cheb.csv(std::cout); -// exit(-24); - ImplicitlyRestartedLanczos IRL(HermOp,Cheb,Nstop,Nk,Nm,resid,MaxIt); + Chebyshev Cheby(0.2,5.,11); + + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op (HermOp); + + ImplicitlyRestartedLanczos IRL(OpCheby,Op,Nstop,Nk,Nm,resid,MaxIt); std::vector eval(Nm); diff --git a/tests/debug/Test_synthetic_lanczos.cc b/tests/lanczos/Test_synthetic_lanczos.cc similarity index 92% rename from tests/debug/Test_synthetic_lanczos.cc rename to tests/lanczos/Test_synthetic_lanczos.cc index 32fd6f32..4be9ca31 100644 --- a/tests/debug/Test_synthetic_lanczos.cc +++ b/tests/lanczos/Test_synthetic_lanczos.cc @@ -119,12 +119,13 @@ int main (int argc, char ** argv) RealD beta = 0.1; RealD mu = 0.0; int order = 11; - ChebyshevLanczos Cheby(alpha,beta,mu,order); + Chebyshev Cheby(alpha,beta,order); std::ofstream file("cheby.dat"); Cheby.csv(file); - HermOpOperatorFunction X; DumbOperator HermOp(grid); + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op(HermOp); const int Nk = 40; const int Nm = 80; @@ -133,8 +134,9 @@ int main (int argc, char ** argv) int Nconv; RealD eresid = 1.0e-6; - ImplicitlyRestartedLanczos IRL(HermOp,X,Nk,Nk,Nm,eresid,Nit); - ImplicitlyRestartedLanczos ChebyIRL(HermOp,Cheby,Nk,Nk,Nm,eresid,Nit); + + ImplicitlyRestartedLanczos IRL(Op,Op,Nk,Nk,Nm,eresid,Nit); + ImplicitlyRestartedLanczos ChebyIRL(OpCheby,Op,Nk,Nk,Nm,eresid,Nit); LatticeComplex src(grid); gaussian(RNG,src); { diff --git a/tests/solver/Test_wilson_lanczos.cc b/tests/lanczos/Test_wilson_lanczos.cc similarity index 92% rename from tests/solver/Test_wilson_lanczos.cc rename to tests/lanczos/Test_wilson_lanczos.cc index e8549234..eabc86d7 100644 --- a/tests/solver/Test_wilson_lanczos.cc +++ b/tests/lanczos/Test_wilson_lanczos.cc @@ -86,9 +86,12 @@ int main(int argc, char** argv) { std::vector Coeffs{0, 1.}; Polynomial PolyX(Coeffs); - Chebyshev Cheb(0.0, 10., 12); - ImplicitlyRestartedLanczos IRL(HermOp, PolyX, Nstop, Nk, Nm, - resid, MaxIt); + Chebyshev Cheby(0.0, 10., 12); + + FunctionHermOp OpCheby(Cheby,HermOp); + PlainHermOp Op (HermOp); + + ImplicitlyRestartedLanczos IRL(OpCheby, Op, Nstop, Nk, Nm, resid, MaxIt); std::vector eval(Nm); FermionField src(FGrid); diff --git a/tests/solver/Test_dwf_hdcr.cc b/tests/solver/Test_dwf_hdcr.cc index c553ba0a..b3373238 100644 --- a/tests/solver/Test_dwf_hdcr.cc +++ b/tests/solver/Test_dwf_hdcr.cc @@ -555,13 +555,13 @@ int main (int argc, char ** argv) std::cout< HermDefOp(Ddwf); - Subspace Aggregates(Coarse5d,FGrid); + Subspace Aggregates(Coarse5d,FGrid,0); // Aggregates.CreateSubspace(RNG5,HermDefOp,nbasis); assert ( (nbasis & 0x1)==0); int nb=nbasis/2; std::cout< mpi_layout = GridDefaultMpi(); std::vector mpi_split (mpi_layout.size(),1); - std::cout << "UGrid (world root)"<RankCount() ; + 
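+  // Split-grid strategy: the world communicator is divided into nrhs 1^4
+  // sub-grids, Grid_split scatters one right-hand side (and the gauge field)
+  // to each, and every sub-grid runs an independent CG solve; iteration counts
+  // are gathered at the end via GlobalSum.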
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); ///////////////////////////////////////////// // Split into 1^4 mpi communicators ///////////////////////////////////////////// - std::cout << "SGrid (world root)"<> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + int me; + for(int i=0;i seeds({1,2,3,4}); - GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); for(int s=0;sThisRank(); + LatticeGaugeField s_Umu(SGrid); FermionField s_src(SFGrid); + FermionField s_src_split(SFGrid); + FermionField s_tmp(SFGrid); FermionField s_res(SFGrid); { @@ -157,6 +168,24 @@ int main (int argc, char ** argv) FGrid->Barrier(); } + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + std::cout << GridLogMessage << " Splitting the grid data "<Barrier(); + if ( n==me ) { + std::cout << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<Barrier(); + } + /////////////////////////////////////////////////////////////// // Set up N-solvers as trivially parallel @@ -164,6 +193,7 @@ int main (int argc, char ** argv) RealD mass=0.01; RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); - ConjugateGradient CG((1.0e-8/(me+1)),10000); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-5/(me+1)),10000); s_res = zero; CG(HermOp,s_src,s_res); - /////////////////////////////////////// - // Share the information - /////////////////////////////////////// + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// std::vector iterations(nrhs,0); iterations[me] = CG.IterationsToComplete; for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + + for(int i=0;i> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + int me; + for(int i=0;i seeds({1,2,3,4}); + + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + std::cout << GridLogMessage << "Made the Fermion Fields"< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-2),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + std::cout << GridLogMessage << " split residual norm "< iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + int nrhs = UGrid->RankCount() ; + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + int me; + GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(), + GridDefaultSimd(Nd,vComplex::Nsimd()), + mpi_split, + *UGrid,me); + + GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid); + GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid); + GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid); + + /////////////////////////////////////////////// + // Set up the problem as a 4d spreadout job + /////////////////////////////////////////////// + std::vector seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + std::vector src_e(nrhs,FrbGrid); + std::vector src_o(nrhs,FrbGrid); + + for(int s=0;s HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ + See the full license in the file "LICENSE" in the top level distribution directory + *************************************************************************************/ + /* END LEGAL */ +#include +#include + +using namespace std; +using namespace Grid; +using namespace Grid::QCD; + +int main (int argc, char ** argv) +{ + typedef typename DomainWallFermionR::FermionField FermionField; + typedef typename DomainWallFermionR::ComplexField ComplexField; + typename DomainWallFermionR::ImplParams params; + + const int Ls=4; + + Grid_init(&argc,&argv); + + std::vector latt_size = GridDefaultLatt(); + std::vector simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd()); + std::vector mpi_layout = GridDefaultMpi(); + std::vector mpi_split (mpi_layout.size(),1); + + GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi()); + GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid); + GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid); + GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid); + + ///////////////////////////////////////////// + // Split into 1^4 mpi communicators + ///////////////////////////////////////////// + + for(int i=0;i> mpi_split[k]; + } + break; + } + } + + int nrhs = 1; + for(int i=0;i seeds({1,2,3,4}); + + GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds); + GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds); + std::vector src(nrhs,FGrid); + std::vector src_chk(nrhs,FGrid); + std::vector result(nrhs,FGrid); + FermionField tmp(FGrid); + + for(int s=0;sThisRank(); + + LatticeGaugeField s_Umu(SGrid); + FermionField s_src(SFGrid); + FermionField s_tmp(SFGrid); + FermionField s_res(SFGrid); + + /////////////////////////////////////////////////////////////// + // split the source out using MPI instead of I/O + /////////////////////////////////////////////////////////////// + Grid_split (Umu,s_Umu); + Grid_split (src,s_src); + + /////////////////////////////////////////////////////////////// + // Set up N-solvers as trivially parallel + /////////////////////////////////////////////////////////////// + RealD mass=0.01; + RealD M5=1.8; + DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5); + DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5); + + std::cout << GridLogMessage << "****************************************************************** "< HermOp(Ddwf); + MdagMLinearOperator HermOpCk(Dchk); + ConjugateGradient CG((1.0e-8/(me+1)),10000); + s_res = zero; + CG(HermOp,s_src,s_res); + + ///////////////////////////////////////////////////////////// + // Report how long they all took + ///////////////////////////////////////////////////////////// + std::vector iterations(nrhs,0); + iterations[me] = CG.IterationsToComplete; + + for(int n=0;nGlobalSum(iterations[n]); + std::cout << GridLogMessage<<" Rank "< +Author: Peter Boyle + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
diff --git a/tests/solver/Test_staggered_block_cg_unprec.cc b/tests/solver/Test_staggered_block_cg_unprec.cc
new file mode 100644
--- /dev/null
+++ b/tests/solver/Test_staggered_block_cg_unprec.cc
+    /*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./tests/solver/Test_staggered_block_cg_unprec.cc
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+    *************************************************************************************/
+    /* END LEGAL */
+#include <Grid/Grid.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+template<class d>
+struct scal {
+  d internal;
+};
+
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };
+
+int main (int argc, char ** argv)
+{
+  typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField;
+  typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField;
+  typename ImprovedStaggeredFermion5DR::ImplParams params;
+
+  const int Ls=8;
+
+  Grid_init(&argc,&argv);
+
+  std::vector<int> latt_size   = GridDefaultLatt();
+  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+  std::vector<int> mpi_layout  = GridDefaultMpi();
+
+  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
+  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+
+  std::vector<int> seeds({1,2,3,4});
+  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
+  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
+
+  FermionField src(FGrid); random(pRNG5,src);
+  FermionField src_o(FrbGrid);   pickCheckerboard(Odd,src_o,src);
+  FermionField result_o(FrbGrid); result_o=zero;
+  RealD nrm = norm2(src);
+
+  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
+
+  RealD mass=0.003;
+  ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass);
+  SchurStaggeredOperator<ImprovedStaggeredFermion5DR,FermionField> HermOp(Ds);
+
+  ConjugateGradient<FermionField> CG(1.0e-8,10000);
+  int blockDim = 0;
+  BlockConjugateGradient<FermionField>    BCGrQ(BlockCGrQ,blockDim,1.0e-8,10000);
+  BlockConjugateGradient<FermionField>    BCG  (BlockCG,blockDim,1.0e-8,10000);
+  BlockConjugateGradient<FermionField>    mCG  (CGmultiRHS,blockDim,1.0e-8,10000);
+
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+  std::cout << GridLogMessage << " Calling 4d CG "<<std::endl;
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+
+  ImprovedStaggeredFermionR Ds4d(Umu,Umu,*UGrid,*UrbGrid,mass);
+  SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOp4d(Ds4d);
+  FermionField src4d(UGrid); random(pRNG,src4d);
+  FermionField src4d_o(UrbGrid);   pickCheckerboard(Odd,src4d_o,src4d);
+  FermionField result4d_o(UrbGrid);
+
+  result4d_o=zero;
+  CG(HermOp4d,src4d_o,result4d_o);
+
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  std::cout << GridLogMessage << " Calling 5d CG for "<<Ls<<" right hand sides" <<std::endl;
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  result_o=zero;
+  CG(HermOp,src_o,result_o);
+
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  std::cout << GridLogMessage << " Calling multiRHS CG for "<<Ls<<" right hand sides" <<std::endl;
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  result_o=zero;
+  mCG(HermOp,src_o,result_o);
+
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  std::cout << GridLogMessage << " Calling Block CG for "<<Ls<<" right hand sides" <<std::endl;
+  std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
+  result_o=zero;
+  BCGrQ(HermOp,src_o,result_o);
+
+  Grid_finalize();
+}
diff --git a/tests/solver/Test_staggered_cg_prec.cc b/tests/solver/Test_staggered_cg_prec.cc
--- a/tests/solver/Test_staggered_cg_prec.cc
+++ b/tests/solver/Test_staggered_cg_prec.cc
-  SchurDiagMooeeOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
+  SchurStaggeredOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
   ConjugateGradient<FermionField> CG(1.0e-8,10000);
   CG(HermOpEO,src_o,res_o);
+  FermionField tmp(&RBGrid);
+
+  HermOpEO.Mpc(res_o,tmp);
+  std::cout << "check Mpc resid " << axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o) << "\n";
+
   Grid_finalize();
 }
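The hunk above swaps the even-odd preconditioned operator to SchurStaggeredOperator and adds a direct residual check on the Schur system. The background: for staggered fermions the checkerboard-diagonal blocks are just the mass term, Mee = Moo = m, so the odd-site Schur complement Mpc = m^2 - D_oe D_eo is itself Hermitian positive definite and CG can run on Mpc directly, with no MdagM squaring. What the added line computes, restated as a standalone helper (the helper name is hypothetical; axpy_norm(r,a,x,y) forms r = a*x + y and returns norm2(r)):

#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

// Relative residual of the odd-checkerboard Schur system:
// ||Mpc*sol - src||^2 / ||src||^2, which should be at the CG tolerance (squared).
template<class Field,class Matrix>
RealD MpcRelativeResidual(Matrix &HermOpEO, Field &src_o, Field &sol_o)
{
  Field tmp(src_o._grid);
  HermOpEO.Mpc(sol_o,tmp);                             // tmp = Mpc * sol
  return axpy_norm(tmp,-1.0,src_o,tmp)/norm2(src_o);   // tmp <- tmp - src, return ratio of norms
}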
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+    *************************************************************************************/
+    /* END LEGAL */
+#include <Grid/Grid.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+template<class d>
+struct scal {
+  d internal;
+};
+
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };
+
+int main (int argc, char ** argv)
+{
+  typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
+  typename ImprovedStaggeredFermionR::ImplParams params;
+  Grid_init(&argc,&argv);
+
+  std::vector<int> latt_size   = GridDefaultLatt();
+  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+  std::vector<int> mpi_layout  = GridDefaultMpi();
+  GridCartesian               Grid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian     RBGrid(&Grid);
+
+  std::vector<int> seeds({1,2,3,4});
+  GridParallelRNG pRNG(&Grid);  pRNG.SeedFixedIntegers(seeds);
+
+  LatticeGaugeField Umu(&Grid); SU3::HotConfiguration(pRNG,Umu);
+
+  FermionField    src(&Grid); random(pRNG,src);
+  FermionField result(&Grid); result=zero;
+  FermionField  resid(&Grid);
+
+  RealD mass=0.1;
+  ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass);
+
+  ConjugateGradient<FermionField> CG(1.0e-8,10000);
+  SchurRedBlackStaggeredSolve<FermionField> SchurSolver(CG);
+
+  double volume=1.0;
+  for(int mu=0;mu<Nd;mu++){
+    volume=volume*latt_size[mu];
+  }
+
+  SchurSolver(Ds,src,result);
+
+  // Schur solver: uses Deo Doe => volume * 1146
+  double ncall=CG.IterationsToComplete;
+  double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + 90 == 1146
+
+  std::cout<
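The hard-wired flop count in the last hunk decomposes as follows. The improved staggered Dslash has 16 point splittings per site (8 nearest-neighbour hops plus 8 Naik three-hop terms), each costing one SU(3) matrix-vector product: per colour row, one complex multiply (6 flops) and two complex multiply-adds (8 flops each), so 3*(6+8+8) = 66 real flops, or 16*66 = 1056 in total; summing the 16 resulting colour vectors takes 15 vector additions at 3*2 = 6 flops each, another 90. Hence 1146 flops per site per Dslash, and since Deo and Doe each touch one half-checkerboard, one Mpc application in the Schur solve costs one full-volume Dslash, i.e. 1146*volume per iteration. A restatement of that bookkeeping (not part of the patch, helper name hypothetical):

// Total flops for ncall iterations of the staggered Schur CG above.
double StaggeredDslashFlops(double volume, double ncall)
{
  const double matvec     = 3.0*(6.0+8.0+8.0);  // 66 flops per SU(3) mat-vec
  const double hops       = 16.0;               // 8 one-hop + 8 Naik three-hop terms
  const double accumulate = 15.0*3.0*2.0;       // 90 flops summing 16 colour vectors
  return (hops*matvec + accumulate)*volume*ncall;  // 1146 * volume * ncall
}

Dividing this by the wall-clock time of the solve (e.g. bracketing SchurSolver with Grid's usecond() timer) gives the sustained flop rate the test presumably goes on to print.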