From 51322da6f86c671b18141019e7cbd394bf3cc312 Mon Sep 17 00:00:00 2001
From: Antonin Portelli
Date: Wed, 7 Dec 2016 09:00:45 +0900
Subject: [PATCH] Hadrons: genetic scheduler improvement

---
 extras/Hadrons/GeneticScheduler.hpp | 152 ++++++++++++++++++----------
 lib/Threads.h                       |   2 +
 2 files changed, 102 insertions(+), 52 deletions(-)

diff --git a/extras/Hadrons/GeneticScheduler.hpp b/extras/Hadrons/GeneticScheduler.hpp
index 3d75f2a4..0d0692cc 100644
--- a/extras/Hadrons/GeneticScheduler.hpp
+++ b/extras/Hadrons/GeneticScheduler.hpp
@@ -61,10 +61,12 @@ public:
     friend std::ostream & operator<<(std::ostream &out,
                                      const GeneticScheduler<T> &s)
     {
+        out << "[";
         for (auto &p: s.population_)
         {
-            out << p.second << ": " << p.first << std::endl;
+            out << p.first << ", ";
         }
+        out << "\b\b]";

         return out;
     }
@@ -72,10 +74,10 @@ private:
     // randomly initialize population
     void initPopulation(void);
     // genetic operators
-    const std::vector<T> & selection(void);
-    void                   crossover(const std::vector<T> &c1,
-                                     const std::vector<T> &c2);
-    void                   mutation(std::vector<T> &c);
+    std::vector<T> *                              select1(void);
+    std::pair<std::vector<T> *, std::vector<T> *> select2(void);
+    void                                          crossover(void);
+    void                                          mutation(void);
 private:
     Graph<T>      &graph_;
     const ObjFunc &func_;
@@ -115,38 +117,35 @@ int GeneticScheduler<T>::getMinValue(void)
 template <typename T>
 void GeneticScheduler<T>::nextGeneration(void)
 {
-    std::uniform_real_distribution<double> dis(0., 1.);
-
     // random initialization of the population if necessary
     if (population_.size() != par_.popSize)
     {
         initPopulation();
     }
-
-    // mating
-    for (unsigned int i = 0; i < par_.popSize/2; ++i)
-    {
-        auto &p1 = selection(), &p2 = selection();
-        crossover(p1, p2);
-    }
+    LOG(Debug) << "Starting population:\n" << *this << std::endl;

     // random mutations
-    auto buf = population_;
-    population_.clear();
-    for (auto &c: buf)
+    PARALLEL_FOR_LOOP
+    for (unsigned int i = 0; i < par_.popSize; ++i)
     {
-        if (dis(gen_) < par_.mutationRate)
-        {
-            mutation(c.second);
-        }
-        population_.emplace(func_(c.second), c.second);
+        mutation();
     }
+    LOG(Debug) << "After mutations:\n" << *this << std::endl;
+
+    // mating
+    PARALLEL_FOR_LOOP
+    for (unsigned int i = 0; i < par_.popSize/2; ++i)
+    {
+        crossover();
+    }
+    LOG(Debug) << "After mating:\n" << *this << std::endl;

     // grim reaper
     auto it = population_.begin();

     std::advance(it, par_.popSize);
     population_.erase(it, population_.end());
+    LOG(Debug) << "After grim reaper:\n" << *this << std::endl;
 }

 // randomly initialize population //////////////////////////////////////////////
@@ -164,37 +163,66 @@ void GeneticScheduler<T>::initPopulation(void)
 // genetic operators ///////////////////////////////////////////////////////////
 template <typename T>
-const std::vector<T> & GeneticScheduler<T>::selection(void)
+std::vector<T> * GeneticScheduler<T>::select1(void)
+{
+    std::uniform_int_distribution<unsigned int> pdis(0, population_.size() - 1);
+
+    auto it = population_.begin();
+    std::advance(it, pdis(gen_));
+
+    return &(it->second);
+}
+
+template <typename T>
+std::pair<std::vector<T> *, std::vector<T> *> GeneticScheduler<T>::select2(void)
 {
     std::vector<double> prob;
+    unsigned int        ind;
+    std::vector<T>      *p1, *p2;

     for (auto &c: population_)
     {
         prob.push_back(1./c.first);
     }
-    std::discrete_distribution<unsigned int> dis(prob.begin(), prob.end());
-    auto rIt = population_.begin();
-    std::advance(rIt, dis(gen_));
+    do
+    {
+        double probCpy;
+
+        std::discrete_distribution<unsigned int> dis1(prob.begin(), prob.end());
+        auto rIt = population_.begin();
+        ind = dis1(gen_);
+        std::advance(rIt, ind);
+        p1 = &(rIt->second);
+        probCpy   = prob[ind];
+        prob[ind] = 0.;
+        std::discrete_distribution<unsigned int> dis2(prob.begin(), prob.end());
+        rIt = population_.begin();
+        std::advance(rIt, dis2(gen_));
+        p2 = &(rIt->second);
+        prob[ind] = probCpy;
+    } while (p1 == p2);

-    return rIt->second;
+    return std::make_pair(p1, p2);
 }

 template <typename T>
-void GeneticScheduler<T>::crossover(const std::vector<T> &p1,
-                                    const std::vector<T> &p2)
+void GeneticScheduler<T>::crossover(void)
 {
-    std::uniform_int_distribution<unsigned int> dis(0, p1.size() - 1);
-    unsigned int cut = dis(gen_);
+    auto p = select2();
+    auto &p1 = *(p.first),
+         &p2 = *(p.second);
+    std::uniform_int_distribution<unsigned int> dis2(0, p1.size() - 1);
+    unsigned int cut = dis2(gen_);
     std::vector<T> c1, c2, buf;

     auto cross = [&buf, cut](std::vector<T> &c, const std::vector<T> &p1,
                              const std::vector<T> &p2)
     {
-        buf = p2;
+        buf = p1;
         for (unsigned int i = 0; i < cut; ++i)
         {
-            c.push_back(p1[i]);
-            buf.erase(std::find(buf.begin(), buf.end(), p1[i]));
+            c.push_back(p2[i]);
+            buf.erase(std::find(buf.begin(), buf.end(), p2[i]));
         }
         for (unsigned int i = 0; i < buf.size(); ++i)
         {
@@ -204,31 +232,51 @@ void GeneticScheduler<T>::crossover(const std::vector<T> &p1,
     cross(c1, p1, p2);
     cross(c2, p2, p1);
-    population_.emplace(func_(c1), c1);
-    population_.emplace(func_(c2), c2);
+    PARALLEL_CRITICAL
+    {
+        population_.emplace(func_(c1), c1);
+        population_.emplace(func_(c2), c2);
+    }
 }

 template <typename T>
-void GeneticScheduler<T>::mutation(std::vector<T> &c)
+void GeneticScheduler<T>::mutation(void)
 {
-    std::uniform_int_distribution<unsigned int> dis(0, c.size() - 1);
-    unsigned int cut = dis(gen_);
-    Graph<T> g = graph_;
-    std::vector<T> buf;
+    std::uniform_real_distribution<double> mdis(0., 1.);

-    for (unsigned int i = cut; i < c.size(); ++i)
+    if (mdis(gen_) < par_.mutationRate)
     {
-        g.removeVertex(c[i]);
+        auto &c = *select1();
+        std::uniform_int_distribution<unsigned int> cdis(0, c.size() - 1);
+        unsigned int cut = cdis(gen_);
+        std::vector<T> buf1, buf2;
+        Graph<T> g1 = graph_, g2 = graph_;
+
+        for (unsigned int i = 0; i < cut; ++i)
+        {
+            g1.removeVertex(c[i]);
+        }
+        for (unsigned int i = cut; i < c.size(); ++i)
+        {
+            g2.removeVertex(c[i]);
+        }
+        if (g1.size() > 0)
+        {
+            buf1 = g1.topoSort(gen_);
+        }
+        if (g2.size() > 0)
+        {
+            buf2 = g2.topoSort(gen_);
+        }
+        for (unsigned int i = cut; i < c.size(); ++i)
+        {
+            buf2.push_back(buf1[i - cut]);
+        }
+        PARALLEL_CRITICAL
+        {
+            population_.emplace(func_(buf2), buf2);
+        }
     }
-    if (g.size() > 0)
-    {
-        buf = g.topoSort(gen_);
-    }
-    for (unsigned int i = cut; i < c.size(); ++i)
-    {
-        buf.push_back(c[i]);
-    }
-    c = buf;
 }

 END_HADRONS_NAMESPACE
diff --git a/lib/Threads.h b/lib/Threads.h
index 2f270b73..2f072633 100644
--- a/lib/Threads.h
+++ b/lib/Threads.h
@@ -46,11 +46,13 @@ Author: paboyle
 #endif
 #define PARALLEL_NESTED_LOOP2 _Pragma("omp parallel for collapse(2)")
 #define PARALLEL_REGION       _Pragma("omp parallel")
+#define PARALLEL_CRITICAL     _Pragma("omp critical")
 #else
 #define PARALLEL_FOR_LOOP
 #define PARALLEL_FOR_LOOP_INTERN
 #define PARALLEL_NESTED_LOOP2
 #define PARALLEL_REGION
+#define PARALLEL_CRITICAL
 #endif

 namespace Grid {
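
Editor's note: the listing below is not part of the patch. It is a minimal standalone sketch of the cut-and-reorder crossover that the new GeneticScheduler<T>::crossover() applies to the two parents returned by select2(): a child copies the first `cut` genes of one parent, then the remaining genes in the order in which they appear in the other parent, so every gene still occurs exactly once. The integer Gene alias and the main() driver are illustrative only.

// standalone illustration (not Hadrons code) of the cut-and-reorder crossover
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>

using Gene = std::vector<int>; // stand-in for the std::vector<T> genes in Hadrons

// child = first `cut` entries of p2, then the entries still missing,
// taken in the order in which they appear in p1 (mirrors the `cross` lambda)
Gene cross(const Gene &p1, const Gene &p2, unsigned int cut)
{
    Gene c, buf = p1;

    for (unsigned int i = 0; i < cut; ++i)
    {
        c.push_back(p2[i]);
        buf.erase(std::find(buf.begin(), buf.end(), p2[i]));
    }
    c.insert(c.end(), buf.begin(), buf.end());

    return c;
}

int main(void)
{
    std::mt19937 gen(42);
    Gene         p1 = {0, 1, 2, 3, 4}, p2 = {4, 3, 2, 1, 0};

    std::uniform_int_distribution<unsigned int> dis(0, p1.size() - 1);
    unsigned int cut = dis(gen);

    // the two children produced from the same cut point
    for (auto &g: cross(p1, p2, cut)) std::cout << g << " ";
    std::cout << std::endl;
    for (auto &g: cross(p2, p1, cut)) std::cout << g << " ";
    std::cout << std::endl;

    return 0;
}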
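
Editor's note: a second standalone sketch, also not part of the patch, showing why the population_.emplace() calls are now wrapped in the new PARALLEL_CRITICAL macro. With the mutation and mating loops running under PARALLEL_FOR_LOOP, several threads may insert into the shared std::multimap at once, and std::multimap insertion is not thread-safe, so the critical section serialises those insertions. The macro definitions below are simplified stand-ins for the ones in lib/Threads.h, and the toy fitness value stands in for func_().

// standalone illustration (not Hadrons code) of PARALLEL_CRITICAL guarding
// concurrent insertions into a shared std::multimap
#include <iostream>
#include <map>
#include <vector>

#ifdef _OPENMP
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for")
#define PARALLEL_CRITICAL _Pragma("omp critical")
#else
#define PARALLEL_FOR_LOOP
#define PARALLEL_CRITICAL
#endif

int main(void)
{
    std::multimap<int, std::vector<int>> population;
    const unsigned int                   popSize = 64;

    PARALLEL_FOR_LOOP
    for (unsigned int i = 0; i < popSize; ++i)
    {
        std::vector<int> candidate = {static_cast<int>(i)};  // stand-in for a schedule
        int              fitness   = static_cast<int>(i % 7); // stand-in for func_(candidate)

        // without the critical section, two threads could modify the
        // multimap's internal tree at the same time and corrupt it
        PARALLEL_CRITICAL
        {
            population.emplace(fitness, candidate);
        }
    }
    std::cout << population.size() << " candidates inserted" << std::endl;

    return 0;
}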