From 51eae5723e2719843821d6b155f6f300538259a3 Mon Sep 17 00:00:00 2001
From: Antonin Portelli
Date: Sun, 29 Jan 2023 14:56:37 +0000
Subject: [PATCH] Grid IO benchmark cleanup

---
 Grid/Benchmark_IO.cpp | 93 ++++++++++++++++++++-----------------------
 Grid/Benchmark_IO.hpp | 47 ++++++++++------------
 2 files changed, 64 insertions(+), 76 deletions(-)

diff --git a/Grid/Benchmark_IO.cpp b/Grid/Benchmark_IO.cpp
index 96ef3e3..e069e70 100644
--- a/Grid/Benchmark_IO.cpp
+++ b/Grid/Benchmark_IO.cpp
@@ -32,7 +32,7 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 #ifdef HAVE_LIME
 using namespace Grid;
 
-std::string filestem(const int l) { return "iobench_l" + std::to_string(l); }
+std::string filestem(const int l) { return "io/iobench_l" + std::to_string(l); }
 
 int vol(const int i) { return BENCH_IO_LMIN + 2 * i; }
 
@@ -56,13 +56,6 @@ template <typename Mat> void stats(Mat &mean, Mat &stdDev, const std::vector<Mat>
   std::vector<Eigen::VectorXd> avPerf(BENCH_IO_NPASS, Eigen::VectorXd::Zero(4));
   std::vector<int> latt;
 
-  MSG << "Grid is setup to use " << threads << " threads" << std::endl;
-  MSG << "MPI partition " << mpi << std::endl;
+  GRID_MSG << "Grid is setup to use " << threads << " threads" << std::endl;
+  GRID_MSG << "MPI partition " << mpi << std::endl;
   for (unsigned int i = 0; i < BENCH_IO_NPASS; ++i)
   {
-    MSG << BIGSEP << std::endl;
-    MSG << "Pass " << i + 1 << "/" << BENCH_IO_NPASS << std::endl;
-    MSG << BIGSEP << std::endl;
-    MSG << SEP << std::endl;
-    MSG << "Benchmark std write" << std::endl;
-    MSG << SEP << std::endl;
+    grid_big_sep();
+    GRID_MSG << "Pass " << i + 1 << "/" << BENCH_IO_NPASS << std::endl;
+    grid_big_sep();
+    grid_small_sep();
+    GRID_MSG << "Benchmark std write" << std::endl;
+    grid_small_sep();
     for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
     {
       latt = {l * mpi[0], l * mpi[1], l * mpi[2], l * mpi[3]};
-      MSG << "-- Local volume " << l << "^4" << std::endl;
+      GRID_MSG << "-- Local volume " << l << "^4" << std::endl;
       writeBenchmark<LatticeFermion>(latt, filestem(l), stdWrite<LatticeFermion>);
       perf[i](volInd(l), sWrite) = BinaryIO::lastPerf.mbytesPerSecond;
     }
-    MSG << SEP << std::endl;
-    MSG << "Benchmark std read" << std::endl;
-    MSG << SEP << std::endl;
+    grid_small_sep();
+    GRID_MSG << "Benchmark std read" << std::endl;
+    grid_small_sep();
     for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
     {
       latt = {l * mpi[0], l * mpi[1], l * mpi[2], l * mpi[3]};
-      MSG << "-- Local volume " << l << "^4" << std::endl;
+      GRID_MSG << "-- Local volume " << l << "^4" << std::endl;
       readBenchmark<LatticeFermion>(latt, filestem(l), stdRead<LatticeFermion>);
       perf[i](volInd(l), sRead) = BinaryIO::lastPerf.mbytesPerSecond;
     }
 #ifdef HAVE_LIME
-    MSG << SEP << std::endl;
-    MSG << "Benchmark Grid C-Lime write" << std::endl;
-    MSG << SEP << std::endl;
+    grid_small_sep();
+    GRID_MSG << "Benchmark Grid C-Lime write" << std::endl;
+    grid_small_sep();
     for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
     {
       latt = {l * mpi[0], l * mpi[1], l * mpi[2], l * mpi[3]};
-      MSG << "-- Local volume " << l << "^4" << std::endl;
+      GRID_MSG << "-- Local volume " << l << "^4" << std::endl;
       writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
       perf[i](volInd(l), gWrite) = BinaryIO::lastPerf.mbytesPerSecond;
     }
-    MSG << SEP << std::endl;
-    MSG << "Benchmark Grid C-Lime read" << std::endl;
-    MSG << SEP << std::endl;
+    grid_small_sep();
+    GRID_MSG << "Benchmark Grid C-Lime read" << std::endl;
+    grid_small_sep();
     for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
     {
       latt = {l * mpi[0], l * mpi[1], l * mpi[2], l * mpi[3]};
-      MSG << "-- Local volume " << l << "^4" << std::endl;
+      GRID_MSG << "-- Local volume " << l << "^4" << std::endl;
       readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
       perf[i](volInd(l), gRead) = BinaryIO::lastPerf.mbytesPerSecond;
     }
@@ -159,13 +152,13 @@ int main(int argc, char **argv)
   avRob.fill(100.);
   avRob -= 100. * avStdDev.cwiseQuotient(avMean.cwiseAbs());
-  MSG << BIGSEP << std::endl;
-  MSG << "SUMMARY" << std::endl;
-  MSG << BIGSEP << std::endl;
-  MSG << "Summary of individual results (all results in MB/s)." << std::endl;
-  MSG << "Every second colum gives the standard deviation of the previous column."
-      << std::endl;
-  MSG << std::endl;
+  grid_big_sep();
+  GRID_MSG << "SUMMARY" << std::endl;
+  grid_big_sep();
+  GRID_MSG << "Summary of individual results (all results in MB/s)." << std::endl;
+  GRID_MSG << "Every second column gives the standard deviation of the previous column."
+           << std::endl;
+  GRID_MSG << std::endl;
   grid_printf("%4s %12s %12s %12s %12s %12s %12s %12s %12s\n", "L", "std read",
               "std dev", "std write", "std dev", "Grid read", "std dev", "Grid write",
               "std dev");
   for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
@@ -176,10 +169,10 @@ int main(int argc, char **argv)
             stdDev(volInd(l), gRead), mean(volInd(l), gWrite),
             stdDev(volInd(l), gWrite));
   }
-  MSG << std::endl;
-  MSG << "Robustness of individual results, in %. (rob = 100% - std dev / mean)"
-      << std::endl;
-  MSG << std::endl;
+  GRID_MSG << std::endl;
+  GRID_MSG << "Robustness of individual results, in %. (rob = 100% - std dev / mean)"
+           << std::endl;
+  GRID_MSG << std::endl;
   grid_printf("%4s %12s %12s %12s %12s\n", "L", "std read", "std write", "Grid read",
               "Grid write");
   for (int l = BENCH_IO_LMIN; l <= BENCH_IO_LMAX; l += 2)
@@ -187,21 +180,21 @@ int main(int argc, char **argv)
     grid_printf("%4d %12.1f %12.1f %12.1f %12.1f\n", l, rob(volInd(l), sRead),
                 rob(volInd(l), sWrite), rob(volInd(l), gRead), rob(volInd(l), gWrite));
   }
-  MSG << std::endl;
-  MSG << "Summary of results averaged over local volumes 24^4-" << BENCH_IO_LMAX
-      << "^4 (all results in MB/s)." << std::endl;
-  MSG << "Every second colum gives the standard deviation of the previous column."
-      << std::endl;
-  MSG << std::endl;
+  GRID_MSG << std::endl;
+  GRID_MSG << "Summary of results averaged over local volumes 24^4-" << BENCH_IO_LMAX
+           << "^4 (all results in MB/s)." << std::endl;
+  GRID_MSG << "Every second column gives the standard deviation of the previous column."
+           << std::endl;
+  GRID_MSG << std::endl;
   grid_printf("%12s %12s %12s %12s %12s %12s %12s %12s\n", "std read", "std dev",
               "std write", "std dev", "Grid read", "std dev", "Grid write", "std dev");
   grid_printf("%12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f %12.1f\n",
               avMean(sRead), avStdDev(sRead), avMean(sWrite), avStdDev(sWrite),
               avMean(gRead), avStdDev(gRead), avMean(gWrite), avStdDev(gWrite));
-  MSG << std::endl;
-  MSG << "Robustness of volume-averaged results, in %. (rob = 100% - std dev / mean)"
-      << std::endl;
-  MSG << std::endl;
+  GRID_MSG << std::endl;
+  GRID_MSG << "Robustness of volume-averaged results, in %. (rob = 100% - std dev / mean)"
+           << std::endl;
+  GRID_MSG << std::endl;
   grid_printf("%12s %12s %12s %12s\n", "std read", "std write", "Grid read",
               "Grid write");
   grid_printf("%12.1f %12.1f %12.1f %12.1f\n", avRob(sRead), avRob(sWrite), avRob(gRead),

diff --git a/Grid/Benchmark_IO.hpp b/Grid/Benchmark_IO.hpp
index d71e943..34f3d11 100644
--- a/Grid/Benchmark_IO.hpp
+++ b/Grid/Benchmark_IO.hpp
@@ -18,12 +18,8 @@ along with this program. If not, see <https://www.gnu.org/licenses/>.
 #ifndef Benchmark_IO_hpp_
 #define Benchmark_IO_hpp_
 
+#include "Common.hpp"
 #include <Grid/Grid.h>
-#define MSG std::cout << GridLogMessage
-#define SEP                                                                             \
-  "-----------------------------------------------------------------------------"
-#define BIGSEP                                                                          \
-  "============================================================================="
 #ifdef HAVE_LIME
 namespace Grid
 {
@@ -50,9 +46,9 @@ namespace Grid
   //   crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
   //   std::fwrite(&crc, sizeof(uint32_t), 1, file);
   //   crcWatch.Stop();
-  //   MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
-  //   ioWatch.Start();
-  //   std::fwrite(vec_v.cpu_ptr, sizeof(typename Field::scalar_object),
+  //   GRID_MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec <<
+  //   std::endl; ioWatch.Start(); std::fwrite(vec_v.cpu_ptr, sizeof(typename
+  //   Field::scalar_object),
   //               vec.Grid()->lSites(), file);
   //   ioWatch.Stop();
   //   std::fclose(file);
@@ -61,11 +57,11 @@ namespace Grid
   //   p.size = size;
   //   p.time = ioWatch.useconds();
   //   p.mbytesPerSecond = size / 1024. / 1024. / (ioWatch.useconds() / 1.e6);
-  //   MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed()
+  //   GRID_MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed()
   //       << ",
   //       "
   //       << p.mbytesPerSecond << " MB/s" << std::endl;
-  //   MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
+  //   GRID_MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
   // }
 
   // template <typename Field> void stdRead(Field &vec, const std::string filestem)
@@ -94,16 +90,14 @@ namespace Grid
   //     crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
   //     crcWatch.Stop();
   //   }
-  //   MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
-  //   assert(crcData == crcRead);
-  //   size *= vec.Grid()->ProcessorCount();
-  //   auto &p = BinaryIO::lastPerf;
-  //   p.size = size;
-  //   p.time = ioWatch.useconds();
+  //   GRID_MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec <<
+  //   std::endl; assert(crcData == crcRead); size *= vec.Grid()->ProcessorCount(); auto
+  //   &p = BinaryIO::lastPerf; p.size = size; p.time = ioWatch.useconds();
   //   p.mbytesPerSecond = size / 1024. / 1024. / (ioWatch.useconds() / 1.e6);
-  //   MSG << "Std I/O read: Read " << p.size << " bytes in " << ioWatch.Elapsed() << ", "
+  //   GRID_MSG << "Std I/O read: Read " << p.size << " bytes in " << ioWatch.Elapsed() <<
+  //   ", "
   //       << p.mbytesPerSecond << " MB/s" << std::endl;
-  //   MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
+  //   GRID_MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
   // }
 
   template <typename Field> void stdWrite(const std::string filestem, Field &vec)
@@ -122,7 +116,7 @@ namespace Grid
     crc = GridChecksum::crc32(vec_v.cpu_ptr, size);
     file.write(reinterpret_cast<char *>(&crc), sizeof(uint32_t) / sizeof(char));
     crcWatch.Stop();
-    MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
+    GRID_MSG << "Std I/O write: Data CRC32 " << std::hex << crc << std::dec << std::endl;
     ioWatch.Start();
    file.write(reinterpret_cast<char *>(vec_v.cpu_ptr), sizec);
     file.flush();
@@ -132,9 +126,9 @@ namespace Grid
     p.size = size;
     p.time = ioWatch.useconds();
     p.mbytesPerSecond = size / 1024. / 1024.
                         / (ioWatch.useconds() / 1.e6);
-    MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed() << ", "
-        << p.mbytesPerSecond << " MB/s" << std::endl;
-    MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
+    GRID_MSG << "Std I/O write: Wrote " << p.size << " bytes in " << ioWatch.Elapsed()
+             << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
+    GRID_MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
   }
   template <typename Field> void stdRead(Field &vec, const std::string filestem)
@@ -163,16 +157,17 @@ namespace Grid
       crcData = GridChecksum::crc32(vec_v.cpu_ptr, size);
       crcWatch.Stop();
     }
-    MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec << std::endl;
+    GRID_MSG << "Std I/O read: Data CRC32 " << std::hex << crcData << std::dec
+             << std::endl;
     assert(crcData == crcRead);
     size *= vec.Grid()->ProcessorCount();
     auto &p = BinaryIO::lastPerf;
     p.size = size;
     p.time = ioWatch.useconds();
     p.mbytesPerSecond = size / 1024. / 1024. / (ioWatch.useconds() / 1.e6);
-    MSG << "Std I/O read: Read " << p.size << " bytes in " << ioWatch.Elapsed() << ", "
-        << p.mbytesPerSecond << " MB/s" << std::endl;
-    MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
+    GRID_MSG << "Std I/O read: Read " << p.size << " bytes in " << ioWatch.Elapsed()
+             << ", " << p.mbytesPerSecond << " MB/s" << std::endl;
+    GRID_MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
   }
 
   template <typename Field> void limeWrite(const std::string filestem, Field &vec)
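
Note: the patch makes Benchmark_IO.hpp include Common.hpp, but that header is not part
of this excerpt. The only helpers the patch relies on are GRID_MSG, grid_printf,
grid_small_sep, and grid_big_sep. Below is a minimal sketch of definitions consistent
with how they are used above; it is an illustration, not the actual contents of
Common.hpp, and everything beyond those four names (include guard, buffer size,
printf-style forwarding) is an assumption.

// Hypothetical sketch of Common.hpp: for illustration only, the real header
// shipped with the benchmark may differ.
#ifndef Grid_Benchmark_Common_hpp_
#define Grid_Benchmark_Common_hpp_

#include <Grid/Grid.h>
#include <cstdio>
#include <iostream>

// Replacement for the old MSG macro: a stream prefixed by the Grid logger.
#define GRID_MSG std::cout << Grid::GridLogMessage

// printf-style formatting routed through GRID_MSG (assumed behaviour).
template <typename... Args> inline void grid_printf(const char *fmt, Args... args)
{
  char buf[1024];

  std::snprintf(buf, sizeof(buf), fmt, args...);
  GRID_MSG << buf;
}

// Separator helpers replacing the deleted SEP/BIGSEP macros.
inline void grid_small_sep(void)
{
  GRID_MSG << "-----------------------------------------------------------------------------"
           << std::endl;
}

inline void grid_big_sep(void)
{
  GRID_MSG << "============================================================================="
           << std::endl;
}

#endif // Grid_Benchmark_Common_hpp_

With definitions along these lines, a call such as grid_printf("%4d %12.1f\n", l, mean)
goes through the same GridLogMessage channel as the GRID_MSG streams, which keeps the
benchmark output uniform across the table and log-style lines.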