mirror of https://github.com/paboyle/Grid.git synced 2025-04-27 14:15:55 +01:00

I/O benchmark code cleaning

Antonin Portelli, 2020-10-07 15:31:51 +01:00
commit 5ee832f738, parent e9c5a271a8
3 changed files with 50 additions and 44 deletions
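
In outline, the cleanup hoists the MPI-partition query (GridDefaultMpi()) and the local lattice vector out of the per-volume benchmark loops, prints the MPI partition at start-up, renames the log headings to "std" and "Grid C-Lime", moves the HAVE_LIME guard so that it wraps the whole benchmark body, and comments out the single-precision Lime runs. Below is a minimal, Grid-free sketch of the hoisted-loop pattern: the names mpi and latt and the stepping over local volumes follow the diff, while the partition values, the loop bound, and the printing are purely illustrative stand-ins.

#include <iostream>
#include <vector>

int main()
{
    // stand-in for GridDefaultMpi(): one rank per dimension (illustrative only)
    std::vector<int> mpi = {1, 1, 1, 1};
    std::vector<int> latt;            // hoisted out of the loops, as in the diff

    for (int l = 4; l <= 8; l += 2)   // BENCH_IO_LMAX is assumed to be 8 here
    {
        // rebuild the global lattice from the local volume and the MPI partition
        latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
        std::cout << "-- Local volume " << l << "^4, global lattice";
        for (int d : latt) std::cout << " " << d;
        std::cout << std::endl;
        // a real benchmark would call writeBenchmark/readBenchmark here
    }
    return 0;
}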

View File

@@ -14,61 +14,62 @@ std::string filestem(const int l)
 int main (int argc, char ** argv)
 {
+#ifdef HAVE_LIME
   Grid_init(&argc,&argv);
   int64_t threads = GridThread::GetThreads();
+  auto mpi = GridDefaultMpi();
+  std::vector<int> latt;
   MSG << "Grid is setup to use " << threads << " threads" << std::endl;
+  MSG << "MPI partition " << mpi << std::endl;
   MSG << SEP << std::endl;
-  MSG << "Benchmark std C++ write" << std::endl;
+  MSG << "Benchmark std write" << std::endl;
   MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto mpi = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
     MSG << "-- Local volume " << l << "^4" << std::endl;
     writeBenchmark<LatticeFermion>(latt, filestem(l), stdWrite<LatticeFermion>);
   }
   MSG << SEP << std::endl;
-  MSG << "Benchmark std C++ read" << std::endl;
+  MSG << "Benchmark std read" << std::endl;
   MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto mpi = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
     MSG << "-- Local volume " << l << "^4" << std::endl;
     readBenchmark<LatticeFermion>(latt, filestem(l), stdRead<LatticeFermion>);
   }
-#ifdef HAVE_LIME
   MSG << SEP << std::endl;
-  MSG << "Benchmark Lime write" << std::endl;
+  MSG << "Benchmark Grid C-Lime write" << std::endl;
   MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto mpi = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
     MSG << "-- Local volume " << l << "^4" << std::endl;
     writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
   }
   MSG << SEP << std::endl;
-  MSG << "Benchmark Lime read" << std::endl;
+  MSG << "Benchmark Grid C-Lime read" << std::endl;
   MSG << SEP << std::endl;
   for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
   {
-    auto mpi = GridDefaultMpi();
-    std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
+    latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
     MSG << "-- Local volume " << l << "^4" << std::endl;
     readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
   }
-#endif
   Grid_finalize();
+#endif
   return EXIT_SUCCESS;
 }

View File

@@ -101,7 +101,7 @@ void stdWrite(const std::string filestem, Field &vec)
   ioWatch.Stop();
   size *= vec.Grid()->ProcessorCount();
   MSG << "Std I/O write: Wrote " << size << " bytes in " << ioWatch.Elapsed()
-      << ", performance " << size/1024./1024./(ioWatch.useconds()/1.e6)
+      << ", " << size/1024./1024./(ioWatch.useconds()/1.e6)
       << " MB/s" << std::endl;
   MSG << "Std I/O write: checksum overhead " << crcWatch.Elapsed() << std::endl;
 }

@@ -136,7 +136,7 @@ void stdRead(Field &vec, const std::string filestem)
   assert(crcData == crcRead);
   size *= vec.Grid()->ProcessorCount();
   MSG << "Std I/O read: Read " << size << " bytes in " << ioWatch.Elapsed()
-      << ", performance " << size/1024./1024./(ioWatch.useconds()/1.e6)
+      << ", " << size/1024./1024./(ioWatch.useconds()/1.e6)
       << " MB/s" << std::endl;
   MSG << "Std I/O read: checksum overhead " << crcWatch.Elapsed() << std::endl;
 }

View File

@@ -34,46 +34,51 @@ int main (int argc, char ** argv)
   }
   Grid_init(&argc,&argv);
   int64_t threads = GridThread::GetThreads();
+  auto mpi = GridDefaultMpi();
   MSG << "Grid is setup to use " << threads << " threads" << std::endl;
+  MSG << "MPI partition " << mpi << std::endl;
   MSG << SEP << std::endl;
-  MSG << "Benchmark double precision Lime write" << std::endl;
+  MSG << "Benchmark Grid C-Lime write" << std::endl;
   MSG << SEP << std::endl;
   for (auto &d: dir)
   {
     MSG << "-- Directory " << d << std::endl;
-    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermion>, Ls, rb);
+    writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench",
+                                   limeWrite<LatticeFermion>, Ls, rb);
   }
   MSG << SEP << std::endl;
-  MSG << "Benchmark double precision Lime read" << std::endl;
+  MSG << "Benchmark Grid C-Lime read" << std::endl;
   MSG << SEP << std::endl;
   for (auto &d: dir)
   {
     MSG << "-- Directory " << d << std::endl;
-    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermion>, Ls, rb);
+    readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench",
+                                  limeRead<LatticeFermion>, Ls, rb);
   }
-  MSG << SEP << std::endl;
-  MSG << "Benchmark single precision Lime write" << std::endl;
-  MSG << SEP << std::endl;
-  for (auto &d: dir)
-  {
-    MSG << "-- Directory " << d << std::endl;
-    writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
-  }
-  MSG << SEP << std::endl;
-  MSG << "Benchmark single precision Lime read" << std::endl;
-  MSG << SEP << std::endl;
-  for (auto &d: dir)
-  {
-    MSG << "-- Directory " << d << std::endl;
-    readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
-  }
+  // MSG << SEP << std::endl;
+  // MSG << "Benchmark single precision Lime write" << std::endl;
+  // MSG << SEP << std::endl;
+  // for (auto &d: dir)
+  // {
+  //   MSG << "-- Directory " << d << std::endl;
+  //   writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
+  // }
+  // MSG << SEP << std::endl;
+  // MSG << "Benchmark single precision Lime read" << std::endl;
+  // MSG << SEP << std::endl;
+  // for (auto &d: dir)
+  // {
+  //   MSG << "-- Directory " << d << std::endl;
+  //   readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
+  // }
   Grid_finalize();
 #endif
   return EXIT_SUCCESS;
 }
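
After this change the whole body of the I/O benchmark's main, including Grid_init and Grid_finalize, is compiled only when HAVE_LIME is defined, so a build without C-Lime support still produces a program that compiles and exits cleanly while doing no work. A minimal sketch of that pattern follows, with HAVE_LIME reduced to a placeholder macro rather than the real configure-time check.

#include <cstdlib>
#include <iostream>

// Normally set by the build system when the C-Lime library is available.
// Define it manually (e.g. -DHAVE_LIME) to enable the benchmark body.
// #define HAVE_LIME

int main()
{
#ifdef HAVE_LIME
    std::cout << "Lime support enabled: running I/O benchmark" << std::endl;
    // Grid_init, the benchmark loops, and Grid_finalize would go here.
#endif
    // Without HAVE_LIME the program is a no-op that still exits successfully.
    return EXIT_SUCCESS;
}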