forked from portelli/lattice-benchmarks

Compare commits: 33 commits (a1ad41bb06...benchmark-)

Commits (SHA1):
fb43d16830
6fa2e6bcd0
fb4c456776
3fbb8ea346
86b160cb5c
dc411017bb
b2cc780690
6d87396576
e9d084ce09
32e301fc67
eaa4feee43
025f9dab50
3a561091d9
191c0cfca5
6f9af8acad
371a329457
f81cb198ab
a7e1d9e67f
19c9dcb6ae
7d89380b80
4cd67805b9
f7e607eae4
a267986800
a1ec08cdb3
fb6c79d9ca
d7647afa72
ba00493c7d
6055e0503c
6ea093fc80
fa47ec5bbe
7235bfde4c
e5c61c2db1
80c80049d7
@@ -1,7 +1,7 @@
 /*
 Copyright © 2015 Peter Boyle <paboyle@ph.ed.ac.uk>
 Copyright © 2022 Antonin Portelli <antonin.portelli@me.com>
-Copyright © 2022 Simon Buerger <simon.buerger@rwth-aachen.de>
+Copyright © 2024 Simon Buerger <simon.buerger@rwth-aachen.de>
 
 This is a fork of Benchmark_ITT.cpp from Grid
 
@@ -29,6 +29,43 @@ int NN_global;
 
 nlohmann::json json_results;
 
+// NOTE: Grid::GridClock is just a typedef to
+// `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to
+// microseconds (no idea why, probably wasn't ever relevant before), so we need
+// our own wrapper here.
+double usecond_precise()
+{
+  using namespace std::chrono;
+  auto nsecs = duration_cast<nanoseconds>(GridClock::now() - Grid::theProgramStart);
+  return nsecs.count() * 1e-3;
+}
+
+std::vector<std::string> get_mpi_hostnames()
+{
+  int world_size;
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+  char hostname[MPI_MAX_PROCESSOR_NAME];
+  int name_len = 0;
+  MPI_Get_processor_name(hostname, &name_len);
+
+  // Allocate buffer to gather all hostnames
+  std::vector<char> all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME);
+
+  // Use MPI_Allgather to gather all hostnames on all ranks
+  MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, all_hostnames.data(),
+                MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_COMM_WORLD);
+
+  // Convert the gathered hostnames back into a vector of std::string
+  std::vector<std::string> hostname_list(world_size);
+  for (int i = 0; i < world_size; ++i)
+  {
+    hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]);
+  }
+
+  return hostname_list;
+}
+
 struct time_statistics
 {
   double mean;
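
Note: the precision difference the NOTE comment above describes is easy to reproduce without Grid. The following self-contained C++ sketch (illustrative names only, not part of the benchmark) contrasts a timer that truncates to whole microseconds, as `Grid::usecond` does, with a nanosecond-based wrapper like `usecond_precise`:

    #include <chrono>
    #include <cstdio>

    static const auto t0 = std::chrono::high_resolution_clock::now();

    // behaves like Grid::usecond: truncated to whole microseconds
    double usecond_rounded()
    {
      using namespace std::chrono;
      return duration_cast<microseconds>(high_resolution_clock::now() - t0).count();
    }

    // nanosecond resolution, returned in microseconds
    double usecond_precise_sketch()
    {
      using namespace std::chrono;
      return duration_cast<nanoseconds>(high_resolution_clock::now() - t0).count() * 1e-3;
    }

    int main()
    {
      double a = usecond_rounded();
      double b = usecond_precise_sketch();
      // sub-microsecond intervals vanish in the rounded version
      std::printf("rounded: %.3f usec, precise: %.3f usec\n", a, b);
    }
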
@@ -73,6 +110,8 @@ class Benchmark
         {local[0] * mpi[0], local[1] * mpi[1], local[2] * mpi[2], local[3] * mpi[3]});
     GridCartesian *TmpGrid = SpaceTimeGrid::makeFourDimGrid(
         latt4, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
+    Grid::Coordinate shm(4, 1);
+    GlobalSharedMemory::GetShmDims(mpi, shm);
 
     uint64_t NP = TmpGrid->RankCount();
     uint64_t NN = TmpGrid->NodeCount();
@@ -85,7 +124,9 @@ class Benchmark
     std::cout << GridLogMessage << "* OpenMP threads : " << GridThread::GetThreads()
               << std::endl;
 
-    std::cout << GridLogMessage << "* MPI tasks : " << GridCmdVectorIntToString(mpi)
+    std::cout << GridLogMessage << "* MPI layout : " << GridCmdVectorIntToString(mpi)
               << std::endl;
+    std::cout << GridLogMessage << "* Shm layout : " << GridCmdVectorIntToString(shm)
+              << std::endl;
 
     std::cout << GridLogMessage << "* vReal : " << sizeof(vReal) * 8 << "bits ; "
@@ -118,6 +159,7 @@ class Benchmark
     for (unsigned int i = 0; i < mpi.size(); ++i)
     {
       tmp["mpi"].push_back(mpi[i]);
+      tmp["shm"].push_back(shm[i]);
     }
     tmp["ranks"] = NP;
     tmp["nodes"] = NN;
@@ -132,6 +174,8 @@ class Benchmark
 
     Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
     Coordinate mpi_layout = GridDefaultMpi();
+    Coordinate shm_layout(Nd, 1);
+    GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout);
 
     for (int mu = 0; mu < Nd; mu++)
       if (mpi_layout[mu] > 1)
@@ -143,8 +187,8 @@ class Benchmark
     std::cout << GridLogMessage << "Benchmarking threaded STENCIL halo exchange in "
               << nmu << " dimensions" << std::endl;
     grid_small_sep();
-    grid_printf("%5s %5s %15s %15s %15s %15s %15s\n", "L", "dir", "payload (B)",
-                "time (usec)", "rate (GB/s/node)", "std dev", "max");
+    grid_printf("%5s %5s %7s %15s %15s %15s %15s %15s\n", "L", "dir", "shm",
+                "payload (B)", "time (usec)", "rate (GB/s/node)", "std dev", "max");
 
     for (int lat = 16; lat <= maxlat; lat += 8)
     {
@@ -173,74 +217,80 @@ class Benchmark
       for (int dir = 0; dir < 8; dir++)
       {
        int mu = dir % 4;
-        if (mpi_layout[mu] > 1)
-        {
+        if (mpi_layout[mu] == 1) // skip directions that are not distributed
+          continue;
+        bool is_shm = mpi_layout[mu] == shm_layout[mu];
+        bool is_partial_shm = !is_shm && shm_layout[mu] != 1;
 
-          std::vector<double> times(Nloop);
-          for (int i = 0; i < NWARMUP; i++)
-          {
-            int xmit_to_rank;
-            int recv_from_rank;
-
-            if (dir == mu)
-            {
-              int comm_proc = 1;
-              Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-            }
-            else
-            {
-              int comm_proc = mpi_layout[mu] - 1;
-              Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-            }
-            Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-                                (void *)&rbuf[dir][0], recv_from_rank, bytes);
-          }
-          for (int i = 0; i < Nloop; i++)
-          {
-
-            dbytes = 0;
-            double start = usecond();
-            int xmit_to_rank;
-            int recv_from_rank;
-
-            if (dir == mu)
-            {
-              int comm_proc = 1;
-              Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-            }
-            else
-            {
-              int comm_proc = mpi_layout[mu] - 1;
-              Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-            }
-            Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-                                (void *)&rbuf[dir][0], recv_from_rank, bytes);
-            dbytes += bytes;
-
-            double stop = usecond();
-            t_time[i] = stop - start; // microseconds
-          }
-          timestat.statistics(t_time);
-
-          dbytes = dbytes * ppn;
-          double bidibytes = 2. * dbytes;
-          double rate = bidibytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.;
-          double rate_err = rate * timestat.err / timestat.mean;
-          double rate_max = rate * timestat.mean / timestat.min;
-          grid_printf("%5d %5d %15d %15.2f %15.2f %15.1f %15.2f\n", lat, dir, bytes,
-                      timestat.mean, rate, rate_err, rate_max);
-          nlohmann::json tmp;
-          nlohmann::json tmp_rate;
-          tmp["L"] = lat;
-          tmp["dir"] = dir;
-          tmp["bytes"] = bytes;
-          tmp["time_usec"] = timestat.mean;
-          tmp_rate["mean"] = rate;
-          tmp_rate["error"] = rate_err;
-          tmp_rate["max"] = rate_max;
-          tmp["rate_GBps"] = tmp_rate;
-          json_results["comms"].push_back(tmp);
-        }
+        std::vector<double> times(Nloop);
+        for (int i = 0; i < NWARMUP; i++)
+        {
+          int xmit_to_rank;
+          int recv_from_rank;
+
+          if (dir == mu)
+          {
+            int comm_proc = 1;
+            Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
+          }
+          else
+          {
+            int comm_proc = mpi_layout[mu] - 1;
+            Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
+          }
+          Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0],
+                              recv_from_rank, bytes);
+        }
+        for (int i = 0; i < Nloop; i++)
+        {
+
+          dbytes = 0;
+          double start = usecond();
+          int xmit_to_rank;
+          int recv_from_rank;
+
+          if (dir == mu)
+          {
+            int comm_proc = 1;
+            Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
+          }
+          else
+          {
+            int comm_proc = mpi_layout[mu] - 1;
+            Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
+          }
+          Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0],
+                              recv_from_rank, bytes);
+          dbytes += bytes;
+
+          double stop = usecond();
+          t_time[i] = stop - start; // microseconds
+        }
+        timestat.statistics(t_time);
+
+        dbytes = dbytes * ppn;
+        double bidibytes = 2. * dbytes;
+        double rate = bidibytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.;
+        double rate_err = rate * timestat.err / timestat.mean;
+        double rate_max = rate * timestat.mean / timestat.min;
+        grid_printf("%5d %5d %7s %15d %15.2f %15.2f %15.1f %15.2f\n", lat, dir,
+                    is_shm ? "yes"
+                    : is_partial_shm ? "partial"
+                                     : "no",
+                    bytes, timestat.mean, rate, rate_err, rate_max);
+        nlohmann::json tmp;
+        nlohmann::json tmp_rate;
+        tmp["L"] = lat;
+        tmp["dir"] = dir;
+        tmp["shared_mem"] = is_shm;
+        tmp["partial_shared_mem"] = is_partial_shm;
+        tmp["bytes"] = bytes;
+        tmp["time_usec"] = timestat.mean;
+        tmp_rate["mean"] = rate;
+        tmp_rate["error"] = rate_err;
+        tmp_rate["max"] = rate_max;
+        tmp["rate_GBps"] = tmp_rate;
+        json_results["comms"].push_back(tmp);
       }
       for (int d = 0; d < 8; d++)
       {
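
Note: to make the units in the rate column concrete, here is a worked example of the formula above with made-up numbers (self-contained C++; none of these values come from an actual run). The payload is counted once per rank on the node (ppn) and twice for bidirectional traffic:

    #include <cstdio>

    int main()
    {
      double bytes = 1073741824.; // hypothetical payload: 1 GiB per direction
      double ppn = 4.;            // hypothetical ranks per node
      double mean_usec = 10000.;  // hypothetical mean exchange time
      double dbytes = bytes * ppn;
      double bidibytes = 2. * dbytes;
      double rate = bidibytes / (mean_usec / 1.e6) / 1024. / 1024. / 1024.;
      std::printf("rate = %.1f GB/s/node\n", rate); // 800.0 with these numbers
    }
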
@@ -251,6 +301,170 @@ class Benchmark
     return;
   }
 
+  static void Latency(void)
+  {
+    int Nwarmup = 100;
+    int Nloop = 300;
+
+    std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl;
+    grid_small_sep();
+    grid_printf("from to mean(usec) err max\n");
+
+    int ranks;
+    int me;
+    MPI_Comm_size(MPI_COMM_WORLD, &ranks);
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+    int bytes = 8;
+    void *buf_from = acceleratorAllocDevice(bytes);
+    void *buf_to = acceleratorAllocDevice(bytes);
+    nlohmann::json json_latency;
+    for (int from = 0; from < ranks; ++from)
+      for (int to = 0; to < ranks; ++to)
+      {
+        if (from == to)
+          continue;
+
+        std::vector<double> t_time(Nloop);
+        time_statistics timestat;
+        MPI_Status status;
+
+        for (int i = -Nwarmup; i < Nloop; ++i)
+        {
+          double start = usecond_precise();
+          if (from == me)
+          {
+            auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
+            assert(err == MPI_SUCCESS);
+          }
+          if (to == me)
+          {
+            auto err =
+                MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
+            assert(err == MPI_SUCCESS);
+          }
+          double stop = usecond_precise();
+          if (i >= 0)
+            t_time[i] = stop - start;
+        }
+        // important: only 'from' and 'to' have meaningful timings. we use
+        // 'from's.
+        MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
+
+        timestat.statistics(t_time);
+        grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean,
+                    timestat.err, timestat.max);
+        nlohmann::json tmp;
+        tmp["from"] = from;
+        tmp["to"] = to;
+        tmp["time_usec"] = timestat.mean;
+        tmp["time_usec_error"] = timestat.err;
+        tmp["time_usec_min"] = timestat.min;
+        tmp["time_usec_max"] = timestat.max;
+        tmp["time_usec_full"] = t_time;
+        json_latency.push_back(tmp);
+      }
+    json_results["latency"] = json_latency;
+
+    acceleratorFreeDevice(buf_from);
+    acceleratorFreeDevice(buf_to);
+  }
+
+  static void P2P(void)
+  {
+    // IMPORTANT: The P2P benchmark uses the "MPI_COMM_WORLD" communicator, which
+    // is not quite the same as Grid.communicator. Practically speaking, the
+    // latter contains the same MPI ranks but in a different order. Grid does
+    // this to make sure it can exploit ranks with shared memory (i.e.
+    // multiple ranks on the same node) as best as possible.
+
+    // buffer size to benchmark. This number is the same as the largest one used
+    // in the "Comms()" benchmark (L=48, Ls=12, double-prec-complex,
+    // half-color-spin-vector). Mostly an arbitrary choice, but nice to match
+    // it here.
+    size_t bytes = 127401984;
+
+    int Nwarmup = 20;
+    int Nloop = 100;
+
+    std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl;
+    grid_small_sep();
+    grid_printf("from to mean(usec) err min "
+                "bytes rate (GiB/s)\n");
+
+    int ranks;
+    int me;
+    MPI_Comm_size(MPI_COMM_WORLD, &ranks);
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+    void *buf_from = acceleratorAllocDevice(bytes);
+    void *buf_to = acceleratorAllocDevice(bytes);
+    nlohmann::json json_p2p;
+    for (int from = 0; from < ranks; ++from)
+      for (int to = 0; to < ranks; ++to)
+      {
+        if (from == to)
+          continue;
+
+        std::vector<double> t_time(Nloop);
+        time_statistics timestat;
+        MPI_Status status;
+
+        for (int i = -Nwarmup; i < Nloop; ++i)
+        {
+          double start = usecond_precise();
+          if (from == me)
+          {
+            auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
+            assert(err == MPI_SUCCESS);
+          }
+          if (to == me)
+          {
+            auto err =
+                MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
+            assert(err == MPI_SUCCESS);
+          }
+          double stop = usecond_precise();
+          if (i >= 0)
+            t_time[i] = stop - start;
+        }
+        // important: only 'from' and 'to' have meaningful timings. we use
+        // 'from's.
+        MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
+
+        timestat.statistics(t_time);
+        double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.;
+        double rate_err = rate * timestat.err / timestat.mean;
+        double rate_max = rate * timestat.mean / timestat.min;
+        double rate_min = rate * timestat.mean / timestat.max;
+
+        grid_printf("%2d %2d %15.4f %15.3f %15.4f %15d %15.2f\n", from, to, timestat.mean,
+                    timestat.err, timestat.min, bytes, rate);
+
+        nlohmann::json tmp;
+        tmp["from"] = from;
+        tmp["to"] = to;
+        tmp["bytes"] = bytes;
+        tmp["time_usec"] = timestat.mean;
+        tmp["time_usec_error"] = timestat.err;
+        tmp["time_usec_min"] = timestat.min;
+        tmp["time_usec_max"] = timestat.max;
+        tmp["time_usec_full"] = t_time;
+        nlohmann::json tmp_rate;
+        tmp_rate["mean"] = rate;
+        tmp_rate["error"] = rate_err;
+        tmp_rate["max"] = rate_max;
+        tmp_rate["min"] = rate_min;
+        tmp["rate_GBps"] = tmp_rate;
+
+        json_p2p.push_back(tmp);
+      }
+    json_results["p2p"] = json_p2p;
+
+    acceleratorFreeDevice(buf_from);
+    acceleratorFreeDevice(buf_to);
+  }
+
   static void Memory(void)
   {
     const int Nvec = 8;
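
Note: Latency() and P2P() above time a one-way send/recv per iteration and broadcast the sender's timings. For contrast, the textbook alternative is a ping-pong that measures round trips between exactly two ranks; a minimal, self-contained sketch (standard MPI only, not code from this repository):

    #include <cstdio>
    #include <mpi.h>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);
      int me;
      MPI_Comm_rank(MPI_COMM_WORLD, &me);
      const int nloop = 1000;
      char byte = 0;
      MPI_Status status;
      double start = MPI_Wtime();
      for (int i = 0; i < nloop; ++i)
      {
        if (me == 0)
        {
          MPI_Send(&byte, 1, MPI_CHAR, 1, 0, MPI_COMM_WORLD);
          MPI_Recv(&byte, 1, MPI_CHAR, 1, 0, MPI_COMM_WORLD, &status);
        }
        else if (me == 1)
        {
          MPI_Recv(&byte, 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &status);
          MPI_Send(&byte, 1, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
        }
      }
      double stop = MPI_Wtime();
      if (me == 0) // half the round trip approximates the one-way latency
        std::printf("one-way latency ~ %.3f usec\n", (stop - start) / nloop / 2 * 1e6);
      MPI_Finalize();
      return 0;
    }
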
@@ -512,8 +726,6 @@ class Benchmark
 
     FGrid->Broadcast(0, &ncall, sizeof(ncall));
 
-    Dw.ZeroCounters();
-
     time_statistics timestat;
     std::vector<double> t_time(ncall);
     for (uint64_t i = 0; i < ncall; i++)
@@ -708,7 +920,6 @@ class Benchmark
     uint64_t ncall = 500;
 
     FGrid->Broadcast(0, &ncall, sizeof(ncall));
-    Ds.ZeroCounters();
 
     time_statistics timestat;
     std::vector<double> t_time(ncall);
@@ -776,11 +987,47 @@ int main(int argc, char **argv)
 {
   Grid_init(&argc, &argv);
 
+  int Ls = 1;
+  bool do_su4 = true;
+  bool do_memory = true;
+  bool do_comms = true;
+  bool do_flops = true;
+
+  // NOTE: these two take O((number of ranks)^2) time, which might be a lot, so they are
+  // off by default
+  bool do_latency = false;
+  bool do_p2p = false;
+
   std::string json_filename = ""; // empty indicates no json output
   for (int i = 0; i < argc; i++)
   {
-    if (std::string(argv[i]) == "--json-out")
+    auto arg = std::string(argv[i]);
+    if (arg == "--json-out")
       json_filename = argv[i + 1];
+    if (arg == "--benchmark-su4")
+      do_su4 = true;
+    if (arg == "--benchmark-memory")
+      do_memory = true;
+    if (arg == "--benchmark-comms")
+      do_comms = true;
+    if (arg == "--benchmark-flops")
+      do_flops = true;
+    if (arg == "--benchmark-latency")
+      do_latency = true;
+    if (arg == "--benchmark-p2p")
+      do_p2p = true;
+    if (arg == "--no-benchmark-su4")
+      do_su4 = false;
+    if (arg == "--no-benchmark-memory")
+      do_memory = false;
+    if (arg == "--no-benchmark-comms")
+      do_comms = false;
+    if (arg == "--no-benchmark-flops")
+      do_flops = false;
+    if (arg == "--no-benchmark-latency")
+      do_latency = false;
+    if (arg == "--no-benchmark-p2p")
+      do_p2p = false;
   }
 
   CartesianCommunicator::SetCommunicatorPolicy(
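
Note: a hypothetical invocation wiring these flags together (binary name and rank count are illustrative, not taken from this diff) could be:

    mpirun -np 16 ./Benchmark_Grid --json-out results.json --benchmark-latency --benchmark-p2p

The --no-benchmark-* variants disable the corresponding defaults, so the two O(ranks^2) benchmarks stay opt-in.
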
@@ -792,12 +1039,6 @@ int main(int argc, char **argv)
 #endif
   Benchmark::Decomposition();
 
-  int do_su4 = 1;
-  int do_memory = 1;
-  int do_comms = 1;
-  int do_flops = 1;
-  int Ls = 1;
-
   int sel = 4;
   std::vector<int> L_list({8, 12, 16, 24, 32});
   int selm1 = sel - 1;
@@ -830,6 +1071,22 @@ int main(int argc, char **argv)
     Benchmark::Comms();
   }
 
+  if (do_latency)
+  {
+    grid_big_sep();
+    std::cout << GridLogMessage << " Latency benchmark " << std::endl;
+    grid_big_sep();
+    Benchmark::Latency();
+  }
+
+  if (do_p2p)
+  {
+    grid_big_sep();
+    std::cout << GridLogMessage << " Point-To-Point benchmark " << std::endl;
+    grid_big_sep();
+    Benchmark::P2P();
+  }
+
   if (do_flops)
   {
     Ls = 1;
@@ -889,6 +1146,8 @@ int main(int argc, char **argv)
     json_results["flops"] = tmp_flops;
   }
 
+  json_results["hostnames"] = get_mpi_hostnames();
+
   if (!json_filename.empty())
   {
     std::cout << GridLogMessage << "writing benchmark results to " << json_filename
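
Note: for orientation, the JSON written when --json-out is given is keyed by benchmark, going by the json_results assignments visible in this diff: "comms" (one entry per lattice size and direction, now carrying the shared_mem and partial_shared_mem flags), "latency" and "p2p" (one entry per (from, to) rank pair, including the full per-iteration timing series), "flops", and "hostnames" (the per-rank list from get_mpi_hostnames()). The exact schema beyond these keys is not pinned down here.
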
@@ -4,7 +4,13 @@ set -euo pipefail
 
 gcc_spec='gcc@9.4.0'
 cuda_spec='cuda@11.4.0'
-hdf5_spec='hdf5@1.10.7'
 
+# hdf5 and fftw depend on OpenMPI, which we install manually. To make sure this
+# dependency is picked up by spack, we specify the compiler here explicitly. For
+# most other packages we don't really care about the compiler (i.e. system
+# compiler versus ${gcc_spec})
+hdf5_spec="hdf5@1.10.7+cxx+threadsafe%${gcc_spec}"
+fftw_spec="fftw%${gcc_spec}"
 
 if (( $# != 1 )); then
   echo "usage: $(basename "$0") <env dir>" 1>&2
@@ -18,7 +24,7 @@ cd "${cwd}"
 
 # General configuration ########################################################
 # build with 128 tasks
 echo 'config:
   build_jobs: 128
   build_stage:
     - $spack/var/spack/stage
@@ -38,26 +44,23 @@ rm external.yaml
 
 # Base compilers ###############################################################
+# configure system base
 
 spack env create base
 spack env activate base
 spack compiler find --scope site
 
-# install GCC, CUDA & LLVM
-spack install ${gcc_spec} ${cuda_spec} llvm
-
-spack load llvm
+# install GCC, CUDA
+spack add ${gcc_spec} ${cuda_spec}
+spack concretize
+spack env depfile -o Makefile.tmp
+make -j128 -f Makefile.tmp
 spack compiler find --scope site
-spack unload llvm
 
 spack load ${gcc_spec}
 spack compiler find --scope site
 spack unload ${gcc_spec}
 
 # Manual compilation of OpenMPI & UCX ##########################################
 # set build directories
 mkdir -p "${dir}"/build
 cd "${dir}"/build
 
 spack load ${gcc_spec} ${cuda_spec}
 
 cuda_path=$(spack find --format "{prefix}" cuda)
 gdrcopy_path=/mnt/lustre/tursafs1/apps/gdrcopy/2.3.1
 
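
Note: the depfile pattern introduced above replaces a serial `spack install` with a generated Makefile, so independent packages build in parallel under make. A generic sketch of the same pattern (environment name, spec, and job count here are placeholders, not the script's):

    spack env activate myenv            # hypothetical environment
    spack add zlib                      # queue specs instead of installing one by one
    spack concretize                    # resolve the whole dependency DAG once
    spack env depfile -o Makefile.tmp   # emit one make target per package
    make -j128 -f Makefile.tmp          # build independent packages in parallel
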
@@ -124,8 +127,8 @@ mkdir build_gpu; cd build_gpu
     --with-cuda="${cuda_path}" --disable-getpwuid \
     --with-verbs --with-slurm --enable-mpi-fortran=all \
     --with-pmix=internal --with-libevent=internal
 make -j 128
 make install
 cd ..
 
 # openmpi cpu build
@@ -141,60 +144,62 @@ make -j 128
 make install
 cd "${dir}"
 
+ucx_spec_gpu="ucx@1.12.0.GPU%${gcc_spec}"
+ucx_spec_cpu="ucx@1.12.0.CPU%${gcc_spec}"
+openmpi_spec_gpu="openmpi@4.1.1.GPU%${gcc_spec}"
+openmpi_spec_cpu="openmpi@4.1.1.CPU%${gcc_spec}"
+
 # Add externals to spack
 echo "packages:
   ucx:
     externals:
-    - spec: \"ucx@1.12.0.GPU%gcc@9.4.0\"
+    - spec: \"${ucx_spec_gpu}\"
       prefix: ${dir}/prefix/ucx_gpu
-    - spec: \"ucx@1.12.0.CPU%gcc@9.4.0\"
+    - spec: \"${ucx_spec_cpu}\"
       prefix: ${dir}/prefix/ucx_cpu
     buildable: False
   openmpi:
    externals:
-    - spec: \"openmpi@4.1.1.GPU%gcc@9.4.0\"
+    - spec: \"${openmpi_spec_gpu}\"
      prefix: ${dir}/prefix/ompi_gpu
-    - spec: \"openmpi@4.1.1.CPU%gcc@9.4.0\"
+    - spec: \"${openmpi_spec_cpu}\"
      prefix: ${dir}/prefix/ompi_cpu
    buildable: False" > spack.yaml
 
 spack config --scope site add -f spack.yaml
 rm spack.yaml
-spack install ucx@1.12.0.GPU%gcc@9.4.0 openmpi@4.1.1.GPU%gcc@9.4.0
-spack install ucx@1.12.0.CPU%gcc@9.4.0 openmpi@4.1.1.CPU%gcc@9.4.0
 spack env deactivate
 
 cd "${cwd}"
 
 # environments #################################################################
 dev_tools=("autoconf" "automake" "libtool" "jq" "git")
-ompi_gpu_hash=$(spack find --format "{hash}" openmpi@4.1.1.GPU)
-ompi_cpu_hash=$(spack find --format "{hash}" openmpi@4.1.1.CPU)
 
 spack env create grid-gpu
 spack env activate grid-gpu
-spack add ${gcc_spec} ${cuda_spec} "${dev_tools[@]}"
-spack add ucx@1.12.0.GPU%gcc@9.4.0 openmpi@4.1.1.GPU%gcc@9.4.0
-spack add ${hdf5_spec}+cxx+threadsafe ^/"${ompi_gpu_hash}"
-spack add fftw ^/"${ompi_gpu_hash}"
-spack add openssl gmp mpfr c-lime
-spack install
-spack compiler find --scope site
+spack add ${gcc_spec} ${cuda_spec} ${ucx_spec_gpu} ${openmpi_spec_gpu}
+spack add ${hdf5_spec} ${fftw_spec}
+spack add openssl gmp mpfr c-lime "${dev_tools[@]}"
+spack concretize
+spack env depfile -o Makefile.tmp
+make -j128 -f Makefile.tmp
 spack env deactivate
 
 spack env create grid-cpu
 spack env activate grid-cpu
-spack add llvm "${dev_tools[@]}"
-spack add ucx@1.12.0.CPU%gcc@9.4.0 openmpi@4.1.1.CPU%gcc@9.4.0
-spack add ${hdf5_spec}+cxx+threadsafe ^/"${ompi_cpu_hash}"
-spack add fftw ^/"${ompi_cpu_hash}"
-spack add openssl gmp mpfr c-lime
-spack install
-spack compiler find --scope site
+spack add ${gcc_spec} ${ucx_spec_cpu} ${openmpi_spec_cpu}
+spack add ${hdf5_spec} ${fftw_spec}
+spack add openssl gmp mpfr c-lime "${dev_tools[@]}"
+spack concretize
+spack env depfile -o Makefile.tmp
+make -j128 -f Makefile.tmp
 spack env deactivate
 
-spack install jq git
-
 # Final setup ##################################################################
 spack clean
-spack gc -y
+#spack gc -y # "spack gc" tends to get hung up for unknown reasons
 
 # add more environment variables in module loading
 spack config --scope site add 'modules:prefix_inspections:lib:[LD_LIBRARY_PATH,LIBRARY_PATH]'
@@ -2,23 +2,73 @@
 #include <array>
 #include <blas_quda.h>
+#include <cassert>
+#include <chrono>
 #include <color_spinor_field.h>
+#include <communicator_quda.h>
 #include <dirac_quda.h>
+#include <fstream>
 #include <gauge_tools.h>
+#include <memory>
+#include <mpi.h>
 #include <stdio.h>
 #include <stdlib.h>
 
-using namespace quda;
-
 // remove to use QUDA's own flop counting instead of Grid's convention
 #define FLOP_COUNTING_GRID
 
+#include "json.hpp"
+using nlohmann::json;
+json json_results;
+
+using namespace quda;
+
+// thanks chatGPT :)
+std::string get_timestamp()
+{
+  // Get the current time
+  auto now = std::chrono::system_clock::now();
+
+  // Convert the current time to a time_t object
+  std::time_t currentTime = std::chrono::system_clock::to_time_t(now);
+
+  // Format the time using std::put_time
+  std::stringstream ss;
+  ss << std::put_time(std::localtime(&currentTime), "%Y%m%d %H:%M:%S");
+
+  return ss.str();
+}
+
 // This is the MPI grid, i.e. the layout of ranks
 int nranks = -1;
 std::array<int, 4> mpi_grid = {1, 1, 1, 1};
 
+// run f() in a loop for roughly target_time seconds
+// returns seconds per iteration it took
+template <class F> double bench(F const &f, double target_time, int niter_warmup = 5)
+{
+  device_timer_t timer;
+  timer.start();
+  for (int iter = 0; iter < niter_warmup; ++iter)
+    f();
+  timer.stop();
+
+  double secs = timer.last() / niter_warmup;
+  int niter = std::max(1, int(target_time / secs));
+  // niter = std::min(1000, niter);
+  // printfQuda("during warmup took %f s/iter, deciding on %d iters\n", secs, niter);
+
+  // important: each rank has its own timer, so their measurements can slightly vary. But
+  // 'niter' needs to be consistent (bug took me a couple hours to track down)
+  comm_broadcast_global(&niter, sizeof(niter), 0);
+
+  timer.reset(__FUNCTION__, __FILE__, __LINE__);
+  timer.start();
+  for (int iter = 0; iter < niter; ++iter)
+    f();
+  timer.stop();
+  return timer.last() / niter;
+}
+
 void initComms(int argc, char **argv)
 {
   // init MPI communication
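
Note: the calibration idea in bench() above (warm up, derive an iteration count that fills roughly target_time seconds, then measure) can be sketched without QUDA or MPI. This self-contained stand-in uses plain std::chrono and a dummy workload; all names here are hypothetical:

    #include <algorithm>
    #include <chrono>
    #include <cmath>
    #include <cstdio>

    template <class F> double bench_sketch(F const &f, double target_time, int warmup = 5)
    {
      using clock = std::chrono::steady_clock;
      auto t0 = clock::now();
      for (int i = 0; i < warmup; ++i)
        f();
      double secs = std::chrono::duration<double>(clock::now() - t0).count() / warmup;
      int niter = std::max(1, int(target_time / secs));
      // in the MPI version, niter must be broadcast so all ranks agree
      t0 = clock::now();
      for (int i = 0; i < niter; ++i)
        f();
      return std::chrono::duration<double>(clock::now() - t0).count() / niter;
    }

    int main()
    {
      volatile double sink = 0;
      auto work = [&] {
        for (int k = 0; k < 1000000; ++k)
          sink = sink + std::sqrt(double(k)); // dummy workload
      };
      double secs = bench_sketch(work, 0.5);
      std::printf("%.3f usec/iter\n", secs * 1e6);
    }
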
@@ -43,6 +93,9 @@ void initComms(int argc, char **argv)
   for (int d = 0; d < 4; d++)
     if (mpi_grid[d] > 1)
       commDimPartitionedSet(d);
 
+  json_results["geometry"]["ranks"] = nranks;
+  json_results["geometry"]["mpi"] = mpi_grid;
+
 }
 
 // creates a random gauge field. L = local(!) size
@@ -149,11 +202,8 @@ ColorSpinorField make_source(int L, int Ls = 1)
   return src;
 }
 
-void benchmark_wilson()
+void benchmark_wilson(std::vector<int> const &L_list, double target_time)
 {
-  int niter = 20;
-  int niter_warmup = 10;
-
   printfQuda("==================== wilson dirac operator ====================\n");
 #ifdef FLOP_COUNTING_GRID
   printfQuda("IMPORTANT: flop counting as in Benchmark_Grid\n");
@@ -163,8 +213,10 @@ void benchmark_wilson()
 #endif
   printfQuda("%5s %15s %15s\n", "L", "time (usec)", "Gflop/s/rank");
 
-  for (int L : {8, 12, 16, 24, 32, 48})
+  for (int L : L_list)
   {
+    // printfQuda("starting wilson L=%d\n", L);
+
     auto U = make_gauge_field(L);
     auto src = make_source(L);
 
@@ -179,44 +231,41 @@ void benchmark_wilson()
     // (the additional nullptr's are for smeared links and fancy preconditioners and such.
     // Not used for simple Wilson fermions)
     dirac.updateFields(&U, nullptr, nullptr, nullptr);
+    auto res = ColorSpinorField(ColorSpinorParam(src));
+    auto f = [&]() { dirac.Dslash(res, src, QUDA_EVEN_PARITY); };
 
-    auto tmp = ColorSpinorField(ColorSpinorParam(src));
-
-    // couple iterations without timing to warm up
-    for (int iter = 0; iter < niter_warmup; ++iter)
-      dirac.Dslash(tmp, src, QUDA_EVEN_PARITY);
-
-    // actual benchmark with timings
+    // first run to get the quda tuning out of the way
     dirac.Flops(); // reset flops counter
-    device_timer_t device_timer;
-    device_timer.start();
-    for (int iter = 0; iter < niter; ++iter)
-      dirac.Dslash(tmp, src, QUDA_EVEN_PARITY);
-    device_timer.stop();
+    f();
+    double flops = 1.0 * dirac.Flops();
 
-    double secs = device_timer.last() / niter;
+    // actual benchmarking
+    auto start_time = get_timestamp();
+    double secs = bench(f, target_time);
+    auto end_time = get_timestamp();
 
 #ifdef FLOP_COUNTING_GRID
     // this is the flop counting from Benchmark_Grid
     double Nc = 3;
     double Nd = 4;
     double Ns = 4;
-    double flops =
-        (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
+    flops = (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
     flops *= L * L * L * L / 2.0;
-#else
-    double flops = 1.0 * dirac.Flops() / niter;
 #endif
 
     printfQuda("%5d %15.2f %15.2f\n", L, secs * 1e6, flops / secs * 1e-9);
+
+    json tmp;
+    tmp["L"] = L;
+    tmp["Gflops_wilson"] = flops / secs * 1e-9;
+    tmp["start_time"] = start_time;
+    tmp["end_time"] = end_time;
+    json_results["flops"]["results"].push_back(tmp);
   }
 }
 
-void benchmark_dwf()
+void benchmark_dwf(std::vector<int> const &L_list, double target_time)
 {
-  int niter = 20;
-  int niter_warmup = 10;
-
   printfQuda("==================== domain wall dirac operator ====================\n");
 #ifdef FLOP_COUNTING_GRID
   printfQuda("IMPORTANT: flop counting as in Benchmark_Grid\n");
@@ -226,8 +275,9 @@ void benchmark_dwf()
 #endif
   printfQuda("%5s %15s %15s\n", "L", "time (usec)", "Gflop/s/rank");
   int Ls = 12;
-  for (int L : {8, 12, 16, 24})
+  for (int L : L_list)
   {
+    // printfQuda("starting dwf L=%d\n", L);
     auto U = make_gauge_field(L);
     auto src = make_source(L, Ls);
 
@@ -243,45 +293,43 @@ void benchmark_dwf()
     // insert gauge field into the dirac operator
     // (the additional nullptr's are for smeared links and fancy preconditioners and such)
     dirac.updateFields(&U, nullptr, nullptr, nullptr);
+    auto res = ColorSpinorField(ColorSpinorParam(src));
+    auto f = [&]() { dirac.Dslash(res, src, QUDA_EVEN_PARITY); };
 
-    auto tmp = ColorSpinorField(ColorSpinorParam(src));
-
-    // couple iterations without timing to warm up
-    for (int iter = 0; iter < niter_warmup; ++iter)
-      dirac.Dslash(tmp, src, QUDA_EVEN_PARITY);
-
-    // actual benchmark with timings
+    // first run to get the quda tuning out of the way
     dirac.Flops(); // reset flops counter
-    device_timer_t device_timer;
-    device_timer.start();
-    for (int iter = 0; iter < niter; ++iter)
-      dirac.Dslash(tmp, src, QUDA_EVEN_PARITY);
-    device_timer.stop();
+    f();
+    double flops = 1.0 * dirac.Flops();
 
-    double secs = device_timer.last() / niter;
+    // actual benchmarking
+    auto start_time = get_timestamp();
+    double secs = bench(f, target_time);
+    auto end_time = get_timestamp();
 
 #ifdef FLOP_COUNTING_GRID
     // this is the flop counting from Benchmark_Grid
     double Nc = 3;
     double Nd = 4;
    double Ns = 4;
-    double flops =
-        (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
+    flops = (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
     flops *= L * L * L * L * Ls / 2.0;
-#else
-    double flops = 1.0 * dirac.Flops() / niter;
 #endif
 
     printfQuda("%5d %15.2f %15.2f\n", L, secs * 1e6, flops / secs * 1e-9);
+    json tmp;
+    tmp["L"] = L;
+    tmp["Gflops_dwf4"] = flops / secs * 1e-9;
+    tmp["start_time"] = start_time;
+    tmp["end_time"] = end_time;
+    json_results["flops"]["results"].push_back(tmp);
   }
 }
 
-void benchmark_axpy()
+void benchmark_axpy(std::vector<int> const &L_list, double target_time)
 {
   // number of iterations for warmup / measurement
   // (feel free to change for noise/time tradeoff)
-  constexpr int niter_warmup = 10;
-  constexpr int niter = 20;
+  constexpr int niter_warmup = 5;
 
   printfQuda("==================== axpy / memory ====================\n");
 
@@ -305,11 +353,11 @@ void benchmark_axpy()
 
   printfQuda("%5s %15s %15s %15s %15s\n", "L", "size (MiB/rank)", "time (usec)",
              "GiB/s/rank", "Gflop/s/rank");
-  std::vector L_list = {8, 12, 16, 24, 32, 48};
   for (int L : L_list)
   {
-    // IMPORTANT: all of `param.x`, `field_elements`, `field.Bytes()`
-    // are LOCAL, i.e. per rank / per GPU
+    // printfQuda("starting axpy L=%d\n", L);
+    // IMPORTANT: all of `param.x`, `field_elements`, `field.Bytes()`
+    // are LOCAL, i.e. per rank / per GPU
 
     param.x[0] = L;
     param.x[1] = L;
@@ -336,26 +384,41 @@ void benchmark_axpy()
     double flops = 2 * field_elements;
     double memory = 3 * sizeof(float) * field_elements;
 
-    // do some iterations to to let QUDA do its internal tuning and also stabilize cache
-    // behaviour and such
-    for (int iter = 0; iter < niter_warmup; ++iter)
-      blas::axpy(1.234, fieldA, fieldB);
+    auto f = [&]() { blas::axpy(1.234, fieldA, fieldB); };
 
-    // running the actual benchmark
-    device_timer_t device_timer;
-    device_timer.start();
-    for (int iter = 0; iter < niter; ++iter)
-      blas::axpy(1.234, fieldA, fieldB);
-    device_timer.stop();
-    double secs = device_timer.last() / niter; // seconds per iteration
+    // first run to get the quda tuning out of the way
+    f();
 
-    printfQuda("%5d %15.2f %15.2f %15.2f %15.2f\n", L, memory / 1024. / 1024., secs * 1e6,
-               memory / secs / 1024. / 1024. / 1024., flops / secs * 1e-9);
+    // actual benchmarking
+    auto start_time = get_timestamp();
+    double secs = bench(f, target_time);
+    auto end_time = get_timestamp();
+
+    double mem_MiB = memory / 1024. / 1024.;
+    double GBps = mem_MiB / 1024 / secs;
+    printfQuda("%5d %15.2f %15.2f %15.2f %15.2f\n", L, mem_MiB, secs * 1e6, GBps,
+               flops / secs * 1e-9);
+
+    json tmp;
+    tmp["L"] = L;
+    tmp["size_MB"] = mem_MiB;
+    tmp["GBps"] = GBps;
+    tmp["GFlops"] = flops / secs * 1e-9;
+    tmp["start_time"] = start_time;
+    tmp["end_time"] = end_time;
+    json_results["axpy"].push_back(tmp);
   }
 }
 
 int main(int argc, char **argv)
 {
+  std::string json_filename = ""; // empty indicates no json output
+  for (int i = 0; i < argc; i++)
+  {
+    if (std::string(argv[i]) == "--json-out")
+      json_filename = argv[i + 1];
+  }
+
   initComms(argc, argv);
 
   initQuda(-1); // -1 for multi-gpu. otherwise this selects the device to be used
@@ -367,14 +430,28 @@ int main(int argc, char **argv)
   printfQuda("MPI layout = %d %d %d %d\n", mpi_grid[0], mpi_grid[1], mpi_grid[2],
              mpi_grid[3]);
 
-  benchmark_axpy();
+  benchmark_axpy({8, 12, 16, 24, 32, 48}, 1.0);
 
   setVerbosity(QUDA_SILENT);
-  benchmark_wilson();
-  benchmark_dwf();
+  benchmark_wilson({8, 12, 16, 24, 32, 48}, 1.0);
+  benchmark_dwf({8, 12, 16, 24, 32}, 1.0);
   setVerbosity(QUDA_SUMMARIZE);
 
   printfQuda("==================== done with all benchmarks ====================\n");
+
+  if (!json_filename.empty())
+  {
+    printfQuda("writing benchmark results to %s\n", json_filename.c_str());
+
+    int me = 0;
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+    if (me == 0)
+    {
+      std::ofstream json_file(json_filename);
+      json_file << std::setw(2) << json_results;
+    }
+  }
+
   endQuda();
   quda::comm_finalize();
   MPI_Finalize();
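
Note: once a run has written its JSON (the file name below is hypothetical), the jq installed via dev_tools in the spack script above can slice the results, e.g. the axpy bandwidth per lattice size:

    jq '.axpy[] | {L, GBps}' results.json
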
@@ -28,5 +28,5 @@ mkdir -p "${PREFIX_DIR}"
 
 LINK_FLAGS="-Wl,-rpath,$QUDA_DIR/lib: $QUDA_DIR/lib/libquda.so $EXTRA_LIBS -lpthread -lmpi"
 
-g++ $BUILD_FLAGS -I$QUDA_DIR/include -c -o $BUILD_DIR/Benchmark_Quda.o $script_dir/Benchmark_Quda.cpp
+g++ $BUILD_FLAGS -I$QUDA_DIR/include/targets/cuda -I$QUDA_DIR/include -c -o $BUILD_DIR/Benchmark_Quda.o $script_dir/Benchmark_Quda.cpp
 g++ -g -O3 $BUILD_DIR/Benchmark_Quda.o -o $PREFIX_DIR/Benchmark_Quda $LINK_FLAGS -lmpi