Compare commits d7647afa72...371a329457

10 commits:

- 371a329457
- f81cb198ab
- a7e1d9e67f
- 19c9dcb6ae
- 7d89380b80
- 4cd67805b9
- f7e607eae4
- a267986800
- a1ec08cdb3
- fb6c79d9ca
```diff
@@ -1,7 +1,7 @@
 /*
 Copyright © 2015 Peter Boyle <paboyle@ph.ed.ac.uk>
 Copyright © 2022 Antonin Portelli <antonin.portelli@me.com>
-Copyright © 2022 Simon Buerger <simon.buerger@rwth-aachen.de>
+Copyright © 2024 Simon Buerger <simon.buerger@rwth-aachen.de>
 
 This is a fork of Benchmark_ITT.cpp from Grid
 
```
```diff
@@ -29,6 +29,43 @@ int NN_global;
 
 nlohmann::json json_results;
 
+// NOTE: Grid::GridClock is just a typedef to
+// `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to
+// microseconds (no idea why, probably wasn't ever relevant before), so we need
+// our own wrapper here.
+double usecond_precise()
+{
+  using namespace std::chrono;
+  auto nsecs = duration_cast<nanoseconds>(GridClock::now() - Grid::theProgramStart);
+  return nsecs.count() * 1e-3;
+}
+
+std::vector<std::string> get_mpi_hostnames()
+{
+  int world_size;
+  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+  char hostname[MPI_MAX_PROCESSOR_NAME];
+  int name_len = 0;
+  MPI_Get_processor_name(hostname, &name_len);
+
+  // Allocate buffer to gather all hostnames
+  std::vector<char> all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME);
+
+  // Use MPI_Allgather to gather all hostnames on all ranks
+  MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, all_hostnames.data(),
+                MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_COMM_WORLD);
+
+  // Convert the gathered hostnames back into a vector of std::string
+  std::vector<std::string> hostname_list(world_size);
+  for (int i = 0; i < world_size; ++i)
+  {
+    hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]);
+  }
+
+  return hostname_list;
+}
+
 struct time_statistics
 {
   double mean;
```
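The NOTE in this hunk is the whole motivation for the new wrapper: `Grid::usecond` truncates to whole microseconds, which is too coarse for latencies that are themselves only a few microseconds. A minimal standalone sketch of the same idea using only `std::chrono` (the names here are illustrative, not part of the diff):

```cpp
#include <chrono>

// Fixed epoch taken at startup (the diff uses Grid::theProgramStart instead).
static const auto program_start = std::chrono::high_resolution_clock::now();

// Elapsed time in microseconds, keeping sub-microsecond resolution by counting
// nanoseconds first and only converting to a fractional double at the end.
double elapsed_usec()
{
  using namespace std::chrono;
  auto ns = duration_cast<nanoseconds>(high_resolution_clock::now() - program_start);
  return ns.count() * 1e-3;
}
```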
```diff
@@ -73,7 +110,7 @@ class Benchmark
         {local[0] * mpi[0], local[1] * mpi[1], local[2] * mpi[2], local[3] * mpi[3]});
     GridCartesian *TmpGrid = SpaceTimeGrid::makeFourDimGrid(
         latt4, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
-    Grid::Coordinate shm;
+    Grid::Coordinate shm(4, 1);
     GlobalSharedMemory::GetShmDims(mpi, shm);
 
     uint64_t NP = TmpGrid->RankCount();
```
```diff
@@ -137,7 +174,7 @@ class Benchmark
 
     Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
     Coordinate mpi_layout = GridDefaultMpi();
-    Coordinate shm_layout;
+    Coordinate shm_layout(Nd, 1);
     GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout);
 
     for (int mu = 0; mu < Nd; mu++)
```
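The two one-line fixes above (`shm(4, 1)` and `shm_layout(Nd, 1)`) replace default-constructed Coordinates with ones pre-sized to one entry per dimension. Assuming `Grid::Coordinate` behaves like a standard vector and `GetShmDims` writes into existing elements, the unsized version would index out of bounds; a generic illustration of the distinction (not Grid code):

```cpp
#include <cassert>
#include <vector>

int main()
{
  const int Nd = 4;
  std::vector<int> shm_bad;       // size 0: shm_bad[mu] = ... is undefined behaviour
  std::vector<int> shm_ok(Nd, 1); // one entry per dimension, initialised to 1
  assert(shm_bad.empty());
  assert(static_cast<int>(shm_ok.size()) == Nd);
  return 0;
}
```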
```diff
@@ -266,124 +303,166 @@ class Benchmark
 
   static void Latency(void)
   {
-    int Nloop = 200;
-    int nmu = 0;
-
-    Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
-    Coordinate mpi_layout = GridDefaultMpi();
-    Coordinate shm_layout;
-    GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout);
-
-    for (int mu = 0; mu < Nd; mu++)
-      if (mpi_layout[mu] > 1)
-        nmu++;
-
-    std::vector<double> t_time(Nloop);
-    time_statistics timestat;
-
-    std::cout << GridLogMessage << "Benchmarking Latency to neighbors in " << nmu
-              << " dimensions" << std::endl;
+    int Nwarmup = 100;
+    int Nloop = 300;
+
+    std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl;
     grid_small_sep();
-    grid_printf("%5s %7s %15s %15s %15s\n", "dir", "shm", "time (usec)", "std dev",
-                "min");
-
-    int lat = 8; // dummy lattice size. Not really used.
-    Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2],
-                          lat * mpi_layout[3]});
-
-    GridCartesian Grid(latt_size, simd_layout, mpi_layout);
-    RealD Nrank = Grid._Nprocessors;
-    RealD Nnode = Grid.NodeCount();
-    RealD ppn = Nrank / Nnode;
-
-    std::vector<HalfSpinColourVectorD *> xbuf(8);
-    std::vector<HalfSpinColourVectorD *> rbuf(8);
-    uint64_t bytes = 8;
-    for (int d = 0; d < 8; d++)
-    {
-      xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
-      rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes);
-    }
-
-    double dbytes;
-#define NWARMUP 50
-
-    for (int dir = 0; dir < 8; dir++)
-    {
-      int mu = dir % 4;
-      if (mpi_layout[mu] == 1) // skip directions that are not distributed
-        continue;
-      bool is_shm = mpi_layout[mu] == shm_layout[mu];
-      bool is_partial_shm = !is_shm && shm_layout[mu] != 1;
-
-      std::vector<double> times(Nloop);
-      for (int i = 0; i < NWARMUP; i++)
+    grid_printf("from to      mean(usec)             err             max\n");
+
+    int ranks;
+    int me;
+    MPI_Comm_size(MPI_COMM_WORLD, &ranks);
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+    int bytes = 8;
+    void *buf_from = acceleratorAllocDevice(bytes);
+    void *buf_to = acceleratorAllocDevice(bytes);
+    nlohmann::json json_latency;
+    for (int from = 0; from < ranks; ++from)
+      for (int to = 0; to < ranks; ++to)
       {
-        int xmit_to_rank;
-        int recv_from_rank;
-
-        if (dir == mu)
-        {
-          int comm_proc = 1;
-          Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-        }
-        else
-        {
-          int comm_proc = mpi_layout[mu] - 1;
-          Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-        }
-        Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0],
-                            recv_from_rank, bytes);
+        if (from == to)
+          continue;
+
+        std::vector<double> t_time(Nloop);
+        time_statistics timestat;
+        MPI_Status status;
+
+        for (int i = -Nwarmup; i < Nloop; ++i)
+        {
+          double start = usecond_precise();
+          if (from == me)
+          {
+            auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
+            assert(err == MPI_SUCCESS);
+          }
+          if (to == me)
+          {
+            auto err =
+                MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
+            assert(err == MPI_SUCCESS);
+          }
+          double stop = usecond_precise();
+          if (i >= 0)
+            t_time[i] = stop - start;
+        }
+        // important: only 'from' and 'to' have meaningful timings. we use
+        // 'from's.
+        MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
+
+        timestat.statistics(t_time);
+        grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean,
+                    timestat.err, timestat.max);
+        nlohmann::json tmp;
+        tmp["from"] = from;
+        tmp["to"] = to;
+        tmp["time_usec"] = timestat.mean;
+        tmp["time_usec_error"] = timestat.err;
+        tmp["time_usec_min"] = timestat.min;
+        tmp["time_usec_max"] = timestat.max;
+        tmp["time_usec_full"] = t_time;
+        json_latency.push_back(tmp);
       }
-      for (int i = 0; i < Nloop; i++)
-      {
-        dbytes = 0;
-        double start = usecond();
-        int xmit_to_rank;
-        int recv_from_rank;
-
-        if (dir == mu)
-        {
-          int comm_proc = 1;
-          Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-        }
-        else
-        {
-          int comm_proc = mpi_layout[mu] - 1;
-          Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank);
-        }
-        Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0],
-                            recv_from_rank, bytes);
-        dbytes += bytes;
-
-        double stop = usecond();
-        t_time[i] = stop - start; // microseconds
-      }
-      timestat.statistics(t_time);
-
-      grid_printf("%5d %7s %15.2f %15.1f %15.2f\n", dir,
-                  is_shm           ? "yes"
-                  : is_partial_shm ? "partial"
-                                   : "no",
-                  timestat.mean, timestat.err, timestat.min);
-      nlohmann::json tmp;
-      nlohmann::json tmp_rate;
-      tmp["dir"] = dir;
-      tmp["shared_mem"] = is_shm;
-      tmp["partial_shared_mem"] = is_partial_shm;
-      tmp["time_usec"] = timestat.mean;
-      tmp["time_usec_error"] = timestat.err;
-      tmp["time_usec_max"] = timestat.min;
-      json_results["latency"].push_back(tmp);
-    }
-    for (int d = 0; d < 8; d++)
-    {
-      acceleratorFreeDevice(xbuf[d]);
-      acceleratorFreeDevice(rbuf[d]);
-    }
-
-    return;
+    json_results["latency"] = json_latency;
+
+    acceleratorFreeDevice(buf_from);
+    acceleratorFreeDevice(buf_to);
+  }
+
+  static void P2P(void)
+  {
+    // IMPORTANT: The P2P benchmark uses the "MPI_COMM_WORLD" communicator, which
+    // is not quite the same as Grid.communicator. Practically speaking, the
+    // latter contains the same MPI ranks but in a different order. Grid does
+    // this to make sure it can exploit ranks with shared memory (i.e.
+    // multiple ranks on the same node) as best as possible.
+
+    // buffer-size to benchmark. This number is the same as the largest one used
+    // in the "Comms()" benchmark. ( L=48, Ls=12, double-prec-complex,
+    // half-color-spin-vector. ). Mostly an arbitrary choice, but nice to match
+    // it here
+    size_t bytes = 127401984;
+
+    int Nwarmup = 20;
+    int Nloop = 100;
+
+    std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl;
+    grid_small_sep();
+    grid_printf("from to      mean(usec)             err             min           "
+                "bytes    rate (GiB/s)\n");
+
+    int ranks;
+    int me;
+    MPI_Comm_size(MPI_COMM_WORLD, &ranks);
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+    void *buf_from = acceleratorAllocDevice(bytes);
+    void *buf_to = acceleratorAllocDevice(bytes);
+    nlohmann::json json_p2p;
+    for (int from = 0; from < ranks; ++from)
+      for (int to = 0; to < ranks; ++to)
+      {
+        if (from == to)
+          continue;
+
+        std::vector<double> t_time(Nloop);
+        time_statistics timestat;
+        MPI_Status status;
+
+        for (int i = -Nwarmup; i < Nloop; ++i)
+        {
+          double start = usecond_precise();
+          if (from == me)
+          {
+            auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
+            assert(err == MPI_SUCCESS);
+          }
+          if (to == me)
+          {
+            auto err =
+                MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
+            assert(err == MPI_SUCCESS);
+          }
+          double stop = usecond_precise();
+          if (i >= 0)
+            t_time[i] = stop - start;
+        }
+        // important: only 'from' and 'to' have meaningful timings. we use
+        // 'from's.
+        MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
+
+        timestat.statistics(t_time);
+        double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.;
+        double rate_err = rate * timestat.err / timestat.mean;
+        double rate_max = rate * timestat.mean / timestat.min;
+        double rate_min = rate * timestat.mean / timestat.max;
+
+        grid_printf("%2d %2d %15.4f %15.3f %15.4f %15d %15.2f\n", from, to,
+                    timestat.mean, timestat.err, timestat.min, bytes, rate);
+
+        nlohmann::json tmp;
+        tmp["from"] = from;
+        tmp["to"] = to;
+        tmp["bytes"] = bytes;
+        tmp["time_usec"] = timestat.mean;
+        tmp["time_usec_error"] = timestat.err;
+        tmp["time_usec_min"] = timestat.min;
+        tmp["time_usec_max"] = timestat.max;
+        tmp["time_usec_full"] = t_time;
+        nlohmann::json tmp_rate;
+        tmp_rate["mean"] = rate;
+        tmp_rate["error"] = rate_err;
+        tmp_rate["max"] = rate_max;
+        tmp_rate["min"] = rate_min;
+        tmp["rate_GBps"] = tmp_rate;
+
+        json_p2p.push_back(tmp);
+      }
+    json_results["p2p"] = json_p2p;
+
+    acceleratorFreeDevice(buf_from);
+    acceleratorFreeDevice(buf_to);
   }
 
   static void Memory(void)
```
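Both new benchmarks share the same measurement idiom: the loop index starts at `-Nwarmup`, the body always runs, but only iterations with `i >= 0` are recorded, so the first kept sample already sees warm caches and links. A self-contained sketch of the pattern (the helper names are placeholders, not from the diff):

```cpp
#include <chrono>
#include <vector>

// Placeholder for the communication under test (MPI_Send/MPI_Recv in the diff).
void communicate_once() {}

// Current time in fractional microseconds.
double now_usec()
{
  using namespace std::chrono;
  return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count() *
         1e-3;
}

std::vector<double> timed_loop(int nwarmup, int nloop)
{
  std::vector<double> t(nloop);
  for (int i = -nwarmup; i < nloop; ++i)
  {
    double start = now_usec();
    communicate_once();
    double stop = now_usec();
    if (i >= 0) // warmup iterations are executed but discarded
      t[i] = stop - start;
  }
  return t;
}
```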
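The constant `bytes = 127401984` in `P2P()` checks out against its comment if one assumes the largest Comms() message is a full time-slice surface of `HalfSpinColourVectorD` in double precision; this decomposition is an inference, not stated in the diff:

```cpp
// 48^3 sites x Ls=12 x (2 spins x 3 colours x 2 doubles per complex x 8 B each)
//   = 110592 x 12 x 96 B = 127401984 B (exactly 121.5 MiB)
static_assert(48ULL * 48 * 48 * 12 * (2 * 3 * 2 * sizeof(double)) == 127401984ULL,
              "P2P buffer size matches the largest Comms() message");
```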
```diff
@@ -908,11 +987,47 @@ int main(int argc, char **argv)
 {
   Grid_init(&argc, &argv);
 
+  int Ls = 1;
+  bool do_su4 = true;
+  bool do_memory = true;
+  bool do_comms = true;
+  bool do_flops = true;
+
+  // NOTE: these two take O((number of ranks)^2) time, which might be a lot, so they are
+  // off by default
+  bool do_latency = false;
+  bool do_p2p = false;
+
   std::string json_filename = ""; // empty indicates no json output
   for (int i = 0; i < argc; i++)
   {
-    if (std::string(argv[i]) == "--json-out")
+    auto arg = std::string(argv[i]);
+    if (arg == "--json-out")
       json_filename = argv[i + 1];
+    if (arg == "--benchmark-su4")
+      do_su4 = true;
+    if (arg == "--benchmark-memory")
+      do_memory = true;
+    if (arg == "--benchmark-comms")
+      do_comms = true;
+    if (arg == "--benchmark-flops")
+      do_flops = true;
+    if (arg == "--benchmark-latency")
+      do_latency = true;
+    if (arg == "--benchmark-p2p")
+      do_p2p = true;
+    if (arg == "--no-benchmark-su4")
+      do_su4 = false;
+    if (arg == "--no-benchmark-memory")
+      do_memory = false;
+    if (arg == "--no-benchmark-comms")
+      do_comms = false;
+    if (arg == "--no-benchmark-flops")
+      do_flops = false;
+    if (arg == "--no-benchmark-latency")
+      do_latency = false;
+    if (arg == "--no-benchmark-p2p")
+      do_p2p = false;
   }
 
   CartesianCommunicator::SetCommunicatorPolicy(
```
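Since the option parsing above is a flat sequence of independent `if` statements, each benchmark can be toggled separately and, when contradictory flags are given, the one appearing later on the command line wins. For example, a hypothetical invocation with `--no-benchmark-flops --benchmark-p2p --json-out results.json` would skip the flops section, enable the otherwise-off P2P benchmark, and write the JSON output to `results.json`.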
```diff
@@ -924,13 +1039,6 @@ int main(int argc, char **argv)
 #endif
   Benchmark::Decomposition();
 
-  int do_su4 = 1;
-  int do_memory = 1;
-  int do_comms = 1;
-  int do_latency = 1;
-  int do_flops = 1;
-  int Ls = 1;
-
   int sel = 4;
   std::vector<int> L_list({8, 12, 16, 24, 32});
   int selm1 = sel - 1;
```
```diff
@@ -971,6 +1079,14 @@ int main(int argc, char **argv)
     Benchmark::Latency();
   }
 
+  if (do_p2p)
+  {
+    grid_big_sep();
+    std::cout << GridLogMessage << " Point-To-Point benchmark " << std::endl;
+    grid_big_sep();
+    Benchmark::P2P();
+  }
+
   if (do_flops)
   {
     Ls = 1;
```
```diff
@@ -1030,6 +1146,8 @@ int main(int argc, char **argv)
     json_results["flops"] = tmp_flops;
   }
 
+  json_results["hostnames"] = get_mpi_hostnames();
+
   if (!json_filename.empty())
   {
     std::cout << GridLogMessage << "writing benchmark results to " << json_filename
```