From 6055e0503c28eeeeb016dd0e756dcad3dfbe8634 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Wed, 20 Dec 2023 13:43:51 +0000 Subject: [PATCH 1/9] simple latency benchmark --- Grid/Benchmark_Grid.cpp | 131 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index 262e0eb..f013598 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -264,6 +264,128 @@ class Benchmark return; } + static void Latency(void) + { + int Nloop = 200; + int nmu = 0; + + Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); + Coordinate mpi_layout = GridDefaultMpi(); + Coordinate shm_layout; + GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout); + + for (int mu = 0; mu < Nd; mu++) + if (mpi_layout[mu] > 1) + nmu++; + + std::vector t_time(Nloop); + time_statistics timestat; + + std::cout << GridLogMessage << "Benchmarking Latency to neighbors in " << nmu + << " dimensions" << std::endl; + grid_small_sep(); + grid_printf("%5s %7s %15s %15s %15s\n", "dir", "shm", "time (usec)", "std dev", + "min"); + + int lat = 8; // dummy lattice size. Not really used. + Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], + lat * mpi_layout[3]}); + + GridCartesian Grid(latt_size, simd_layout, mpi_layout); + RealD Nrank = Grid._Nprocessors; + RealD Nnode = Grid.NodeCount(); + RealD ppn = Nrank / Nnode; + + std::vector xbuf(8); + std::vector rbuf(8); + uint64_t bytes = 8; + for (int d = 0; d < 8; d++) + { + xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes); + rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes); + } + + double dbytes; +#define NWARMUP 50 + + for (int dir = 0; dir < 8; dir++) + { + int mu = dir % 4; + if (mpi_layout[mu] == 1) // skip directions that are not distributed + continue; + bool is_shm = mpi_layout[mu] == shm_layout[mu]; + bool is_partial_shm = !is_shm && shm_layout[mu] != 1; + + std::vector times(Nloop); + for (int i = 0; i < NWARMUP; i++) + { + int xmit_to_rank; + int recv_from_rank; + + if (dir == mu) + { + int comm_proc = 1; + Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); + } + else + { + int comm_proc = mpi_layout[mu] - 1; + Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); + } + Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0], + recv_from_rank, bytes); + } + for (int i = 0; i < Nloop; i++) + { + + dbytes = 0; + double start = usecond(); + int xmit_to_rank; + int recv_from_rank; + + if (dir == mu) + { + int comm_proc = 1; + Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); + } + else + { + int comm_proc = mpi_layout[mu] - 1; + Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); + } + Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0], + recv_from_rank, bytes); + dbytes += bytes; + + double stop = usecond(); + t_time[i] = stop - start; // microseconds + } + timestat.statistics(t_time); + + grid_printf("%5d %7s %15.2f %15.1f %15.2f\n", dir, + is_shm ? "yes" + : is_partial_shm ? 
"partial" + : "no", + timestat.mean, timestat.err, timestat.min); + nlohmann::json tmp; + nlohmann::json tmp_rate; + tmp["dir"] = dir; + tmp["shared_mem"] = is_shm; + tmp["partial_shared_mem"] = is_partial_shm; + tmp["time_usec"] = timestat.mean; + tmp["time_usec_error"] = timestat.err; + tmp["time_usec_max"] = timestat.min; + json_results["latency"].push_back(tmp); + } + for (int d = 0; d < 8; d++) + { + acceleratorFreeDevice(xbuf[d]); + acceleratorFreeDevice(rbuf[d]); + } + + return; + } + static void Memory(void) { const int Nvec = 8; @@ -805,6 +927,7 @@ int main(int argc, char **argv) int do_su4 = 1; int do_memory = 1; int do_comms = 1; + int do_latency = 1; int do_flops = 1; int Ls = 1; @@ -840,6 +963,14 @@ int main(int argc, char **argv) Benchmark::Comms(); } + if (do_latency) + { + grid_big_sep(); + std::cout << GridLogMessage << " Latency benchmark " << std::endl; + grid_big_sep(); + Benchmark::Latency(); + } + if (do_flops) { Ls = 1; From a1ec08cdb33f632a510c431881ea5d02d87954e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Thu, 9 May 2024 23:17:54 +0100 Subject: [PATCH 2/9] point-to-point latency --- Grid/Benchmark_Grid.cpp | 165 +++++++++++++++------------------------- 1 file changed, 61 insertions(+), 104 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index f013598..e33459e 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -264,126 +264,83 @@ class Benchmark return; } - static void Latency(void) + static void PointToPoint(void) { int Nloop = 200; - int nmu = 0; Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); Coordinate mpi_layout = GridDefaultMpi(); - Coordinate shm_layout; - GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout); - for (int mu = 0; mu < Nd; mu++) - if (mpi_layout[mu] > 1) - nmu++; - - std::vector t_time(Nloop); - time_statistics timestat; - - std::cout << GridLogMessage << "Benchmarking Latency to neighbors in " << nmu - << " dimensions" << std::endl; + std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl; grid_small_sep(); - grid_printf("%5s %7s %15s %15s %15s\n", "dir", "shm", "time (usec)", "std dev", - "min"); + grid_printf("from to mean(usec) err min\n"); - int lat = 8; // dummy lattice size. Not really used. + int lat = 8; // dummy lattice size. Not actually used. 
Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], lat * mpi_layout[3]}); GridCartesian Grid(latt_size, simd_layout, mpi_layout); - RealD Nrank = Grid._Nprocessors; - RealD Nnode = Grid.NodeCount(); - RealD ppn = Nrank / Nnode; - std::vector xbuf(8); - std::vector rbuf(8); - uint64_t bytes = 8; - for (int d = 0; d < 8; d++) - { - xbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes); - rbuf[d] = (HalfSpinColourVectorD *)acceleratorAllocDevice(bytes); - } + int ranks; + int me; + MPI_Comm_size(Grid.communicator, &ranks); + MPI_Comm_rank(Grid.communicator, &me); + assert(ranks == Grid._Nprocessors); + assert(me == Grid._processor); - double dbytes; -#define NWARMUP 50 - - for (int dir = 0; dir < 8; dir++) - { - int mu = dir % 4; - if (mpi_layout[mu] == 1) // skip directions that are not distributed - continue; - bool is_shm = mpi_layout[mu] == shm_layout[mu]; - bool is_partial_shm = !is_shm && shm_layout[mu] != 1; - - std::vector times(Nloop); - for (int i = 0; i < NWARMUP; i++) + int bytes = 8; + void *buf_from = acceleratorAllocDevice(bytes); + void *buf_to = acceleratorAllocDevice(bytes); + nlohmann::json json_p2p; + for (int from = 0; from < ranks; ++from) + for (int to = 0; to < ranks; ++to) { - int xmit_to_rank; - int recv_from_rank; + if (from == to) + continue; - if (dir == mu) + std::vector t_time(Nloop); + time_statistics timestat; + MPI_Status status; + + for (int i = 0; i < Nloop; ++i) { - int comm_proc = 1; - Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); + double start = usecond(); + if (from == me) + { + auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); + assert(err == MPI_SUCCESS); + err = MPI_Recv(buf_to, bytes, MPI_CHAR, to, 0, Grid.communicator, &status); + assert(err == MPI_SUCCESS); + } + if (to == me) + { + auto err = + MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); + assert(err == MPI_SUCCESS); + err = MPI_Send(buf_from, bytes, MPI_CHAR, from, 0, Grid.communicator); + assert(err == MPI_SUCCESS); + } + double stop = usecond(); + t_time[i] = stop - start; } - else - { - int comm_proc = mpi_layout[mu] - 1; - Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); - } - Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0], - recv_from_rank, bytes); + // important: only the 'from' rank has a trustworthy time + MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); + + timestat.statistics(t_time); + grid_printf("%2d %2d %15.2f %15.1f %15.2f\n", from, to, timestat.mean, + timestat.err, timestat.min); + nlohmann::json tmp; + tmp["from"] = from; + tmp["to"] = to; + tmp["time_usec"] = timestat.mean; + tmp["time_usec_error"] = timestat.err; + tmp["time_usec_max"] = timestat.min; + json_p2p.push_back(tmp); } - for (int i = 0; i < Nloop; i++) - { + json_results["latency"] = json_p2p; - dbytes = 0; - double start = usecond(); - int xmit_to_rank; - int recv_from_rank; - - if (dir == mu) - { - int comm_proc = 1; - Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); - } - else - { - int comm_proc = mpi_layout[mu] - 1; - Grid.ShiftedRanks(mu, comm_proc, xmit_to_rank, recv_from_rank); - } - Grid.SendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank, (void *)&rbuf[dir][0], - recv_from_rank, bytes); - dbytes += bytes; - - double stop = usecond(); - t_time[i] = stop - start; // microseconds - } - timestat.statistics(t_time); - - grid_printf("%5d %7s %15.2f %15.1f %15.2f\n", dir, - is_shm ? "yes" - : is_partial_shm ? 
"partial" - : "no", - timestat.mean, timestat.err, timestat.min); - nlohmann::json tmp; - nlohmann::json tmp_rate; - tmp["dir"] = dir; - tmp["shared_mem"] = is_shm; - tmp["partial_shared_mem"] = is_partial_shm; - tmp["time_usec"] = timestat.mean; - tmp["time_usec_error"] = timestat.err; - tmp["time_usec_max"] = timestat.min; - json_results["latency"].push_back(tmp); - } - for (int d = 0; d < 8; d++) - { - acceleratorFreeDevice(xbuf[d]); - acceleratorFreeDevice(rbuf[d]); - } - - return; + acceleratorFreeDevice(buf_from); + acceleratorFreeDevice(buf_to); } static void Memory(void) @@ -927,7 +884,7 @@ int main(int argc, char **argv) int do_su4 = 1; int do_memory = 1; int do_comms = 1; - int do_latency = 1; + int do_p2p = 1; int do_flops = 1; int Ls = 1; @@ -963,12 +920,12 @@ int main(int argc, char **argv) Benchmark::Comms(); } - if (do_latency) + if (do_p2p) { grid_big_sep(); - std::cout << GridLogMessage << " Latency benchmark " << std::endl; + std::cout << GridLogMessage << " Point-to-Point benchmark " << std::endl; grid_big_sep(); - Benchmark::Latency(); + Benchmark::PointToPoint(); } if (do_flops) From a267986800f37b57a7f094e15e5318805e86dd58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Thu, 9 May 2024 23:25:06 +0100 Subject: [PATCH 3/9] naming consitency --- Grid/Benchmark_Grid.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index e33459e..8b81863 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -264,7 +264,7 @@ class Benchmark return; } - static void PointToPoint(void) + static void Latency(void) { int Nloop = 200; @@ -273,7 +273,7 @@ class Benchmark std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl; grid_small_sep(); - grid_printf("from to mean(usec) err min\n"); + grid_printf("from to mean(usec) err min\n"); int lat = 8; // dummy lattice size. Not actually used. 
Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], @@ -291,7 +291,7 @@ class Benchmark int bytes = 8; void *buf_from = acceleratorAllocDevice(bytes); void *buf_to = acceleratorAllocDevice(bytes); - nlohmann::json json_p2p; + nlohmann::json json_latency; for (int from = 0; from < ranks; ++from) for (int to = 0; to < ranks; ++to) { @@ -335,9 +335,9 @@ class Benchmark tmp["time_usec"] = timestat.mean; tmp["time_usec_error"] = timestat.err; tmp["time_usec_max"] = timestat.min; - json_p2p.push_back(tmp); + json_latency.push_back(tmp); } - json_results["latency"] = json_p2p; + json_results["latency"] = json_latency; acceleratorFreeDevice(buf_from); acceleratorFreeDevice(buf_to); @@ -884,7 +884,7 @@ int main(int argc, char **argv) int do_su4 = 1; int do_memory = 1; int do_comms = 1; - int do_p2p = 1; + int do_latency = 1; int do_flops = 1; int Ls = 1; @@ -920,12 +920,12 @@ int main(int argc, char **argv) Benchmark::Comms(); } - if (do_p2p) + if (do_latency) { grid_big_sep(); - std::cout << GridLogMessage << " Point-to-Point benchmark " << std::endl; + std::cout << GridLogMessage << " Latency benchmark " << std::endl; grid_big_sep(); - Benchmark::PointToPoint(); + Benchmark::Latency(); } if (do_flops) From f7e607eae4a15f9a19d82d69b770f63742e50a9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Thu, 9 May 2024 23:33:04 +0100 Subject: [PATCH 4/9] proper warmup loop for latency --- Grid/Benchmark_Grid.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index 8b81863..d8b8de9 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -266,7 +266,8 @@ class Benchmark static void Latency(void) { - int Nloop = 200; + int Nwarmup = 100; + int Nloop = 1000; Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); Coordinate mpi_layout = GridDefaultMpi(); @@ -302,7 +303,7 @@ class Benchmark time_statistics timestat; MPI_Status status; - for (int i = 0; i < Nloop; ++i) + for (int i = -Nwarmup; i < Nloop; ++i) { double start = usecond(); if (from == me) @@ -321,7 +322,8 @@ class Benchmark assert(err == MPI_SUCCESS); } double stop = usecond(); - t_time[i] = stop - start; + if (i >= 0) + t_time[i] = stop - start; } // important: only the 'from' rank has a trustworthy time MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); From 4cd67805b935d874d5035a3ee08f532c5349e865 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Thu, 26 Sep 2024 09:31:22 +0100 Subject: [PATCH 5/9] make Latency benchmark proper one-way and increase statistics --- Grid/Benchmark_Grid.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index d8b8de9..e0d8878 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -266,8 +266,8 @@ class Benchmark static void Latency(void) { - int Nwarmup = 100; - int Nloop = 1000; + int Nwarmup = 1000; + int Nloop = 10000; Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); Coordinate mpi_layout = GridDefaultMpi(); @@ -310,26 +310,22 @@ class Benchmark { auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); assert(err == MPI_SUCCESS); - err = MPI_Recv(buf_to, bytes, MPI_CHAR, to, 0, Grid.communicator, &status); - assert(err == MPI_SUCCESS); } if (to == me) { auto err = MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); assert(err == MPI_SUCCESS); - err = MPI_Send(buf_from, 
bytes, MPI_CHAR, from, 0, Grid.communicator); - assert(err == MPI_SUCCESS); } double stop = usecond(); if (i >= 0) t_time[i] = stop - start; } - // important: only the 'from' rank has a trustworthy time + // important: only 'from' and 'to' have meaningful timings. we use 'from's. MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); timestat.statistics(t_time); - grid_printf("%2d %2d %15.2f %15.1f %15.2f\n", from, to, timestat.mean, + grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean, timestat.err, timestat.min); nlohmann::json tmp; tmp["from"] = from; From 7d89380b806dd51586f7f73bd144cfeab3ccf158 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Mon, 7 Oct 2024 17:22:26 +0100 Subject: [PATCH 6/9] point-to-point bandwith benchmark --- Grid/Benchmark_Grid.cpp | 140 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 138 insertions(+), 2 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index e0d8878..ce51109 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -29,6 +29,39 @@ int NN_global; nlohmann::json json_results; +// NOTE: Grid::GridClock is just a typedef to `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to microseconds (no idea why), so we need our own wrapper here. +double usecond_precise() +{ + using namespace std::chrono; + auto nsecs = duration_cast(GridClock::now()-Grid::theProgramStart); + return nsecs.count()*1e-3; +} + +std::vector get_mpi_hostnames() { + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); + + char hostname[MPI_MAX_PROCESSOR_NAME]; + int name_len = 0; + MPI_Get_processor_name(hostname, &name_len); + + // Allocate buffer to gather all hostnames + std::vector all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME); + + // Use MPI_Allgather to gather all hostnames on all ranks + MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, + all_hostnames.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, + MPI_COMM_WORLD); + + // Convert the gathered hostnames back into a vector of std::string + std::vector hostname_list(world_size); + for (int i = 0; i < world_size; ++i) { + hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]); + } + + return hostname_list; +} + struct time_statistics { double mean; @@ -305,7 +338,7 @@ class Benchmark for (int i = -Nwarmup; i < Nloop; ++i) { - double start = usecond(); + double start = usecond_precise(); if (from == me) { auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); @@ -317,7 +350,7 @@ class Benchmark MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); assert(err == MPI_SUCCESS); } - double stop = usecond(); + double stop = usecond_precise(); if (i >= 0) t_time[i] = stop - start; } @@ -341,6 +374,98 @@ class Benchmark acceleratorFreeDevice(buf_to); } + static void P2P(void) + { + // buffer-size to benchmark. This number is the same as the largest one used in the "Comms()" benchmark. + // ( L=48, Ls=12, double-prec-complex, half-color-spin-vector. ). Mostly arbitrary choice, but nice to match it here + size_t bytes=127401984; + + int Nwarmup = 50; + int Nloop = 200; + + Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); + Coordinate mpi_layout = GridDefaultMpi(); + + std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl; + grid_small_sep(); + grid_printf("from to mean(usec) err min bytes rate (GiB/s)\n"); + + int lat = 8; // dummy lattice size. Not actually used. 
+ Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], + lat * mpi_layout[3]}); + + GridCartesian Grid(latt_size, simd_layout, mpi_layout); + + int ranks; + int me; + MPI_Comm_size(Grid.communicator, &ranks); + MPI_Comm_rank(Grid.communicator, &me); + assert(ranks == Grid._Nprocessors); + assert(me == Grid._processor); + + void *buf_from = acceleratorAllocDevice(bytes); + void *buf_to = acceleratorAllocDevice(bytes); + nlohmann::json json_p2p; + for (int from = 0; from < ranks; ++from) + for (int to = 0; to < ranks; ++to) + { + if (from == to) + continue; + + std::vector t_time(Nloop); + time_statistics timestat; + MPI_Status status; + + for (int i = -Nwarmup; i < Nloop; ++i) + { + double start = usecond_precise(); + if (from == me) + { + auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); + assert(err == MPI_SUCCESS); + } + if (to == me) + { + auto err = + MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); + assert(err == MPI_SUCCESS); + } + double stop = usecond_precise(); + if (i >= 0) + t_time[i] = stop - start; + } + // important: only 'from' and 'to' have meaningful timings. we use 'from's. + MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); + + timestat.statistics(t_time); + double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.; + double rate_err = rate * timestat.err / timestat.mean; + double rate_max = rate * timestat.mean / timestat.min; + + grid_printf("%2d %2d %15.4f %15.3f %15.4f %15d %15.2f\n", from, to, timestat.mean, + timestat.err, timestat.min, bytes, rate); + + nlohmann::json tmp; + tmp["from"] = from; + tmp["to"] = to; + tmp["bytes"] = bytes; + tmp["time_usec"] = timestat.mean; + tmp["time_usec_error"] = timestat.err; + tmp["time_usec_max"] = timestat.min; + nlohmann::json tmp_rate; + tmp_rate["mean"] = rate; + tmp_rate["error"] = rate_err; + tmp_rate["max"] = rate_max; + tmp["rate_GBps"] = tmp_rate; + + json_p2p.push_back(tmp); + } + json_results["p2p"] = json_p2p; + + acceleratorFreeDevice(buf_from); + acceleratorFreeDevice(buf_to); + } + static void Memory(void) { const int Nvec = 8; @@ -883,6 +1008,7 @@ int main(int argc, char **argv) int do_memory = 1; int do_comms = 1; int do_latency = 1; + int do_p2p = 1; int do_flops = 1; int Ls = 1; @@ -926,6 +1052,14 @@ int main(int argc, char **argv) Benchmark::Latency(); } + if(do_p2p) + { + grid_big_sep(); + std::cout << GridLogMessage << " Point-To-Point benchmark " << std::endl; + grid_big_sep(); + Benchmark::P2P(); + } + if (do_flops) { Ls = 1; @@ -985,6 +1119,8 @@ int main(int argc, char **argv) json_results["flops"] = tmp_flops; } + json_results["hostnames"] = get_mpi_hostnames(); + if (!json_filename.empty()) { std::cout << GridLogMessage << "writing benchmark results to " << json_filename From 19c9dcb6ae99810c359f62bd0b9d0e5bd73b28bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Thu, 10 Oct 2024 11:40:44 +0100 Subject: [PATCH 7/9] fix order of ranks in latency/p2p --- Grid/Benchmark_Grid.cpp | 113 +++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 60 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index ce51109..3f6aa99 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -29,37 +29,41 @@ int NN_global; nlohmann::json json_results; -// NOTE: Grid::GridClock is just a typedef to `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to microseconds (no idea why), so we need our own wrapper here. 
+// NOTE: Grid::GridClock is just a typedef to +// `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to +// microseconds (no idea why, probably wasnt ever relevant before), so we need +// our own wrapper here. double usecond_precise() { using namespace std::chrono; - auto nsecs = duration_cast(GridClock::now()-Grid::theProgramStart); - return nsecs.count()*1e-3; + auto nsecs = duration_cast(GridClock::now() - Grid::theProgramStart); + return nsecs.count() * 1e-3; } -std::vector get_mpi_hostnames() { - int world_size; - MPI_Comm_size(MPI_COMM_WORLD, &world_size); +std::vector get_mpi_hostnames() +{ + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); - char hostname[MPI_MAX_PROCESSOR_NAME]; - int name_len = 0; - MPI_Get_processor_name(hostname, &name_len); + char hostname[MPI_MAX_PROCESSOR_NAME]; + int name_len = 0; + MPI_Get_processor_name(hostname, &name_len); - // Allocate buffer to gather all hostnames - std::vector all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME); + // Allocate buffer to gather all hostnames + std::vector all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME); - // Use MPI_Allgather to gather all hostnames on all ranks - MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, - all_hostnames.data(), MPI_MAX_PROCESSOR_NAME, MPI_CHAR, - MPI_COMM_WORLD); + // Use MPI_Allgather to gather all hostnames on all ranks + MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, all_hostnames.data(), + MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_COMM_WORLD); - // Convert the gathered hostnames back into a vector of std::string - std::vector hostname_list(world_size); - for (int i = 0; i < world_size; ++i) { - hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]); - } + // Convert the gathered hostnames back into a vector of std::string + std::vector hostname_list(world_size); + for (int i = 0; i < world_size; ++i) + { + hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]); + } - return hostname_list; + return hostname_list; } struct time_statistics @@ -302,25 +306,14 @@ class Benchmark int Nwarmup = 1000; int Nloop = 10000; - Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); - Coordinate mpi_layout = GridDefaultMpi(); - std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl; grid_small_sep(); grid_printf("from to mean(usec) err min\n"); - int lat = 8; // dummy lattice size. Not actually used. - Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], - lat * mpi_layout[3]}); - - GridCartesian Grid(latt_size, simd_layout, mpi_layout); - int ranks; int me; - MPI_Comm_size(Grid.communicator, &ranks); - MPI_Comm_rank(Grid.communicator, &me); - assert(ranks == Grid._Nprocessors); - assert(me == Grid._processor); + MPI_Comm_size(MPI_COMM_WORLD, &ranks); + MPI_Comm_rank(MPI_COMM_WORLD, &me); int bytes = 8; void *buf_from = acceleratorAllocDevice(bytes); @@ -341,21 +334,22 @@ class Benchmark double start = usecond_precise(); if (from == me) { - auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); + auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD); assert(err == MPI_SUCCESS); } if (to == me) { auto err = - MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); + MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status); assert(err == MPI_SUCCESS); } double stop = usecond_precise(); if (i >= 0) t_time[i] = stop - start; } - // important: only 'from' and 'to' have meaningful timings. 
we use 'from's. - MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); + // important: only 'from' and 'to' have meaningful timings. we use + // 'from's. + MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD); timestat.statistics(t_time); grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean, @@ -376,32 +370,30 @@ class Benchmark static void P2P(void) { - // buffer-size to benchmark. This number is the same as the largest one used in the "Comms()" benchmark. - // ( L=48, Ls=12, double-prec-complex, half-color-spin-vector. ). Mostly arbitrary choice, but nice to match it here - size_t bytes=127401984; + // IMPORTANT: The P2P benchmark uses "MPI_COMM_WORLD" communicator, which is + // not the quite the same as Grid.communicator. Practically speaking, the + // latter one contains the same MPI-ranks but in a different order. Grid + // does this make sure it can exploit ranks with shared memory (i.e. + // multiple ranks on the same node) as best as possible. + + // buffer-size to benchmark. This number is the same as the largest one used + // in the "Comms()" benchmark. ( L=48, Ls=12, double-prec-complex, + // half-color-spin-vector. ). Mostly an arbitrary choice, but nice to match + // it here + size_t bytes = 127401984; int Nwarmup = 50; int Nloop = 200; - Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd()); - Coordinate mpi_layout = GridDefaultMpi(); - std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl; grid_small_sep(); - grid_printf("from to mean(usec) err min bytes rate (GiB/s)\n"); - - int lat = 8; // dummy lattice size. Not actually used. - Coordinate latt_size({lat * mpi_layout[0], lat * mpi_layout[1], lat * mpi_layout[2], - lat * mpi_layout[3]}); - - GridCartesian Grid(latt_size, simd_layout, mpi_layout); + grid_printf("from to mean(usec) err min " + "bytes rate (GiB/s)\n"); int ranks; int me; - MPI_Comm_size(Grid.communicator, &ranks); - MPI_Comm_rank(Grid.communicator, &me); - assert(ranks == Grid._Nprocessors); - assert(me == Grid._processor); + MPI_Comm_size(MPI_COMM_WORLD, &ranks); + MPI_Comm_rank(MPI_COMM_WORLD, &me); void *buf_from = acceleratorAllocDevice(bytes); void *buf_to = acceleratorAllocDevice(bytes); @@ -421,21 +413,22 @@ class Benchmark double start = usecond_precise(); if (from == me) { - auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, Grid.communicator); + auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD); assert(err == MPI_SUCCESS); } if (to == me) { auto err = - MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, Grid.communicator, &status); + MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status); assert(err == MPI_SUCCESS); } double stop = usecond_precise(); if (i >= 0) t_time[i] = stop - start; } - // important: only 'from' and 'to' have meaningful timings. we use 'from's. - MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, Grid.communicator); + // important: only 'from' and 'to' have meaningful timings. we use + // 'from's. + MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD); timestat.statistics(t_time); double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. 
/ 1024.; @@ -1052,7 +1045,7 @@ int main(int argc, char **argv) Benchmark::Latency(); } - if(do_p2p) + if (do_p2p) { grid_big_sep(); std::cout << GridLogMessage << " Point-To-Point benchmark " << std::endl; From a7e1d9e67f2ce9709a1df56775c08e3b8985c4ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Fri, 11 Oct 2024 18:27:00 +0100 Subject: [PATCH 8/9] lower loop counts a bit for p2p/latency --- Grid/Benchmark_Grid.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index 3f6aa99..8ec0841 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -303,8 +303,8 @@ class Benchmark static void Latency(void) { - int Nwarmup = 1000; - int Nloop = 10000; + int Nwarmup = 500; + int Nloop = 5000; std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl; grid_small_sep(); @@ -382,8 +382,8 @@ class Benchmark // it here size_t bytes = 127401984; - int Nwarmup = 50; - int Nloop = 200; + int Nwarmup = 20; + int Nloop = 100; std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl; grid_small_sep(); From f81cb198ab2bef071aa4e34ac4479e938cc50e37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Simon=20B=C3=BCrger?= Date: Mon, 18 Nov 2024 23:50:45 +0000 Subject: [PATCH 9/9] add command line options to Benchmark_Grid --- Grid/Benchmark_Grid.cpp | 66 +++++++++++++++++++++++++++++++---------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/Grid/Benchmark_Grid.cpp b/Grid/Benchmark_Grid.cpp index 8ec0841..6859d0c 100644 --- a/Grid/Benchmark_Grid.cpp +++ b/Grid/Benchmark_Grid.cpp @@ -1,7 +1,7 @@ /* Copyright © 2015 Peter Boyle Copyright © 2022 Antonin Portelli -Copyright © 2022 Simon Buerger +Copyright © 2024 Simon Buerger This is a fork of Benchmark_ITT.cpp from Grid @@ -303,12 +303,12 @@ class Benchmark static void Latency(void) { - int Nwarmup = 500; - int Nloop = 5000; + int Nwarmup = 100; + int Nloop = 300; std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl; grid_small_sep(); - grid_printf("from to mean(usec) err min\n"); + grid_printf("from to mean(usec) err max\n"); int ranks; int me; @@ -353,13 +353,15 @@ class Benchmark timestat.statistics(t_time); grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean, - timestat.err, timestat.min); + timestat.err, timestat.max); nlohmann::json tmp; tmp["from"] = from; tmp["to"] = to; tmp["time_usec"] = timestat.mean; tmp["time_usec_error"] = timestat.err; - tmp["time_usec_max"] = timestat.min; + tmp["time_usec_min"] = timestat.min; + tmp["time_usec_max"] = timestat.max; + tmp["time_usec_full"] = t_time; json_latency.push_back(tmp); } json_results["latency"] = json_latency; @@ -434,6 +436,7 @@ class Benchmark double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. 
/ 1024.; double rate_err = rate * timestat.err / timestat.mean; double rate_max = rate * timestat.mean / timestat.min; + double rate_min = rate * timestat.mean / timestat.max; grid_printf("%2d %2d %15.4f %15.3f %15.4f %15d %15.2f\n", from, to, timestat.mean, timestat.err, timestat.min, bytes, rate); @@ -444,11 +447,14 @@ class Benchmark tmp["bytes"] = bytes; tmp["time_usec"] = timestat.mean; tmp["time_usec_error"] = timestat.err; - tmp["time_usec_max"] = timestat.min; + tmp["time_usec_min"] = timestat.min; + tmp["time_usec_max"] = timestat.max; + tmp["time_usec_full"] = t_time; nlohmann::json tmp_rate; tmp_rate["mean"] = rate; tmp_rate["error"] = rate_err; tmp_rate["max"] = rate_max; + tmp_rate["min"] = rate_min; tmp["rate_GBps"] = tmp_rate; json_p2p.push_back(tmp); @@ -981,11 +987,47 @@ int main(int argc, char **argv) { Grid_init(&argc, &argv); + int Ls = 1; + bool do_su4 = true; + bool do_memory = true; + bool do_comms = true; + bool do_flops = true; + + // NOTE: these two take O((number of ranks)^2) time, which might be a lot, so they are + // off by default + bool do_latency = false; + bool do_p2p = false; + std::string json_filename = ""; // empty indicates no json output for (int i = 0; i < argc; i++) { - if (std::string(argv[i]) == "--json-out") + auto arg = std::string(argv[i]); + if (arg == "--json-out") json_filename = argv[i + 1]; + if (arg == "--benchmark-su4") + do_su4 = true; + if (arg == "--benchmark-memory") + do_memory = true; + if (arg == "--benchmark-comms") + do_comms = true; + if (arg == "--benchmark-flops") + do_flops = true; + if (arg == "--benchmark-latency") + do_latency = true; + if (arg == "--benchmark-p2p") + do_p2p = true; + if (arg == "--no-benchmark-su4") + do_su4 = false; + if (arg == "--no-benchmark-memory") + do_memory = false; + if (arg == "--no-benchmark-comms") + do_comms = false; + if (arg == "--no-benchmark-flops") + do_flops = false; + if (arg == "--no-benchmark-latency") + do_latency = false; + if (arg == "--no-benchmark-p2p") + do_p2p = false; } CartesianCommunicator::SetCommunicatorPolicy( @@ -997,14 +1039,6 @@ int main(int argc, char **argv) #endif Benchmark::Decomposition(); - int do_su4 = 1; - int do_memory = 1; - int do_comms = 1; - int do_latency = 1; - int do_p2p = 1; - int do_flops = 1; - int Ls = 1; - int sel = 4; std::vector L_list({8, 12, 16, 24, 32}); int selm1 = sel - 1;
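
For reference, the measurement pattern that the series converges on — one-way MPI_Send/MPI_Recv over every ordered rank pair on MPI_COMM_WORLD, warmup iterations folded into the same loop via negative indices, and the sender's timings broadcast to all ranks before computing statistics — can be condensed into the standalone sketch below. It is illustrative only and not part of the patches: it uses plain host buffers and a std::chrono steady_clock wrapper (usec_now) in place of acceleratorAllocDevice and Grid's usecond_precise, summarises each pair with a simple mean and minimum instead of the time_statistics struct, and prints from rank 0 instead of grid_printf.

// Illustrative sketch only (not part of the patch series): same send/recv and
// broadcast structure as Benchmark::Latency()/P2P() after PATCH 7, but with
// host buffers, std::chrono timing and a minimal mean/min summary.
#include <mpi.h>

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <numeric>
#include <vector>

static double usec_now()
{
  using namespace std::chrono;
  return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count() *
         1e-3;
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int ranks, me;
  MPI_Comm_size(MPI_COMM_WORLD, &ranks);
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  const int Nwarmup = 100, Nloop = 300; // PATCH 9 values for the latency case
  const size_t bytes = 8;               // ~127401984 for the bandwidth case
  std::vector<char> buf_from(bytes), buf_to(bytes);
  std::vector<double> t(Nloop);

  for (int from = 0; from < ranks; ++from)
    for (int to = 0; to < ranks; ++to)
    {
      if (from == to)
        continue;
      for (int i = -Nwarmup; i < Nloop; ++i) // negative indices = warmup
      {
        double start = usec_now();
        if (from == me)
          MPI_Send(buf_from.data(), (int)bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
        if (to == me)
          MPI_Recv(buf_to.data(), (int)bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD,
                   MPI_STATUS_IGNORE);
        double stop = usec_now();
        if (i >= 0)
          t[i] = stop - start;
      }
      // only 'from' (and 'to') have meaningful timings; share 'from's with everyone
      MPI_Bcast(t.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);

      double mean = std::accumulate(t.begin(), t.end(), 0.0) / Nloop;
      double tmin = *std::min_element(t.begin(), t.end());
      double rate = bytes / (mean / 1.e6) / 1024. / 1024. / 1024.; // GiB/s
      if (me == 0)
        std::printf("%2d -> %2d  mean %10.4f usec  min %10.4f usec  %10.3f GiB/s\n",
                    from, to, mean, tmin, rate);
    }

  MPI_Finalize();
  return 0;
}

With bytes = 8 this corresponds to the latency table; switching to the 127401984-byte buffer from PATCH 6 turns the same loop into the bandwidth test, where the rate formula gives, for example, 127401984 bytes at a 1000 usec mean -> 127401984 / 1e-3 / 1024^3, or about 118.7 GiB/s.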