Compare commits


33 Commits

SHA1 Message Date
fb43d16830 log iso-timestamp instead of seconds-since-start 2024-11-25 15:45:59 +00:00
6fa2e6bcd0 fix bug that made benchmark_quda hang randomly 2024-11-25 15:45:59 +00:00
fb4c456776 choose iteration count automatically 2024-11-25 15:45:58 +00:00
3fbb8ea346 add timestamps to benchmarks 2024-11-25 15:45:58 +00:00
86b160cb5c add json output to Benchmark_Quda 2024-11-25 15:45:58 +00:00
dc411017bb Update 'Quda/Readme.md' 2024-11-25 15:45:58 +00:00
b2cc780690 add Readme.md to Quda benchmark 2024-11-25 15:45:58 +00:00
6d87396576 clean up build script a bit 2024-11-25 15:45:58 +00:00
e9d084ce09 better range of lattice sizes 2024-11-25 15:45:58 +00:00
32e301fc67 add DWF benchmark 2024-11-25 15:45:58 +00:00
eaa4feee43 benchmark Dslash(...) instead of full M(...) 2024-11-25 15:45:58 +00:00
025f9dab50 fix scaling conventions for multi-gpu 2024-11-25 15:45:57 +00:00
3a561091d9 tidy up the wilson benchmark and add environment script 2024-11-25 15:45:57 +00:00
191c0cfca5 add quda axpy/memory benchmark 2024-11-25 15:45:57 +00:00
6f9af8acad first draft of Quda Benchmark 2024-11-25 15:45:57 +00:00
371a329457 Merge pull request 'Point-to-Point latency' (#7) from simon.buerger/lattice-benchmarks:latency_benchmark into main
Reviewed-on: #7
Reviewed-by: Antonin Portelli <antonin.portelli@me.com>
2024-11-19 10:37:26 +00:00
f81cb198ab add command line options to Benchmark_Grid 2024-11-18 23:50:45 +00:00
a7e1d9e67f lower loop counts a bit for p2p/latency 2024-10-11 18:27:00 +01:00
19c9dcb6ae fix order of ranks in latency/p2p 2024-10-10 11:40:44 +01:00
7d89380b80 point-to-point bandwith benchmark 2024-10-07 17:22:26 +01:00
4cd67805b9 make Latency benchmark proper one-way and increase statistics 2024-09-26 09:31:22 +01:00
f7e607eae4 proper warmup loop for latency 2024-05-09 23:33:04 +01:00
a267986800 naming consitency 2024-05-09 23:25:06 +01:00
a1ec08cdb3 point-to-point latency 2024-05-09 23:17:54 +01:00
fb6c79d9ca shm direction fix 2024-03-21 13:51:49 +09:00
d7647afa72 Merge remote-tracking branch 'upstream/main' into main 2024-01-23 10:10:52 +00:00
ba00493c7d Merge pull request 'fix incompatibility with latest Grid' (#5) from simon.buerger/lattice-benchmarks:main into main
Reviewed-on: #5
2023-12-20 15:06:41 +00:00
6055e0503c simple latency benchmark 2023-12-20 13:43:51 +00:00
6ea093fc80 fix incompatibility with latest Grid 2023-12-18 16:48:19 +00:00
fa47ec5bbe Merge pull request 'refactor and repair the spack environment' (#4) from simon.buerger/lattice-benchmarks:fix_spack_environment into main
Reviewed-on: #4
2023-07-05 15:11:40 +01:00
7235bfde4c refactor and repair the spack environment 2023-07-04 22:30:54 +01:00
e5c61c2db1 Merge pull request 'add indication of shared-memory directions in comms benchmark' (#2) from simon.buerger/lattice-benchmarks:feature/grid-shared-mem into main
Reviewed-on: #2
2023-04-12 15:05:39 +01:00
80c80049d7 add indication of shared-memory directions in comms benchmark 2023-04-12 11:40:39 +01:00
2 changed files with 302 additions and 51 deletions

@@ -1,7 +1,7 @@
/*
Copyright © 2015 Peter Boyle <paboyle@ph.ed.ac.uk>
Copyright © 2022 Antonin Portelli <antonin.portelli@me.com>
Copyright © 2022 Simon Buerger <simon.buerger@rwth-aachen.de>
Copyright © 2024 Simon Buerger <simon.buerger@rwth-aachen.de>
This is a fork of Benchmark_ITT.cpp from Grid
@@ -29,6 +29,43 @@ int NN_global;
nlohmann::json json_results;
// NOTE: Grid::GridClock is just a typedef for
// `std::chrono::high_resolution_clock`, but `Grid::usecond` rounds to
// microseconds (no idea why; it probably was never relevant before), so we
// need our own wrapper here.
double usecond_precise()
{
using namespace std::chrono;
auto nsecs = duration_cast<nanoseconds>(GridClock::now() - Grid::theProgramStart);
return nsecs.count() * 1e-3;
}
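// usage sketch: differences of usecond_precise() keep sub-microsecond
// resolution, e.g.
//   double t0 = usecond_precise();
//   /* ... region to time ... */
//   double elapsed_usec = usecond_precise() - t0;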
std::vector<std::string> get_mpi_hostnames()
{
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
char hostname[MPI_MAX_PROCESSOR_NAME];
int name_len = 0;
MPI_Get_processor_name(hostname, &name_len);
// Allocate buffer to gather all hostnames
std::vector<char> all_hostnames(world_size * MPI_MAX_PROCESSOR_NAME);
// Use MPI_Allgather to gather all hostnames on all ranks
MPI_Allgather(hostname, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, all_hostnames.data(),
MPI_MAX_PROCESSOR_NAME, MPI_CHAR, MPI_COMM_WORLD);
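// note: MPI_Get_processor_name NUL-terminates each name, so every
// MPI_MAX_PROCESSOR_NAME-sized slot gathered here carries a proper C string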
// Convert the gathered hostnames back into a vector of std::string
std::vector<std::string> hostname_list(world_size);
for (int i = 0; i < world_size; ++i)
{
hostname_list[i] = std::string(&all_hostnames[i * MPI_MAX_PROCESSOR_NAME]);
}
return hostname_list;
}
struct time_statistics
{
double mean;
@@ -73,7 +110,7 @@ class Benchmark
{local[0] * mpi[0], local[1] * mpi[1], local[2] * mpi[2], local[3] * mpi[3]});
GridCartesian *TmpGrid = SpaceTimeGrid::makeFourDimGrid(
latt4, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
Grid::Coordinate shm;
Grid::Coordinate shm(4, 1);
GlobalSharedMemory::GetShmDims(mpi, shm);
uint64_t NP = TmpGrid->RankCount();
@@ -137,7 +174,7 @@ class Benchmark
Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
Coordinate shm_layout;
Coordinate shm_layout(Nd, 1);
GlobalSharedMemory::GetShmDims(mpi_layout, shm_layout);
for (int mu = 0; mu < Nd; mu++)
@@ -264,6 +301,170 @@ class Benchmark
return;
}
static void Latency(void)
{
int Nwarmup = 100;
int Nloop = 300;
std::cout << GridLogMessage << "Benchmarking point-to-point latency" << std::endl;
grid_small_sep();
grid_printf("from to mean(usec) err max\n");
int ranks;
int me;
MPI_Comm_size(MPI_COMM_WORLD, &ranks);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
int bytes = 8;
void *buf_from = acceleratorAllocDevice(bytes);
void *buf_to = acceleratorAllocDevice(bytes);
nlohmann::json json_latency;
for (int from = 0; from < ranks; ++from)
for (int to = 0; to < ranks; ++to)
{
if (from == to)
continue;
std::vector<double> t_time(Nloop);
time_statistics timestat;
MPI_Status status;
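// iterations with negative index are warmup rounds: they are timed like the
// others but discarded (only i >= 0 is stored in t_time below)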
for (int i = -Nwarmup; i < Nloop; ++i)
{
double start = usecond_precise();
if (from == me)
{
auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
assert(err == MPI_SUCCESS);
}
if (to == me)
{
auto err =
MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
assert(err == MPI_SUCCESS);
}
double stop = usecond_precise();
if (i >= 0)
t_time[i] = stop - start;
}
// important: only ranks 'from' and 'to' record meaningful timings; we use
// 'from's measurements.
MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
timestat.statistics(t_time);
grid_printf("%2d %2d %15.4f %15.3f %15.4f\n", from, to, timestat.mean,
timestat.err, timestat.max);
nlohmann::json tmp;
tmp["from"] = from;
tmp["to"] = to;
tmp["time_usec"] = timestat.mean;
tmp["time_usec_error"] = timestat.err;
tmp["time_usec_min"] = timestat.min;
tmp["time_usec_max"] = timestat.max;
tmp["time_usec_full"] = t_time;
json_latency.push_back(tmp);
}
json_results["latency"] = json_latency;
acceleratorFreeDevice(buf_from);
acceleratorFreeDevice(buf_to);
}
static void P2P(void)
{
// IMPORTANT: The P2P benchmark uses the "MPI_COMM_WORLD" communicator, which
// is not quite the same as Grid's communicator. Practically speaking, the
// latter contains the same MPI ranks but in a different order. Grid does this
// to make sure it can exploit ranks with shared memory (i.e. multiple ranks
// on the same node) as well as possible.
// Buffer size to benchmark. This number is the same as the largest one used
// in the "Comms()" benchmark (L=48, Ls=12, double-precision complex,
// half-color-spin-vector). Mostly an arbitrary choice, but nice to match it
// here.
size_t bytes = 127401984;
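// sanity-check sketch of the arithmetic behind this constant (assuming the
// conventions stated above: a 48^3 x Ls=12 slab of half spinors, each
// 2 spins x 3 colors x 16 bytes per complex double = 96 bytes per site)
static_assert(48ull * 48 * 48 * 12 * (2 * 3 * 2 * sizeof(double)) == 127401984,
              "P2P buffer size matches the largest Comms() message");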
int Nwarmup = 20;
int Nloop = 100;
std::cout << GridLogMessage << "Benchmarking point-to-point bandwidth" << std::endl;
grid_small_sep();
grid_printf("from to mean(usec) err min "
"bytes rate (GiB/s)\n");
int ranks;
int me;
MPI_Comm_size(MPI_COMM_WORLD, &ranks);
MPI_Comm_rank(MPI_COMM_WORLD, &me);
void *buf_from = acceleratorAllocDevice(bytes);
void *buf_to = acceleratorAllocDevice(bytes);
nlohmann::json json_p2p;
for (int from = 0; from < ranks; ++from)
for (int to = 0; to < ranks; ++to)
{
if (from == to)
continue;
std::vector<double> t_time(Nloop);
time_statistics timestat;
MPI_Status status;
for (int i = -Nwarmup; i < Nloop; ++i)
{
double start = usecond_precise();
if (from == me)
{
auto err = MPI_Send(buf_from, bytes, MPI_CHAR, to, 0, MPI_COMM_WORLD);
assert(err == MPI_SUCCESS);
}
if (to == me)
{
auto err =
MPI_Recv(buf_to, bytes, MPI_CHAR, from, 0, MPI_COMM_WORLD, &status);
assert(err == MPI_SUCCESS);
}
double stop = usecond_precise();
if (i >= 0)
t_time[i] = stop - start;
}
// important: only ranks 'from' and 'to' record meaningful timings; we use
// 'from's measurements.
MPI_Bcast(t_time.data(), Nloop, MPI_DOUBLE, from, MPI_COMM_WORLD);
timestat.statistics(t_time);
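// first-order error propagation for the derived rate: the relative error on
// the rate equals the relative error on the mean time, and the max/min rates
// come from the min/max times respectively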
double rate = bytes / (timestat.mean / 1.e6) / 1024. / 1024. / 1024.;
double rate_err = rate * timestat.err / timestat.mean;
double rate_max = rate * timestat.mean / timestat.min;
double rate_min = rate * timestat.mean / timestat.max;
grid_printf("%2d %2d %15.4f %15.3f %15.4f %15d %15.2f\n", from, to, timestat.mean,
timestat.err, timestat.min, bytes, rate);
nlohmann::json tmp;
tmp["from"] = from;
tmp["to"] = to;
tmp["bytes"] = bytes;
tmp["time_usec"] = timestat.mean;
tmp["time_usec_error"] = timestat.err;
tmp["time_usec_min"] = timestat.min;
tmp["time_usec_max"] = timestat.max;
tmp["time_usec_full"] = t_time;
nlohmann::json tmp_rate;
tmp_rate["mean"] = rate;
tmp_rate["error"] = rate_err;
tmp_rate["max"] = rate_max;
tmp_rate["min"] = rate_min;
tmp["rate_GBps"] = tmp_rate;
json_p2p.push_back(tmp);
}
json_results["p2p"] = json_p2p;
acceleratorFreeDevice(buf_from);
acceleratorFreeDevice(buf_to);
}
static void Memory(void)
{
const int Nvec = 8;
@@ -525,8 +726,6 @@ class Benchmark
FGrid->Broadcast(0, &ncall, sizeof(ncall));
Dw.ZeroCounters();
time_statistics timestat;
std::vector<double> t_time(ncall);
for (uint64_t i = 0; i < ncall; i++)
@@ -721,7 +920,6 @@ class Benchmark
uint64_t ncall = 500;
FGrid->Broadcast(0, &ncall, sizeof(ncall));
Ds.ZeroCounters();
time_statistics timestat;
std::vector<double> t_time(ncall);
@@ -789,11 +987,47 @@ int main(int argc, char **argv)
{
Grid_init(&argc, &argv);
int Ls = 1;
bool do_su4 = true;
bool do_memory = true;
bool do_comms = true;
bool do_flops = true;
// NOTE: these two take O((number of ranks)^2) time, which might be a lot, so they are
// off by default
bool do_latency = false;
bool do_p2p = false;
std::string json_filename = ""; // empty indicates no json output
for (int i = 0; i < argc; i++)
{
if (std::string(argv[i]) == "--json-out")
auto arg = std::string(argv[i]);
if (arg == "--json-out")
json_filename = argv[i + 1];
if (arg == "--benchmark-su4")
do_su4 = true;
if (arg == "--benchmark-memory")
do_memory = true;
if (arg == "--benchmark-comms")
do_comms = true;
if (arg == "--benchmark-flops")
do_flops = true;
if (arg == "--benchmark-latency")
do_latency = true;
if (arg == "--benchmark-p2p")
do_p2p = true;
if (arg == "--no-benchmark-su4")
do_su4 = false;
if (arg == "--no-benchmark-memory")
do_memory = false;
if (arg == "--no-benchmark-comms")
do_comms = false;
if (arg == "--no-benchmark-flops")
do_flops = false;
if (arg == "--no-benchmark-latency")
do_latency = false;
if (arg == "--no-benchmark-p2p")
do_p2p = false;
}
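// example invocation (sketch; launcher and binary name depend on the build):
//   mpirun -np 2 ./Benchmark_Grid --benchmark-latency --benchmark-p2p \
//     --no-benchmark-flops --json-out results.json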
CartesianCommunicator::SetCommunicatorPolicy(
@@ -805,12 +1039,6 @@ int main(int argc, char **argv)
#endif
Benchmark::Decomposition();
int do_su4 = 1;
int do_memory = 1;
int do_comms = 1;
int do_flops = 1;
int Ls = 1;
int sel = 4;
std::vector<int> L_list({8, 12, 16, 24, 32});
int selm1 = sel - 1;
@@ -843,6 +1071,22 @@ int main(int argc, char **argv)
Benchmark::Comms();
}
if (do_latency)
{
grid_big_sep();
std::cout << GridLogMessage << " Latency benchmark " << std::endl;
grid_big_sep();
Benchmark::Latency();
}
if (do_p2p)
{
grid_big_sep();
std::cout << GridLogMessage << " Point-To-Point benchmark " << std::endl;
grid_big_sep();
Benchmark::P2P();
}
if (do_flops)
{
Ls = 1;
@@ -902,6 +1146,8 @@ int main(int argc, char **argv)
json_results["flops"] = tmp_flops;
}
json_results["hostnames"] = get_mpi_hostnames();
if (!json_filename.empty())
{
std::cout << GridLogMessage << "writing benchmark results to " << json_filename

@@ -4,7 +4,13 @@ set -euo pipefail
gcc_spec='gcc@9.4.0'
cuda_spec='cuda@11.4.0'
hdf5_spec='hdf5@1.10.7'
# hdf5 and fftw depend on OpenMPI, which we install manually. To make sure
# this dependency is picked up by spack, we specify the compiler here
# explicitly. For most other packages we don't really care about the compiler
# (i.e. system compiler versus ${gcc_spec}).
hdf5_spec="hdf5@1.10.7+cxx+threadsafe%${gcc_spec}"
fftw_spec="fftw%${gcc_spec}"
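# e.g. with gcc_spec='gcc@9.4.0' as above, these expand to
#   hdf5_spec="hdf5@1.10.7+cxx+threadsafe%gcc@9.4.0"
#   fftw_spec="fftw%gcc@9.4.0"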
if (( $# != 1 )); then
echo "usage: $(basename "$0") <env dir>" 1>&2
@@ -18,7 +24,7 @@ cd "${cwd}"
# General configuration ########################################################
# build with 128 tasks
echo 'config:
build_jobs: 128
build_stage:
- $spack/var/spack/stage
@@ -38,26 +44,23 @@ rm external.yaml
# Base compilers ###############################################################
# configure system base
spack env create base
spack env activate base
spack compiler find --scope site
# install GCC, CUDA & LLVM
spack install ${gcc_spec} ${cuda_spec} llvm
spack load llvm
# install GCC, CUDA
spack add ${gcc_spec} ${cuda_spec}
spack concretize
spack env depfile -o Makefile.tmp
make -j128 -f Makefile.tmp
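# the depfile workflow generates a Makefile for the environment, so
# independent packages build in parallel under make -j instead of one at a
# time as with a plain `spack install`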
spack compiler find --scope site
spack unload llvm
spack load ${gcc_spec}
spack compiler find --scope site
spack unload ${gcc_spec}
# Manual compilation of OpenMPI & UCX ##########################################
# set build directories
mkdir -p "${dir}"/build
cd "${dir}"/build
spack load ${gcc_spec} ${cuda_spec}
cuda_path=$(spack find --format "{prefix}" cuda)
gdrcopy_path=/mnt/lustre/tursafs1/apps/gdrcopy/2.3.1
@@ -124,8 +127,8 @@ mkdir build_gpu; cd build_gpu
--with-cuda="${cuda_path}" --disable-getpwuid \
--with-verbs --with-slurm --enable-mpi-fortran=all \
--with-pmix=internal --with-libevent=internal
make -j 128
make install
make -j 128
make install
cd ..
# openmpi cpu build
@@ -141,60 +144,62 @@ make -j 128
make install
cd "${dir}"
ucx_spec_gpu="ucx@1.12.0.GPU%${gcc_spec}"
ucx_spec_cpu="ucx@1.12.0.CPU%${gcc_spec}"
openmpi_spec_gpu="openmpi@4.1.1.GPU%${gcc_spec}"
openmpi_spec_cpu="openmpi@4.1.1.CPU%${gcc_spec}"
# Add externals to spack
echo "packages:
ucx:
externals:
- spec: \"ucx@1.12.0.GPU%gcc@9.4.0\"
- spec: \"${ucx_spec_gpu}\"
prefix: ${dir}/prefix/ucx_gpu
- spec: \"ucx@1.12.0.CPU%gcc@9.4.0\"
- spec: \"${ucx_spec_cpu}\"
prefix: ${dir}/prefix/ucx_cpu
buildable: False
openmpi:
externals:
- spec: \"openmpi@4.1.1.GPU%gcc@9.4.0\"
- spec: \"${openmpi_spec_gpu}\"
prefix: ${dir}/prefix/ompi_gpu
- spec: \"openmpi@4.1.1.CPU%gcc@9.4.0\"
- spec: \"${openmpi_spec_cpu}\"
prefix: ${dir}/prefix/ompi_cpu
buildable: False" > spack.yaml
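# "buildable: False" forces spack to use these manually built externals
# instead of trying to rebuild ucx/openmpi itself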
spack config --scope site add -f spack.yaml
rm spack.yaml
spack install ucx@1.12.0.GPU%gcc@9.4.0 openmpi@4.1.1.GPU%gcc@9.4.0
spack install ucx@1.12.0.CPU%gcc@9.4.0 openmpi@4.1.1.CPU%gcc@9.4.0
spack env deactivate
cd "${cwd}"
# environments #################################################################
dev_tools=("autoconf" "automake" "libtool" "jq" "git")
ompi_gpu_hash=$(spack find --format "{hash}" openmpi@4.1.1.GPU)
ompi_cpu_hash=$(spack find --format "{hash}" openmpi@4.1.1.CPU)
spack env create grid-gpu
spack env activate grid-gpu
spack add ${gcc_spec} ${cuda_spec} "${dev_tools[@]}"
spack add ucx@1.12.0.GPU%gcc@9.4.0 openmpi@4.1.1.GPU%gcc@9.4.0
spack add ${hdf5_spec}+cxx+threadsafe ^/"${ompi_gpu_hash}"
spack add fftw ^/"${ompi_gpu_hash}"
spack add openssl gmp mpfr c-lime
spack install
spack compiler find --scope site
spack add ${gcc_spec} ${cuda_spec} ${ucx_spec_gpu} ${openmpi_spec_gpu}
spack add ${hdf5_spec} ${fftw_spec}
spack add openssl gmp mpfr c-lime "${dev_tools[@]}"
spack concretize
spack env depfile -o Makefile.tmp
make -j128 -f Makefile.tmp
spack env deactivate
spack env create grid-cpu
spack env activate grid-cpu
spack add llvm "${dev_tools[@]}"
spack add ucx@1.12.0.CPU%gcc@9.4.0 openmpi@4.1.1.CPU%gcc@9.4.0
spack add ${hdf5_spec}+cxx+threadsafe ^/"${ompi_cpu_hash}"
spack add fftw ^/"${ompi_cpu_hash}"
spack add openssl gmp mpfr c-lime
spack install
spack compiler find --scope site
spack add ${gcc_spec} ${ucx_spec_cpu} ${openmpi_spec_cpu}
spack add ${hdf5_spec} ${fftw_spec}
spack add openssl gmp mpfr c-lime "${dev_tools[@]}"
spack concretize
spack env depfile -o Makefile.tmp
make -j128 -f Makefile.tmp
spack env deactivate
spack install jq git
# Final setup ##################################################################
spack clean
spack gc -y
# spack gc -y # "spack gc" tends to hang for unknown reasons
# add more environment variables in module loading
spack config --scope site add 'modules:prefix_inspections:lib:[LD_LIBRARY_PATH,LIBRARY_PATH]'