#include <array>
#include <cassert>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <string>
#include <vector>

#include <mpi.h>

#include <blas_quda.h>
#include <color_spinor_field.h>
#include <communicator_quda.h>
#include <dirac_quda.h>
#include <gauge_tools.h>
#include <quda.h>
#include <random_quda.h>
#include <timer.h>

// remove to use QUDA's own flop counting instead of Grid's convention
#define FLOP_COUNTING_GRID

#include "json.hpp"
using nlohmann::json;
json json_results;

using namespace quda;

// timestamp = seconds since program start.
// these are written to the json output with the goal of later matching them
// against power measurements to determine energy efficiency.
using Clock = std::chrono::steady_clock;
Clock::time_point program_start_time = Clock::now();
double get_timestamp()
{
  auto dur = Clock::now() - program_start_time;
  return std::chrono::duration_cast<std::chrono::microseconds>(dur).count() * 1.0e-6;
}

// This is the MPI grid, i.e. the layout of ranks
int nranks = -1;
std::array<int, 4> mpi_grid = {1, 1, 1, 1};

void initComms(int argc, char **argv)
{
  // init MPI communication
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nranks);
  assert(1 <= nranks && nranks <= 100000);
  mpi_grid[3] = nranks;

  // this maps coordinates to rank number
  auto lex_rank_from_coords = [](int const *coords, void *) {
    int rank = coords[0];
    for (int i = 1; i < 4; i++)
      rank = mpi_grid[i] * rank + coords[i];
    return rank;
  };
  initCommsGridQuda(4, mpi_grid.data(), lex_rank_from_coords, nullptr);

  for (int d = 0; d < 4; d++)
    if (mpi_grid[d] > 1)
      commDimPartitionedSet(d);

  json_results["geometry"]["ranks"] = nranks;
  json_results["geometry"]["mpi"] = mpi_grid;
}

// creates a random gauge field. L = local(!) size
cudaGaugeField make_gauge_field(int L)
{
  GaugeFieldParam param;

  // dimension and type of the lattice object
  param.nDim = 4;
  param.x[0] = L;
  param.x[1] = L;
  param.x[2] = L;
  param.x[3] = L;

  // number of colors. potentially confusingly, QUDA sometimes uses the word
  // "color" for things unrelated to physical color. things like "nColor=32"
  // do pop up in deflation solvers, where (to my understanding) it refers to
  // the number of deflation vectors processed in parallel.
  param.nColor = 3;

  // boundary conditions (don't really matter for a benchmark)
  param.t_boundary = QUDA_PERIODIC_T;

  // for this benchmark we only need "SINGLE" and/or "DOUBLE" precision. But
  // smaller precisions are available in QUDA too
  param.setPrecision(QUDA_SINGLE_PRECISION);

  // no even/odd subset, we want a full lattice
  param.siteSubset = QUDA_FULL_SITE_SUBSET;

  // what kind of 3x3 matrices the field contains. A proper gauge field has
  // SU(3) matrices, but (for example) smeared/thick links could have
  // non-unitary links.
  param.link_type = QUDA_SU3_LINKS;

  // "NULL" does not initialize the field upon creation, "ZERO" would set
  // everything to 0
  param.create = QUDA_NULL_FIELD_CREATE;

  // field should be allocated directly on the accelerator/GPU
  param.location = QUDA_CUDA_FIELD_LOCATION;

  // "reconstruct" here means reconstructing an SU(3) matrix from fewer than
  // 18 real numbers (= 3x3 complex numbers). Great feature in production
  // (saving memory/cache/network bandwidth), not used for this benchmark.
  param.reconstruct = QUDA_RECONSTRUCT_NO;

  // "ghostExchange" would often be called "halo exchange" outside of QUDA.
  // This has nothing to do with ghost fields from continuum/perturbative QCD.
  param.ghostExchange = QUDA_GHOST_EXCHANGE_NO;

  // This controls the physical order of elements. "float2" is the default
  param.order = QUDA_FLOAT2_GAUGE_ORDER;

  // this means the field is a LORENTZ vector (which a gauge field must be).
  // Has nothing to do with spin.
  param.geometry = QUDA_VECTOR_GEOMETRY;

  // create the field and fill with random SU(3) matrices
  // std::cout << param << std::endl; // double-check parameters
  auto U = cudaGaugeField(param);
  gaugeGauss(U, /*seed=*/1234, 1.0);
  return U;
}
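// Aside (not used by the benchmark): what "reconstruct" in make_gauge_field
// means concretely. A minimal sketch of the idea behind QUDA_RECONSTRUCT_12,
// NOT QUDA's actual implementation: for U in SU(3) the third row equals the
// complex conjugate of the cross product of the first two, so storing two
// rows (12 reals) determines all 18.
#include <complex>
[[maybe_unused]] static void reconstruct_third_row(const std::complex<float> r0[3],
                                                   const std::complex<float> r1[3],
                                                   std::complex<float> r2[3])
{
  for (int i = 0; i < 3; i++)
    r2[i] = std::conj(r0[(i + 1) % 3] * r1[(i + 2) % 3]
                      - r0[(i + 2) % 3] * r1[(i + 1) % 3]);
}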
// create a random source vector (L = local size)
ColorSpinorField make_source(int L, int Ls = 1)
{
  // NOTE: `param.x` directly determines the size of the (local, per rank)
  // memory allocation. Thus for checkerboarding, we have to specify
  // x=(L/2,L,L,L) to get a physical local volume of L^4, thereby implicitly
  // choosing a dimension for the checkerboarding (which one shouldn't really
  // matter, of course).
  ColorSpinorParam param;
  param.nColor = 3;
  param.nSpin = 4;
  param.nVec = 1; // only a single vector
  param.pad = 0;
  param.siteSubset = QUDA_PARITY_SITE_SUBSET;
  param.nDim = Ls == 1 ? 4 : 5;
  param.x[0] = L / 2;
  param.x[1] = L;
  param.x[2] = L;
  param.x[3] = L;
  param.x[4] = Ls;
  param.pc_type = QUDA_4D_PC;
  param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;

  // somewhat surprisingly, the DiracWilson::Dslash(...) function only works
  // with the UKQCD_GAMMA_BASIS
  param.gammaBasis = QUDA_UKQCD_GAMMA_BASIS;

  param.create = QUDA_NULL_FIELD_CREATE; // do not (zero-)initialize the field
  param.setPrecision(QUDA_SINGLE_PRECISION);
  param.location = QUDA_CUDA_FIELD_LOCATION;

  // create the field and fill it with random values
  auto src = ColorSpinorField(param);
  quda::RNG rng(src, 1234);
  spinorNoise(src, rng, QUDA_NOISE_GAUSS);
  /*printfQuda(
      "created src with norm = %f (sanity check: should be close to %f) and %f bytes\n",
      blas::norm2(src), 2.0 * 12 * geom[0] * geom[1] * geom[2] * geom[3],
      src.Bytes() * 1.0);*/
  // src.PrintDims();
  return src;
}
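// Sanity-check helper for the source created above, a minimal sketch assuming
// QUDA_NOISE_GAUSS draws unit-variance components (consistent with the
// commented-out check in make_source): blas::norm2(src) should come out close
// to the number of real components of the local field.
[[maybe_unused]] static double expected_source_norm2(int L, int Ls = 1)
{
  // checkerboarded local volume: x = (L/2, L, L, L [, Ls])
  double local_sites = (L / 2.0) * L * L * L * Ls;
  // 2 reals per complex number, nColor = 3, nSpin = 4
  return 2.0 * 3 * 4 * local_sites;
}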
void benchmark_wilson(std::vector<int> const &L_list, int niter)
{
  int niter_warmup = 10;

  printfQuda("==================== wilson dirac operator ====================\n");
#ifdef FLOP_COUNTING_GRID
  printfQuda("IMPORTANT: flop counting as in Benchmark_Grid\n");
#else
  printfQuda("IMPORTANT: flop counting by QUDA's own convention (different from "
             "Benchmark_Grid)\n");
#endif
  printfQuda("%5s %15s %15s\n", "L", "time (usec)", "Gflop/s/rank");

  for (int L : L_list)
  {
    auto U = make_gauge_field(L);
    auto src = make_source(L);

    // create (Wilson) dirac operator
    DiracParam param;
    param.kappa = 0.10;
    param.dagger = QUDA_DAG_NO;
    param.matpcType = QUDA_MATPC_EVEN_EVEN;
    auto dirac = DiracWilson(param);

    // insert gauge field into the dirac operator
    // (the additional nullptr's are for smeared links and fancy
    // preconditioners and such. Not used for simple Wilson fermions)
    dirac.updateFields(&U, nullptr, nullptr, nullptr);

    auto res = ColorSpinorField(ColorSpinorParam(src));

    // couple of iterations without timing to warm up
    for (int iter = 0; iter < niter_warmup; ++iter)
      dirac.Dslash(res, src, QUDA_EVEN_PARITY);

    // actual benchmark with timings
    dirac.Flops(); // reset flops counter
    device_timer_t device_timer;
    device_timer.start();
    double start_time = get_timestamp();
    for (int iter = 0; iter < niter; ++iter)
      dirac.Dslash(res, src, QUDA_EVEN_PARITY);
    double end_time = get_timestamp();
    device_timer.stop();

    double secs = device_timer.last() / niter;
#ifdef FLOP_COUNTING_GRID
    // this is the flop counting from Benchmark_Grid
    double Nc = 3;
    double Nd = 4;
    double Ns = 4;
    double flops =
        (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
    flops *= L * L * L * L / 2.0;
#else
    double flops = 1.0 * dirac.Flops() / niter;
#endif

    printfQuda("%5d %15.2f %15.2f\n", L, secs * 1e6, flops / secs * 1e-9);

    json tmp;
    tmp["L"] = L;
    tmp["Gflops_wilson"] = flops / secs * 1e-9;
    tmp["start_time"] = start_time;
    tmp["end_time"] = end_time;
    json_results["flops"]["results"].push_back(tmp);
  }
}

void benchmark_dwf(std::vector<int> const &L_list, int niter)
{
  int niter_warmup = 10;

  printfQuda("==================== domain wall dirac operator ====================\n");
#ifdef FLOP_COUNTING_GRID
  printfQuda("IMPORTANT: flop counting as in Benchmark_Grid\n");
#else
  printfQuda("IMPORTANT: flop counting by QUDA's own convention (different from "
             "Benchmark_Grid)\n");
#endif
  printfQuda("%5s %15s %15s\n", "L", "time (usec)", "Gflop/s/rank");

  int Ls = 12;
  for (int L : L_list)
  {
    auto U = make_gauge_field(L);
    auto src = make_source(L, Ls);

    // create dirac operator
    DiracParam param;
    param.kappa = 0.10;
    param.Ls = Ls;
    param.m5 = 0.1;
    param.dagger = QUDA_DAG_NO;
    param.matpcType = QUDA_MATPC_EVEN_EVEN;
    auto dirac = DiracDomainWall(param);

    // insert gauge field into the dirac operator
    // (the additional nullptr's are for smeared links and fancy
    // preconditioners and such)
    dirac.updateFields(&U, nullptr, nullptr, nullptr);

    auto res = ColorSpinorField(ColorSpinorParam(src));

    // couple of iterations without timing to warm up
    for (int iter = 0; iter < niter_warmup; ++iter)
      dirac.Dslash(res, src, QUDA_EVEN_PARITY);

    // actual benchmark with timings
    dirac.Flops(); // reset flops counter
    device_timer_t device_timer;
    device_timer.start();
    double start_time = get_timestamp();
    for (int iter = 0; iter < niter; ++iter)
      dirac.Dslash(res, src, QUDA_EVEN_PARITY);
    double end_time = get_timestamp();
    device_timer.stop();

    double secs = device_timer.last() / niter;
#ifdef FLOP_COUNTING_GRID
    // this is the flop counting from Benchmark_Grid
    double Nc = 3;
    double Nd = 4;
    double Ns = 4;
    double flops =
        (Nc * (6 + (Nc - 1) * 8) * Ns * Nd + 2 * Nd * Nc * Ns + 2 * Nd * Nc * Ns * 2);
    flops *= L * L * L * L * Ls / 2.0;
#else
    double flops = 1.0 * dirac.Flops() / niter;
#endif

    printfQuda("%5d %15.2f %15.2f\n", L, secs * 1e6, flops / secs * 1e-9);

    json tmp;
    tmp["L"] = L;
    tmp["Gflops_dwf4"] = flops / secs * 1e-9;
    tmp["start_time"] = start_time;
    tmp["end_time"] = end_time;
    json_results["flops"]["results"].push_back(tmp);
  }
}
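// Why the axpy benchmark below reports both bandwidth and flops: with the
// counting used there (2 flops and 3 single-precision transfers per real
// element), the arithmetic intensity is 2 / (3 * 4 bytes) = 1/6 flop/byte,
// far below the balance point of any current GPU, so axpy measures memory
// bandwidth. Minimal sketch of that conversion (illustrative helper, not
// QUDA API):
[[maybe_unused]] static double axpy_gflops_at_bandwidth(double GiB_per_sec)
{
  double bytes_per_sec = GiB_per_sec * 1024.0 * 1024.0 * 1024.0;
  return bytes_per_sec / 6.0 * 1e-9; // 2 flops per 12 bytes moved
}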
void benchmark_axpy(std::vector<int> const &L_list, int niter)
{
  // number of iterations for warmup / measurement
  // (feel free to change for noise/time tradeoff)
  constexpr int niter_warmup = 10;

  printfQuda("==================== axpy / memory ====================\n");

  ColorSpinorParam param;
  param.nDim = 4; // 4-dimensional lattice
  param.x[4] = 1; // no fifth dimension

  // supported values for nSpin/nColor are configured when compiling QUDA.
  // "3*4" will probably always be enabled, so we stick with this
  param.nColor = 3;
  param.nSpin = 4;

  param.nVec = 1;                            // just a single vector
  param.siteSubset = QUDA_FULL_SITE_SUBSET;  // full lattice = no odd/even
  param.pad = 0;                             // no padding
  param.create = QUDA_NULL_FIELD_CREATE;     // do not (zero-)initialize the field
  param.location = QUDA_CUDA_FIELD_LOCATION; // field should reside on GPU
  param.setPrecision(QUDA_SINGLE_PRECISION);

  // the following don't matter for an axpy benchmark, but we need to choose
  // something
  param.pc_type = QUDA_4D_PC;
  param.siteOrder = QUDA_EVEN_ODD_SITE_ORDER;
  param.gammaBasis = QUDA_DEGRAND_ROSSI_GAMMA_BASIS;

  printfQuda("%5s %15s %15s %15s %15s\n", "L", "size (MiB/rank)", "time (usec)",
             "GiB/s/rank", "Gflop/s/rank");

  for (int L : L_list)
  {
    // IMPORTANT: all of `param.x`, `field_elements`, `field.Bytes()`
    // are LOCAL, i.e. per rank / per GPU
    param.x[0] = L;
    param.x[1] = L;
    param.x[2] = L;
    param.x[3] = L;

    // number of (real) elements in one (local) field
    size_t field_elements = 2 * param.x[0] * param.x[1] * param.x[2] * param.x[3]
                            * param.nColor * param.nSpin;

    // create the field(s)
    auto fieldA = ColorSpinorField(param);
    auto fieldB = ColorSpinorField(param);
    assert(fieldA.Bytes() == sizeof(float) * field_elements); // sanity check
    assert(fieldB.Bytes() == sizeof(float) * field_elements); // sanity check

    // fill fields with random values
    quda::RNG rng(fieldA, 1234);
    spinorNoise(fieldA, rng, QUDA_NOISE_GAUSS);
    spinorNoise(fieldB, rng, QUDA_NOISE_GAUSS);

    // number of operations / bytes per iteration:
    // axpy is one addition, one multiplication, two reads, one write
    double flops = 2 * field_elements;
    double memory = 3 * sizeof(float) * field_elements;

    // do some iterations to let QUDA do its internal tuning and also to
    // stabilize cache behaviour and such
    for (int iter = 0; iter < niter_warmup; ++iter)
      blas::axpy(1.234, fieldA, fieldB);

    // running the actual benchmark
    device_timer_t device_timer;
    device_timer.start();
    double start_time = get_timestamp();
    for (int iter = 0; iter < niter; ++iter)
      blas::axpy(1.234, fieldA, fieldB);
    double end_time = get_timestamp();
    device_timer.stop();

    double secs = device_timer.last() / niter; // seconds per iteration
    double mem_MiB = memory / 1024. / 1024.;
    double GBps = mem_MiB / 1024 / secs;
    printfQuda("%5d %15.2f %15.2f %15.2f %15.2f\n", L, mem_MiB, secs * 1e6, GBps,
               flops / secs * 1e-9);

    json tmp;
    tmp["L"] = L;
    tmp["size_MB"] = mem_MiB;
    tmp["GBps"] = GBps;
    tmp["GFlops"] = flops / secs * 1e-9;
    tmp["start_time"] = start_time;
    tmp["end_time"] = end_time;
    json_results["axpy"].push_back(tmp);
  }
}
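// Typical invocation (launcher and binary name are examples, not fixed by
// this file):
//   mpirun -np 4 ./Benchmark_Quda --json-out results.json
// initComms above maps the ranks onto a 1 x 1 x 1 x nranks process grid.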
int main(int argc, char **argv)
{
  std::string json_filename = ""; // empty indicates no json output
  for (int i = 0; i < argc - 1; i++)
  {
    if (std::string(argv[i]) == "--json-out")
      json_filename = argv[i + 1];
  }

  initComms(argc, argv);

  initQuda(-1); // -1 for multi-gpu. otherwise this selects the device to be used

  // verbosity options are:
  // SILENT, SUMMARIZE, VERBOSE, DEBUG_VERBOSE
  setVerbosity(QUDA_SUMMARIZE);

  printfQuda("MPI layout = %d %d %d %d\n", mpi_grid[0], mpi_grid[1], mpi_grid[2],
             mpi_grid[3]);

  benchmark_axpy({8, 12, 16, 24, 32, 48}, 20);

  setVerbosity(QUDA_SILENT);
  benchmark_wilson({8, 12, 16, 24, 32, 48}, 20);
  benchmark_dwf({8, 12, 16, 24, 32}, 20);
  setVerbosity(QUDA_SUMMARIZE);

  printfQuda("==================== done with all benchmarks ====================\n");

  if (!json_filename.empty())
  {
    printfQuda("writing benchmark results to %s\n", json_filename.c_str());

    int me = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    if (me == 0)
    {
      std::ofstream json_file(json_filename);
      json_file << std::setw(2) << json_results;
    }
  }

  endQuda();
  quda::comm_finalize();
  MPI_Finalize();
}
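// For reference, the JSON written above has roughly this shape (values are
// illustrative only):
// {
//   "geometry": {"ranks": 4, "mpi": [1, 1, 1, 4]},
//   "axpy": [{"L": 8, "size_MB": ..., "GBps": ..., "GFlops": ...,
//             "start_time": ..., "end_time": ...}, ...],
//   "flops": {"results": [{"L": 8, "Gflops_wilson": ..., "start_time": ...,
//                          "end_time": ...},
//                         {"L": 8, "Gflops_dwf4": ..., ...}, ...]}
// }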