From c3b1263e75212356fc1aa061cd226db70f4f00fc Mon Sep 17 00:00:00 2001
From: Peter Boyle
Date: Fri, 25 Aug 2017 09:25:54 +0100
Subject: [PATCH] Benchmark prep

---
 benchmarks/Benchmark_ITT.cc               | 322 +++++++++++++++++++---
 benchmarks/Benchmark_comms.cc             |  30 +-
 lib/allocator/AlignedAllocator.h          |   5 +
 lib/communicator/Communicator_base.cc     |   6 +-
 lib/communicator/Communicator_mpi3.cc     |   5 +-
 lib/communicator/Communicator_mpit.cc     |  19 +-
 lib/qcd/action/fermion/CayleyFermion5D.cc |  12 +-
 lib/qcd/action/fermion/WilsonCompressor.h |  41 ++-
 lib/qcd/action/fermion/WilsonFermion5D.cc |  11 +
 lib/stencil/Stencil.h                     | 114 +++++++-
 10 files changed, 494 insertions(+), 71 deletions(-)

diff --git a/benchmarks/Benchmark_ITT.cc b/benchmarks/Benchmark_ITT.cc
index 9bf7d0a5..c5226ee1 100644
--- a/benchmarks/Benchmark_ITT.cc
+++ b/benchmarks/Benchmark_ITT.cc
@@ -32,6 +32,19 @@ using namespace std;
 using namespace Grid;
 using namespace Grid::QCD;
 
+typedef WilsonFermion5D WilsonFermion5DR;
+typedef WilsonFermion5D WilsonFermion5DF;
+typedef WilsonFermion5D WilsonFermion5DD;
+
+
+std::vector L_list;
+std::vector Ls_list;
+std::vector mflop_list;
+
+double mflop_ref;
+double mflop_ref_err;
+
+int NN_global;
 
 struct time_statistics{
   double mean;
@@ -95,13 +108,15 @@ public:
 
   static void Comms(void)
   {
-    int Nloop=100;
+    int Nloop=1000;
     int nmu=0;
     int maxlat=32;
 
     std::vector simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
     std::vector mpi_layout  = GridDefaultMpi();
 
+    for(int mu=0;mu1) nmu++;
+
     std::vector t_time(Nloop);
     time_statistics timestat;
@@ -133,13 +148,14 @@ public:
       bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
     }
 
-    int ncomm;
     int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
+    int ncomm;
     double dbytes;
+    std::vector times(Nloop);
     for(int i=0;i requests;
       dbytes=0;
       ncomm=0;
@@ -150,7 +166,6 @@ public:
 
 	if (mpi_layout[mu]>1 ) {
 	
-	  ncomm++;
 	  int xmit_to_rank;
 	  int recv_from_rank;
 	  if ( dir == mu ) {
@@ -160,18 +175,18 @@ public:
 	    int comm_proc = mpi_layout[mu]-1;
 	    Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
 	  }
-#if 1
-	  tbytes= Grid.StencilSendToRecvFromBegin(requests,
-						  (void *)&xbuf[dir][0],
-						  xmit_to_rank,
-						  (void *)&rbuf[dir][0],
-						  recv_from_rank,
-						  bytes,dir);
-	  Grid.StencilSendToRecvFromComplete(requests,dir);
-#endif
-	  requests.resize(0);
-
+	  tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
+					     (void *)&rbuf[dir][0], recv_from_rank,
+					     bytes,dir);
+
+#ifdef GRID_OMP
#pragma omp atomic
+#endif
+	  ncomm++;
+
+#ifdef GRID_OMP
+#pragma omp atomic
+#endif
 	  dbytes+=tbytes;
 	}
       }
@@ -181,13 +196,15 @@ public:
       }
       timestat.statistics(t_time);
+      //  for(int i=0;i
 
       dbytes=dbytes*ppn;
       double xbytes    = dbytes*0.5;
@@ -253,8 +271,7 @@ public:
     }
   };
 
-
-  static void DWF(int Ls,int L)
+  static double DWF5(int Ls,int L)
   {
     RealD mass=0.1;
     RealD M5  =1.8;
@@ -262,6 +279,7 @@ public:
     double mflops;
     double mflops_best = 0;
     double mflops_worst= 0;
+    std::vector mflops_all;
 
     ///////////////////////////////////////////////////////
     // Set/Get the layout & grid size
     ///////////////////////////////////////////////////////
     int threads = GridThread::GetThreads();
     std::vector mpi = GridDefaultMpi(); assert(mpi.size()==4);
     std::vector local({L,L,L,L});
 
     GridCartesian * TmpGrid   = SpaceTimeGrid::makeFourDimGrid(std::vector({64,64,64,64}),
								GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
     uint64_t NP = TmpGrid->RankCount();
     uint64_t NN = TmpGrid->NodeCount();
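The benchmark loops in this file reduce a vector of per-iteration timings with a statistics() call (the mean and err fields of the time_statistics struct are visible near the top of the file). As a rough standalone illustration of that reduction, not Grid's implementation and with field names chosen only for the sketch, the arithmetic looks like this:

    // Illustrative sketch of a mean / standard-error / max reduction over
    // sampled loop times; assumes the sample vector is non-empty.
    #include <algorithm>
    #include <cmath>
    #include <vector>

    struct sample_statistics {
      double mean, err, max;
      void statistics(const std::vector<double> &v) {
        mean = 0.0;
        for (double x : v) mean += x;
        mean /= v.size();
        double var = 0.0;
        for (double x : v) var += (x - mean) * (x - mean);
        var /= v.size();
        err = std::sqrt(var / v.size());              // standard error of the mean
        max = *std::max_element(v.begin(), v.end());  // slowest iteration seen
      }
    };
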
@@ -274,6 +292,189 @@ public:
     uint64_t NP = TmpGrid->RankCount();
     uint64_t NN = TmpGrid->NodeCount();
+    NN_global=NN;
+    uint64_t SHM=NP/NN;
+
+    std::vector internal;
+    if      ( SHM == 1 )   internal = std::vector({1,1,1,1});
+    else if ( SHM == 2 )   internal = std::vector({2,1,1,1});
+    else if ( SHM == 4 )   internal = std::vector({2,2,1,1});
+    else if ( SHM == 8 )   internal = std::vector({2,2,2,1});
+    else assert(0);
+
+    std::vector nodes({mpi[0]/internal[0],mpi[1]/internal[1],mpi[2]/internal[2],mpi[3]/internal[3]});
+    std::vector latt4({local[0]*nodes[0],local[1]*nodes[1],local[2]*nodes[2],local[3]*nodes[3]});
+
+    ///////// Welcome message ////////////
+    std::cout< seeds4({1,2,3,4});
+    std::vector seeds5({5,6,7,8});
+    GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);
+    GridParallelRNG RNG5(sFGrid); RNG5.SeedFixedIntegers(seeds5);
+    std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
+
+    ///////// Source preparation ////////////
+    LatticeFermion src   (sFGrid); random(RNG5,src);
+    LatticeFermion tmp   (sFGrid);
+
+    RealD N2 = 1.0/::sqrt(norm2(src));
+    src = src*N2;
+
+    LatticeGaugeField Umu(UGrid);  SU3::HotConfiguration(RNG4,Umu);
+
+    WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5);
+    LatticeFermion src_e (sFrbGrid);
+    LatticeFermion src_o (sFrbGrid);
+    LatticeFermion r_e   (sFrbGrid);
+    LatticeFermion r_o   (sFrbGrid);
+    LatticeFermion r_eo  (sFGrid);
+    LatticeFermion err   (sFGrid);
+    {
+
+      pickCheckerboard(Even,src_e,src);
+      pickCheckerboard(Odd,src_o,src);
+
+#if defined(AVX512)
+      const int num_cases = 6;
+      std::string fmt("A/S ; A/O ; U/S ; U/O ; G/S ; G/O ");
+#else
+      const int num_cases = 4;
+      std::string fmt("U/S ; U/O ; G/S ; G/O ");
+#endif
+      controls Cases [] = {
+#ifdef AVX512
+	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential },
+	{ QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential },
+#endif
+	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential },
+	{ QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential },
+	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential },
+	{ QCD::WilsonKernelsStatic::OptGeneric   , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential }
+      };
+
+      for(int c=0;cBarrier();
+	for(int i=0;iBarrier();
+	double t1=usecond();
+	//	uint64_t ncall = (uint64_t) 2.5*1000.0*1000.0*nwarm/(t1-t0);
+	//	if (ncall < 500) ncall = 500;
+	uint64_t ncall = 1000;
+
+	sFGrid->Broadcast(0,&ncall,sizeof(ncall));
+
+	//	std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"< t_time(ncall);
+	for(uint64_t i=0;iBarrier();
+
+	double volume=Ls;  for(int mu=0;mumflops_best ) mflops_best = mflops;
+	if ( mflops mflops_all;
 
   /////////////////////////////////////////////////////////
   // Set/Get the layout & grid size
   /////////////////////////////////////////////////////////
   int threads = GridThread::GetThreads();
   std::vector mpi = GridDefaultMpi(); assert(mpi.size()==4);
   std::vector local({L,L,L,L});
 
   GridCartesian * TmpGrid   = SpaceTimeGrid::makeFourDimGrid(std::vector({64,64,64,64}),
							      GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
   uint64_t NP = TmpGrid->RankCount();
   uint64_t NN = TmpGrid->NodeCount();
+  NN_global=NN;
   uint64_t SHM=NP/NN;
 
   std::vector internal;
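The timing loops above turn a call count and a pair of usecond() stamps into Mflop/s: because the elapsed time is measured in microseconds, flops divided by (t1-t0) is already in Mflop/s, and per-rank and per-node figures follow by dividing by NP and NN. A worked sketch with made-up numbers; the flops-per-site constant here is an assumption for illustration only and is not taken from this excerpt:

    // Sketch of the Mflop/s accounting used by the benchmark loops above.
    #include <cstdint>
    #include <iostream>

    int main() {
      double   Ls = 16, L = 16;
      double   volume = Ls * L * L * L * L;        // 5d sites per rank (example)
      uint64_t ncall  = 1000;                      // calls actually timed
      double   t0 = 0.0, t1 = 2.0e6;               // usecond() stamps: 2 s elapsed
      double   flops_per_site = 1344.0;            // assumed per-site Dhop cost (illustrative)
      double   flops  = flops_per_site * volume * ncall;
      double   mflops = flops / (t1 - t0);         // flops per microsecond == Mflop/s
      std::cout << mflops << " Mflop/s total" << std::endl;
      return 0;
    }
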
@@ -364,13 +565,15 @@ public:
 
 #if defined(AVX512)
     const int num_cases = 6;
+    std::string fmt("A/S ; A/O ; U/S ; U/O ; G/S ; G/O ");
 #else
     const int num_cases = 4;
+    std::string fmt("U/S ; U/O ; G/S ; G/O ");
 #endif
     controls Cases [] = {
 #ifdef AVX512
-      { QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential },
       { QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential },
+      { QCD::WilsonKernelsStatic::OptInlineAsm , QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential },
 #endif
       { QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsThenCompute ,CartesianCommunicator::CommunicatorPolicySequential },
       { QCD::WilsonKernelsStatic::OptHandUnroll, QCD::WilsonKernelsStatic::CommsAndCompute  ,CartesianCommunicator::CommunicatorPolicySequential },
@@ -394,7 +597,7 @@ public:
       if ( sizeof(Real)==8 )   std::cout << GridLogMessage<< "* DOUBLE precision "<Barrier();
       for(int i=0;iBarrier();
       double t1=usecond();
-      uint64_t ncall = (uint64_t) 2.5*1000.0*1000.0*nwarm/(t1-t0);
+      //      uint64_t ncall = (uint64_t) 2.5*1000.0*1000.0*nwarm/(t1-t0);
+      //      if (ncall < 500) ncall = 500;
+      uint64_t ncall = 1000;
+
       FGrid->Broadcast(0,&ncall,sizeof(ncall));
 
       //      std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<mflops_best ) mflops_best = mflops;
@@ -450,12 +656,20 @@ public:
     }
 
     std::cout< L_list({8,12,16,24});
+
+  std::vector wilson;
+  std::vector dwf4;
+  std::vector dwf5;
+
   if ( do_wilson ) {
     int Ls=1;
     std::cout< > xbuf(8,Vector(lat*lat*lat*Ls));
-      Vector > rbuf(8,Vector(lat*lat*lat*Ls));
+      std::vector > xbuf(8);
+      std::vector > rbuf(8);
 
       int ncomm;
       int bytes=lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD);
+      for(int mu=0;mu<8;mu++){
+	xbuf[mu].resize(lat*lat*lat*Ls);
+	rbuf[mu].resize(lat*lat*lat*Ls);
+	//	std::cout << " buffers " << std::hex << (uint64_t)&xbuf[mu][0] <<" " << (uint64_t)&rbuf[mu][0] < > xbuf(8,Vector(lat*lat*lat*Ls));
-      Vector > rbuf(8,Vector(lat*lat*lat*Ls));
+      std::vector > xbuf(8);
+      std::vector > rbuf(8);
+      for(int mu=0;mu<8;mu++){
+	xbuf[mu].resize(lat*lat*lat*Ls);
+	rbuf[mu].resize(lat*lat*lat*Ls);
+	//	std::cout << " buffers " << std::hex << (uint64_t)&xbuf[mu][0] <<" " << (uint64_t)&rbuf[mu][0] < &waitall,int dir)
 {
-  // Do nothing
+  int nreq=waitall.size();
+  MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE);
 };
 double CartesianCommunicator::StencilSendToRecvFrom(void *xmit,
						     int xmit_to_rank,
@@ -262,7 +275,7 @@ double CartesianCommunicator::StencilSendToRecvFrom(void *xmit,
   // Give the CPU to MPI immediately; can use threads to overlap optionally
   MPI_Request req[2];
   MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[dir],&req[1]);
-  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank,myrank, communicator_halo[dir], &req[0]);
+  MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank  ,myrank         , communicator_halo[dir],&req[0]);
   MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
   return 2.0*bytes;
 }
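The communicator change above replaces the previously empty StencilSendToRecvFromComplete with an MPI_Waitall over the stored requests, while StencilSendToRecvFrom posts an Irecv/Isend pair and waits for both before returning 2*bytes. A minimal self-contained sketch of that post-then-wait pattern; the function name, tag handling and argument list here are placeholders, not Grid's API:

    #include <mpi.h>
    #include <vector>

    // Post a non-blocking receive and send, then block until both complete.
    // Returns the byte count moved in both directions, mirroring the
    // 2.0*bytes accounting used in the patch.
    double sendrecv_wait(MPI_Comm comm, void *xmit, int to, void *recv, int from,
                         int bytes, int tag)
    {
      std::vector<MPI_Request> reqs(2);
      MPI_Irecv(recv, bytes, MPI_CHAR, from, tag, comm, &reqs[0]);
      MPI_Isend(xmit, bytes, MPI_CHAR, to,   tag, comm, &reqs[1]);
      MPI_Waitall(2, reqs.data(), MPI_STATUSES_IGNORE);  // complete both requests
      return 2.0 * bytes;
    }
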
diff --git a/lib/qcd/action/fermion/CayleyFermion5D.cc b/lib/qcd/action/fermion/CayleyFermion5D.cc
index 46ba3793..5e67d1f1 100644
--- a/lib/qcd/action/fermion/CayleyFermion5D.cc
+++ b/lib/qcd/action/fermion/CayleyFermion5D.cc
@@ -429,7 +429,7 @@ void CayleyFermion5D::SetCoefficientsInternal(RealD zolo_hi,std::vectorM5) +1.0);
-    //    assert(fabs(bee[i])>0.0);
+    assert(fabs(bee[i])>0.0);
     cee[i]=as[i]*(1.0-cs[i]*(4.0-this->M5));
     beo[i]=as[i]*bs[i];
     ceo[i]=-as[i]*cs[i];
@@ -455,11 +455,17 @@ void CayleyFermion5D::SetCoefficientsInternal(RealD zolo_hi,std::vector0.0);
+      assert(fabs(bee[0])>0.0);
 
       lee[i] =-cee[i+1]/bee[i]; // sub-diag entry on the ith column
 
       leem[i]=mass*cee[Ls-1]/bee[0];
-      for(int j=0;j0.0);
+	leem[i]*= aee[j]/bee[j+1];
+      }
 
       uee[i] =-aee[i]/bee[i];   // up-diag entry on the ith row
@@ -478,7 +484,7 @@ void CayleyFermion5D::SetCoefficientsInternal(RealD zolo_hi,std::vector0.0);
+	assert(fabs(bee[j])>0.0);
 	delta_d *= cee[j]/bee[j];
       }
       dee[Ls-1] += delta_d;
diff --git a/lib/qcd/action/fermion/WilsonCompressor.h b/lib/qcd/action/fermion/WilsonCompressor.h
index 96cbe1ec..30c6d838 100644
--- a/lib/qcd/action/fermion/WilsonCompressor.h
+++ b/lib/qcd/action/fermion/WilsonCompressor.h
@@ -238,7 +238,35 @@ template using WilsonCompressor = WilsonCom
 template class WilsonStencil : public CartesianStencil {
 public:
-
+  double timer0;
+  double timer1;
+  double timer2;
+  double timer3;
+  double timer4;
+  double timer5;
+  double timer6;
+  uint64_t callsi;
+  void ZeroCountersi(void)
+  {
+    std::cout << GridLogMessage << " ZeroCountersi()"< same_node;
@@ -252,6 +280,7 @@ public:
     : CartesianStencil (grid,npoints,checkerboard,directions,distances) ,
     same_node(npoints)
   {
+    ZeroCountersi();
     surface_list.resize(0);
   };
@@ -282,17 +311,25 @@ public:
   {
     std::vector > reqs;
     this->HaloExchangeOptGather(source,compress);
+    double t1=usecond();
     this->CommunicateBegin(reqs);
     this->CommunicateComplete(reqs);
+    double t2=usecond(); timer1 += t2-t1;
     this->CommsMerge(compress);
+    double t3=usecond(); timer2 += t3-t2;
     this->CommsMergeSHM(compress);
+    double t4=usecond(); timer3 += t4-t3;
   }
 
   template
   void HaloExchangeOptGather(const Lattice &source,compressor &compress)
   {
     this->Prepare();
+    double t0=usecond();
     this->HaloGatherOpt(source,compress);
+    double t1=usecond();
+    timer0 += t1-t0;
+    callsi++;
   }
 
   template
@@ -304,7 +341,9 @@ public:
     typedef typename compressor::SiteHalfSpinor     SiteHalfSpinor;
     typedef typename compressor::SiteHalfCommSpinor SiteHalfCommSpinor;
 
+    this->mpi3synctime_g-=usecond();
     this->_grid->StencilBarrier();
+    this->mpi3synctime_g+=usecond();
 
     assert(source._grid==this->_grid);
     this->halogtime-=usecond();
diff --git a/lib/qcd/action/fermion/WilsonFermion5D.cc b/lib/qcd/action/fermion/WilsonFermion5D.cc
index 404ecce0..c5b0f872 100644
--- a/lib/qcd/action/fermion/WilsonFermion5D.cc
+++ b/lib/qcd/action/fermion/WilsonFermion5D.cc
@@ -185,6 +185,11 @@ void WilsonFermion5D::Report(void)
   std::cout << GridLogMessage << "WilsonFermion5D StencilEven"< 0){
+    std::cout << GridLogMessage << "WilsonFermion5D Stencil     Reporti()"   <
@@ -204,6 +209,9 @@ void WilsonFermion5D::ZeroCounters(void) {
   Stencil.ZeroCounters();
   StencilEven.ZeroCounters();
   StencilOdd.ZeroCounters();
+  Stencil.ZeroCountersi();
+  StencilEven.ZeroCountersi();
+  StencilOdd.ZeroCountersi();
 }
 
@@ -445,6 +453,9 @@ void WilsonFermion5D::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
   DhopCommTime   += ctime;
   DhopComputeTime+=ptime;
 
+  // First to enter, last to leave timing
+  st.CollateThreads();
+
   DhopFaceTime-=usecond();
   st.CommsMerge(compressor);
   DhopFaceTime+=usecond();
diff --git a/lib/stencil/Stencil.h b/lib/stencil/Stencil.h
index cca67587..ad454bcb 100644
--- a/lib/stencil/Stencil.h
+++ b/lib/stencil/Stencil.h
@@ -176,6 +176,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
   // Timing info; ugly; possibly temporary
   /////////////////////////////////////////
   double commtime;
+  double mpi3synctime;
+  double mpi3synctime_g;
+  double shmmergetime;
   double gathertime;
   double gathermtime;
   double halogtime;
@@ -185,8 +188,10 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
   double splicetime;
   double nosplicetime;
   double calls;
-  std::vector comms_bytesthr;
-  std::vector commtimethr;
+  std::vector comm_bytes_thr;
+  std::vector comm_time_thr;
+  std::vector comm_enter_thr;
+  std::vector comm_leave_thr;
 
   ////////////////////////////////////////
   // Stencil query
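The new comm_bytes_thr / comm_time_thr / comm_enter_thr / comm_leave_thr members record, per communication thread, how much data it moved and when it started and finished its share of the packets; the CollateThreads routine in the next hunk collapses the stamps into a single wall-clock window (first thread to enter, last thread to leave). A standalone sketch of that reduction, with names chosen for illustration rather than taken from the class:

    #include <vector>

    // Collapse per-thread enter/leave timestamps into one wall-clock window:
    // the communication phase spans from the first thread to enter until the
    // last thread to leave. Threads that never communicated keep their stamps
    // at zero and are skipped. Illustrative helper, not the Grid member itself.
    double comms_wallclock(const std::vector<double> &enter,
                           const std::vector<double> &leave)
    {
      double first = 0.0, last = 0.0;
      for (size_t t = 0; t < enter.size(); t++) {
        double t0 = enter[t], t1 = leave[t];
        if (t0 == 0.0 && t1 == 0.0) continue;        // thread did no communication
        if (first == 0.0 || t0 < first) first = t0;  // earliest entry seen
        if (t1 > last) last = t1;                    // latest exit seen
      }
      return last - first;
    }
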
@@ -262,18 +267,45 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
 #endif
     if (nthreads == -1) nthreads = 1;
     if (mythread < nthreads) {
+      comm_enter_thr[mythread] = usecond();
       for (int i = mythread; i < Packets.size(); i += nthreads) {
-	double start = usecond();
 	uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
						      Packets[i].to_rank,
						      Packets[i].recv_buf,
						      Packets[i].from_rank,
						      Packets[i].bytes,i);
-	comms_bytesthr[mythread] += bytes;
-	commtimethr[mythread] += usecond() - start;
+	comm_bytes_thr[mythread] += bytes;
       }
+      comm_leave_thr[mythread]= usecond();
+      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
     }
   }
+
+  void CollateThreads(void)
+  {
+    int nthreads = CartesianCommunicator::nCommThreads;
+    double first=0.0;
+    double last =0.0;
+
+    for(int t=0;t 0.0) && ( t0 < first ) ) first = t0; // min time seen
+
+      if ( t1 > last ) last = t1; // max time seen
+
+    }
+    commtime+= last-first;
+  }
   void CommunicateBegin(std::vector > &reqs)
   {
     reqs.resize(Packets.size());
@@ -295,14 +327,48 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
     }
     commtime+=usecond();
   }
+  void Communicate(void)
+  {
+#ifdef GRID_OMP
+#pragma omp parallel
+    {
+      // must be called in parallel region
+      int mythread  = omp_get_thread_num();
+      int maxthreads= omp_get_max_threads();
+      int nthreads  = CartesianCommunicator::nCommThreads;
+      assert(nthreads <= maxthreads);
+
+      if (nthreads == -1) nthreads = 1;
+#else
+      int mythread = 0;
+      int nthreads = 1;
+#endif
+      if (mythread < nthreads) {
+	for (int i = mythread; i < Packets.size(); i += nthreads) {
+	  double start = usecond();
+	  comm_bytes_thr[mythread] += _grid->StencilSendToRecvFrom(Packets[i].send_buf,
								    Packets[i].to_rank,
								    Packets[i].recv_buf,
								    Packets[i].from_rank,
								    Packets[i].bytes,i);
+	  comm_time_thr[mythread] += usecond() - start;
+	}
+      }
+#ifdef GRID_OMP
+    }
+#endif
+  }
 
   template void HaloExchange(const Lattice &source,compressor &compress)
   {
     std::vector > reqs;
     Prepare();
     HaloGather(source,compress);
+    // Concurrent
     CommunicateBegin(reqs);
     CommunicateComplete(reqs);
+    // Sequential
+    //    Communicate();
     CommsMergeSHM(compress);
     CommsMerge(compress);
   }
@@ -363,7 +429,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
   template
   void HaloGather(const Lattice &source,compressor &compress)
   {
+    mpi3synctime_g-=usecond();
     _grid->StencilBarrier();// Synch shared memory on a single nodes
+    mpi3synctime_g+=usecond();
 
     // conformable(source._grid,_grid);
     assert(source._grid==_grid);
@@ -423,8 +491,12 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
     CommsMerge(decompress,Mergers,Decompressions);
   }
   template void CommsMergeSHM(decompressor decompress) {
+    mpi3synctime-=usecond();
     _grid->StencilBarrier();// Synch shared memory on a single nodes
+    mpi3synctime+=usecond();
+    shmmergetime-=usecond();
     CommsMerge(decompress,MergersSHM,DecompressionsSHM);
+    shmmergetime+=usecond();
   }
 
   template
@@ -470,8 +542,10 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
 		   const std::vector &distances)
     :   _permute_type(npoints),
	_comm_buf_size(npoints),
-	comms_bytesthr(npoints),
-	commtimethr(npoints)
+	comm_bytes_thr(npoints),
+	comm_enter_thr(npoints),
+	comm_leave_thr(npoints),
+	comm_time_thr(npoints)
   {
     face_table_computed=0;
     _npoints = npoints;
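The ZeroCounters hunk that follows replaces the old memset calls on these per-thread vectors with an explicit element-by-element reset, presumably because sizeof applied to a std::vector gives the size of the vector object itself rather than of its element storage, so the memset cleared only a few bytes. A small sketch of the distinction (illustrative code, not the patch's):

    #include <algorithm>
    #include <vector>

    int main() {
      std::vector<double> comm_time_thr(8, 1.0);
      // sizeof(comm_time_thr) is the size of the vector object (its internal
      // pointers/size fields), not 8*sizeof(double), so
      // memset(&comm_time_thr[0], 0, sizeof(comm_time_thr)) would clear too little.
      std::fill(comm_time_thr.begin(), comm_time_thr.end(), 0.0);  // clears every element
      // or: comm_time_thr.assign(comm_time_thr.size(), 0.0);
      return 0;
    }
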
@@ -1025,8 +1099,15 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
   void ZeroCounters(void) {
     gathertime = 0.;
     commtime = 0.;
-    memset(&commtimethr[0], 0, sizeof(commtimethr));
-    memset(&comms_bytesthr[0], 0, sizeof(comms_bytesthr));
+    mpi3synctime=0.;
+    mpi3synctime_g=0.;
+    shmmergetime=0.;
+    for(int i=0;i<_npoints;i++){
+      comm_time_thr[i]=0;
+      comm_bytes_thr[i]=0;
+      comm_enter_thr[i]=0;
+      comm_leave_thr[i]=0;
+    }
     halogtime = 0.;
     mergetime = 0.;
     decompresstime = 0.;
@@ -1043,13 +1124,17 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
     RealD NP = _grid->_Nprocessors;
     RealD NN = _grid->NodeCount();
     double t = 0;
-    // if commtimethr is set they were all done in parallel so take the max
+    // if comm_time_thr is set they were all done in parallel so take the max
     // but add up the bytes
+    int threaded = 0 ;
     for (int i = 0; i < 8; ++i) {
-      comms_bytes += comms_bytesthr[i];
-      if (t < commtimethr[i]) t = commtimethr[i];
+      if ( comm_time_thr[i]>0.0 ) {
+	threaded = 1;
+	comms_bytes += comm_bytes_thr[i];
+	if (t < comm_time_thr[i]) t = comm_time_thr[i];
+      }
     }
-    commtime += t;
+    if (threaded) commtime += t;
     _grid->GlobalSum(commtime);    commtime/=NP;
     if ( calls > 0. ) {
@@ -1065,6 +1150,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
       std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000. << " GB/s per rank"<
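The (truncated) Report changes above aggregate the per-thread counters by summing bytes across threads but taking the maximum per-thread time, since threaded transfers overlap on the same wall clock; the quoted GB/s figure then follows from bytes over that maximum. A short sketch of the same aggregation, with illustrative names:

    #include <vector>

    // Aggregate per-thread comms statistics the way the Report hunk above does:
    // bytes add up across threads, but concurrent threads share the same wall
    // clock, so the elapsed time is the slowest thread's time, not the sum.
    // Takes bytes and times in microseconds; returns GB/s. Illustrative helper.
    double effective_rate_GBps(const std::vector<double> &bytes_thr,
                               const std::vector<double> &time_thr_usec)
    {
      double bytes = 0.0, t = 0.0;
      for (size_t i = 0; i < bytes_thr.size(); i++) {
        if (time_thr_usec[i] > 0.0) {
          bytes += bytes_thr[i];
          if (time_thr_usec[i] > t) t = time_thr_usec[i];
        }
      }
      if (t == 0.0) return 0.0;
      return bytes / t / 1000.0;  // bytes/usec == MB/s; divide by 1000 for GB/s
    }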