Mirror of https://github.com/paboyle/Grid.git

Merge branch 'develop' into feature/qed-fvol

Author: James Harrison
Date:   2018-02-07 10:11:00 +00:00
Commit: 7f8ca54285
15 changed files with 186 additions and 38 deletions


@@ -187,10 +187,11 @@ Alternatively, some CPU codenames can be directly used:

 | `<code>` | Description |
 | ----------- | -------------------------------------- |
 | `KNL`       | [Intel Xeon Phi codename Knights Landing](http://ark.intel.com/products/codename/48999/Knights-Landing) |
+| `SKL`       | [Intel Skylake with AVX512 extensions](https://ark.intel.com/products/codename/37572/Skylake#@server) |
 | `BGQ`       | Blue Gene/Q |

#### Notes:
- - We currently support AVX512 only for the Intel compiler. Support for GCC and clang will appear in future versions of Grid when the AVX512 support within GCC and clang will be more advanced.
+ - We currently support AVX512 for the Intel compiler and GCC (KNL and SKL targets). Support for clang will appear in future versions of Grid when the AVX512 support in the compiler is more advanced.
 - For BG/Q only [bgclang](http://trac.alcf.anl.gov/projects/llvm-bgq) is supported. We do not presently plan to support more compilers for this platform.
 - BG/Q performances are currently rather poor. This is being investigated for future versions.
 - The vector size for the `GEN` target can be specified with the `configure` script option `--enable-gen-simd-width` (see the example below).
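For illustration, a sketch of a configure invocation using this option (per Grid's build documentation the width is given in bytes; the value 16 and the build layout are just examples):

    ../configure --enable-simd=GEN --enable-gen-simd-width=16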


@@ -48,7 +48,6 @@ int main (int argc, char ** argv)
  int threads = GridThread::GetThreads();
- std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
  std::vector<int> latt4 = GridDefaultLatt();
  int Ls=16;

@@ -57,6 +56,10 @@ int main (int argc, char ** argv)
    std::stringstream ss(argv[i+1]); ss >> Ls;
  }
+ GridLogLayout();
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);

@@ -187,7 +190,7 @@ int main (int argc, char ** argv)
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=1344*volume*ncall;
+ double flops=single_site_flops*volume*ncall;
  std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  // std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;

@@ -226,7 +229,7 @@ int main (int argc, char ** argv)
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=1344*volume*ncall;
+ double flops=single_site_flops*volume*ncall;
  std::cout<<GridLogMessage << "Called half prec comms Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;

@@ -277,7 +280,7 @@ int main (int argc, char ** argv)
  double t1=usecond();
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=1344*volume*ncall;
+ double flops=single_site_flops*volume*ncall;
  std::cout<<GridLogMessage << "Called Dw s_inner "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;

@@ -355,7 +358,7 @@ int main (int argc, char ** argv)
  // sDw.stat.print();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=(1344.0*volume*ncall)/2;
+ double flops=(single_site_flops*volume*ncall)/2.0;
  std::cout<<GridLogMessage << "sDeo mflop/s = "<< flops/(t1-t0)<<std::endl;
  std::cout<<GridLogMessage << "sDeo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;

@@ -478,7 +481,7 @@ int main (int argc, char ** argv)
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=(1344.0*volume*ncall)/2;
+ double flops=(single_site_flops*volume*ncall)/2.0;
  std::cout<<GridLogMessage << "Deo mflop/s = "<< flops/(t1-t0)<<std::endl;
  std::cout<<GridLogMessage << "Deo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;


@@ -51,6 +51,7 @@ int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
  std::cout << GridLogMessage<< "* Kernel options --dslash-generic, --dslash-unroll, --dslash-asm" <<std::endl;
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

@@ -107,6 +108,7 @@ void benchDw(std::vector<int> & latt4, int Ls, int threads,int report )
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});

@@ -196,7 +198,7 @@ void benchDw(std::vector<int> & latt4, int Ls, int threads,int report )
  if ( ! report ) {
    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-   double flops=1344*volume*ncall;
+   double flops=single_site_flops*volume*ncall;
    std::cout <<"\t"<<NP<< "\t"<<flops/(t1-t0)<< "\t";
  }

@@ -228,7 +230,7 @@ void benchDw(std::vector<int> & latt4, int Ls, int threads,int report )
  if(!report){
    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-   double flops=(1344.0*volume*ncall)/2;
+   double flops=(single_site_flops*volume*ncall)/2.0;
    std::cout<< flops/(t1-t0);
  }
}

@@ -237,6 +239,7 @@ void benchDw(std::vector<int> & latt4, int Ls, int threads,int report )
#define CHECK_SDW
void benchsDw(std::vector<int> & latt4, int Ls, int threads, int report )
{
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(latt4, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);

@@ -321,7 +324,7 @@ void benchsDw(std::vector<int> & latt4, int Ls, int threads, int report )
    Counter.Report();
  } else {
    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-   double flops=1344*volume*ncall;
+   double flops=single_site_flops*volume*ncall;
    std::cout<<"\t"<< flops/(t1-t0);
  }

@@ -358,7 +361,7 @@ void benchsDw(std::vector<int> & latt4, int Ls, int threads, int report )
    CounterSdw.Report();
  } else {
    double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
-   double flops=(1344.0*volume*ncall)/2;
+   double flops=(single_site_flops*volume*ncall)/2.0;
    std::cout<<"\t"<< flops/(t1-t0);
  }
}


@@ -107,7 +107,7 @@ int main (int argc, char ** argv)
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=2*1344*volume*ncall;
+ double flops=2*1320*volume*ncall;
  std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  // std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;

@@ -134,7 +134,7 @@ int main (int argc, char ** argv)
  FGrid->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=2*1344*volume*ncall;
+ double flops=2*1320*volume*ncall;
  std::cout<<GridLogMessage << "Called half prec comms Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;

@@ -174,7 +174,7 @@ int main (int argc, char ** argv)
  FGrid_d->Barrier();
  double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
- double flops=2*1344*volume*ncall;
+ double flops=2*1320*volume*ncall;
  std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
  // std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;


@@ -4,7 +4,7 @@
  Source file: ./benchmarks/Benchmark_wilson.cc
- Copyright (C) 2015
+ Copyright (C) 2018
  Author: Peter Boyle <paboyle@ph.ed.ac.uk>
  Author: paboyle <paboyle@ph.ed.ac.uk>

@@ -32,6 +32,9 @@ using namespace std;
  using namespace Grid;
  using namespace Grid::QCD;
+ #include "Grid/util/Profiling.h"
  template<class d>
  struct scal {
    d internal;

@@ -45,6 +48,7 @@
  };
  bool overlapComms = false;
+ bool perfProfiling = false;

  int main (int argc, char ** argv)
  {

@@ -53,6 +57,12 @@ int main (int argc, char ** argv)
  if( GridCmdOptionExists(argv,argv+argc,"--asynch") ){
    overlapComms = true;
  }
+ if( GridCmdOptionExists(argv,argv+argc,"--perf") ){
+   perfProfiling = true;
+ }
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
  std::vector<int> latt_size   = GridDefaultLatt();
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());

@@ -61,10 +71,15 @@ int main (int argc, char ** argv)
  GridRedBlackCartesian RBGrid(&Grid);
  int threads = GridThread::GetThreads();
- std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
+ GridLogLayout();
  std::cout<<GridLogMessage << "Grid floating point word size is REALF"<< sizeof(RealF)<<std::endl;
  std::cout<<GridLogMessage << "Grid floating point word size is REALD"<< sizeof(RealD)<<std::endl;
  std::cout<<GridLogMessage << "Grid floating point word size is REAL"<< sizeof(Real)<<std::endl;
+ std::cout<<GridLogMessage << "Grid number of colours : "<< QCD::Nc <<std::endl;
+ std::cout<<GridLogMessage << "Benchmarking Wilson operator in the fundamental representation" << std::endl;
  std::vector<int> seeds({1,2,3,4});
  GridParallelRNG pRNG(&Grid);

@@ -134,9 +149,25 @@ int main (int argc, char ** argv)
    Dw.Dhop(src,result,0);
  }
  double t1=usecond();
- double flops=1344*volume*ncall;
+ double flops=single_site_flops*volume*ncall;
+ if (perfProfiling){
+   std::cout<<GridLogMessage << "Profiling Dw with perf"<<std::endl;
+   System::profile("kernel", [&]() {
+     for(int i=0;i<ncall;i++){
+       Dw.Dhop(src,result,0);
+     }
+   });
+   std::cout<<GridLogMessage << "Generated kernel.data"<<std::endl;
+   std::cout<<GridLogMessage << "Use with: perf report -i kernel.data"<<std::endl;
+ }
  std::cout<<GridLogMessage << "Called Dw"<<std::endl;
+ std::cout<<GridLogMessage << "flops per site " << single_site_flops << std::endl;
  std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
  std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;


@@ -62,6 +62,7 @@ int main (int argc, char ** argv)
  std::cout << GridLogMessage<< "* Kernel options --dslash-generic, --dslash-unroll, --dslash-asm" <<std::endl;
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
+ std::cout << GridLogMessage<< "* Number of colours "<< QCD::Nc <<std::endl;
  std::cout << GridLogMessage<< "* Benchmarking WilsonFermionR::Dhop "<<std::endl;
  std::cout << GridLogMessage<< "* Vectorising space-time by "<<vComplex::Nsimd()<<std::endl;
  if ( sizeof(Real)==4 ) std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;

@@ -69,13 +70,15 @@ int main (int argc, char ** argv)
  if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
  if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3 WilsonKernels" <<std::endl;
  if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
+ std::cout << GridLogMessage << "* OpenMP threads : "<< GridThread::GetThreads() <<std::endl;
+ std::cout << GridLogMessage << "* MPI tasks      : "<< GridCmdVectorIntToString(mpi_layout) << std::endl;
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
- std::cout<<GridLogMessage << "============================================================================="<< std::endl;
- std::cout<<GridLogMessage << "= Benchmarking Wilson" << std::endl;
- std::cout<<GridLogMessage << "============================================================================="<< std::endl;
- std::cout<<GridLogMessage << "Volume\t\t\tWilson/MFLOPs\tWilsonDag/MFLOPs" << std::endl;
- std::cout<<GridLogMessage << "============================================================================="<< std::endl;
+ std::cout<<GridLogMessage << "================================================================================================="<< std::endl;
+ std::cout<<GridLogMessage << "= Benchmarking Wilson operator in the fundamental representation" << std::endl;
+ std::cout<<GridLogMessage << "================================================================================================="<< std::endl;
+ std::cout<<GridLogMessage << "Volume\t\t\tWilson/MFLOPs\tWilsonDag/MFLOPs\tWilsonEO/MFLOPs\tWilsonDagEO/MFLOPs" << std::endl;
+ std::cout<<GridLogMessage << "================================================================================================="<< std::endl;
  int Lmax = 32;
  int dmin = 0;
@@ -98,12 +101,19 @@ int main (int argc, char ** argv)
  GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(seeds);
  LatticeGaugeField Umu(&Grid); random(pRNG,Umu);
  LatticeFermion src(&Grid); random(pRNG,src);
+ LatticeFermion src_o(&RBGrid); pickCheckerboard(Odd,src_o,src);
  LatticeFermion result(&Grid); result=zero;
+ LatticeFermion result_e(&RBGrid); result_e=zero;
  double volume = std::accumulate(latt_size.begin(),latt_size.end(),1,std::multiplies<int>());
  WilsonFermionR Dw(Umu,Grid,RBGrid,mass,params);
+ // Full operator
  bench_wilson(src,result,Dw,volume,DaggerNo);
  bench_wilson(src,result,Dw,volume,DaggerYes);
+ std::cout << "\t";
+ // EO
+ bench_wilson_eo(src_o,result_e,Dw,volume,DaggerNo);
+ bench_wilson_eo(src_o,result_e,Dw,volume,DaggerYes);
  std::cout << std::endl;
@@ -122,9 +132,26 @@ void bench_wilson (
  int const dag )
{
  int ncall = 1000;
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
  double t0 = usecond();
  for(int i=0; i<ncall; i++) { Dw.Dhop(src,result,dag); }
  double t1 = usecond();
- double flops = 1344 * volume * ncall;
+ double flops = single_site_flops * volume * ncall;
+ std::cout << flops/(t1-t0) << "\t\t";
+}
+
+void bench_wilson_eo (
+ LatticeFermion & src,
+ LatticeFermion & result,
+ WilsonFermionR & Dw,
+ double const volume,
+ int const dag )
+{
+ int ncall = 1000;
+ long unsigned int single_site_flops = 8*QCD::Nc*(7+16*QCD::Nc);
+ double t0 = usecond();
+ for(int i=0; i<ncall; i++) { Dw.DhopEO(src,result,dag); }
+ double t1 = usecond();
+ double flops = (single_site_flops * volume * ncall)/2.0;
  std::cout << flops/(t1-t0) << "\t\t";
}


@@ -249,6 +249,9 @@ case ${ax_cv_cxx_compiler_vendor} in
  AVX512)
    AC_DEFINE([AVX512],[1],[AVX512 intrinsics])
    SIMD_FLAGS='-mavx512f -mavx512pf -mavx512er -mavx512cd';;
+ SKL)
+   AC_DEFINE([AVX512],[1],[AVX512 intrinsics for SkyLake Xeon])
+   SIMD_FLAGS='-march=skylake-avx512';;
  KNC)
    AC_DEFINE([IMCI],[1],[IMCI intrinsics for Knights Corner])
    SIMD_FLAGS='';;
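With the new case above, Skylake AVX512 builds can be requested directly at configure time; with GCC the `SKL` case resolves to `SIMD_FLAGS='-march=skylake-avx512'` as defined above. A sketch of the invocation (compiler choice and build layout are assumptions):

    ../configure --enable-simd=SKL CXX=g++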


@@ -57,7 +57,7 @@ std::vector<std::string> TFundtoHirep<Rep>::getOutput(void)
template <typename Rep>
void TFundtoHirep<Rep>::setup(void)
{
- env().template registerLattice<typename Rep::LatticeField>(getName());
+ envCreateLat(typename Rep::LatticeField, getName());
}

// execution ///////////////////////////////////////////////////////////////////

@@ -70,6 +70,6 @@ void TFundtoHirep<Rep>::execute(void)
  Rep TargetRepresentation(U._grid);
  TargetRepresentation.update_representation(U);
- typename Rep::LatticeField &URep = *env().template createLattice<typename Rep::LatticeField>(getName());
+ auto &URep = envGet(typename Rep::LatticeField, getName());
  URep = TargetRepresentation.U;
}


@@ -182,6 +182,7 @@ void GlobalSharedMemory::OptimalCommunicator(const std::vector<int> &processors,
#ifdef GRID_MPI3_SHMMMAP
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
+ std::cout << "SharedMemoryAllocate "<< bytes<< " MMAP implementation "<<std::endl;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);
  //////////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -218,6 +219,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
    assert(((uint64_t)ptr&0x3F)==0);
    close(fd);
    WorldShmCommBufs[r] =ptr;
+   std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< bytes<< "bytes)"<<std::endl;
  }
  _ShmAlloc=1;
  _ShmAllocBytes = bytes;

@@ -232,6 +234,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
////////////////////////////////////////////////////////////////////////////////////////////
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
+ std::cout << "SharedMemoryAllocate "<< bytes<< " SHMOPEN implementation "<<std::endl;
  assert(_ShmSetup==1);
  assert(_ShmAlloc==0);
  MPI_Barrier(WorldShmComm);

@@ -259,7 +262,11 @@
#endif
    void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);
+   std::cout << "Set WorldShmCommBufs["<<r<<"]="<<ptr<< "("<< size<< "bytes)"<<std::endl;
-   if ( ptr == (void * )MAP_FAILED ) { perror("failed mmap"); assert(0); }
+   if ( ptr == (void * )MAP_FAILED ) {
+     perror("failed mmap");
+     assert(0);
+   }
    assert(((uint64_t)ptr&0x3F)==0);
    WorldShmCommBufs[r] =ptr;

@@ -318,11 +325,12 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
  heap_size = GlobalSharedMemory::ShmAllocBytes();
  for(int r=0;r<ShmSize;r++){
-   uint32_t sr = (r==ShmRank) ? GlobalSharedMemory::WorldRank : 0 ;
-   MPI_Allreduce(MPI_IN_PLACE,&sr,1,MPI_UINT32_T,MPI_SUM,comm);
-   ShmCommBufs[r] = GlobalSharedMemory::WorldShmCommBufs[sr];
+   uint32_t wsr = (r==ShmRank) ? GlobalSharedMemory::WorldShmRank : 0 ;
+   MPI_Allreduce(MPI_IN_PLACE,&wsr,1,MPI_UINT32_T,MPI_SUM,ShmComm);
+   ShmCommBufs[r] = GlobalSharedMemory::WorldShmCommBufs[wsr];
+   // std::cout << "SetCommunicator ShmCommBufs ["<< r<< "] = "<< ShmCommBufs[r]<< " wsr = "<<wsr<<std::endl;
  }
  ShmBufferFreeAll();
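The `SetCommunicator` change above fixes an indexing bug: `WorldShmCommBufs` must be indexed by the owner's rank within the node-local shared-memory communicator, so each slot's owner now publishes `WorldShmRank` over `ShmComm` instead of `WorldRank` over the global communicator. The exchange itself is a common idiom: only the owner contributes a non-zero value, so `MPI_Allreduce` with `MPI_SUM` acts as a per-slot broadcast. A minimal standalone sketch of that idiom (my own example, not Grid code):

    #include <mpi.h>
    #include <cstdint>
    #include <cstdio>

    // Rank r publishes a payload to all ranks, one slot at a time.
    // Only the owner contributes non-zero, so the SUM reduction
    // delivers slot r's payload to everyone.
    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);
      for (int r = 0; r < size; r++) {
        uint32_t v = (r == rank) ? (uint32_t)(1000 + rank) : 0;
        MPI_Allreduce(MPI_IN_PLACE, &v, 1, MPI_UINT32_T, MPI_SUM, MPI_COMM_WORLD);
        if (rank == 0) std::printf("slot %d carries %u\n", r, v);
      }
      MPI_Finalize();
      return 0;
    }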


@@ -469,7 +469,7 @@ void CayleyFermion5D<Impl>::MooeeInternalAsm(const FermionField &psi, FermionFie
      }
      a0 = a0+incr;
      a1 = a1+incr;
-     a2 = a2+sizeof(Simd::scalar_type);
+     a2 = a2+sizeof(typename Simd::scalar_type);
    }}
    {
      int lexa = s1+LLs*site;

@@ -701,7 +701,7 @@ void CayleyFermion5D<Impl>::MooeeInternalZAsm(const FermionField &psi, FermionFi
      }
      a0 = a0+incr;
      a1 = a1+incr;
-     a2 = a2+sizeof(Simd::scalar_type);
+     a2 = a2+sizeof(typename Simd::scalar_type);
    }}
    {
      int lexa = s1+LLs*site;


@@ -475,7 +475,7 @@ namespace QCD {
      }
      a0 = a0 + incr;
      a1 = a1 + incr;
-     a2 = a2 + sizeof(Simd::scalar_type);
+     a2 = a2 + sizeof(typename Simd::scalar_type);
    }
  }


@@ -853,7 +853,7 @@ namespace QCD {
      a0 = a0 + incr;
      a1 = a1 + incr;
-     a2 = a2 + sizeof(Simd::scalar_type);
+     a2 = a2 + sizeof(typename Simd::scalar_type);
    }
  }
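The three `sizeof(typename Simd::scalar_type)` fixes above are instances of the same C++ rule: inside a template, `Simd::scalar_type` is a dependent name, and a conforming compiler requires `typename` before it can be used as a type (some compilers tolerate the omission; GCC does not). A minimal illustration (my own sketch, not Grid code):

    #include <cstddef>

    // 'Simd' is a template parameter, so 'Simd::scalar_type' is a
    // dependent name; without 'typename' the compiler cannot assume it
    // names a type and the sizeof expression fails to parse.
    template <class Simd>
    std::size_t scalar_bytes() {
      return sizeof(typename Simd::scalar_type);
    }

    struct MySimd { using scalar_type = double; };

    int main() {
      return scalar_bytes<MySimd>() == sizeof(double) ? 0 : 1;
    }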


@@ -556,7 +556,7 @@ namespace Optimization {
      v3 = _mm256_add_epi32(v1, v2);
      v1 = _mm256_hadd_epi32(v3, v3);
      v2 = _mm256_hadd_epi32(v1, v1);
-     u1 = _mm256_castsi256_si128(v2) // upper half
+     u1 = _mm256_castsi256_si128(v2);      // lower half of v2
      u2 = _mm256_extracti128_si256(v2, 1); // upper half of v2
      ret = _mm_add_epi32(u1, u2);
      return _mm_cvtsi128_si32(ret);

lib/util/Profiling.h (new file, 72 lines)

@@ -0,0 +1,72 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/util/Profiling.h
Copyright (C) 2018
Author: Guido Cossu <guido.cossu@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_PERF_PROFILING_H
#define GRID_PERF_PROFILING_H
#include <sstream>
#include <iostream>
#include <functional>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
struct System
{
static void profile(const std::string& name,std::function<void()> body) {
std::string filename = name.find(".data") == std::string::npos ? (name + ".data") : name;
// Launch profiler
pid_t pid;
std::stringstream s;
s << getpid();
pid = fork();
if (pid == 0) {
auto fd=open("/dev/null",O_RDWR);
dup2(fd,1);
dup2(fd,2);
exit(execl("/usr/bin/perf","perf","record","-o",filename.c_str(),"-p",s.str().c_str(),nullptr));
}
// Run body
body();
// Kill profiler
kill(pid,SIGINT);
waitpid(pid,nullptr,0);
}
static void profile(std::function<void()> body) {
profile("perf.data",body);
}
};
#endif // GRID_PERF_PROFILING_H
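For reference, a usage sketch of this new helper outside the benchmarks (assumes a Linux host with perf installed at /usr/bin/perf, as hard-coded above; the workload is a placeholder):

    #include "Grid/util/Profiling.h"

    // Attach 'perf record' to this process for the duration of the lambda;
    // the forked profiler is stopped with SIGINT once the body returns,
    // leaving my_kernel.data for 'perf report -i my_kernel.data'.
    void example() {
      System::profile("my_kernel", [&]() {
        for (volatile int i = 0; i < 100000000; ++i) {} // stand-in workload
      });
    }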