
Updated meson field benchmark for Dirac structures

paboyle 2018-07-26 09:09:29 +01:00
parent cce339deaf
commit 71e1006ba8


@@ -67,9 +67,10 @@ void sliceInnerProductMesonField(std::vector< std::vector<ComplexD> > &mat,
// sum across these down to scalars
// splitting the SIMD
std::vector<vector_type,alignedAllocator<vector_type> > lvSum(rd*Lblock*Rblock);
for (int r = 0; r < rd * Lblock * Rblock; r++){
parallel_for (int r = 0; r < rd * Lblock * Rblock; r++){
lvSum[r] = zero;
}
std::vector<scalar_type > lsSum(ld*Lblock*Rblock,scalar_type(0.0));
int e1= grid->_slice_nblock[orthogdim];
@@ -90,7 +91,6 @@ void sliceInnerProductMesonField(std::vector< std::vector<ComplexD> > &mat,
for(int j=0;j<Rblock;j++){
int idx = i+Lblock*j+Lblock*Rblock*r;
auto right = rhs[j]._odata[ss];
#if 1
vector_type vv = left()(0)(0) * right()(0)(0)
+ left()(0)(1) * right()(0)(1)
+ left()(0)(2) * right()(0)(2)
@@ -103,9 +103,6 @@ void sliceInnerProductMesonField(std::vector< std::vector<ComplexD> > &mat,
+ left()(3)(0) * right()(3)(0)
+ left()(3)(1) * right()(3)(1)
+ left()(3)(2) * right()(3)(2);
#else
vector_type vv = TensorRemove(innerProduct(left,right));
#endif
lvSum[idx]=lvSum[idx]+vv;
}
}
@@ -163,6 +160,297 @@ void sliceInnerProductMesonField(std::vector< std::vector<ComplexD> > &mat,
return;
}
template<class vobj>
void sliceInnerProductMesonFieldGamma(std::vector< std::vector<ComplexD> > &mat,
const std::vector<Lattice<vobj> > &lhs,
const std::vector<Lattice<vobj> > &rhs,
int orthogdim,
std::vector<Gamma::Algebra> gammas)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
int Lblock = lhs.size();
int Rblock = rhs.size();
GridBase *grid = lhs[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
int Ngamma = gammas.size();
// int Nmom = mom.size();
assert(mat.size()==Lblock*Rblock*Ngamma);
for(int t=0;t<mat.size();t++){
assert(mat[t].size()==Nt);
}
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
// will locally sum vectors first
// sum across these down to scalars
// splitting the SIMD
int MFrvol = rd*Lblock*Rblock*Ngamma;
int MFlvol = ld*Lblock*Rblock*Ngamma;
int MFfvol = fd*Lblock*Rblock*Ngamma;
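// rd/ld/fd are the SIMD-reduced, node-local and full global extents of the
// orthogonal direction, so these count accumulators per (gamma,i,j) at each stage.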
std::vector<vector_type,alignedAllocator<vector_type> > lvSum(MFrvol);
parallel_for (int r = 0; r < MFrvol; r++){
lvSum[r] = zero;
}
std::vector<scalar_type > lsSum(MFlvol);
parallel_for (int r = 0; r < MFlvol; r++){
lsSum[r]=scalar_type(0.0);
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
std::cout << GridLogMessage << " Entering first parallel loop "<<std::endl;
// Parallelising over the t-direction alone doesn't expose as much parallelism as KNL needs
parallel_for(int r=0;r<rd;r++){
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int ss= so+n*stride+b;
for(int i=0;i<Lblock;i++){
for(int mu=0;mu<Ngamma;mu++){
auto left = Gamma(gammas[mu])*conjugate(lhs[i]._odata[ss]);
for(int j=0;j<Rblock;j++){
auto right = rhs[j]._odata[ss];
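// Hand-unrolled spin-colour inner product: the explicit twelve-term sum
// replaces TensorRemove(innerProduct(left,right)), as in the hunk above.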
vector_type vv = left()(0)(0) * right()(0)(0)
+ left()(0)(1) * right()(0)(1)
+ left()(0)(2) * right()(0)(2)
+ left()(1)(0) * right()(1)(0)
+ left()(1)(1) * right()(1)(1)
+ left()(1)(2) * right()(1)(2)
+ left()(2)(0) * right()(2)(0)
+ left()(2)(1) * right()(2)(1)
+ left()(2)(2) * right()(2)(2)
+ left()(3)(0) * right()(3)(0)
+ left()(3)(1) * right()(3)(1)
+ left()(3)(2) * right()(3)(2);
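// Gamma index runs fastest: idx = mu + Ngamma*(i + Lblock*(j + Rblock*r))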
int idx = mu+i*Ngamma+Lblock*Ngamma*j+Ngamma*Lblock*Rblock*r;
lvSum[idx]=lvSum[idx]+vv;
}
}
}
}
}
}
std::cout << GridLogMessage << " Entering second parallel loop "<<std::endl;
// Sum across simd lanes in the plane, breaking out orthog dir.
parallel_for(int rt=0;rt<rd;rt++){
iScalar<vector_type> temp;
std::vector<int> icoor(Nd);
std::vector<iScalar<scalar_type> > extracted(Nsimd);
for(int i=0;i<Lblock;i++){
for(int j=0;j<Rblock;j++){
for(int mu=0;mu<Ngamma;mu++){
int ij_rdx = mu+i*Ngamma+Ngamma*Lblock*j+Ngamma*Lblock*Rblock*rt;
temp._internal = lvSum[ij_rdx];
extract(temp,extracted);
for(int idx=0;idx<Nsimd;idx++){
grid->iCoorFromIindex(icoor,idx);
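// SIMD lanes sit rd planes apart along orthogdim, so the lane's
// coordinate times rd plus rt recovers the local plane index.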
int ldx =rt+icoor[orthogdim]*rd;
int ij_ldx = mu+i*Ngamma+Ngamma*Lblock*j+Ngamma*Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx]._internal;
}
}}}
}
std::cout << GridLogMessage << " Entering non parallel loop "<<std::endl;
for(int t=0;t<fd;t++)
{
int pt = t / ld; // processor plane
int lt = t % ld;
for(int i=0;i<Lblock;i++){
for(int j=0;j<Rblock;j++){
if (pt == grid->_processor_coor[orthogdim]){
int ij_dx = i + Lblock * j + Lblock * Rblock * lt;
mat[i+j*Lblock][t] = lsSum[ij_dx];
}
else{
mat[i+j*Lblock][t] = scalar_type(0.0);
}
}}
}
std::cout << GridLogMessage << " Done "<<std::endl;
// defer sum over nodes.
return;
}
template<class vobj>
void sliceInnerProductMesonFieldGamma1(std::vector< std::vector<ComplexD> > &mat,
const std::vector<Lattice<vobj> > &lhs,
const std::vector<Lattice<vobj> > &rhs,
int orthogdim,
std::vector<Gamma::Algebra> gammas)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef iSpinMatrix<vector_type> SpinMatrix_v;
typedef iSpinMatrix<scalar_type> SpinMatrix_s;
int Lblock = lhs.size();
int Rblock = rhs.size();
GridBase *grid = lhs[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
int Ngamma = gammas.size();
// int Nmom = mom.size();
assert(mat.size()==Lblock*Rblock*Ngamma);
for(int t=0;t<mat.size();t++){
assert(mat[t].size()==Nt);
}
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
// will locally sum vectors first
// sum across these down to scalars
// splitting the SIMD
int MFrvol = rd*Lblock*Rblock;
int MFlvol = ld*Lblock*Rblock;
int MFfvol = fd*Lblock*Rblock*Ngamma; // Ngamma enters here because of the Dirac matrices
Vector<SpinMatrix_v > lvSum(MFrvol);
parallel_for (int r = 0; r < MFrvol; r++){
lvSum[r] = zero;
}
Vector<SpinMatrix_s > lsSum(MFlvol);
parallel_for (int r = 0; r < MFlvol; r++){
lsSum[r]=scalar_type(0.0);
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
std::cout << GridLogMessage << " Entering first parallel loop "<<std::endl;
// Parallelising over the t-direction alone doesn't expose as much parallelism as KNL needs
parallel_for(int r=0;r<rd;r++){
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int ss= so+n*stride+b;
for(int i=0;i<Lblock;i++){
auto left = conjugate(lhs[i]._odata[ss]);
for(int j=0;j<Rblock;j++){
SpinMatrix_v vv;
auto right = rhs[j]._odata[ss];
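// Build the colour-contracted spin outer product once per site pair;
// the gamma insertions are deferred to a single trace per time slice below,
// removing Ngamma from the site loop entirely.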
for(int s1=0;s1<Ns;s1++){
for(int s2=0;s2<Ns;s2++){
vv()(s1,s2)() = left()(s1)(0) * right()(s2)(0)
+ left()(s1)(1) * right()(s2)(1)
+ left()(s1)(2) * right()(s2)(2);
}}
int idx = i+Lblock*j+Lblock*Rblock*r;
lvSum[idx]=lvSum[idx]+vv;
}
}
}
}
}
std::cout << GridLogMessage << " Entering second parallel loop "<<std::endl;
// Sum across simd lanes in the plane, breaking out orthog dir.
parallel_for(int rt=0;rt<rd;rt++){
std::vector<int> icoor(Nd);
std::vector<SpinMatrix_s> extracted(Nsimd);
for(int i=0;i<Lblock;i++){
for(int j=0;j<Rblock;j++){
int ij_rdx = i+Lblock*j+Lblock*Rblock*rt;
extract(lvSum[ij_rdx],extracted);
for(int idx=0;idx<Nsimd;idx++){
grid->iCoorFromIindex(icoor,idx);
int ldx = rt+icoor[orthogdim]*rd;
int ij_ldx = i+Lblock*j+Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
}
}}
}
std::cout << GridLogMessage << " Entering third parallel loop "<<std::endl;
parallel_for(int t=0;t<fd;t++)
{
int pt = t / ld; // processor plane
int lt = t % ld;
for(int i=0;i<Lblock;i++){
for(int j=0;j<Rblock;j++){
if (pt == grid->_processor_coor[orthogdim]){
int ij_dx = i + Lblock * j + Lblock * Rblock * lt;
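// Apply each Dirac structure by tracing the accumulated spin matrix against it.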
for(int mu=0;mu<Ngamma;mu++){
mat[mu+i*Ngamma+j*Lblock*Ngamma][t] = trace(lsSum[ij_dx]*Gamma(gammas[mu]));
}
}
else{
for(int mu=0;mu<Ngamma;mu++){
mat[mu+i*Ngamma+j*Lblock*Ngamma][t] = scalar_type(0.0);
}
}
}}
}
std::cout << GridLogMessage << " Done "<<std::endl;
// defer sum over nodes.
return;
}
/*
template void sliceInnerProductMesonField<SpinColourVector>(std::vector< std::vector<ComplexD> > &mat,
const std::vector<Lattice<SpinColourVector> > &lhs,
@@ -170,6 +458,31 @@ template void sliceInnerProductMesonField<SpinColourVector>(std::vector< std::ve
int orthogdim) ;
*/
std::vector<Gamma::Algebra> Gmu4 ( {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT });
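// Gmu16 repeats the four gamma_mu four times: a stand-in workload for all sixteen Dirac structures.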
std::vector<Gamma::Algebra> Gmu16 ( {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
});
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
@@ -190,7 +503,7 @@ int main (int argc, char ** argv)
pRNG.SeedFixedIntegers(seeds);
const int Nm = 32; // number of all modes (high + low)
int Nm = atoi(argv[1]); // number of all modes (high + low)
std::vector<LatticeFermion> v(Nm,&Grid);
std::vector<LatticeFermion> w(Nm,&Grid);
@@ -205,13 +518,14 @@ int main (int argc, char ** argv)
std::vector<ComplexD> ip(nt);
std::vector<std::vector<ComplexD> > MesonFields (Nm*Nm);
std::vector<std::vector<ComplexD> > MesonFields4 (Nm*Nm*4);
std::vector<std::vector<ComplexD> > MesonFields16 (Nm*Nm*16);
std::vector<std::vector<ComplexD> > MesonFieldsRef(Nm*Nm);
for(int i=0;i<Nm;i++) {
for(int j=0;j<Nm;j++) {
MesonFields [i+j*Nm].resize(nt);
MesonFieldsRef[i+j*Nm].resize(nt);
}}
for(int i=0;i<MesonFields.size();i++ ) MesonFields [i].resize(nt);
for(int i=0;i<MesonFieldsRef.size();i++) MesonFieldsRef[i].resize(nt);
for(int i=0;i<MesonFields4.size();i++ ) MesonFields4 [i].resize(nt);
for(int i=0;i<MesonFields16.size();i++ ) MesonFields16 [i].resize(nt);
GridLogMessage.TimingMode(1);
@@ -225,15 +539,54 @@ int main (int argc, char ** argv)
}
}}
double t1 = usecond();
std::cout<<GridLogMessage << "Done "<< (t1-t0) <<" usecond " <<std::endl;
std::cout<<GridLogMessage << "Done "<< flops/(t1-t0) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t1-t0) <<" MB/s " <<std::endl;
std::cout<<GridLogMessage << "Running loop with new code for Nt="<<nt<<std::endl;
double t2 = usecond();
t0 = usecond();
sliceInnerProductMesonField(MesonFields,w,v,Tp);
double t3 = usecond();
std::cout<<GridLogMessage << "Done "<< flops/(t3-t2) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t3-t2) <<" MB/s " <<std::endl;
t1 = usecond();
std::cout<<GridLogMessage << "Done "<< (t1-t0) <<" usecond " <<std::endl;
std::cout<<GridLogMessage << "Done "<< flops/(t1-t0) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t1-t0) <<" MB/s " <<std::endl;
std::cout<<GridLogMessage << "Running loop with Four gammas code for Nt="<<nt<<std::endl;
flops = vol * (11.0 * 8.0 + 6.0) * Nm*Nm*4;
byte = vol * (12.0 * sizeof(Complex) ) * Nm*Nm
+ vol * ( 2.0 * sizeof(Complex) ) * Nm*Nm* 4;
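// 94 flops per site per (i,j,gamma): 11 complex multiply-adds (8 flops each) plus one complex multiply (6 flops).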
t0 = usecond();
sliceInnerProductMesonFieldGamma(MesonFields4,w,v,Tp,Gmu4);
t1 = usecond();
std::cout<<GridLogMessage << "Done "<< (t1-t0) <<" usecond " <<std::endl;
std::cout<<GridLogMessage << "Done "<< flops/(t1-t0) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t1-t0) <<" MB/s " <<std::endl;
std::cout<<GridLogMessage << "Running loop with Sixteen gammas code for Nt="<<nt<<std::endl;
flops = vol * (11.0 * 8.0 + 6.0) * Nm*Nm*16;
byte = vol * (12.0 * sizeof(Complex) ) * Nm*Nm
+ vol * ( 2.0 * sizeof(Complex) ) * Nm*Nm* 16;
t0 = usecond();
sliceInnerProductMesonFieldGamma(MesonFields16,w,v,Tp,Gmu16);
t1 = usecond();
std::cout<<GridLogMessage << "Done "<< (t1-t0) <<" usecond " <<std::endl;
std::cout<<GridLogMessage << "Done "<< flops/(t1-t0) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t1-t0) <<" MB/s " <<std::endl;
std::cout<<GridLogMessage << "Running loop with Sixteen gammas code1 for Nt="<<nt<<std::endl;
flops = vol * ( 2 * 8.0 + 6.0) * Nm*Nm*16;
byte = vol * (12.0 * sizeof(Complex) ) * Nm*Nm
+ vol * ( 2.0 * sizeof(Complex) ) * Nm*Nm* 16;
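// Now only 22 flops per spin pair: the three-colour contraction costs two complex multiply-adds plus one complex multiply.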
t0 = usecond();
sliceInnerProductMesonFieldGamma1(MesonFields16,w,v,Tp,Gmu16);
t1 = usecond();
std::cout<<GridLogMessage << "Done "<< (t1-t0) <<" usecond " <<std::endl;
std::cout<<GridLogMessage << "Done "<< flops/(t1-t0) <<" mflops " <<std::endl;
std::cout<<GridLogMessage << "Done "<< byte /(t1-t0) <<" MB/s " <<std::endl;
RealD err = 0;
ComplexD diff;