Mirror of https://github.com/paboyle/Grid.git, synced 2024-11-10 07:55:35 +00:00
Staggered multinode block CG debugged. The bug was a missing global sum.

Code stalls and resumes on KNL at Cambridge. Curious: CG iterations take 23 ms each, then there are 3200 ms pauses. Mean bandwidth reports as 200 MB/s, and comms are dominant in the report; however, the time behaviour suggests the traffic is *bursty*. Could it be swap to disk?
This commit is contained in:
parent 383ca7d392
commit d9cd4f0273
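Why the global sum matters: sliceInnerProductMatrix accumulates the block inner products only over the lattice sites owned by the local rank, so on a single node the result is already complete and the bug is invisible; multinode, every entry of the Nblock x Nblock matrix still needs a sum over ranks, which the new GlobalSum loop in the diff below supplies. A minimal sketch of the same reduction in raw MPI terms (illustrative only: Grid's GlobalSum wraps the communicator internally, and global_sum_matrix is a made-up helper name):

#include <mpi.h>
#include <complex>
#include <vector>

// Illustrative, not Grid code: each rank holds the partial Nblock x Nblock
// inner-product matrix accumulated from its local sub-volume; a single
// allreduce completes the global sum.
void global_sum_matrix(std::vector<std::complex<double>> &mat)
{
  MPI_Allreduce(MPI_IN_PLACE, mat.data(), (int)mat.size(),
                MPI_CXX_DOUBLE_COMPLEX, MPI_SUM, MPI_COMM_WORLD);
}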
@@ -199,7 +199,12 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
   Linop.HermOp(X, AD);
   tmp = B - AD;
+  //std::cout << GridLogMessage << " initial tmp " << norm2(tmp)<< std::endl;
   ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
+  //std::cout << GridLogMessage << " initial Q " << norm2(Q)<< std::endl;
+  //std::cout << GridLogMessage << " m_rr " << m_rr<<std::endl;
+  //std::cout << GridLogMessage << " m_C " << m_C<<std::endl;
+  //std::cout << GridLogMessage << " m_Cinv " << m_Cinv<<std::endl;
   D=Q;

   std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl;
@@ -221,13 +226,15 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
   MatrixTimer.Start();
   Linop.HermOp(D, Z);
   MatrixTimer.Stop();
+  //std::cout << GridLogMessage << " norm2 Z " <<norm2(Z)<<std::endl;

   //4. M = [D^dag Z]^{-1}
   sliceInnerTimer.Start();
   sliceInnerProductMatrix(m_DZ,D,Z,Orthog);
   sliceInnerTimer.Stop();
   m_M = m_DZ.inverse();
+  //std::cout << GridLogMessage << " m_DZ " <<m_DZ<<std::endl;

   //5. X = X + D MC
   m_tmp = m_M * m_C;
   sliceMaddTimer.Start();
@@ -369,6 +369,7 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice
   }
 };

+/*
 inline GridBase *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Orthog)
 {
   int NN = BlockSolverGrid->_ndimension;
@@ -387,6 +388,7 @@ inline GridBase *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Orthog)
   }
   return (GridBase *)new GridCartesian(latt_phys,simd_phys,mpi_phys);
 }
+*/

 template<class vobj>
 static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,const Lattice<vobj> &Y,int Orthog,RealD scale=1.0)
@@ -398,14 +400,15 @@ static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice
   int Nblock = X._grid->GlobalDimensions()[Orthog];

   GridBase *FullGrid = X._grid;
-  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
+  // GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);

-  Lattice<vobj> Xslice(SliceGrid);
-  Lattice<vobj> Rslice(SliceGrid);
+  // Lattice<vobj> Xslice(SliceGrid);
+  // Lattice<vobj> Rslice(SliceGrid);

   assert( FullGrid->_simd_layout[Orthog]==1);
   int nh = FullGrid->_ndimension;
-  int nl = SliceGrid->_ndimension;
+  // int nl = SliceGrid->_ndimension;
+  int nl = nh-1;

   //FIXME package in a convenient iterator
   //Should loop over a plane orthogonal to direction "Orthog"
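The hunk above (and the two that follow) retires the temporary slice grid: SliceGrid existed only to supply nl, the dimensionality of the slice, and since makeSubSliceGrid drops exactly the Orthog dimension, nl equals nh-1 by construction. A sketch of the equivalence being relied on (my reading of the commented-out code, not a statement from the commit):

// makeSubSliceGrid() builds a grid with every dimension of FullGrid except
// Orthog, so its dimensionality is one less than the full grid's.
int nh = FullGrid->_ndimension;  // full-grid dimensionality
int nl = nh - 1;                 // slice dimensionality, no SliceGrid needed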
@@ -448,14 +451,14 @@ static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<
   int Nblock = X._grid->GlobalDimensions()[Orthog];

   GridBase *FullGrid = X._grid;
-  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
-
-  Lattice<vobj> Xslice(SliceGrid);
-  Lattice<vobj> Rslice(SliceGrid);
+  // GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
+  // Lattice<vobj> Xslice(SliceGrid);
+  // Lattice<vobj> Rslice(SliceGrid);

   assert( FullGrid->_simd_layout[Orthog]==1);
   int nh = FullGrid->_ndimension;
-  int nl = SliceGrid->_ndimension;
+  // int nl = SliceGrid->_ndimension;
+  int nl=1;

   //FIXME package in a convenient iterator
   //Should loop over a plane orthogonal to direction "Orthog"
@@ -498,18 +501,19 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>
   typedef typename vobj::vector_type vector_type;

   GridBase *FullGrid = lhs._grid;
-  GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
+  // GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);

   int Nblock = FullGrid->GlobalDimensions()[Orthog];

-  Lattice<vobj> Lslice(SliceGrid);
-  Lattice<vobj> Rslice(SliceGrid);
+  // Lattice<vobj> Lslice(SliceGrid);
+  // Lattice<vobj> Rslice(SliceGrid);

   mat = Eigen::MatrixXcd::Zero(Nblock,Nblock);

   assert( FullGrid->_simd_layout[Orthog]==1);
   int nh = FullGrid->_ndimension;
-  int nl = SliceGrid->_ndimension;
+  // int nl = SliceGrid->_ndimension;
+  int nl = nh-1;

   //FIXME package in a convenient iterator
   //Should loop over a plane orthogonal to direction "Orthog"
@@ -550,6 +554,14 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>
       mat += mat_thread;
     }
   }
+
+  for(int i=0;i<Nblock;i++){
+  for(int j=0;j<Nblock;j++){
+    ComplexD sum = mat(i,j);
+    FullGrid->GlobalSum(sum);
+    mat(i,j)=sum;
+  }}
+
   return;
 }
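This is the headline fix: the reduction is performed one matrix element at a time, i.e. Nblock^2 small, latency-bound GlobalSum calls. A hedged alternative, if Grid's communicator exposes a vector reduction with the shape assumed below, would be a single call over the flat Eigen storage:

// Assumed alternative (signature not verified here): one reduction over the
// whole buffer instead of Nblock^2 scalar GlobalSum calls.
FullGrid->GlobalSumVector((ComplexD *)mat.data(), Nblock*Nblock);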
@@ -230,8 +230,15 @@ void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
 {
   Compressor compressor;
   int LLs = in._grid->_rdimensions[0];

+
+
+  DhopTotalTime -= usecond();
+  DhopCommTime  -= usecond();
   st.HaloExchange(in,compressor);
+  DhopCommTime  += usecond();
+
+  DhopComputeTime -= usecond();
   // Dhop takes the 4d grid from U, and makes a 5d index for fermion
   if (dag == DaggerYes) {
     parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
@@ -244,12 +251,15 @@ void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
       Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out);
     }
   }
+  DhopComputeTime += usecond();
+  DhopTotalTime   += usecond();
 }


 template<class Impl>
 void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
 {
+  DhopCalls+=1;
   conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
   conformable(in._grid,out._grid); // drops the cb check

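The instrumentation in the two hunks above uses the subtract-at-entry/add-at-exit idiom with Grid's usecond() wall clock, so each double counter accumulates total elapsed microseconds across every call without ever storing a start time. A self-contained sketch of the idiom (usecond_sketch is a stand-in for Grid's usecond()):

#include <chrono>

// Stand-in for Grid's usecond(): wall-clock time in microseconds.
static double usecond_sketch(void)
{
  using namespace std::chrono;
  return duration_cast<microseconds>(
      steady_clock::now().time_since_epoch()).count();
}

static double CommTime = 0;  // accumulates across calls, like DhopCommTime

static void timed_halo_exchange(void (*halo)(void))
{
  CommTime -= usecond_sketch();  // subtract the start time...
  halo();
  CommTime += usecond_sketch();  // ...add the end time: net += elapsed
}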
@@ -261,6 +271,7 @@ void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
 template<class Impl>
 void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
 {
+  DhopCalls+=1;
   conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
   conformable(in._grid,out._grid); // drops the cb check
@@ -272,6 +283,7 @@ void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
 template<class Impl>
 void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
 {
+  DhopCalls+=2;
   conformable(in._grid,FermionGrid()); // verifies full grid
   conformable(in._grid,out._grid);
@@ -280,6 +292,54 @@ void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField
   DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag);
 }

+template<class Impl>
+void ImprovedStaggeredFermion5D<Impl>::Report(void)
+{
+  std::vector<int> latt = GridDefaultLatt();
+  RealD volume = Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
+  RealD NP = _FourDimGrid->_Nprocessors;
+  RealD NN = _FourDimGrid->NodeCount();
+
+  std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
+
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D Number of DhopEO Calls : "
+            << DhopCalls << std::endl;
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D TotalTime /Calls       : "
+            << DhopTotalTime / DhopCalls << " us" << std::endl;
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D CommTime /Calls        : "
+            << DhopCommTime / DhopCalls << " us" << std::endl;
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D ComputeTime/Calls      : "
+            << DhopComputeTime / DhopCalls << " us" << std::endl;
+
+  // Average the compute time
+  _FourDimGrid->GlobalSum(DhopComputeTime);
+  DhopComputeTime/=NP;
+
+  RealD mflops = 1154*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
+  std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
+  std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;
+  std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NN << std::endl;
+
+  RealD Fullmflops = 1154*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
+  std::cout << GridLogMessage << "Average mflops/s per call (full)         : " << Fullmflops << std::endl;
+  std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
+  std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;
+
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D Stencil"    <<std::endl; Stencil.Report();
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D StencilEven"<<std::endl; StencilEven.Report();
+  std::cout << GridLogMessage << "ImprovedStaggeredFermion5D StencilOdd" <<std::endl; StencilOdd.Report();
+}
+template<class Impl>
+void ImprovedStaggeredFermion5D<Impl>::ZeroCounters(void)
+{
+  DhopCalls       = 0;
+  DhopTotalTime   = 0;
+  DhopCommTime    = 0;
+  DhopComputeTime = 0;
+  Stencil.ZeroCounters();
+  StencilEven.ZeroCounters();
+  StencilOdd.ZeroCounters();
+}
+
 /////////////////////////////////////////////////////////////////////////
 // Implement the general interface. Here we use SAME mass on all slices
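Report() turns the counters into flop rates: 1154 is the per-site flop count the authors assume for the improved staggered Dhop, the trailing /2 compensates for red-black counting (DhopCalls is bumped by 1 per half-checkerboard application, each touching half the volume), and because the times are in microseconds the ratio lands directly in Mflop/s. A worked instance with hypothetical numbers (all made up for illustration):

#include <cstdio>

int main(void)
{
  // Hypothetical inputs, for illustration only.
  double latt[4] = {16,16,16,16};                 // 4d lattice
  double Ls = 16;                                 // fifth dimension
  double volume = Ls;
  for (int mu=0; mu<4; mu++) volume *= latt[mu];  // 16^4 * 16 sites

  double DhopCalls = 1000;
  double DhopComputeTime = 5.0e6;                 // microseconds

  // Same formula as Report(): 1154 flops/site, /2 for red-black counting.
  double mflops = 1154*volume*DhopCalls/DhopComputeTime/2;
  printf("Average mflops/s per call : %g\n", mflops); // ~1.2e5 with these inputs
  return 0;
}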
@@ -55,6 +55,16 @@ namespace QCD {
   FermionField _tmp;
   FermionField &tmp(void) { return _tmp; }

+  ////////////////////////////////////////
+  // Performance monitoring
+  ////////////////////////////////////////
+  void Report(void);
+  void ZeroCounters(void);
+  double DhopTotalTime;
+  double DhopCalls;
+  double DhopCommTime;
+  double DhopComputeTime;
+
   ///////////////////////////////////////////////////////////////
   // Implement the abstract base
   ///////////////////////////////////////////////////////////////
@@ -75,7 +75,7 @@ int main (int argc, char ** argv)
   LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);

   RealD mass=0.003;
   ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass);
   MdagMLinearOperator<ImprovedStaggeredFermion5DR,FermionField> HermOp(Ds);

   ConjugateGradient<FermionField> CG(1.0e-8,10000);
@@ -99,21 +99,27 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage << " Calling 5d CG for "<<Ls <<" right hand sides" <<std::endl;
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
   result=zero;
+  Ds.ZeroCounters();
   CG(HermOp,src,result);
+  Ds.Report();
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;

   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
   std::cout << GridLogMessage << " Calling multiRHS CG for "<<Ls <<" right hand sides" <<std::endl;
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
   result=zero;
+  Ds.ZeroCounters();
   mCG(HermOp,src,result);
+  Ds.Report();
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;

   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
   std::cout << GridLogMessage << " Calling Block CG for "<<Ls <<" right hand sides" <<std::endl;
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
   result=zero;
+  Ds.ZeroCounters();
   BCGrQ(HermOp,src,result);
+  Ds.Report();
   std::cout << GridLogMessage << "************************************************************************ "<<std::endl;

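The driver change is the same three-line bracket around each of the three solvers, so each report covers one solver rather than the cumulative run:

// Bracketing pattern used for each solver in the test above:
Ds.ZeroCounters();          // reset the Dhop timers and call counts
BCGrQ(HermOp,src,result);   // run the solver under test
Ds.Report();                // per-solver comm/compute breakdown and mflops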