mirror of https://github.com/paboyle/Grid.git (synced 2025-11-04 05:54:32 +00:00)

Updated to compile and run fast on CUDA
@@ -111,6 +111,7 @@ public:
    default:
      GRID_ASSERT(0);
    }
    return CUBLAS_COMPUTE_32F_FAST_16F;
  }
#endif
  // Force construct once

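The hunk above picks a cublasComputeType_t for the GEMM path, asserting on unknown input and defaulting to FP32 with FP16 tensor-core acceleration. Purely as an illustration of that pattern (the GridPrecision enum and toCublasCompute name are invented here; only the cuBLAS enumerators are real, and the CUDA toolkit headers are assumed to be available):

// Hypothetical sketch, not Grid's code: map a requested precision onto a
// cuBLAS compute type, asserting on anything unrecognised.
#include <cublas_v2.h>
#include <cassert>

enum class GridPrecision { Double, Single, SingleFast16 };

inline cublasComputeType_t toCublasCompute(GridPrecision p) {
  switch (p) {
  case GridPrecision::Double:       return CUBLAS_COMPUTE_64F;
  case GridPrecision::Single:       return CUBLAS_COMPUTE_32F;
  case GridPrecision::SingleFast16: return CUBLAS_COMPUTE_32F_FAST_16F; // FP32 via FP16 tensor cores
  default: assert(0);
  }
  return CUBLAS_COMPUTE_32F_FAST_16F; // default fallback, mirroring the hunk
}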
@@ -228,6 +228,11 @@ public:
  //
  void Project(Field &data,std::vector< typename Field::scalar_object > & projected_gdata)
  {
    double t_import=0;
    double t_export=0;
    double t_gemm  =0;
    double t_allreduce=0;
    t_import-=usecond();
    this->ImportVector(data);

    std::vector< typename Field::scalar_object > projected_planes;
@@ -243,12 +248,14 @@ public:
    acceleratorPut(Vd[0],Vh);
    acceleratorPut(Md[0],Mh);
    acceleratorPut(Pd[0],Ph);
    t_import+=usecond();

    GridBLAS BLAS;

    /////////////////////////////////////////
    // P_im = VMmx . Vxi
    /////////////////////////////////////////
    t_gemm-=usecond();
    BLAS.gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
                     words*nt,nmom,nxyz,
                     scalar(1.0),
@@ -257,8 +264,11 @@ public:
                     scalar(0.0),  // wipe out result
                     Pd);
    BLAS.synchronise();
    t_gemm+=usecond();

    t_export-=usecond();
    ExportMomentumProjection(projected_planes); // resizes
    t_export+=usecond();

    /////////////////////////////////
    // Reduce across MPI ranks
@@ -275,7 +285,15 @@ public:
      int st = grid->LocalStarts()[nd-1];
      projected_gdata[t+st + gt*m] = projected_planes[t+lt*m];
    }}
    t_allreduce-=usecond();
    grid->GlobalSumVector((scalar *)&projected_gdata[0],gt*nmom*words);
    t_allreduce+=usecond();

    std::cout << GridLogPerformance<<" MomentumProject t_import  "<<t_import<<"us"<<std::endl;
    std::cout << GridLogPerformance<<" MomentumProject t_export  "<<t_export<<"us"<<std::endl;
    std::cout << GridLogPerformance<<" MomentumProject t_gemm    "<<t_gemm<<"us"<<std::endl;
    std::cout << GridLogPerformance<<" MomentumProject t_reduce  "<<t_allreduce<<"us"<<std::endl;

  }
};
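Project() times each phase (import, batched GEMM, export, allreduce) with the stopwatch idiom used throughout this commit: subtract the clock before the work, add it afterwards, and the accumulator holds the elapsed microseconds. A self-contained sketch of the idiom, using a std::chrono stand-in for Grid's usecond() (which may be implemented differently):

// Stand-alone sketch of the "t -= usecond(); ...; t += usecond();" pattern.
#include <chrono>
#include <cstdio>
#include <thread>

static double usecond() {
  using namespace std::chrono;
  return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

int main() {
  double t_work = 0;
  t_work -= usecond();                                       // start: subtract current time
  std::this_thread::sleep_for(std::chrono::milliseconds(5)); // the timed section
  t_work += usecond();                                       // stop: add current time
  std::printf("t_work %.0f us\n", t_work);                   // elapsed microseconds
  return 0;
}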

@@ -10,7 +10,7 @@ class LatticeBase {};
/////////////////////////////////////////////////////////////////////////////////////////
void accelerator_inline conformable(GridBase *lhs,GridBase *rhs)
{
  GRID_ASSERT(lhs == rhs);
  assert(lhs == rhs);
}

////////////////////////////////////////////////////////////////////////////

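Several hunks in this commit swap between GRID_ASSERT and plain assert inside accelerator_inline functions. As a purely illustrative sketch (this is not Grid's GRID_ASSERT definition), an assertion macro can be kept out of device compilation passes like this:

// Hypothetical DEMO_ASSERT, shown only to illustrate why accelerator code may
// wrap assertions in a macro; Grid's GRID_ASSERT may be defined differently.
#include <cassert>

#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#define DEMO_ASSERT(expr) ((void)0)    // elided inside GPU kernels
#else
#define DEMO_ASSERT(expr) assert(expr) // ordinary host-side assert
#endif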
@@ -222,7 +222,7 @@ public:

  #if 0
  static accelerator_inline typename SiteCloverTriangle::vector_type triangle_elem(const SiteCloverTriangle& triangle, int block, int i, int j) {
    GRID_ASSERT(i != j);
    assert(i != j);
    if(i < j) {
      return triangle()(block)(triangle_index(i, j));
    } else { // i > j
@@ -232,7 +232,7 @@ public:
  #else
  template<typename vobj>
  static accelerator_inline vobj triangle_elem(const iImplCloverTriangle<vobj>& triangle, int block, int i, int j) {
    GRID_ASSERT(i != j);
    assert(i != j);
    if(i < j) {
      return triangle()(block)(triangle_index(i, j));
    } else { // i > j

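triangle_elem() above reads the off-diagonal clover elements from a packed triangular store via triangle_index(i, j). A generic sketch of such a packing, using the standard strictly-upper-triangular row-major formula (an assumption for illustration; Grid's actual triangle_index may differ):

// Packed strictly-upper-triangular indexing: for 0 <= i < j < N the pair (i,j)
// maps to a unique slot in [0, N*(N-1)/2). Illustration only.
#include <cassert>
#include <cstdio>

constexpr int N = 6; // e.g. 6 internal degrees of freedom per clover block

inline int triangle_index_demo(int i, int j) {
  assert(i != j);
  if (i > j) { int t = i; i = j; j = t; } // symmetric access: store only i < j
  return i * N - i * (i + 1) / 2 + (j - i - 1);
}

int main() {
  int count = 0;
  for (int i = 0; i < N; i++)
    for (int j = i + 1; j < N; j++)
      assert(triangle_index_demo(i, j) == count++); // dense, collision-free
  std::printf("%d packed elements for N=%d\n", count, N); // N*(N-1)/2 = 15
  return 0;
}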
@@ -62,7 +62,7 @@ public:
                         const FermionField *rhs_vj,
                         std::vector<Gamma::Algebra> gammas,
                         const std::vector<ComplexField > &mom,
                         int orthogdim, double *t_kernel = nullptr, double *t_gsum = nullptr);
                         int orthogdim);

  template <typename TensorType> // output: rank 5 tensor, e.g. Eigen::Tensor<ComplexD, 5>
  static void AslashField(TensorType &mat,
@@ -70,7 +70,7 @@ public:
        const FermionField *rhs_vj,
        const std::vector<ComplexField> &emB0,
        const std::vector<ComplexField> &emB1,
        int orthogdim, double *t_kernel = nullptr, double *t_gsum = nullptr);
        int orthogdim);

  template <typename TensorType>
  typename std::enable_if<(std::is_same<Eigen::Tensor<ComplexD,3>, TensorType>::value ||
@@ -136,7 +136,7 @@ typedef iVecComplex<vComplex >             vVecComplex;
typedef Lattice<vVecComplex>               LatticeVecComplex;

#define A2A_GPU_KERNELS
#ifdef A2A_GPU_KERNELS

template <class FImpl>
template <typename TensorType>
void A2Autils<FImpl>::MesonField(TensorType &mat,
@@ -144,7 +144,7 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,
                                 const FermionField *rhs_vj,
                                 std::vector<Gamma::Algebra> gammas,
                                 const std::vector<ComplexField > &mom,
                                 int orthogdim, double *t_kernel, double *t_gsum)
                                 int orthogdim)
{
  const int block=A2Ablocking;
  typedef typename FImpl::SiteSpinor vobj;
@@ -173,24 +173,34 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,

  std::cout <<GridLogMessage<< "A2A Meson Field"<<std::endl;
  MomentumProject<LatticeVecSpinMatrix,ComplexField> MP;
  std::cout <<GridLogMessage<< "Momentum project constructed"<<std::endl;
  MP.Allocate(Nmom,grid);
  std::cout <<GridLogMessage<< "Momentum project allocated"<<std::endl;
  MP.ImportMomenta(mom);
  std::cout <<GridLogMessage<< "Momentum project momenta imported"<<std::endl;

  double t_view, t_gamma, t_kernel, t_momproj;
  t_view=0;
  t_gamma=0;
  t_kernel=0;
  t_momproj=0;

  std::vector<VecSpinMatrix> sliced;
  for(int i=0;i<Lblock;i++){
    t_view -= usecond();
    autoView(SpinMat_v,SpinMat,AcceleratorWrite);
    autoView(lhs_v,lhs_wi[i],AcceleratorRead);
    t_view += usecond();
    for(int jo=0;jo<Rblock;jo+=block){
      for(int j=jo;j<MIN(Rblock,jo+block);j++){
        int jj=j%block;
        t_view -= usecond();
        autoView(rhs_v,rhs_vj[j],AcceleratorRead); // Create a vector of views
        t_view += usecond();
        //////////////////////////////////////////
        // Should write a SpinOuterColorTrace
        //////////////////////////////////////////

        t_kernel -= usecond();
        accelerator_for(ss,grid->oSites(),(size_t)Nsimd,{
            auto left = conjugate(lhs_v(ss));
            auto right = rhs_v(ss);
@@ -203,48 +213,38 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,
              }}
            coalescedWrite(SpinMat_v[ss],vv);
          });
        t_kernel += usecond();

      }// j within block
      // After getting the sitewise product do the mom phase loop
#if 1
      std::cout <<GridLogMessage<< "A2A contract "<<std::endl;

      assert(orthogdim==Nd-1);
      t_momproj -= usecond();
      MP.Project(SpinMat,sliced);
      std::cout <<GridLogMessage<< "A2A MP Project "<<std::endl;
      for(int m=0;m<Nmom;m++){
        for(int t=0;t<Nt;t++){
      t_momproj +=  usecond();

      t_gamma -=  usecond();
      thread_for2d( m, Nmom,t,Nt,{
          //      for(int m=0;m<Nmom;m++)
          //        for(int t=0;t<Nt;t++)
          int idx = t+m*Nt;
          for(int j=jo;j<MIN(Rblock,jo+block);j++){
            int jj=j%block;
            auto tmp = peekIndex<LorentzIndex>(sliced[idx],jj);
            for(int mu=0;mu<Ngamma;mu++){
              auto trSG = trace(tmp*Gamma(gammas[mu]));
              mat(m,mu,t,i,j) = trSG()();
              mat((long)m,mu,(long)t,i,j) = trSG()();
            }
          }
        }
      }
#else
      for(int m=0;m<Nmom;m++){

        MomSpinMat   = SpinMat * mom[m];

        sliceSum(MomSpinMat,sliced,orthogdim);

        for(int mu=0;mu<Ngamma;mu++){
          for(int t=0;t<sliced.size();t++){
            for(int j=jo;j<MIN(Rblock,jo+block);j++){
              int jj=j%block;
              auto tmp = peekIndex<LorentzIndex>(sliced[t],jj);
              auto trSG = trace(tmp*Gamma(gammas[mu]));
              mat(m,mu,t,i,j) = trSG()();
            }
          }
        }
      }
#endif
      });
      t_gamma +=  usecond();
    }//jo
  }
  std::cout << GridLogMessage<<" A2A::MesonField t_view    "<<t_view/1e6<<"s"<<std::endl;
  std::cout << GridLogMessage<<" A2A::MesonField t_momproj "<<t_momproj/1e6<<"s"<<std::endl;
  std::cout << GridLogMessage<<" A2A::MesonField t_kernel  "<<t_kernel/1e6<<"s"<<std::endl;
  std::cout << GridLogMessage<<" A2A::MesonField t_gamma   "<<t_gamma/1e6<<"s"<<std::endl;

}
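The accelerator kernel above builds, per site, the spin matrix vv(s1,s2) = sum over colour c of conj(w(s2,c)) * v(s1,c), i.e. a colour trace of the spin outer product (the comment notes a dedicated SpinOuterColorTrace would be cleaner). A scalar transcription of that contraction on plain arrays, purely for illustration (std::complex stands in for Grid's SIMD spinor types):

// Scalar illustration of the per-site contraction in the kernel above:
// vv[s1][s2] = sum_c conj(left[s2][c]) * right[s1][c], with left = conj(w).
#include <array>
#include <complex>

using Cplx = std::complex<double>;
constexpr int Ns = 4, Nc = 3;

using Spinor  = std::array<std::array<Cplx, Nc>, Ns>; // [spin][colour]
using SpinMat = std::array<std::array<Cplx, Ns>, Ns>; // [s1][s2]

SpinMat spinOuterColorTrace(const Spinor &w, const Spinor &v) {
  SpinMat vv{};
  for (int s1 = 0; s1 < Ns; s1++)
    for (int s2 = 0; s2 < Ns; s2++)
      for (int c = 0; c < Nc; c++)
        vv[s1][s2] += std::conj(w[s2][c]) * v[s1][c]; // left-hand spinor is conjugated
  return vv;
}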

// "A-slash" field w_i(x)^dag * i * A_mu * gamma_mu * v_j(x)
@@ -268,7 +268,7 @@ void A2Autils<FImpl>::AslashField(TensorType &mat,
                                  const FermionField *rhs_vj,
                                  const std::vector<ComplexField> &emB0,
                                  const std::vector<ComplexField> &emB1,
                                  int orthogdim, double *t_kernel, double *t_gsum)
                                  int orthogdim)
{
  const int block=A2Ablocking;
  typedef typename FImpl::SiteSpinor vobj;
@@ -355,354 +355,6 @@ void A2Autils<FImpl>::AslashField(TensorType &mat,
  }
}

#else
template <class FImpl>
template <typename TensorType>
void A2Autils<FImpl>::MesonField(TensorType &mat,
                                 const FermionField *lhs_wi,
                                 const FermionField *rhs_vj,
                                 std::vector<Gamma::Algebra> gammas,
                                 const std::vector<ComplexField > &mom,
                                 int orthogdim, double *t_kernel, double *t_gsum)
{
  typedef typename FImpl::SiteSpinor vobj;

  typedef typename vobj::scalar_object sobj;
  typedef typename vobj::scalar_type scalar_type;
  typedef typename vobj::vector_type vector_type;

  typedef iSpinMatrix<vector_type> SpinMatrix_v;
  typedef iSpinMatrix<scalar_type> SpinMatrix_s;

  int Lblock = mat.dimension(3);
  int Rblock = mat.dimension(4);

  GridBase *grid = lhs_wi[0].Grid();

  const int    Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  int Nt     = grid->GlobalDimensions()[orthogdim];
  int Ngamma = gammas.size();
  int Nmom   = mom.size();

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  // will locally sum vectors first
  // sum across these down to scalars
  // splitting the SIMD
  int MFrvol = rd*Lblock*Rblock*Nmom;
  int MFlvol = ld*Lblock*Rblock*Nmom;

  std::vector<SpinMatrix_v > lvSum(MFrvol);
  for(int r=0;r<MFrvol;r++){
    lvSum[r] = Zero();
  }

  std::vector<SpinMatrix_s > lsSum(MFlvol);
  for(int r=0;r<MFlvol;r++){
    lsSum[r]=scalar_type(0.0);
  }

  int e1=    grid->_slice_nblock[orthogdim];
  int e2=    grid->_slice_block [orthogdim];
  int stride=grid->_slice_stride[orthogdim];

  // potentially wasting cores here if local time extent too small
  if (t_kernel) *t_kernel = -usecond();
  for(int r=0;r<rd;r++) {

    int so=r*grid->_ostride[orthogdim]; // base offset for start of plane

    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){

        int ss= so+n*stride+b;

        for(int i=0;i<Lblock;i++){

          // Recreating the view is potentially expensive outside of UVM mode
          autoView(lhs_v,lhs_wi[i],CpuRead);
          auto left = conjugate(lhs_v[ss]);
          for(int j=0;j<Rblock;j++){

            SpinMatrix_v vv;
            // Recreating the view is potentially expensive outside of UVM mode
            autoView(rhs_v,rhs_vj[j],CpuRead);
            auto right = rhs_v[ss];
            for(int s1=0;s1<Ns;s1++){
            for(int s2=0;s2<Ns;s2++){
              vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
                +             left()(s2)(1) * right()(s1)(1)
                +             left()(s2)(2) * right()(s1)(2);
            }}

            // After getting the sitewise product do the mom phase loop
            int base = Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*r;
            for ( int m=0;m<Nmom;m++){
              int idx = m+base;
              autoView(mom_v,mom[m],CpuRead);
              auto phase = mom_v[ss];
              mac(&lvSum[idx],&vv,&phase);
            }
          }
        }
      }
    }
  };

  // Sum across simd lanes in the plane, breaking out orthog dir.
  for(int rt=0;rt<rd;rt++){

    Coordinate icoor(Nd);
    ExtractBuffer<SpinMatrix_s> extracted(Nsimd);

    for(int i=0;i<Lblock;i++){
    for(int j=0;j<Rblock;j++){
    for(int m=0;m<Nmom;m++){

      int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;

      extract(lvSum[ij_rdx],extracted);

      for(int idx=0;idx<Nsimd;idx++){

        grid->iCoorFromIindex(icoor,idx);

        int ldx    = rt+icoor[orthogdim]*rd;

        int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;

        lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];

      }
    }}}
  }
  if (t_kernel) *t_kernel += usecond();
  GRID_ASSERT(mat.dimension(0) == Nmom);
  GRID_ASSERT(mat.dimension(1) == Ngamma);
  GRID_ASSERT(mat.dimension(2) == Nt);

  // ld loop and local only??
  int pd = grid->_processors[orthogdim];
  int pc = grid->_processor_coor[orthogdim];
  thread_for_collapse(2,lt,ld,{
    for(int pt=0;pt<pd;pt++){
      int t = lt + pt*ld;
      if (pt == pc){
        for(int i=0;i<Lblock;i++){
          for(int j=0;j<Rblock;j++){
            for(int m=0;m<Nmom;m++){
              int ij_dx = m+Nmom*i + Nmom*Lblock * j + Nmom*Lblock * Rblock * lt;
              for(int mu=0;mu<Ngamma;mu++){
                // this is a bit slow
                mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gammas[mu]))()()();
              }
            }
          }
        }
      } else {
        const scalar_type zz(0.0);
        for(int i=0;i<Lblock;i++){
          for(int j=0;j<Rblock;j++){
            for(int mu=0;mu<Ngamma;mu++){
              for(int m=0;m<Nmom;m++){
                mat(m,mu,t,i,j) =zz;
              }
            }
          }
        }
      }
    }
  });

  ////////////////////////////////////////////////////////////////////
  // This global sum is taking as much as 50% of time on 16 nodes
  // Vector size is 7 x 16 x 32 x 16 x 16 x sizeof(complex) = 2MB - 60MB depending on volume
  // Healthy size that should suffice
  ////////////////////////////////////////////////////////////////////
  if (t_gsum) *t_gsum = -usecond();
  grid->GlobalSumVector(&mat(0,0,0,0,0),Nmom*Ngamma*Nt*Lblock*Rblock);
  if (t_gsum) *t_gsum += usecond();
}
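Both the site kernel and the SIMD-lane reduction above address lvSum/lsSum through the flattened index m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r. A small sketch of that layout and why it is collision-free (names mirror the code; this is an illustration, not part of Grid):

// (m,i,j,r) -> flat index layout used for lvSum/lsSum in the CPU kernel:
// m is fastest, then i, then j, then the reduced slice index r.
#include <cassert>

inline int flatIndex(int m, int i, int j, int r,
                     int Nmom, int Lblock, int Rblock) {
  return m + Nmom*i + Nmom*Lblock*j + Nmom*Lblock*Rblock*r;
}

int main() {
  const int Nmom = 3, Lblock = 4, Rblock = 5, rd = 2;
  int expected = 0;
  // Iterating in the matching nesting order visits every slot exactly once.
  for (int r = 0; r < rd; r++)
    for (int j = 0; j < Rblock; j++)
      for (int i = 0; i < Lblock; i++)
        for (int m = 0; m < Nmom; m++)
          assert(flatIndex(m, i, j, r, Nmom, Lblock, Rblock) == expected++);
  assert(expected == Nmom*Lblock*Rblock*rd); // == MFrvol in the source
  return 0;
}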

template <class FImpl>
template <typename TensorType>
void A2Autils<FImpl>::AslashField(TensorType &mat,
                                  const FermionField *lhs_wi,
                                  const FermionField *rhs_vj,
                                  const std::vector<ComplexField> &emB0,
                                  const std::vector<ComplexField> &emB1,
                                  int orthogdim, double *t_kernel, double *t_gsum)
{
  typedef typename FermionField::vector_object vobj;
  typedef typename vobj::scalar_object         sobj;
  typedef typename vobj::scalar_type           scalar_type;
  typedef typename vobj::vector_type           vector_type;

  typedef iSpinMatrix<vector_type> SpinMatrix_v;
  typedef iSpinMatrix<scalar_type> SpinMatrix_s;
  typedef iSinglet<vector_type>    Singlet_v;
  typedef iSinglet<scalar_type>    Singlet_s;

  int Lblock = mat.dimension(3);
  int Rblock = mat.dimension(4);

  GridBase *grid = lhs_wi[0].Grid();

  const int    Nd = grid->_ndimension;
  const int Nsimd = grid->Nsimd();

  int Nt  = grid->GlobalDimensions()[orthogdim];
  int Nem = emB0.size();
  GRID_ASSERT(emB1.size() == Nem);

  int fd=grid->_fdimensions[orthogdim];
  int ld=grid->_ldimensions[orthogdim];
  int rd=grid->_rdimensions[orthogdim];

  // will locally sum vectors first
  // sum across these down to scalars
  // splitting the SIMD
  int MFrvol = rd*Lblock*Rblock*Nem;
  int MFlvol = ld*Lblock*Rblock*Nem;

  std::vector<vector_type> lvSum(MFrvol);
  thread_for(r,MFrvol,
  {
    lvSum[r] = Zero();
  });

  std::vector<scalar_type> lsSum(MFlvol);
  thread_for(r,MFlvol,
  {
    lsSum[r] = scalar_type(0.0);
  });

  int e1=    grid->_slice_nblock[orthogdim];
  int e2=    grid->_slice_block [orthogdim];
  int stride=grid->_slice_stride[orthogdim];

  // Nested parallelism would be ok
  // Wasting cores here. Test case r
  if (t_kernel) *t_kernel = -usecond();
  for(int r=0;r<rd;r++)
  {
    int so=r*grid->_ostride[orthogdim]; // base offset for start of plane

    for(int n=0;n<e1;n++)
    for(int b=0;b<e2;b++)
    {
      int ss= so+n*stride+b;

      for(int i=0;i<Lblock;i++)
      {
        autoView(wi_v,lhs_wi[i],CpuRead);
        auto left = conjugate(wi_v[ss]);

        for(int j=0;j<Rblock;j++)
        {
          SpinMatrix_v vv;
          autoView(vj_v,rhs_vj[j],CpuRead);
          auto right = vj_v[ss];

          for(int s1=0;s1<Ns;s1++)
          for(int s2=0;s2<Ns;s2++)
          {
            vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
                          + left()(s2)(1) * right()(s1)(1)
                          + left()(s2)(2) * right()(s1)(2);
          }

          // After getting the sitewise product do the mom phase loop
          int base = Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*r;

          for ( int m=0;m<Nem;m++)
          {
            autoView(emB0_v,emB0[m],CpuRead);
            autoView(emB1_v,emB1[m],CpuRead);
            int idx  = m+base;
            auto b0  = emB0_v[ss];
            auto b1  = emB1_v[ss];
            auto cb0 = conjugate(b0);
            auto cb1 = conjugate(b1);

            lvSum[idx] += - vv()(3,0)()*b0()()()  - vv()(2,0)()*cb1()()()
                          + vv()(3,1)()*b1()()()  - vv()(2,1)()*cb0()()()
                          + vv()(0,2)()*b1()()()  + vv()(1,2)()*b0()()()
                          + vv()(0,3)()*cb0()()() - vv()(1,3)()*cb1()()();
          }
        }
      }
    }
  }

  // Sum across simd lanes in the plane, breaking out orthog dir.
  thread_for(rt,rd,
  {
    Coordinate icoor(Nd);
    ExtractBuffer<scalar_type> extracted(Nsimd);

    for(int i=0;i<Lblock;i++)
    for(int j=0;j<Rblock;j++)
    for(int m=0;m<Nem;m++)
    {

      int ij_rdx = m+Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*rt;

      extract<vector_type,scalar_type>(lvSum[ij_rdx],extracted);
      for(int idx=0;idx<Nsimd;idx++)
      {
        grid->iCoorFromIindex(icoor,idx);

        int ldx    = rt+icoor[orthogdim]*rd;
        int ij_ldx = m+Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*ldx;

        lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
      }
    }
  });
  if (t_kernel) *t_kernel += usecond();

  // ld loop and local only??
  int pd = grid->_processors[orthogdim];
  int pc = grid->_processor_coor[orthogdim];
  thread_for_collapse(2,lt,ld,
  {
    for(int pt=0;pt<pd;pt++)
    {
      int t = lt + pt*ld;
      if (pt == pc)
      {
        for(int i=0;i<Lblock;i++)
        for(int j=0;j<Rblock;j++)
        for(int m=0;m<Nem;m++)
        {
          int ij_dx = m+Nem*i + Nem*Lblock * j + Nem*Lblock * Rblock * lt;

          mat(m,0,t,i,j) = lsSum[ij_dx];
        }
      }
      else
      {
        const scalar_type zz(0.0);

        for(int i=0;i<Lblock;i++)
        for(int j=0;j<Rblock;j++)
        for(int m=0;m<Nem;m++)
        {
          mat(m,0,t,i,j) = zz;
        }
      }
    }
  });
  if (t_gsum) *t_gsum = -usecond();
  grid->GlobalSumVector(&mat(0,0,0,0,0),Nem*Nt*Lblock*Rblock);
  if (t_gsum) *t_gsum += usecond();
}
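The lvSum update above contracts specific spin components of vv with the two complex photon-field combinations b0 and b1 (and their conjugates); it is the explicit spin algebra of w^dag i A_mu gamma_mu v with the photon packed into two complex fields. A direct scalar transcription of just that arithmetic, for illustration (types simplified to std::complex):

// Scalar transcription of the lvSum accumulation in AslashField above: given
// the 4x4 spin matrix vv and the packed photon components b0, b1, return the
// contribution added for one electromagnetic field m at one site.
#include <array>
#include <complex>

using Cplx = std::complex<double>;
using SpinMat = std::array<std::array<Cplx, 4>, 4>; // vv[s1][s2]

Cplx aslashContract(const SpinMat &vv, Cplx b0, Cplx b1) {
  const Cplx cb0 = std::conj(b0);
  const Cplx cb1 = std::conj(b1);
  return - vv[3][0]*b0  - vv[2][0]*cb1
         + vv[3][1]*b1  - vv[2][1]*cb0
         + vv[0][2]*b1  + vv[1][2]*b0
         + vv[0][3]*cb0 - vv[1][3]*cb1;
}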
#endif
////////////////////////////////////////////
// Schematic thoughts about more generalised four quark insertion
//

@@ -119,7 +119,7 @@ static void generatorDiagonal(int diagIndex, iGroupMatrix<cplx> &ta) {
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
static accelerator_inline void su2SubGroupIndex(int &i1, int &i2, int su2_index, GroupName::SU) {
  GRID_ASSERT((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
  assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));

  int spare = su2_index;
  for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {

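su2SubGroupIndex() enumerates the ncolour*(ncolour-1)/2 SU(2) subgroups of SU(N) by the pair of rows (i1,i2) they act on; the hunk shows only the start of the search loop. A self-contained sketch of how such a mapping typically completes (the loop body and i2 formula here are an assumption for illustration, not a verbatim copy of Grid's code):

// Illustrative completion of an su2SubGroupIndex-style mapping: advance i1,
// spending (ncolour-1-i1) indices per row, then place i2 above it.
// For ncolour=3 this yields (0,1), (0,2), (1,2) for su2_index = 0,1,2.
#include <cassert>
#include <cstdio>

constexpr int ncolour = 3;

void su2SubGroupIndexDemo(int &i1, int &i2, int su2_index) {
  assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
  int spare = su2_index;
  for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {
    spare -= (ncolour - 1 - i1); // skip the block of pairs starting at row i1
  }
  i2 = i1 + spare + 1;           // i2 > i1 by construction
}

int main() {
  for (int idx = 0; idx < (ncolour * (ncolour - 1)) / 2; idx++) {
    int i1, i2;
    su2SubGroupIndexDemo(i1, i2, idx);
    std::printf("su2_index %d -> rows (%d,%d)\n", idx, i1, i2);
  }
  return 0;
}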
@@ -31,7 +31,7 @@ namespace Grid{
    static accelerator_inline void IndexFromCoor (const coor_t& coor,int &index,const coor_t &dims){
      int64_t index64;
      IndexFromCoor(coor,index64,dims);
      GRID_ASSERT(index64<2*1024*1024*1024LL);
      assert(index64<2*1024*1024*1024LL);
      index = (int) index64;
    }
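The wrapper above computes a 64-bit lexicographic index and asserts that it fits before narrowing to int, guarding against overflow on very large local volumes. A sketch of the same guard pattern on a plain coordinate flattening (the row-major flattening order is assumed for illustration; Grid's IndexFromCoor may order coordinates differently):

// Guarded narrowing of a 64-bit lexicographic index, mirroring the hunk above.
#include <cassert>
#include <cstdint>
#include <vector>

inline void indexFromCoor64(const std::vector<int> &coor, int64_t &index,
                            const std::vector<int> &dims) {
  index = 0;
  for (int d = (int)dims.size() - 1; d >= 0; d--)
    index = index * dims[d] + coor[d]; // coordinate 0 varies fastest
}

inline void indexFromCoor(const std::vector<int> &coor, int &index,
                          const std::vector<int> &dims) {
  int64_t index64;
  indexFromCoor64(coor, index64, dims);
  assert(index64 < 2 * 1024 * 1024 * 1024LL); // must fit in a 32-bit int
  index = (int)index64;
}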

@@ -292,7 +292,7 @@ AC_ARG_ENABLE([accelerator],
case ${ac_ACCELERATOR} in
    cuda)
      echo CUDA acceleration
      LIBS="${LIBS} -lcuda"
      LIBS="${LIBS} -lcuda -lcublas"
      AC_DEFINE([GRID_CUDA],[1],[Use CUDA offload]);;
    sycl)
      echo SYCL acceleration

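The configure.ac hunk adds -lcublas to the CUDA link line, consistent with the batched-GEMM path earlier in the commit going through cuBLAS. A minimal stand-alone program to confirm the library links and initialises (the build command is an assumption; it requires the CUDA toolkit):

// Minimal cuBLAS link/initialisation check; build with something like
//   nvcc check_cublas.cpp -lcublas -o check_cublas   (assumed command line)
#include <cublas_v2.h>
#include <cstdio>

int main() {
  cublasHandle_t handle;
  cublasStatus_t st = cublasCreate(&handle);
  if (st != CUBLAS_STATUS_SUCCESS) {
    std::printf("cublasCreate failed: %d\n", (int)st);
    return 1;
  }
  std::printf("cuBLAS initialised OK\n");
  cublasDestroy(handle);
  return 0;
}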