Mirror of https://github.com/paboyle/Grid.git, synced 2025-11-04 05:54:32 +00:00
			
		
		
		
	Merge branch 'develop' into feature/hadrons
@@ -102,5 +102,5 @@ script:
    - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto
    - make -j4
    - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi
    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then mpirun -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi

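Both CI legs above now exercise the DWF benchmark under MPI rather than a plain serial run: two ranks are launched (mpirun.openmpi on the Linux image, mpirun on OSX) and Benchmark_dwf is told to expect them via --threads 1 --mpi 2.1.1.1, which I read as a 2x1x1x1 rank decomposition over the four dimensions; that reading is mine, the diff only carries the flag.
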
@@ -48,9 +48,9 @@ int main (int argc, char ** argv)
  std::cout<<GridLogMessage << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl;
  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;
  int maxlat=16;
  for(int lat=4;lat<=maxlat;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){
  int maxlat=24;
  for(int lat=4;lat<=maxlat;lat+=4){
    for(int Ls=8;Ls<=32;Ls*=2){

      std::vector<int> latt_size  ({lat*mpi_layout[0],
      				    lat*mpi_layout[1],
@@ -124,8 +124,8 @@ int main (int argc, char ** argv)
  std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

  for(int lat=4;lat<=maxlat;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){
  for(int lat=4;lat<=maxlat;lat+=4){
    for(int Ls=8;Ls<=32;Ls*=2){

      std::vector<int> latt_size  ({lat,lat,lat,lat});

@@ -194,14 +194,14 @@ int main (int argc, char ** argv)
  }  

  Nloop=100;
  Nloop=10;
  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "= Benchmarking concurrent STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl;
  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

  for(int lat=4;lat<=maxlat;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){
  for(int lat=4;lat<=maxlat;lat+=4){
    for(int Ls=8;Ls<=32;Ls*=2){

      std::vector<int> latt_size  ({lat*mpi_layout[0],
      				    lat*mpi_layout[1],
@@ -281,8 +281,8 @@ int main (int argc, char ** argv)
  std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
  std::cout<<GridLogMessage << "  L  "<<"\t\t"<<" Ls  "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

  for(int lat=4;lat<=maxlat;lat+=2){
    for(int Ls=1;Ls<=16;Ls*=2){
  for(int lat=4;lat<=maxlat;lat+=4){
    for(int Ls=8;Ls<=32;Ls*=2){

      std::vector<int> latt_size  ({lat*mpi_layout[0],
      				    lat*mpi_layout[1],
@@ -324,8 +324,8 @@ int main (int argc, char ** argv)
					    (void *)&rbuf[mu][0],
					    recv_from_rank,
					    bytes);
	    //	    Grid.StencilSendToRecvFromComplete(requests);
	    //	    requests.resize(0);
	    Grid.StencilSendToRecvFromComplete(requests);
	    requests.resize(0);

	    comm_proc = mpi_layout[mu]-1;

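Judging by the banners, the four hunks above are from the halo-exchange (comms) benchmark. Taken together they widen the lattice scan from L = 4..16 in steps of 2 to L = 4..24 in steps of 4, restrict Ls from 1..16 to 8..32 (still doubling), drop Nloop from 100 to 10, and, in the last hunk, actually complete each StencilSendToRecvFrom exchange inside the loop (Grid.StencilSendToRecvFromComplete(requests) and requests.resize(0) were previously commented out), so the quoted MB/s uni and MB/s bidi figures now time a finished exchange.
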
@@ -48,16 +48,16 @@ typedef WilsonFermion5D<DomainWallVec5dImplR> WilsonFermion5DR;
typedef WilsonFermion5D<DomainWallVec5dImplF> WilsonFermion5DF;
typedef WilsonFermion5D<DomainWallVec5dImplD> WilsonFermion5DD;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  int threads = GridThread::GetThreads();
  std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;

  std::vector<int> latt4 = GridDefaultLatt();
  const int Ls=8;
  const int Ls=16;
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
@@ -71,35 +71,66 @@ int main (int argc, char ** argv)
  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});

  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
  GridParallelRNG          RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);
  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
  GridParallelRNG          RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;

  LatticeFermion src   (FGrid); random(RNG5,src);
#if 0
  src = zero;
  {
    std::vector<int> origin({0,0,0,latt4[2]-1,0});
    SpinColourVectorF tmp;
    tmp=zero;
    tmp()(0)(0)=Complex(-2.0,0.0);
    std::cout << " source site 0 " << tmp<<std::endl;
    pokeSite(tmp,src,origin);
  }
#else
  RealD N2 = 1.0/::sqrt(norm2(src));
  src = src*N2;
#endif

  LatticeFermion result(FGrid); result=zero;
  LatticeFermion    ref(FGrid);    ref=zero;
  LatticeFermion    tmp(FGrid);
  LatticeFermion    err(FGrid);

  std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
  LatticeGaugeField Umu(UGrid); 
  random(RNG4,Umu);

  LatticeGaugeField Umu5d(FGrid); 
  SU3::HotConfiguration(RNG4,Umu); 
  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
#if 0
  Umu=1.0;
  for(int mu=0;mu<Nd;mu++){
    LatticeColourMatrix ttmp(UGrid);
    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
    //    if (mu !=2 ) ttmp = 0;
    //    ttmp = ttmp* pow(10.0,mu);
    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
  }
  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
#endif

  ////////////////////////////////////
  // Naive wilson implementation
  ////////////////////////////////////
  // replicate across fifth dimension
  LatticeGaugeField Umu5d(FGrid); 
  std::vector<LatticeColourMatrix> U(4,FGrid);
  for(int ss=0;ss<Umu._grid->oSites();ss++){
    for(int s=0;s<Ls;s++){
      Umu5d._odata[Ls*ss+s] = Umu._odata[ss];
    }
  }

  ////////////////////////////////////
  // Naive wilson implementation
  ////////////////////////////////////
  std::vector<LatticeColourMatrix> U(4,FGrid);
  for(int mu=0;mu<Nd;mu++){
    U[mu] = PeekIndex<LorentzIndex>(Umu5d,mu);
  }
  std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;

  if (1)
  {
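One reordering worth noting in the hunk above: the 4d gauge field is now drawn with SU3::HotConfiguration before being replicated across the fifth dimension for the Cshift-based reference. With the s index innermost, the 5d site (ss,s) lives at flat index Ls*ss+s, so the copy Umu5d._odata[Ls*ss+s] = Umu._odata[ss] simply stamps the same 4d link onto every one of the Ls slices.
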
@@ -121,6 +152,7 @@ int main (int argc, char ** argv)
  RealD NP = UGrid->_Nprocessors;

  std::cout << GridLogMessage << "Creating action operator " << std::endl;
  DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);

  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
@@ -136,10 +168,11 @@ int main (int argc, char ** argv)
  if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl;
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

  int ncall =100;
  int ncall =1000;
  if (1) {
    FGrid->Barrier();
    Dw.ZeroCounters();
    Dw.Dhop(src,result,0);
    double t0=usecond();
    for(int i=0;i<ncall;i++){
      __SSC_START;
@@ -153,12 +186,22 @@ int main (int argc, char ** argv)
    double flops=1344*volume*ncall;

    std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
    //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
    //    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl;
    err = ref-result; 
    std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;

    /*
    if(( norm2(err)>1.0e-4) ) { 
      std::cout << "RESULT\n " << result<<std::endl;
      std::cout << "REF   \n " << ref   <<std::endl;
      std::cout << "ERR   \n " << err   <<std::endl;
      FGrid->Barrier();
      exit(-1);
    }
    */
    assert (norm2(err)< 1.0e-4 );
    Dw.Report();
  }
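A quick sanity check on the numbers printed in this hunk: flops accumulates 1344 operations per 5d site per Dhop over all ncall calls, and t1-t0 comes from usecond() (the message prints it in us), so flops/(t1-t0) is flop/us, i.e. already Mflop/s with no further conversion, and dividing by NP gives the per-rank rate. The constant 1344 is the flop count Grid assigns to one domain-wall Dhop per site (close to the textbook 1320-flop Wilson dslash figure); its detailed decomposition is not part of this diff.
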
@@ -182,21 +225,13 @@ int main (int argc, char ** argv)
    LatticeFermion sresult(sFGrid);

    WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5);

    for(int x=0;x<latt4[0];x++){
    for(int y=0;y<latt4[1];y++){
    for(int z=0;z<latt4[2];z++){
    for(int t=0;t<latt4[3];t++){
    for(int s=0;s<Ls;s++){
      std::vector<int> site({s,x,y,z,t});
      SpinColourVector tmp;
      peekSite(tmp,src,site);
      pokeSite(tmp,ssrc,site);
    }}}}}

    localConvert(src,ssrc);
    std::cout<<GridLogMessage<< "src norms "<< norm2(src)<<" " <<norm2(ssrc)<<std::endl;
    FGrid->Barrier();
    double t0=usecond();
    sDw.Dhop(ssrc,sresult,0);
    sDw.ZeroCounters();
    double t0=usecond();
    for(int i=0;i<ncall;i++){
      __SSC_START;
      sDw.Dhop(ssrc,sresult,0);
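The deleted five-deep peekSite/pokeSite loop and the added localConvert(src,ssrc) do the same job, transcribing the source fermion into the s-vectorised layout of sFGrid, now as a single library call. The surrounding lines also rework the timing preamble so that sDw.ZeroCounters() runs before t0 is taken.
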
@@ -210,46 +245,47 @@ int main (int argc, char ** argv)
    std::cout<<GridLogMessage << "Called Dw s_inner "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
    std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl;
    //    std::cout<<GridLogMessage<< "res norms "<< norm2(result)<<" " <<norm2(sresult)<<std::endl;
    sDw.Report();

    if(0){
      for(int i=0;i< PerformanceCounter::NumTypes(); i++ ){
	sDw.Dhop(ssrc,sresult,0);
	PerformanceCounter Counter(i);
	Counter.Start();
	sDw.Dhop(ssrc,sresult,0);
	Counter.Stop();
	Counter.Report();
      }
    }

    std::cout<<GridLogMessage<< "res norms "<< norm2(result)<<" " <<norm2(sresult)<<std::endl;

    RealD sum=0;
    for(int x=0;x<latt4[0];x++){
    for(int y=0;y<latt4[1];y++){
    for(int z=0;z<latt4[2];z++){
    for(int t=0;t<latt4[3];t++){
    for(int s=0;s<Ls;s++){
      std::vector<int> site({s,x,y,z,t});
      SpinColourVector normal, simd;
      peekSite(normal,result,site);
      peekSite(simd,sresult,site);
      sum=sum+norm2(normal-simd);
      if (norm2(normal-simd) > 1.0e-6 ) {
	std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" "<<norm2(normal-simd)<<std::endl;
	std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" normal "<<normal<<std::endl;
	std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" simd   "<<simd<<std::endl;
      }
    }}}}}
    std::cout<<GridLogMessage<<" difference between normal and simd is "<<sum<<std::endl;
    assert (sum< 1.0e-4 );

    err=zero;
    localConvert(sresult,err);
    err = err - ref;
    sum = norm2(err);
    std::cout<<GridLogMessage<<" difference between normal ref and simd is "<<sum<<std::endl;
    if(sum > 1.0e-4 ){
      std::cout<< "sD REF\n " <<ref << std::endl;
      std::cout<< "sD ERR   \n " <<err  <<std::endl;
    }
    //    assert(sum < 1.0e-4);

    if (1) {
    err=zero;
    localConvert(sresult,err);
    err = err - result;
    sum = norm2(err);
    std::cout<<GridLogMessage<<" difference between normal result and simd is "<<sum<<std::endl;
    if(sum > 1.0e-4 ){
      std::cout<< "sD REF\n " <<result << std::endl;
      std::cout<< "sD ERR   \n " << err  <<std::endl;
    }
    assert(sum < 1.0e-4);

    if(1){
      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
      std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
      std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
      if ( sizeof(Real)==4 )   std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
      if ( sizeof(Real)==8 )   std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) 
	std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) 
	std::cout << GridLogMessage<< "* Using Nc=3       WilsonKernels" <<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) 
	std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl;
      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

      LatticeFermion sr_eo(sFGrid);

      LatticeFermion ssrc_e (sFrbGrid);
      LatticeFermion ssrc_o (sFrbGrid);
      LatticeFermion sr_e   (sFrbGrid);
@@ -257,33 +293,23 @@ int main (int argc, char ** argv)
      pickCheckerboard(Even,ssrc_e,ssrc);
      pickCheckerboard(Odd,ssrc_o,ssrc);

      setCheckerboard(sr_eo,ssrc_o);
      setCheckerboard(sr_eo,ssrc_e);
      //      setCheckerboard(sr_eo,ssrc_o);
      //      setCheckerboard(sr_eo,ssrc_e);

      sr_e = zero;
      sr_o = zero;

      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
      std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
      std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
      if ( sizeof(Real)==4 )   std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
      if ( sizeof(Real)==8 )   std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric   ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3       WilsonKernels" <<std::endl;
      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl;
      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

      FGrid->Barrier();
      sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
      sDw.ZeroCounters();
      sDw.stat.init("DhopEO");
      //      sDw.stat.init("DhopEO");
      double t0=usecond();
      for (int i = 0; i < ncall; i++) {
        sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
      }
      double t1=usecond();
      FGrid->Barrier();
      sDw.stat.print();
      //      sDw.stat.print();

      double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
      double flops=(1344.0*volume*ncall)/2;
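The flop estimate in this hunk carries an explicit factor 1/2 relative to the full-Dhop figure: DhopEO acts on a single checkerboard, so only half of the Ls x L^4 sites counted in volume are touched per call.
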
@@ -298,22 +324,26 @@ int main (int argc, char ** argv)
      pickCheckerboard(Even,ssrc_e,sresult);
      pickCheckerboard(Odd ,ssrc_o,sresult);

      ssrc_e = ssrc_e - sr_e;
      RealD error = norm2(ssrc_e);

      std::cout<<GridLogMessage << "sE norm diff   "<< norm2(ssrc_e)<< "  vec nrm"<<norm2(sr_e) <<std::endl;
      ssrc_o = ssrc_o - sr_o;

      ssrc_o = ssrc_o - sr_o;
      error+= norm2(ssrc_o);
      std::cout<<GridLogMessage << "sO norm diff   "<< norm2(ssrc_o)<< "  vec nrm"<<norm2(sr_o) <<std::endl;
      if(error>1.0e-4) { 

      if(( error>1.0e-4) ) { 
	setCheckerboard(ssrc,ssrc_o);
	setCheckerboard(ssrc,ssrc_e);
	std::cout<< ssrc << std::endl;
	std::cout<< "DIFF\n " <<ssrc << std::endl;
	setCheckerboard(ssrc,sr_o);
	setCheckerboard(ssrc,sr_e);
	std::cout<< "CBRESULT\n " <<ssrc << std::endl;
	std::cout<< "RESULT\n " <<sresult<< std::endl;
      }
      assert(error<1.0e-4);
    }

  }

  if (1)
@@ -324,25 +354,30 @@ int main (int argc, char ** argv)
      //    ref =  src - Gamma(Gamma::Algebra::GammaX)* src ; // 1+gamma_x
      tmp = U[mu]*Cshift(src,mu+1,1);
      for(int i=0;i<ref._odata.size();i++){
  ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
	ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
      }

      tmp =adj(U[mu])*src;
      tmp =Cshift(tmp,mu+1,-1);
      for(int i=0;i<ref._odata.size();i++){
  ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
	ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
      }
    }
    ref = -0.5*ref;
  }
  //  dump=1;
  Dw.Dhop(src,result,1);
  std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
  std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
  std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
  std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
  std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
  std::cout<<GridLogMessage << "norm dag ref    "<< norm2(ref)<<std::endl;
  err = ref-result; 
  std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
  assert(norm2(err)<1.0e-4);
  std::cout<<GridLogMessage << "norm dag diff   "<< norm2(err)<<std::endl;
  if((norm2(err)>1.0e-4)){
	std::cout<< "DAG RESULT\n "  <<ref     << std::endl;
	std::cout<< "DAG sRESULT\n " <<result  << std::endl;
	std::cout<< "DAG ERR   \n "  << err    <<std::endl;
  }
  LatticeFermion src_e (FrbGrid);
  LatticeFermion src_o (FrbGrid);
  LatticeFermion r_e   (FrbGrid);
@@ -350,13 +385,18 @@ int main (int argc, char ** argv)
  LatticeFermion r_eo  (FGrid);

  std::cout<<GridLogMessage << "Calling Deo and Doe and assert Deo+Doe == Dunprec"<<std::endl;
  std::cout<<GridLogMessage << "Calling Deo and Doe and //assert Deo+Doe == Dunprec"<<std::endl;
  pickCheckerboard(Even,src_e,src);
  pickCheckerboard(Odd,src_o,src);

  std::cout<<GridLogMessage << "src_e"<<norm2(src_e)<<std::endl;
  std::cout<<GridLogMessage << "src_o"<<norm2(src_o)<<std::endl;

  // S-direction is INNERMOST and takes no part in the parity.
  static int Opt;  // these are a temporary hack
  static int Comms;  // these are a temporary hack

  std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
  std::cout << GridLogMessage<< "* Benchmarking DomainWallFermionR::DhopEO                "<<std::endl;
  std::cout << GridLogMessage<< "* Vectorising space-time by "<<vComplex::Nsimd()<<std::endl;
@@ -369,6 +409,7 @@ int main (int argc, char ** argv)
  {
    Dw.ZeroCounters();
    FGrid->Barrier();
    Dw.DhopEO(src_o,r_e,DaggerNo);
    double t0=usecond();
    for(int i=0;i<ncall;i++){
      Dw.DhopEO(src_o,r_e,DaggerNo);
@@ -396,14 +437,19 @@ int main (int argc, char ** argv)
  err = r_eo-result; 
  std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
  assert(norm2(err)<1.0e-4);
  if((norm2(err)>1.0e-4)){
	std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
	std::cout<< "Deo REF\n " <<result  << std::endl;
	std::cout<< "Deo ERR   \n " << err <<std::endl;
  }

  pickCheckerboard(Even,src_e,err);
  pickCheckerboard(Odd,src_o,err);
  std::cout<<GridLogMessage << "norm diff even  "<< norm2(src_e)<<std::endl;
  std::cout<<GridLogMessage << "norm diff odd   "<< norm2(src_o)<<std::endl;
  assert(norm2(src_e)<1.0e-4);
  assert(norm2(src_o)<1.0e-4);

  //assert(norm2(src_e)<1.0e-4);
  //assert(norm2(src_o)<1.0e-4);

  Grid_finalize();
}

@@ -77,8 +77,7 @@ int main (int argc, char ** argv)
    }

    double start=usecond();
PARALLEL_FOR_LOOP
    for(int t=0;t<threads;t++){
    parallel_for(int t=0;t<threads;t++){

      sum[t] = x[t]._odata[0];
      for(int i=0;i<Nloop;i++){

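Here (and again in the CoarsenedMatrix hunks at the end of the commit) the two-line PARALLEL_FOR_LOOP plus plain for is collapsed into the parallel_for(...) wrapper. A minimal sketch of how I understand the wrapper to be spelled in Grid's threading header, so the emitted OpenMP pragma is unchanged and only the call-site spelling differs (an assumption, not shown in this diff):

  // hypothetical rendering of the wrapper, for illustration only
  #define parallel_for PARALLEL_FOR_LOOP for
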
							
								
								
									
benchmarks/Benchmark_staggered.cc (new file, 134 lines)
@@ -0,0 +1,134 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./benchmarks/Benchmark_staggered.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;
using namespace Grid::QCD;

int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  std::vector<int> latt_size   = GridDefaultLatt();
  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  std::vector<int> mpi_layout  = GridDefaultMpi();
  GridCartesian               Grid(latt_size,simd_layout,mpi_layout);
  GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout);

  int threads = GridThread::GetThreads();
  std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
  std::cout<<GridLogMessage << "Grid floating point word size is REALF"<< sizeof(RealF)<<std::endl;
  std::cout<<GridLogMessage << "Grid floating point word size is REALD"<< sizeof(RealD)<<std::endl;
  std::cout<<GridLogMessage << "Grid floating point word size is REAL"<< sizeof(Real)<<std::endl;

  std::vector<int> seeds({1,2,3,4});
  GridParallelRNG          pRNG(&Grid);
  pRNG.SeedFixedIntegers(seeds);
  //  pRNG.SeedRandomDevice();

  typedef typename ImprovedStaggeredFermionR::FermionField FermionField; 
  typename ImprovedStaggeredFermionR::ImplParams params; 

  FermionField src   (&Grid); random(pRNG,src);
  FermionField result(&Grid); result=zero;
  FermionField    ref(&Grid);    ref=zero;
  FermionField    tmp(&Grid);    tmp=zero;
  FermionField    err(&Grid);    err=zero;
  LatticeGaugeField Umu(&Grid); random(pRNG,Umu);
  std::vector<LatticeColourMatrix> U(4,&Grid);

  double volume=1;
  for(int mu=0;mu<Nd;mu++){
    volume=volume*latt_size[mu];
  }  

  // Only one non-zero (y)
#if 0
  Umu=zero;
  Complex cone(1.0,0.0);
  for(int nn=0;nn<Nd;nn++){
    random(pRNG,U[nn]);
    if(1) {
      if (nn!=2) { U[nn]=zero; std::cout<<GridLogMessage << "zeroing gauge field in dir "<<nn<<std::endl; }
      //      else       { U[nn]= cone;std::cout<<GridLogMessage << "unit gauge field in dir "<<nn<<std::endl; }
      else       { std::cout<<GridLogMessage << "random gauge field in dir "<<nn<<std::endl; }
    }
    PokeIndex<LorentzIndex>(Umu,U[nn],nn);
  }
#endif

  for(int mu=0;mu<Nd;mu++){
    U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
  }
  ref = zero;
  /*  
  { // Naive wilson implementation
    ref = zero;
    for(int mu=0;mu<Nd;mu++){
      //    ref =  src + Gamma(Gamma::GammaX)* src ; // 1-gamma_x
      tmp = U[mu]*Cshift(src,mu,1);
      for(int i=0;i<ref._odata.size();i++){
	ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
      }

      tmp =adj(U[mu])*src;
      tmp =Cshift(tmp,mu,-1);
      for(int i=0;i<ref._odata.size();i++){
	ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
      }
    }
  }
  ref = -0.5*ref;
  */

  RealD mass=0.1;
  RealD c1=9.0/8.0;
  RealD c2=-1.0/24.0;
  RealD u0=1.0;
  ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);

  std::cout<<GridLogMessage << "Calling Ds"<<std::endl;
  int ncall=1000;
  double t0=usecond();
  for(int i=0;i<ncall;i++){
    Ds.Dhop(src,result,0);
  }
  double t1=usecond();
  double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 16*66 + 90 == 1146 flops per site

  std::cout<<GridLogMessage << "Called Ds"<<std::endl;
  std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
  std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
  std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl;
  err = ref-result; 
  std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;

  Grid_finalize();
}
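For the new staggered benchmark above, the per-site flop estimate can be unpacked as 16*(3*(6+8+8)) = 16*66 = 1056 for the sixteen SU(3) matrix-times-vector products (one-hop and three-hop terms, forward and backward, in four dimensions, at 66 flops each), plus 15*3*2 = 90 for the fifteen complex three-vector accumulations, giving 1146 flops per site per Dhop call. That unpacking is my reading of the expression; the source only records the arithmetic.
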
@@ -6,7 +6,7 @@ AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE(subdir-objects)
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_SRCDIR([lib/Grid.h])
AC_CONFIG_HEADERS([lib/Config.h])
AC_CONFIG_HEADERS([lib/Config.h],[sed -i 's|PACKAGE_|GRID_|' lib/Config.h])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])

############### Checks for programs

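The second argument to AC_CONFIG_HEADERS is a command autoconf runs after (re)generating the header, so every PACKAGE_* macro in lib/Config.h gets rewritten to GRID_*; presumably this keeps the installed Config.h from clashing with the PACKAGE_NAME/PACKAGE_VERSION style defines of packages that include Grid. As an illustration only (the concrete macro names are not in the diff), a generated line such as

  #define PACKAGE_VERSION "x.y"

would come out as

  #define GRID_VERSION "x.y"
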
							
								
								
									
lib/Grid.h (52 changed lines)
@@ -38,52 +38,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_H
#define GRID_H

///////////////////
// Std C++ dependencies
///////////////////
#include <cassert>
#include <complex>
#include <vector>
#include <iostream>
#include <iomanip>
#include <random>
#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
#include <sys/time.h>
#include <chrono>

///////////////////
// Grid headers
///////////////////
#include "Config.h"
#include <Grid/Timer.h>
#include <Grid/PerfCount.h>
#include <Grid/Log.h>
#include <Grid/AlignedAllocator.h>
#include <Grid/Simd.h>
#include <Grid/serialisation/Serialisation.h>
#include <Grid/Threads.h>
#include <Grid/Lexicographic.h>
#include <Grid/Init.h>
#include <Grid/Communicator.h> 
#include <Grid/Cartesian.h>    
#include <Grid/Tensors.h>      
#include <Grid/Lattice.h>      
#include <Grid/Cshift.h>       
#include <Grid/Stencil.h>      
#include <Grid/Algorithms.h>   
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/FFT.h>

#include <Grid/qcd/QCD.h>
#include <Grid/parallelIO/NerscIO.h>
#include <Grid/qcd/hmc/NerscCheckpointer.h>
#include <Grid/qcd/hmc/HmcRunner.h>

#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/Action.h>
#include <Grid/qcd/smearing/Smearing.h>
#include <Grid/qcd/hmc/HMC_aggregate.h>

#endif

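Net effect of the lib/Grid.h hunk: the top-level header stops listing every standard-library and Grid header itself and instead pulls in the new aggregate headers GridCore.h and GridQCDcore.h plus the action, smearing and HMC aggregates; the long include list deleted here reappears, reorganised by subdirectory (perfmon/, log/, allocator/, simd/, threads/, ...), in the new lib/GridCore.h below.
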
							
								
								
									
lib/GridCore.h (new file, 81 lines)
@@ -0,0 +1,81 @@
    /*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/Grid.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
//
//  Grid.h
//  simd
//
//  Created by Peter Boyle on 09/05/2014.
//  Copyright (c) 2014 University of Edinburgh. All rights reserved.
//

#ifndef GRID_BASE_H
#define GRID_BASE_H

///////////////////
// Std C++ dependencies
///////////////////
#include <cassert>
#include <complex>
#include <vector>
#include <iostream>
#include <iomanip>
#include <random>
#include <functional>
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <ctime>
#include <sys/time.h>
#include <chrono>

///////////////////
// Grid headers
///////////////////
#include "Config.h"

#include <Grid/perfmon/Timer.h>
#include <Grid/perfmon/PerfCount.h>
#include <Grid/log/Log.h>
#include <Grid/allocator/AlignedAllocator.h>
#include <Grid/simd/Simd.h>
#include <Grid/serialisation/Serialisation.h>
#include <Grid/threads/Threads.h>
#include <Grid/util/Util.h>
#include <Grid/communicator/Communicator.h> 
#include <Grid/cartesian/Cartesian.h>    
#include <Grid/tensors/Tensors.h>      
#include <Grid/lattice/Lattice.h>      
#include <Grid/cshift/Cshift.h>       
#include <Grid/stencil/Stencil.h>      
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/algorithms/Algorithms.h>   

#endif
@@ -2,12 +2,12 @@

    Grid physics library, www.github.com/paboyle/Grid 

    Source file: ./lib/qcd/hmc/HMC.cc
    Source file: ./lib/Grid.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
@@ -27,10 +27,16 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#ifndef GRID_QCD_CORE_H
#define GRID_QCD_CORE_H

namespace Grid{
  namespace QCD{
/////////////////////////
// Core Grid QCD headers
/////////////////////////
#include <Grid/GridCore.h>
#include <Grid/qcd/QCD.h>
#include <Grid/qcd/spin/Spin.h>
#include <Grid/qcd/utils/Utils.h>
#include <Grid/qcd/representations/Representations.h>

  }
}
#endif
										
											Binary file not shown.
										
									
								
							@@ -1,154 +0,0 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/Old/Tensor_peek.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_MATH_PEEK_H
 | 
			
		||||
#define GRID_MATH_PEEK_H
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Peek on a specific index; returns a scalar in that index, tensor inherits rest
 | 
			
		||||
//////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// If we hit the right index, return scalar with no further recursion
 | 
			
		||||
 | 
			
		||||
//template<int Level> inline ComplexF peekIndex(const ComplexF arg) { return arg;}
 | 
			
		||||
//template<int Level> inline ComplexD peekIndex(const ComplexD arg) { return arg;}
 | 
			
		||||
//template<int Level> inline RealF peekIndex(const RealF arg) { return arg;}
 | 
			
		||||
//template<int Level> inline RealD peekIndex(const RealD arg) { return arg;}
 | 
			
		||||
#if 0
 | 
			
		||||
// Scalar peek, no indices
 | 
			
		||||
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iScalar<vtype> &arg) ->  iScalar<vtype> 
 | 
			
		||||
{
 | 
			
		||||
  return arg;
 | 
			
		||||
}
 | 
			
		||||
// Vector peek, one index
 | 
			
		||||
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iVector<vtype,N> &arg,int i) -> iScalar<vtype> // Index matches
 | 
			
		||||
{
 | 
			
		||||
  iScalar<vtype> ret;                              // return scalar
 | 
			
		||||
  ret._internal = arg._internal[i];
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
// Matrix peek, two indices
 | 
			
		||||
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) ->  iScalar<vtype>
 | 
			
		||||
{
 | 
			
		||||
  iScalar<vtype> ret;                              // return scalar
 | 
			
		||||
  ret._internal = arg._internal[i][j];
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/////////////
 | 
			
		||||
// No match peek for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
 | 
			
		||||
/////////////
 | 
			
		||||
// scalar
 | 
			
		||||
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iScalar<vtype> &arg) -> iScalar<decltype(peekIndex<Level>(arg._internal))>
 | 
			
		||||
{
 | 
			
		||||
  iScalar<decltype(peekIndex<Level>(arg._internal))> ret;
 | 
			
		||||
  ret._internal= peekIndex<Level>(arg._internal);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iScalar<vtype> &arg,int i) ->  iScalar<decltype(peekIndex<Level>(arg._internal,i))> 
 | 
			
		||||
{
 | 
			
		||||
  iScalar<decltype(peekIndex<Level>(arg._internal,i))> ret;
 | 
			
		||||
  ret._internal=peekIndex<Level>(arg._internal,i);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iScalar<vtype> &arg,int i,int j) ->  iScalar<decltype(peekIndex<Level>(arg._internal,i,j))>
 | 
			
		||||
{
 | 
			
		||||
  iScalar<decltype(peekIndex<Level>(arg._internal,i,j))> ret;
 | 
			
		||||
  ret._internal=peekIndex<Level>(arg._internal,i,j);
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
// vector
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
auto peekIndex(const iVector<vtype,N> &arg) ->   iVector<decltype(peekIndex<Level>(arg._internal[0])),N>
 | 
			
		||||
{
 | 
			
		||||
  iVector<decltype(peekIndex<Level>(arg._internal[0])),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii]);
 | 
			
		||||
  }
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iVector<vtype,N> &arg,int i) ->  iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N>
 | 
			
		||||
{
 | 
			
		||||
  iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i);
 | 
			
		||||
  }
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iVector<vtype,N> &arg,int i,int j) ->  iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N> 
 | 
			
		||||
{
 | 
			
		||||
  iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i,j);
 | 
			
		||||
  }
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// matrix
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
auto peekIndex(const iMatrix<vtype,N> &arg) ->   iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N> 
 | 
			
		||||
{
 | 
			
		||||
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
  for(int jj=0;jj<N;jj++){
 | 
			
		||||
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj]);// Could avoid this because peeking a scalar is dumb
 | 
			
		||||
  }}
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iMatrix<vtype,N> &arg,int i) ->   iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N>
 | 
			
		||||
{
 | 
			
		||||
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
  for(int jj=0;jj<N;jj++){
 | 
			
		||||
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i);
 | 
			
		||||
  }}
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline 
 | 
			
		||||
  auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) ->   iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N>
 | 
			
		||||
{
 | 
			
		||||
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N> ret;
 | 
			
		||||
  for(int ii=0;ii<N;ii++){
 | 
			
		||||
  for(int jj=0;jj<N;jj++){
 | 
			
		||||
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i,j);
 | 
			
		||||
  }}
 | 
			
		||||
  return ret;
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
@@ -1,127 +0,0 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/Old/Tensor_poke.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
#ifndef GRID_MATH_POKE_H
#define GRID_MATH_POKE_H

namespace Grid {

//////////////////////////////////////////////////////////////////////////////
// Poke a specific index;
//////////////////////////////////////////////////////////////////////////////
#if 0
// Scalar poke
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
  void pokeIndex(iScalar<vtype> &ret, const iScalar<vtype> &arg)
{
  ret._internal = arg._internal;
}
// Vector poke, one index
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
  void pokeIndex(iVector<vtype,N> &ret, const iScalar<vtype> &arg,int i)
{
  ret._internal[i] = arg._internal;
}
//Matrix poke, two indices
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
  void pokeIndex(iMatrix<vtype,N> &ret, const iScalar<vtype> &arg,int i,int j)
{
  ret._internal[i][j] = arg._internal;
}

/////////////
// No match poke for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
/////////////
// scalar
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal))>  &arg)
{
  pokeIndex<Level>(ret._internal,arg._internal);
}
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0))> &arg, int i)
{
  pokeIndex<Level>(ret._internal,arg._internal,i);
}
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0,0))> &arg,int i,int j)
{
  pokeIndex<Level>(ret._internal,arg._internal,i,j);
}

// Vector
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iVector<vtype,N> &ret, iVector<decltype(peekIndex<Level>(ret._internal)),N>  &arg)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii]);
  }
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i);
  }
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg,int i,int j)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i,j);
  }
}

// Matrix
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal)),N> &arg)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj]);
  }}
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i);
  }}
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
  void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg, int i,int j)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i,j);
  }}
}
#endif

}
#endif
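A note on the dispatch pattern used by the pokeIndex family above: the enable_if test on iScalar<vtype>::TensorLevel picks the terminal overload once the requested level is reached, and the forwarding overload otherwise. A minimal standalone sketch of the same idea, using toy types (Inner, Outer, pokeLevel are hypothetical names, not Grid's real iScalar/iVector/iMatrix):

// toy.cpp -- illustrative only
#include <iostream>
#include <type_traits>

struct Inner { static const int TensorLevel = 1; double value; };
struct Outer { static const int TensorLevel = 2; Inner internal; };

// Terminal case: this type sits at the requested level, so write here.
template<int Level, class T,
         typename std::enable_if<T::TensorLevel == Level>::type * = nullptr>
void pokeLevel(T &ret, double v) { ret.value = v; }

// Forwarding case: not at the requested level yet, recurse one level down.
template<int Level, class T,
         typename std::enable_if<T::TensorLevel != Level>::type * = nullptr>
void pokeLevel(T &ret, double v) { pokeLevel<Level>(ret.internal, v); }

int main() {
  Outer o{};
  pokeLevel<1>(o, 3.14);                       // forwards through Outer into Inner
  std::cout << o.internal.value << std::endl;  // prints 3.14
  return 0;
}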
@@ -42,15 +42,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/iterative/ConjugateResidual.h>
#include <Grid/algorithms/iterative/NormalEquations.h>
#include <Grid/algorithms/iterative/SchurRedBlack.h>

#include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
#include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>

// Lanczos support
#include <Grid/algorithms/iterative/MatrixUtils.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>

#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>

// Eigen/lanczos
// EigCg
@@ -267,8 +267,7 @@ namespace Grid {
      SimpleCompressor<siteVector> compressor;
      Stencil.HaloExchange(in,compressor);

PARALLEL_FOR_LOOP
      for(int ss=0;ss<Grid()->oSites();ss++){
      parallel_for(int ss=0;ss<Grid()->oSites();ss++){
        siteVector res = zero;
	siteVector nbr;
	int ptype;
@@ -380,8 +379,7 @@ PARALLEL_FOR_LOOP
	  Subspace.ProjectToSubspace(oProj,oblock);
	  //	  blockProject(iProj,iblock,Subspace.subspace);
	  //	  blockProject(oProj,oblock,Subspace.subspace);
PARALLEL_FOR_LOOP
	  for(int ss=0;ss<Grid()->oSites();ss++){
	  parallel_for(int ss=0;ss<Grid()->oSites();ss++){
	    for(int j=0;j<nbasis;j++){
	      if( disp!= 0 ) {
		A[p]._odata[ss](j,i) = oProj._odata[ss](j);

@@ -25,7 +25,7 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/GridCore.h>

namespace Grid {
double MultiShiftFunction::approx(double x)

@@ -45,6 +45,8 @@ class ConjugateGradient : public OperatorFunction<Field> {
                           // Defaults true.
  RealD Tolerance;
  Integer MaxIterations;
  Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion

  ConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true)
      : Tolerance(tol),
        MaxIterations(maxit),
@@ -155,13 +157,14 @@ class ConjugateGradient : public OperatorFunction<Field> {
        std::cout << std::endl;

        if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);

	IterationsToComplete = k;
        return;
      }
    }
    std::cout << GridLogMessage << "ConjugateGradient did NOT converge"
              << std::endl;
    if (ErrorOnNoConverge) assert(0);
    IterationsToComplete = k;
  }
};
}

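The new IterationsToComplete member is filled in whether or not the solve converges, so callers can log the cost of every solve. A hedged sketch of reading it (Field, HermOp, src and sol are assumed to be set up by the caller; the calls mirror those the mixed-precision driver below makes):

  ConjugateGradient<Field> CG(1.0e-8, 10000);
  CG(HermOp, src, sol);
  std::cout << GridLogMessage << "CG finished in "
            << CG.IterationsToComplete << " iterations" << std::endl;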
@@ -35,6 +35,7 @@ namespace Grid {
  class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
  public:
    RealD   Tolerance;
    RealD   InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
    Integer MaxInnerIterations;
    Integer MaxOuterIterations;
    GridBase* SinglePrecGrid; //Grid for single-precision fields
@@ -42,12 +43,16 @@ namespace Grid {
    LinearOperatorBase<FieldF> &Linop_f;
    LinearOperatorBase<FieldD> &Linop_d;

    Integer TotalInnerIterations; //Number of inner CG iterations
    Integer TotalOuterIterations; //Number of restarts
    Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step

    //Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
    LinearFunction<FieldF> *guesser;

    MixedPrecisionConjugateGradient(RealD tol, Integer maxinnerit, Integer maxouterit, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d) :
      Linop_f(_Linop_f), Linop_d(_Linop_d),
      Tolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
      Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
      OuterLoopNormMult(100.), guesser(NULL){ };

    void useGuesser(LinearFunction<FieldF> &g){
@@ -55,6 +60,8 @@ namespace Grid {
    }

    void operator() (const FieldD &src_d_in, FieldD &sol_d){
      TotalInnerIterations = 0;

      GridStopWatch TotalTimer;
      TotalTimer.Start();

@@ -74,7 +81,7 @@ namespace Grid {
      FieldD src_d(DoublePrecGrid);
      src_d = src_d_in; //source for next inner iteration, computed from residual during operation

      RealD inner_tol = Tolerance;
      RealD inner_tol = InnerTolerance;

      FieldF src_f(SinglePrecGrid);
      src_f.checkerboard = cb;
@@ -89,7 +96,9 @@ namespace Grid {

      GridStopWatch PrecChangeTimer;

      for(Integer outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++){
      Integer &outer_iter = TotalOuterIterations; //so it will be equal to the final iteration count

      for(outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++){
	//Compute double precision rsd and also new RHS vector.
	Linop_d.HermOp(sol_d, tmp_d);
	RealD norm = axpy_norm(src_d, -1., tmp_d, src_d_in); //src_d is residual vector
@@ -117,6 +126,7 @@ namespace Grid {
	InnerCGtimer.Start();
	CG_f(Linop_f, src_f, sol_f);
	InnerCGtimer.Stop();
	TotalInnerIterations += CG_f.IterationsToComplete;

	//Convert sol back to double and add to double prec solution
	PrecChangeTimer.Start();
@@ -131,9 +141,11 @@ namespace Grid {

      ConjugateGradient<FieldD> CG_d(Tolerance, MaxInnerIterations);
      CG_d(Linop_d, src_d_in, sol_d);
      TotalFinalStepIterations = CG_d.IterationsToComplete;

      TotalTimer.Stop();
      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Total " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Inner CG iterations " << TotalInnerIterations << " Restarts " << TotalOuterIterations << " Final CG iterations " << TotalFinalStepIterations << std::endl;
      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Total time " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
    }
  };

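A hedged usage sketch for the knobs introduced above; the template argument order, grids, operators and fields (FieldD/FieldF, sp_grid, Linop_f, Linop_d, guess, src_d, sol_d) are assumptions made for illustration, while InnerTolerance, useGuesser and the iteration counters are the members added in this diff:

  MixedPrecisionConjugateGradient<FieldD,FieldF> mCG(1.0e-10, 10000, 50, sp_grid, Linop_f, Linop_d);
  mCG.InnerTolerance = 1.0e-5;   // looser tolerance for the single-precision inner solves
  mCG.useGuesser(guess);         // optional: seed each inner solve from a guess
  mCG(src_d, sol_d);
  std::cout << GridLogMessage << "restarts " << mCG.TotalOuterIterations
            << ", inner iterations " << mCG.TotalInnerIterations << std::endl;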
@@ -1,7 +1,7 @@
#include <Grid/Grid.h>
#include <Grid/GridCore.h>

namespace Grid {

@@ -13,9 +13,10 @@ void *PointerCache::Insert(void *ptr,size_t bytes) {

  if (bytes < 4096 ) return NULL;

#ifdef _OPENMP
#ifdef GRID_OMP
  assert(omp_in_parallel()==0);
#endif

  void * ret = NULL;
  int v = -1;

@@ -52,7 +52,7 @@ public:

    // Physics Grid information.
    std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
    std::vector<int> _fdimensions;// Global dimensions of array prior to cb removal
    std::vector<int> _fdimensions;// (full) Global dimensions of array prior to cb removal
    std::vector<int> _gdimensions;// Global dimensions of array after cb removal
    std::vector<int> _ldimensions;// local dimensions of array with processor images removed
    std::vector<int> _rdimensions;// Reduced local dimensions with simd lane images and processor images removed
@@ -77,7 +77,7 @@ public:
    // GridCartesian / GridRedBlackCartesian
    ////////////////////////////////////////////////////////////////
    virtual int CheckerBoarded(int dim)=0;
    virtual int CheckerBoard(std::vector<int> &site)=0;
    virtual int CheckerBoard(const std::vector<int> &site)=0;
    virtual int CheckerBoardDestination(int source_cb,int shift,int dim)=0;
    virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite)=0;
    virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift,int cb)=0;
@@ -121,7 +121,6 @@ public:
      Lexicographic::CoorFromIndex(coor,Oindex,_rdimensions);
    }

    //////////////////////////////////////////////////////////
    // SIMD lane addressing
    //////////////////////////////////////////////////////////
@@ -207,16 +206,16 @@ public:
      std::vector<int> lcoor;
      GlobalCoorToProcessorCoorLocalCoor(pcoor,lcoor,gcoor);
      rank = RankFromProcessorCoor(pcoor);

      /*
      std::vector<int> cblcoor(lcoor);
      for(int d=0;d<cblcoor.size();d++){
	if( this->CheckerBoarded(d) ) {
	  cblcoor[d] = lcoor[d]/2;
	}
      }

      i_idx= iIndex(cblcoor);// this does not imply divide by 2 on checker dim
      o_idx= oIndex(lcoor);  // this implies divide by 2 on checkerdim
      */
      i_idx= iIndex(lcoor);
      o_idx= oIndex(lcoor);
    }

    void RankIndexToGlobalCoor(int rank, int o_idx, int i_idx , std::vector<int> &gcoor)

@@ -49,7 +49,7 @@ public:
    virtual int CheckerBoarded(int dim){
      return 0;
    }
    virtual int CheckerBoard(std::vector<int> &site){
    virtual int CheckerBoard(const std::vector<int> &site){
        return 0;
    }
    virtual int CheckerBoardDestination(int cb,int shift,int dim){

@@ -49,7 +49,7 @@ public:
      if( dim==_checker_dim) return 1;
      else return 0;
    }
    virtual int CheckerBoard(std::vector<int> &site){
    virtual int CheckerBoard(const std::vector<int> &site){
      int linear=0;
      assert(site.size()==_ndimension);
      for(int d=0;d<_ndimension;d++){

@@ -25,7 +25,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/GridCore.h>

namespace Grid {

///////////////////////////////////////////////////////////////
@@ -33,6 +34,7 @@ namespace Grid {
///////////////////////////////////////////////////////////////
void *              CartesianCommunicator::ShmCommBuf;
uint64_t            CartesianCommunicator::MAX_MPI_SHM_BYTES   = 128*1024*1024;
CartesianCommunicator::CommunicatorPolicy_t  CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;

/////////////////////////////////
// Alloc, free shmem region
@@ -88,7 +90,9 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)

#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L)

void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
int                      CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						       void *xmit,
						       int xmit_to_rank,
						       void *recv,
@@ -96,6 +100,7 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
						       int bytes)
{
  SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
  return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
{

@@ -116,6 +116,12 @@ class CartesianCommunicator {
  // Implemented in Communicator_base.C
  /////////////////////////////////
  static void * ShmCommBuf;

  // Isend/Irecv/Wait, or Sendrecv blocking
  enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
  static CommunicatorPolicy_t CommunicatorPolicy;
  static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }

  size_t heap_top;
  size_t heap_bytes;

@@ -148,6 +154,7 @@ class CartesianCommunicator {
  const std::vector<int> & ThisProcessorCoor(void) ;
  const std::vector<int> & ProcessorGrid(void)     ;
  int                      ProcessorCount(void)    ;
  int                      NodeCount(void)    ;

  ////////////////////////////////////////////////////////////////////////////////
  // very VERY rarely (Log, serial RNG) we need world without a grid
@@ -200,7 +207,7 @@ class CartesianCommunicator {

  void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);

  void StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
  double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
				  void *xmit,
				  int xmit_to_rank,
				  void *recv,

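The policy enum above gives callers a single switch between the concurrent Isend/Irecv path and the blocking Sendrecv path. A one-line sketch of flipping it (the call is static, so it affects all communicators created afterwards):

  CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySequential);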
@@ -25,7 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/ActionCore.h>
#include <mpi.h>

namespace Grid {
@@ -39,9 +41,13 @@ MPI_Comm CartesianCommunicator::communicator_world;
// Should error check all MPI calls.
void CartesianCommunicator::Init(int *argc, char ***argv) {
  int flag;
  int provided;
  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init(argc,argv);
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
    if ( provided != MPI_THREAD_MULTIPLE ) {
      QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute;
    }
  }
  MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
  ShmInitGeneric();
@@ -152,24 +158,34 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
						int from,
						int bytes)
{
  MPI_Request xrq;
  MPI_Request rrq;
  int rank = _processor;
  int myrank = _processor;
  int ierr;
  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);

  assert(ierr==0);
  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
    MPI_Request xrq;
    MPI_Request rrq;

  list.push_back(xrq);
  list.push_back(rrq);
    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);

    assert(ierr==0);
    list.push_back(xrq);
    list.push_back(rrq);
  } else {
    // Give the CPU to MPI immediately; can use threads to overlap optionally
    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
		      recv,bytes,MPI_CHAR,from, from,
		      communicator,MPI_STATUS_IGNORE);
    assert(ierr==0);
  }
}
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  int nreq=list.size();
  std::vector<MPI_Status> status(nreq);
  int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
  assert(ierr==0);
  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
    int nreq=list.size();
    std::vector<MPI_Status> status(nreq);
    int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
    assert(ierr==0);
  }
}

void CartesianCommunicator::Barrier(void)

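The initialisation change above asks MPI for MPI_THREAD_MULTIPLE and downgrades the Wilson kernel comms strategy if the library cannot provide it. A generic, Grid-independent sketch of that pattern (the fallback action here is just a printout):

// mpi_init_sketch.cpp -- plain MPI, compile with mpicxx
#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv) {
  int flag = 0, provided = 0;
  MPI_Initialized(&flag);              // coexist with libraries that may already have initialised MPI
  if (!flag) {
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE) {
      std::printf("no MPI_THREAD_MULTIPLE; fall back to non-overlapped comms\n");
    }
  }
  // ... application work ...
  MPI_Finalize();
  return 0;
}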
@@ -1,4 +1,4 @@
    /*************************************************************************************
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

@@ -25,9 +25,23 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/GridCore.h>

#include <mpi.h>

#include <semaphore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
//#include <zlib.h>
#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#endif

namespace Grid {

///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -50,6 +64,10 @@ std::vector<int> CartesianCommunicator::GroupRanks;
std::vector<int> CartesianCommunicator::MyGroup;
std::vector<void *> CartesianCommunicator::ShmCommBufs;

int CartesianCommunicator::NodeCount(void)    { return GroupSize;};

#undef FORCE_COMMS
void *CartesianCommunicator::ShmBufferSelf(void)
{
  return ShmCommBufs[ShmRank];
@@ -57,6 +75,9 @@ void *CartesianCommunicator::ShmBufferSelf(void)
void *CartesianCommunicator::ShmBuffer(int rank)
{
  int gpeer = GroupRanks[rank];
#ifdef FORCE_COMMS
  return NULL;
#endif
  if (gpeer == MPI_UNDEFINED){
    return NULL;
  } else {
@@ -65,7 +86,13 @@ void *CartesianCommunicator::ShmBuffer(int rank)
}
void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
{
  static int count =0;
  int gpeer = GroupRanks[rank];
  assert(gpeer!=ShmRank); // never send to self
  assert(rank!=WorldRank);// never send to self
#ifdef FORCE_COMMS
  return NULL;
#endif
  if (gpeer == MPI_UNDEFINED){
    return NULL;
  } else {
@@ -76,16 +103,27 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
}

void CartesianCommunicator::Init(int *argc, char ***argv) {

  int flag;
  int provided;
  //  mtrace();

  MPI_Initialized(&flag); // needed to coexist with other libs apparently
  if ( !flag ) {
    MPI_Init(argc,argv);
    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
    assert (provided == MPI_THREAD_MULTIPLE);
  }

  Grid_quiesce_nodes();

  MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
  MPI_Comm_rank(communicator_world,&WorldRank);
  MPI_Comm_size(communicator_world,&WorldSize);

  if ( WorldRank == 0 ) {
    std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"<<WorldSize <<std::endl;
  }

  /////////////////////////////////////////////////////////////////////
  // Split into groups that can share memory
  /////////////////////////////////////////////////////////////////////
@@ -131,7 +169,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
  ///////////////////////////////////////////////////////////////////
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world);
  assert(ierr==0);

  ///////////////////////////////////////////////////////////////////
  // find the group leaders world rank
  ///////////////////////////////////////////////////////////////////
@@ -141,7 +178,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
      leaders_group[group++] = l;
    }
  }

  ///////////////////////////////////////////////////////////////////
  // Identify the rank of the group in which I (and my leader) live
  ///////////////////////////////////////////////////////////////////
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  assert(GroupRank!=-1);
 | 
			
		||||
  
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // allocate the shared window for our group
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
 | 
			
		||||
  ShmCommBuf = 0;
 | 
			
		||||
  ierr = MPI_Win_allocate_shared(MAX_MPI_SHM_BYTES,1,MPI_INFO_NULL,ShmComm,&ShmCommBuf,&ShmWindow);
 | 
			
		||||
  assert(ierr==0);
 | 
			
		||||
  // KNL hack -- force to numa-domain 1 in flat
 | 
			
		||||
#if 0
 | 
			
		||||
  //#include <numaif.h>
 | 
			
		||||
  for(uint64_t page=0;page<MAX_MPI_SHM_BYTES;page+=4096){
 | 
			
		||||
    void *pages = (void *) ( page + ShmCommBuf );
 | 
			
		||||
    int status;
 | 
			
		||||
    int flags=MPOL_MF_MOVE_ALL;
 | 
			
		||||
    int nodes=1; // numa domain == MCDRAM
 | 
			
		||||
    unsigned long count=1;
 | 
			
		||||
    ierr= move_pages(0,count, &pages,&nodes,&status,flags);
 | 
			
		||||
    if (ierr && (page==0)) perror("numa relocate command failed");
 | 
			
		||||
  }
 | 
			
		||||
#endif
 | 
			
		||||
  MPI_Win_lock_all (MPI_MODE_NOCHECK, ShmWindow);
 | 
			
		||||
  
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Plan: allocate a fixed SHM region. Scratch that is just used via some scheme during stencil comms, with no allocate free.
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  ShmCommBufs.resize(ShmSize);
 | 
			
		||||
  for(int r=0;r<ShmSize;r++){
 | 
			
		||||
    MPI_Aint sz;
 | 
			
		||||
    int dsp_unit;
 | 
			
		||||
    MPI_Win_shared_query (ShmWindow, r, &sz, &dsp_unit, &ShmCommBufs[r]);
 | 
			
		||||
 | 
			
		||||
#if 1
 | 
			
		||||
  char shm_name [NAME_MAX];
 | 
			
		||||
  if ( ShmRank == 0 ) {
 | 
			
		||||
    for(int r=0;r<ShmSize;r++){
 | 
			
		||||
 | 
			
		||||
      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
 | 
			
		||||
 | 
			
		||||
      sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
 | 
			
		||||
 | 
			
		||||
      shm_unlink(shm_name);
 | 
			
		||||
      int fd=shm_open(shm_name,O_RDWR|O_CREAT,0660);
 | 
			
		||||
      if ( fd < 0 ) {	perror("failed shm_open");	assert(0);      }
 | 
			
		||||
      ftruncate(fd, size);
 | 
			
		||||
 | 
			
		||||
      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 | 
			
		||||
      if ( ptr == MAP_FAILED ) {       perror("failed mmap");      assert(0);    }
 | 
			
		||||
      assert(((uint64_t)ptr&0x3F)==0);
 | 
			
		||||
      ShmCommBufs[r] =ptr;
 | 
			
		||||
      
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
 | 
			
		||||
  if ( ShmRank != 0 ) { 
 | 
			
		||||
    for(int r=0;r<ShmSize;r++){
 | 
			
		||||
      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES ;
 | 
			
		||||
    
 | 
			
		||||
      sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
 | 
			
		||||
 | 
			
		||||
      int fd=shm_open(shm_name,O_RDWR,0660);
 | 
			
		||||
      if ( fd<0 ) {	perror("failed shm_open");	assert(0);      }
 | 
			
		||||
 | 
			
		||||
      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 | 
			
		||||
      if ( ptr == MAP_FAILED ) {       perror("failed mmap");      assert(0);    }
 | 
			
		||||
      assert(((uint64_t)ptr&0x3F)==0);
 | 
			
		||||
      ShmCommBufs[r] =ptr;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
#else
 | 
			
		||||
  std::vector<int> shmids(ShmSize);
 | 
			
		||||
 | 
			
		||||
  if ( ShmRank == 0 ) {
 | 
			
		||||
    for(int r=0;r<ShmSize;r++){
 | 
			
		||||
      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
 | 
			
		||||
      key_t key   = 0x4545 + r;
 | 
			
		||||
      if ((shmids[r]= shmget(key,size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
 | 
			
		||||
	int errsv = errno;
 | 
			
		||||
	printf("Errno %d\n",errsv);
 | 
			
		||||
	perror("shmget");
 | 
			
		||||
	exit(1);
 | 
			
		||||
      }
 | 
			
		||||
      printf("shmid: 0x%x\n", shmids[r]);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
  MPI_Bcast(&shmids[0],ShmSize*sizeof(int),MPI_BYTE,0,ShmComm);
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
 | 
			
		||||
  for(int r=0;r<ShmSize;r++){
 | 
			
		||||
    ShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
 | 
			
		||||
    if (ShmCommBufs[r] == (uint64_t *)-1) {
 | 
			
		||||
      perror("Shared memory attach failure");
 | 
			
		||||
      shmctl(shmids[r], IPC_RMID, NULL);
 | 
			
		||||
      exit(2);
 | 
			
		||||
    }
 | 
			
		||||
    printf("shmaddr: %p\n", ShmCommBufs[r]);
 | 
			
		||||
  }
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
  // Mark for clean up
 | 
			
		||||
  for(int r=0;r<ShmSize;r++){
 | 
			
		||||
    shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
 | 
			
		||||
  }
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
  ShmCommBuf         = ShmCommBufs[ShmRank];
 | 
			
		||||
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
  if ( ShmRank == 0 ) {
 | 
			
		||||
    for(int r=0;r<ShmSize;r++){
 | 
			
		||||
      uint64_t * check = (uint64_t *) ShmCommBufs[r];
 | 
			
		||||
      check[0] = GroupRank;
 | 
			
		||||
      check[1] = r;
 | 
			
		||||
      check[2] = 0x5A5A5A;
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
  for(int r=0;r<ShmSize;r++){
 | 
			
		||||
    uint64_t * check = (uint64_t *) ShmCommBufs[r];
 | 
			
		||||
    
 | 
			
		||||
    assert(check[0]==GroupRank);
 | 
			
		||||
    assert(check[1]==r);
 | 
			
		||||
    assert(check[2]==0x5A5A5A);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
  MPI_Barrier(ShmComm);
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Verbose for now
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
@@ -192,7 +303,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
    std::cout<<GridLogMessage<< "Grid MPI-3 configuration: detected ";
    std::cout<< WorldSize << " Ranks " ;
    std::cout<< GroupSize << " Nodes " ;
    std::cout<<  ShmSize  << " with ranks-per-node "<<std::endl;
    std::cout<< " with "<< ShmSize  << " ranks-per-node "<<std::endl;

    std::cout<<GridLogMessage     <<"Grid MPI-3 configuration: allocated shared memory region of size ";
    std::cout<<std::hex << MAX_MPI_SHM_BYTES <<" ShmCommBuf address = "<<ShmCommBuf << std::dec<<std::endl;
@@ -207,7 +318,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
      if(g!=ShmSize-1) std::cout<<",";
      else std::cout<<"}"<<std::endl;
    }

  }

  for(int g=0;g<GroupSize;g++){
@@ -216,23 +326,21 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
      if ( (ShmRank == 0) && (GroupRank==g) ) {
	std::cout<<MyGroup[r];
	if(r<ShmSize-1) std::cout<<",";
	else std::cout<<"}"<<std::endl;
	else std::cout<<"}"<<std::endl<<std::flush;
      }
      MPI_Barrier(communicator_world);
    }
  }

  assert(ShmSetup==0);  ShmSetup=1;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Want to implement some magic ... Group sub-cubes into those on same node
////////////////////////////////////////////////////////////////////////////////////////////////////////////
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &dest,int &source)
{
  std::vector<int> coor = _processor_coor;

  std::vector<int> coor = _processor_coor; // my coord
  assert(std::abs(shift) <_processors[dim]);

  coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim];
@@ -242,28 +350,32 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
  coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim];
  Lexicographic::IndexFromCoor(coor,dest,_processors);
  dest = LexicographicToWorldRank[dest];
}

}// rank is world rank.

int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
{
  int rank;
  Lexicographic::IndexFromCoor(coor,rank,_processors);
  rank = LexicographicToWorldRank[rank];
  return rank;
}
}// rank is world rank

void  CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
{
  Lexicographic::CoorFromIndex(coor,rank,_processors);
  rank = LexicographicToWorldRank[rank];
  int lr=-1;
  for(int r=0;r<WorldSize;r++){// map world Rank to lexico and then to coor
    if( LexicographicToWorldRank[r]==rank) lr = r;
  }
  assert(lr!=-1);
  Lexicographic::CoorFromIndex(coor,lr,_processors);
}

CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
  int ierr;
  communicator=communicator_world;

  _ndimension = processors.size();

  ////////////////////////////////////////////////////////////////
  // Assert power of two shm_size.
  ////////////////////////////////////////////////////////////////
@@ -275,24 +387,22 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
    }
  }
  assert(log2size != -1);

  ////////////////////////////////////////////////////////////////
  // Identify subblock of ranks on node spreading across dims
  // in a maximally symmetrical way
  ////////////////////////////////////////////////////////////////
  int dim = 0;

  std::vector<int> WorldDims = processors;

  ShmDims.resize(_ndimension,1);
  ShmDims.resize  (_ndimension,1);
  GroupDims.resize(_ndimension);

  ShmCoor.resize(_ndimension);
  ShmCoor.resize  (_ndimension);
  GroupCoor.resize(_ndimension);
  WorldCoor.resize(_ndimension);

  int dim = 0;
  for(int l2=0;l2<log2size;l2++){
    while ( WorldDims[dim] / ShmDims[dim] <= 1 ) dim=(dim+1)%_ndimension;
    while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%_ndimension;
    ShmDims[dim]*=2;
    dim=(dim+1)%_ndimension;
  }
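The loop above spreads log2(ranks-per-node) factors of two over the processor dimensions as evenly as possible. A standalone sketch of the same splitting with assumed numbers (a 4.4.4.8 decomposition and 8 ranks per node):

// shm_split_sketch.cpp -- illustrative numbers only
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> WorldDims = {4, 4, 4, 8};   // assumed MPI decomposition
  std::vector<int> ShmDims(WorldDims.size(), 1);
  int ndim = (int)WorldDims.size();
  int log2size = 3;                            // 8 ranks per node
  int dim = 0;
  for (int l2 = 0; l2 < log2size; l2++) {
    // skip dimensions that cannot be split any further
    while ((WorldDims[dim] / ShmDims[dim]) <= 1) dim = (dim + 1) % ndim;
    ShmDims[dim] *= 2;                         // give this dimension another factor of two
    dim = (dim + 1) % ndim;
  }
  for (int d = 0; d < ndim; d++)
    std::printf("dim %d : world %d = group %d x shm %d\n",
                d, WorldDims[d], WorldDims[d] / ShmDims[d], ShmDims[d]);
  return 0;
}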
@@ -304,6 +414,29 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
    GroupDims[d] = WorldDims[d]/ShmDims[d];
  }

  ////////////////////////////////////////////////////////////////
  // Verbose
  ////////////////////////////////////////////////////////////////
#if 0
  std::cout<< GridLogMessage << "MPI-3 usage "<<std::endl;
  std::cout<< GridLogMessage << "SHM   ";
  for(int d=0;d<_ndimension;d++){
    std::cout<< ShmDims[d] <<" ";
  }
  std::cout<< std::endl;

  std::cout<< GridLogMessage << "Group ";
  for(int d=0;d<_ndimension;d++){
    std::cout<< GroupDims[d] <<" ";
  }
  std::cout<< std::endl;

  std::cout<< GridLogMessage<<"World ";
  for(int d=0;d<_ndimension;d++){
    std::cout<< WorldDims[d] <<" ";
  }
  std::cout<< std::endl;
#endif
  ////////////////////////////////////////////////////////////////
  // Check processor counts match
  ////////////////////////////////////////////////////////////////
@@ -317,29 +450,57 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)

  ////////////////////////////////////////////////////////////////
  // Establish mapping between lexico physics coord and WorldRank
  //
  ////////////////////////////////////////////////////////////////
  LexicographicToWorldRank.resize(WorldSize,0);
  Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims);
  Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims);
  for(int d=0;d<_ndimension;d++){
    WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d];
  }
  _processor_coor = WorldCoor;

  int lexico;
  Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
  LexicographicToWorldRank[lexico]=WorldRank;
  _processor = lexico;
  _processor      = WorldRank;

  ///////////////////////////////////////////////////////////////////
  // global sum Lexico to World mapping
  ///////////////////////////////////////////////////////////////////
  int lexico;
  LexicographicToWorldRank.resize(WorldSize,0);
  Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
  LexicographicToWorldRank[lexico] = WorldRank;
  ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator);
  assert(ierr==0);

};

  for(int i=0;i<WorldSize;i++){

    int wr = LexicographicToWorldRank[i];
    //    int wr = i;

    std::vector<int> coor(_ndimension);
    ProcessorCoorFromRank(wr,coor); // from world rank
    int ck = RankFromProcessorCoor(coor);
    assert(ck==wr);

    if ( wr == WorldRank ) {
      for(int j=0;j<coor.size();j++) {
	assert(coor[j] == _processor_coor[j]);
      }
    }
    /*
    std::cout << GridLogMessage<< " Lexicographic "<<i;
    std::cout << " MPI rank      "<<wr;
    std::cout << " Coor          ";
    for(int j=0;j<coor.size();j++) std::cout << coor[j];
    std::cout<< std::endl;
    */
    /////////////////////////////////////////////////////
    // Check everyone agrees on everyone elses coords
    /////////////////////////////////////////////////////
    std::vector<int> mcoor = coor;
    this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int));
    for(int d = 0 ; d< _ndimension; d++) {
      assert(coor[d] == mcoor[d]);
    }
  }
};
void CartesianCommunicator::GlobalSum(uint32_t &u){
  int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
  assert(ierr==0);
@@ -367,8 +528,6 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
  int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
  assert(ierr==0);
}

// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
					   int dest,
@@ -377,10 +536,14 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
					   int bytes)
{
  std::vector<CommsRequest_t> reqs(0);
  //    unsigned long  xcrc = crc32(0L, Z_NULL, 0);
  //    unsigned long  rcrc = crc32(0L, Z_NULL, 0);
  //    xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
  SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
  SendToRecvFromComplete(reqs);
  //    rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
  //    printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
}

void CartesianCommunicator::SendRecvPacket(void *xmit,
					   void *recv,
					   int sender,
@@ -397,7 +560,6 @@ void CartesianCommunicator::SendRecvPacket(void *xmit,
    MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
  }
}

// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						void *xmit,
@@ -406,95 +568,29 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
						int from,
						int bytes)
{
#if 0
  this->StencilBarrier();

  MPI_Request xrq;
  MPI_Request rrq;

  static int sequence;

  int myrank = _processor;
  int ierr;
  int tag;
  int check;

  assert(dest != _processor);
  assert(from != _processor);

  int gdest = GroupRanks[dest];
  int gfrom = GroupRanks[from];
  int gme   = GroupRanks[_processor];
  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
    MPI_Request xrq;
    MPI_Request rrq;

  sequence++;

  char *from_ptr = (char *)ShmCommBufs[ShmRank];

  int small = (bytes<MAX_MPI_SHM_BYTES);

  typedef uint64_t T;
  int words = bytes/sizeof(T);

  assert(((size_t)bytes &(sizeof(T)-1))==0);
  assert(gme == ShmRank);

  if ( small && (gdest !=MPI_UNDEFINED) ) {

    char *to_ptr   = (char *)ShmCommBufs[gdest];

    assert(gme != gdest);

    T *ip = (T *)xmit;
    T *op = (T *)to_ptr;
PARALLEL_FOR_LOOP
    for(int w=0;w<words;w++) {
      op[w]=ip[w];
    }

    bcopy(&_processor,&to_ptr[bytes],sizeof(_processor));
    bcopy(&  sequence,&to_ptr[bytes+4],sizeof(sequence));
  } else {
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);

    assert(ierr==0);
    list.push_back(xrq);
  }

  this->StencilBarrier();

  if (small && (gfrom !=MPI_UNDEFINED) ) {
    T *ip = (T *)from_ptr;
    T *op = (T *)recv;
PARALLEL_FOR_LOOP
    for(int w=0;w<words;w++) {
      op[w]=ip[w];
    }
    bcopy(&from_ptr[bytes]  ,&tag  ,sizeof(tag));
    bcopy(&from_ptr[bytes+4],&check,sizeof(check));
    assert(check==sequence);
    assert(tag==from);
  } else {
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    assert(ierr==0);
    list.push_back(rrq);
  } else {
    // Give the CPU to MPI immediately; can use threads to overlap optionally
    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
		      recv,bytes,MPI_CHAR,from, from,
		      communicator,MPI_STATUS_IGNORE);
    assert(ierr==0);
  }

  this->StencilBarrier();

#else
  MPI_Request xrq;
  MPI_Request rrq;
  int rank = _processor;
  int ierr;
  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);

  assert(ierr==0);

  list.push_back(xrq);
  list.push_back(rrq);
#endif
}

void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
						       void *xmit,
						       int dest,
						       void *recv,
@@ -505,57 +601,63 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
  MPI_Request rrq;

  int ierr;

  assert(dest != _processor);
  assert(from != _processor);

  int gdest = GroupRanks[dest];
  int gfrom = GroupRanks[from];
  int gme   = GroupRanks[_processor];

  assert(gme == ShmRank);
  assert(dest != _processor);
  assert(from != _processor);
  assert(gme  == ShmRank);
  double off_node_bytes=0.0;

#ifdef FORCE_COMMS
  gdest = MPI_UNDEFINED;
  gfrom = MPI_UNDEFINED;
#endif
  if ( gfrom ==MPI_UNDEFINED) {
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    assert(ierr==0);
    list.push_back(rrq);
    off_node_bytes+=bytes;
  }

  if ( gdest == MPI_UNDEFINED ) {
    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
    assert(ierr==0);
    list.push_back(xrq);
  }

  if ( gfrom ==MPI_UNDEFINED) {
    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
    assert(ierr==0);
    list.push_back(rrq);
    off_node_bytes+=bytes;
  }

  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
    this->StencilSendToRecvFromComplete(list);
  }

  return off_node_bytes;
}

void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
{
  SendToRecvFromComplete(list);
  SendToRecvFromComplete(waitall);
}

void CartesianCommunicator::StencilBarrier(void)
{
  MPI_Win_sync (ShmWindow);
  MPI_Barrier  (ShmComm);
  MPI_Win_sync (ShmWindow);
}

void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  int nreq=list.size();

  if (nreq==0) return;

  std::vector<MPI_Status> status(nreq);
  int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
  assert(ierr==0);
  list.resize(0);
}

void CartesianCommunicator::Barrier(void)
{
  int ierr = MPI_Barrier(communicator);
  assert(ierr==0);
}

void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
{
  int ierr=MPI_Bcast(data,
@@ -565,7 +667,11 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
		     communicator);
  assert(ierr==0);
}

int CartesianCommunicator::RankWorld(void){
  int r;
  MPI_Comm_rank(communicator_world,&r);
  return r;
}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
{
  int ierr= MPI_Bcast(data,

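StencilSendToRecvFromBegin now returns the bytes that actually left the node, which a benchmark loop can accumulate into a bandwidth figure. A hedged fragment (the communicator object, request list, buffers, ranks, loop count and timer are all assumed to exist in the caller):

  double off_node_bytes = 0.0;
  for (int n = 0; n < nloop; n++) {
    off_node_bytes += comm.StencilSendToRecvFromBegin(requests, xmit, to, recv, from, bytes);
    comm.StencilSendToRecvFromComplete(requests);
  }
  double seconds = elapsed_usec * 1.0e-6;
  std::cout << GridLogMessage << (off_node_bytes / seconds) / (1024.0 * 1024.0)
            << " MB/s off-node" << std::endl;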
@@ -27,6 +27,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include "Grid.h"
 | 
			
		||||
#include <mpi.h>
 | 
			
		||||
//#include <numaif.h>
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
/// Workarounds:
 | 
			
		||||
@@ -42,19 +43,27 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#include <fcntl.h>
 | 
			
		||||
#include <unistd.h>
 | 
			
		||||
#include <limits.h>
 | 
			
		||||
 | 
			
		||||
typedef sem_t *Grid_semaphore;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#error  /*THis is deprecated*/
 | 
			
		||||
 | 
			
		||||
#if 0 
 | 
			
		||||
#define SEM_INIT(S)      S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED );
 | 
			
		||||
#define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED );
 | 
			
		||||
#define SEM_POST(S) assert ( sem_post(S) == 0 ); 
 | 
			
		||||
#define SEM_WAIT(S) assert ( sem_wait(S) == 0 );
 | 
			
		||||
 | 
			
		||||
#else
 | 
			
		||||
#define SEM_INIT(S)      ;
 | 
			
		||||
#define SEM_INIT_EXCL(S) ;
 | 
			
		||||
#define SEM_POST(S) ;
 | 
			
		||||
#define SEM_WAIT(S) ;
 | 
			
		||||
#endif
 | 
			
		||||
#include <sys/mman.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL };
enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV };

struct Descriptor {
  uint64_t buf;
@@ -62,6 +71,12 @@ struct Descriptor {
  int rank;
  int tag;
  int command;
  uint64_t xbuf;
  uint64_t rbuf;
  int xtag;
  int rtag;
  int src;
  int dest;
  MPI_Request request;
};

@@ -94,18 +109,14 @@ public:

  void SemInit(void) {
    sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
    //    printf("SEM_NAME: %s \n",sem_name);
    SEM_INIT(sem_head);
    sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
    //    printf("SEM_NAME: %s \n",sem_name);
    SEM_INIT(sem_tail);
  }  
  void SemInitExcl(void) {
    sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
    //    printf("SEM_INIT_EXCL: %s \n",sem_name);
    SEM_INIT_EXCL(sem_head);
    sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
    //    printf("SEM_INIT_EXCL: %s \n",sem_name);
    SEM_INIT_EXCL(sem_tail);
  }  
  void WakeUpDMA(void) { 
@@ -125,6 +136,13 @@ public:
    while(1){
      WaitForCommand();
      //      std::cout << "Getting command "<<std::endl;
#if 0
      _mm_monitor((void *)&state->head,0,0);
      int s=state->start;
      if ( s != state->head ) {
	_mm_mwait(0,0);
      }
#endif
      Event();
    }
  }
@@ -132,6 +150,7 @@ public:
  int Event (void) ;

  uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ;
  void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ;

  void WaitAll() {
    //    std::cout << "Queueing WAIT command  "<<std::endl;
@@ -141,7 +160,7 @@ public:
    //    std::cout << "Waiting from semaphore "<<std::endl;
    WaitForComplete();
    //    std::cout << "Checking FIFO is empty "<<std::endl;
    assert ( state->tail == state->head );
    while ( state->tail != state->head );
  }
};

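The WaitAll spin above (`while ( state->tail != state->head );`) is the producer-side view of a single-producer/single-consumer ring buffer. A minimal self-contained sketch of that protocol, with hypothetical names (a reading of the code, not Grid's actual layout):

#include <cstdint>

struct Fifo {
  static const int NSLOT = 8;        // queue depth (assumed)
  volatile int head;                 // next slot the producer will fill
  volatile int tail;                 // oldest slot the consumer has not retired
  uint64_t     slot[NSLOT];          // payload (e.g. a descriptor index)
};
inline int peri_plus(int i) { return (i + 1) % Fifo::NSLOT; }

void produce(Fifo &f, uint64_t v) {
  int next = peri_plus(f.head);
  while (f.tail == next) ;           // FIFO full: wait for the consumer
  f.slot[f.head] = v;
  f.head = next;                     // publish; needs a fence on weakly ordered CPUs
}
bool consume(Fifo &f, uint64_t &v) {
  if (f.tail == f.head) return false;  // empty
  v = f.slot[f.tail];
  f.tail = peri_plus(f.tail);          // retire the slot
  return true;
}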
@@ -196,6 +215,12 @@ public:
    //    std::cout << "Waking up DMA "<< slave<<std::endl;
  };

  static void QueueSendRecv(int slave,void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) 
  {
    Slaves[slave].QueueSendRecv(xbuf,rbuf,bytes,xtag,rtag,comm,dest,src);
    Slaves[slave].WakeUpDMA();
  }

  static void QueueRecv(int slave, void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
    //    std::cout<< " Queueing recv "<< bytes<< " slave "<< slave << " from comm "<<rank  <<std::endl;
    Slaves[slave].QueueCommand(COMMAND_IRECV,buf,bytes,tag,comm,rank);
@@ -226,6 +251,28 @@ public:
    return;
  };

  static void QueueRoundRobinSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
    uint8_t * cxbuf = (uint8_t *) xbuf;
    uint8_t * crbuf = (uint8_t *) rbuf;
    static int rrp=0;
    int procs = VerticalSize-1;
    int myoff=0;
    int mywork=bytes;
    QueueSendRecv(rrp+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
    rrp = rrp+1;
    if ( rrp == (VerticalSize-1) ) rrp = 0;
  }

  static void QueueMultiplexedSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
    uint8_t * cxbuf = (uint8_t *) xbuf;
    uint8_t * crbuf = (uint8_t *) rbuf;
    int mywork, myoff, procs;
    procs = VerticalSize-1;
    for(int s=0;s<procs;s++) {
      GetWork(bytes,s,mywork,myoff,procs);
      QueueSendRecv(s+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
    }
  };
  static void QueueMultiplexedSend(void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
    uint8_t * cbuf = (uint8_t *) buf;
    int mywork, myoff, procs;
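GetWork itself is outside this hunk; a plausible even split of `bytes` over the `procs` slave ranks (an assumption about its contract, not the repository's definition) would be:

// Hypothetical: worker 'me' of 'procs' gets mywork bytes starting at myoff,
// with any remainder spread over the low-numbered workers.
void GetWork(int nwork, int me, int &mywork, int &myoff, int procs) {
  int chunk = nwork / procs;
  int rem   = nwork % procs;
  mywork = chunk + (me < rem ? 1 : 0);
  myoff  = chunk * me + (me < rem ? me : rem);
}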
@@ -275,6 +322,7 @@ std::vector<void *>            MPIoffloadEngine::VerticalShmBufs;
std::vector<std::vector<int> > MPIoffloadEngine::UniverseRanks;
std::vector<int>               MPIoffloadEngine::UserCommunicatorToWorldRanks; 

int CartesianCommunicator::NodeCount(void)    { return HorizontalSize;};
int MPIoffloadEngine::ShmSetup = 0;

void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
@@ -370,12 +418,22 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
      ftruncate(fd, size);

      VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

      if ( VerticalShmBufs[r] == MAP_FAILED ) { 
	perror("failed mmap");
	assert(0);
      }

      /*
      for(uint64_t page=0;page<size;page+=4096){
	void *pages = (void *) ( page + (uint64_t)VerticalShmBufs[r] );
	int status;
	int flags=MPOL_MF_MOVE_ALL;
	int nodes=1; // numa domain == MCDRAM
	unsigned long count=1;
	ierr= move_pages(0,count, &pages,&nodes,&status,flags);
	if (ierr && (page==0)) perror("numa relocate command failed");
      }
      */
      uint64_t * check = (uint64_t *) VerticalShmBufs[r];
      check[0] = WorldRank;
      check[1] = r;
@@ -404,7 +462,7 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
    uint64_t * check = (uint64_t *) VerticalShmBufs[r];
    assert(check[0]== WorldRank);
    assert(check[1]== r);
    std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
    //    std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
  }
  }
#endif
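A self-contained sketch of the shared-buffer mapping pattern used above; the fd creation is not shown in the hunk, so the shm_open call and the name are assumptions, and the cross-rank check words mirror what the commit writes into check[0]/check[1]:

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <cassert>

void *make_shm(const char *name, size_t size, bool owner) {
  int fd = owner ? shm_open(name, O_RDWR | O_CREAT, 0600)
                 : shm_open(name, O_RDWR, 0600);
  assert(fd >= 0);
  if (owner) assert(ftruncate(fd, size) == 0);       // size the segment once
  void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  assert(p != MAP_FAILED);
  return p;     // every rank mapping the same name sees the same buffer
}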
@@ -542,6 +600,8 @@ int Slave::Event (void) {
  static int head_last;
  static int start_last;
  int ierr;
  MPI_Status stat;
  static int i=0;

  ////////////////////////////////////////////////////
  // Try to advance the start pointers
@@ -550,11 +610,6 @@ int Slave::Event (void) {
  if ( s != state->head ) {
    switch ( state->Descrs[s].command ) {
    case COMMAND_ISEND:
      /*
            std::cout<< " Send "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
      	       << " to " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
       << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <<universe_rank<< std::endl;
      */
      ierr = MPI_Isend((void *)(state->Descrs[s].buf+base), 
		       state->Descrs[s].bytes, 
		       MPI_CHAR,
@@ -568,11 +623,6 @@ int Slave::Event (void) {
      break;

    case COMMAND_IRECV:
      /*
      std::cout<< " Recv "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
	       << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
	       << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl;
      */
      ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base), 
		     state->Descrs[s].bytes, 
		     MPI_CHAR,
@@ -588,10 +638,32 @@ int Slave::Event (void) {
      return 1;
      break;

    case COMMAND_SENDRECV:

      //      fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10);

      ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10,
			(void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10,
			MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE);

      assert(ierr==0);

      //      fprintf(stderr,"Sendrecv done %d %d\n",ierr,i);
      //      MPI_Barrier(MPIoffloadEngine::HorizontalComm);
      //      fprintf(stderr,"Barrier\n");
      i++;

      state->start = PERI_PLUS(s);

      return 1;
      break;

    case COMMAND_WAITALL:

      for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){
	MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
	if ( state->Descrs[t].command != COMMAND_SENDRECV ) {
	  MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
	}
      };
      s=PERI_PLUS(s);
      state->start = s;
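For reference, the combined-exchange primitive the new COMMAND_SENDRECV case relies on, in its simplest standalone form (buffers, ranks and tags are placeholders):

// Send to 'dest' and receive from 'src' in one call; MPI pairs the two
// internally, so neither side can deadlock the other.
MPI_Sendrecv(sendbuf, bytes, MPI_CHAR, dest, xtag,
             recvbuf, bytes, MPI_CHAR, src,  rtag,
             comm, MPI_STATUS_IGNORE);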
@@ -613,6 +685,45 @@ int Slave::Event (void) {
  // External interaction with the queue
  //////////////////////////////////////////////////////////////////////////////
  
void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) 
{
  int head =state->head;
  int next = PERI_PLUS(head);
  
  // Set up descriptor
  int worldrank;
  int hashtag;
  MPI_Comm    communicator;
  MPI_Request request;
  uint64_t relative;
  
  relative = (uint64_t)xbuf - base;
  state->Descrs[head].xbuf    = relative;
  
  relative= (uint64_t)rbuf - base;
  state->Descrs[head].rbuf    = relative;
  
  state->Descrs[head].bytes  = bytes;
  
  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest);
  state->Descrs[head].dest   = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
  state->Descrs[head].xtag    = hashtag;
  
  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src);
  state->Descrs[head].src    = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
  state->Descrs[head].rtag    = hashtag;
  
  state->Descrs[head].command= COMMAND_SENDRECV;
  
  // Block until FIFO has space
  while( state->tail==next );
  
  // Msync on weak order architectures
  
  // Advance pointer
  state->head = next;
  
};
uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank) 
{
  /////////////////////////////////////////
@@ -812,19 +923,22 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
  assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) );
  assert(from!=_processor);
  assert(dest!=_processor);
  MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
  MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
}

  MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);

  //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);

  //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
  //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
}

void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  MPIoffloadEngine::WaitAll();
  //this->Barrier();
}

void CartesianCommunicator::StencilBarrier(void)
{
}
void CartesianCommunicator::StencilBarrier(void) { }

void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
 
@@ -25,7 +25,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/GridCore.h>

namespace Grid {

///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -87,6 +88,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
{
  assert(0);
}

void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
  assert(0);
@@ -97,7 +99,7 @@ void CartesianCommunicator::Barrier(void){}
void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
int  CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) {  return 0;}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){ coor = _processor_coor ;}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){  coor = _processor_coor; }
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
  source =0;
 
@@ -27,6 +27,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <mpp/shmem.h>
#include <array>

namespace Grid {

@@ -51,7 +52,7 @@ typedef struct HandShake_t {
} HandShake;

std::array<long,_SHMEM_REDUCE_SYNC_SIZE> make_psync_init(void) {
  array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
  std::array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
  ret.fill(SHMEM_SYNC_VALUE);
  return ret;
}
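The `.data()` changes in the hunks below follow from the SHMEM reduction signature, which takes raw symmetric pointers rather than std::array objects. An illustrative call, assuming an OpenSHMEM-style API and with the working array sizes taken from the same legacy macros the file already uses:

// Illustrative float sum over all PEs: pWrk and pSync must be symmetric
// (static or shmalloc'd) and pSync pre-set to SHMEM_SYNC_VALUE, which is
// exactly what make_psync_init() provides for the std::array copies.
static float src, dst;
static float pWrk[_SHMEM_REDUCE_MIN_WRKDATA_SIZE];
static long  pSync[_SHMEM_REDUCE_SYNC_SIZE];
for(int i=0;i<_SHMEM_REDUCE_SYNC_SIZE;i++) pSync[i] = SHMEM_SYNC_VALUE;
shmem_barrier_all();
shmem_float_sum_to_all(&dst,&src,1,0,0,shmem_n_pes(),pWrk,pSync);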
@@ -109,7 +110,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){

  source = u;
  dest   = 0;
  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
  shmem_barrier_all(); // necessary?
  u = dest;
}
@@ -125,7 +126,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){

  source = u;
  dest   = 0;
  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
  shmem_barrier_all(); // necessary?
  u = dest;
}
@@ -137,7 +138,8 @@ void CartesianCommunicator::GlobalSum(float &f){

  source = f;
  dest   =0.0;
  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
  shmem_barrier_all();
  f = dest;
}
void CartesianCommunicator::GlobalSumVector(float *f,int N)
 | 
			
		||||
@@ -148,14 +150,16 @@ void CartesianCommunicator::GlobalSumVector(float *f,int N)
 | 
			
		||||
  static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync =  psync_init;
 | 
			
		||||
 | 
			
		||||
  if ( shmem_addr_accessible(f,_processor)  ){
 | 
			
		||||
    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync);
 | 
			
		||||
    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data());
 | 
			
		||||
    shmem_barrier_all();
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    dest   =0.0;
 | 
			
		||||
    source = f[i];
 | 
			
		||||
    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
 | 
			
		||||
    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
 | 
			
		||||
    shmem_barrier_all();
 | 
			
		||||
    f[i] = dest;
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
@@ -168,7 +172,8 @@ void CartesianCommunicator::GlobalSum(double &d)
 | 
			
		||||
 | 
			
		||||
  source = d;
 | 
			
		||||
  dest   = 0;
 | 
			
		||||
  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
 | 
			
		||||
  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
 | 
			
		||||
  shmem_barrier_all();
 | 
			
		||||
  d = dest;
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::GlobalSumVector(double *d,int N)
 | 
			
		||||
@@ -180,14 +185,16 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  if ( shmem_addr_accessible(d,_processor)  ){
 | 
			
		||||
    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync);
 | 
			
		||||
    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync.data());
 | 
			
		||||
    shmem_barrier_all();
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(int i=0;i<N;i++){
 | 
			
		||||
    source = d[i];
 | 
			
		||||
    dest   =0.0;
 | 
			
		||||
    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
 | 
			
		||||
    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
 | 
			
		||||
    shmem_barrier_all();
 | 
			
		||||
    d[i] = dest;
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
@@ -282,11 +289,13 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
 | 
			
		||||
  SHMEM_VET(recv);
 | 
			
		||||
  //  shmem_putmem_nb(recv,xmit,bytes,dest,NULL);
 | 
			
		||||
  shmem_putmem(recv,xmit,bytes,dest);
 | 
			
		||||
 | 
			
		||||
  if ( CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all(); 
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 | 
			
		||||
{
 | 
			
		||||
  //  shmem_quiet();      // I'm done
 | 
			
		||||
  shmem_barrier_all();// He's done too
 | 
			
		||||
  if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too
 | 
			
		||||
}
 | 
			
		||||
void CartesianCommunicator::Barrier(void)
 | 
			
		||||
{
 | 
			
		||||
@@ -301,13 +310,13 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
 | 
			
		||||
  int words = bytes/4;
 | 
			
		||||
 | 
			
		||||
  if ( shmem_addr_accessible(data,_processor)  ){
 | 
			
		||||
    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync);
 | 
			
		||||
    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data());
 | 
			
		||||
    return;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  for(int w=0;w<words;w++){
 | 
			
		||||
    word = array[w];
 | 
			
		||||
    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
 | 
			
		||||
    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
 | 
			
		||||
    if ( shmem_my_pe() != root ) {
 | 
			
		||||
      array[w] = word;
 | 
			
		||||
    }
 | 
			
		||||
@@ -325,13 +334,17 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
 | 
			
		||||
 | 
			
		||||
  for(int w=0;w<words;w++){
 | 
			
		||||
    word = array[w];
 | 
			
		||||
    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
 | 
			
		||||
    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
 | 
			
		||||
    if ( shmem_my_pe() != root ) {
 | 
			
		||||
      array[w]= word;
 | 
			
		||||
    }
 | 
			
		||||
    shmem_barrier_all();
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
  
 | 
			
		||||
int CartesianCommunicator::RankWorld(void){ 
 | 
			
		||||
  return shmem_my_pe();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -1,5 +1,4 @@
 | 
			
		||||
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
@@ -53,15 +52,13 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<cobj> &buffer,int dimen
 | 
			
		||||
    cbmask = 0x3;
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  int so  = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
  
 | 
			
		||||
  int so=plane*rhs._grid->_ostride[dimension]; // base offset for start of plane 
 | 
			
		||||
  int e1=rhs._grid->_slice_nblock[dimension];
 | 
			
		||||
  int e2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
 | 
			
		||||
  int stride=rhs._grid->_slice_stride[dimension];
 | 
			
		||||
  if ( cbmask == 0x3 ) { 
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o  = n*stride;
 | 
			
		||||
	int bo = n*e2;
 | 
			
		||||
@@ -74,14 +71,13 @@ PARALLEL_NESTED_LOOP2
 | 
			
		||||
     for(int n=0;n<e1;n++){
 | 
			
		||||
       for(int b=0;b<e2;b++){
 | 
			
		||||
	 int o  = n*stride;
 | 
			
		||||
	 int ocb=1<<rhs._grid->CheckerBoardFromOindexTable(o+b);
 | 
			
		||||
	 int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
 | 
			
		||||
	 if ( ocb &cbmask ) {
 | 
			
		||||
	   table.push_back(std::pair<int,int> (bo++,o+b));
 | 
			
		||||
	 }
 | 
			
		||||
       }
 | 
			
		||||
     }
 | 
			
		||||
PARALLEL_FOR_LOOP     
 | 
			
		||||
     for(int i=0;i<table.size();i++){
 | 
			
		||||
     parallel_for(int i=0;i<table.size();i++){
 | 
			
		||||
       buffer[off+table[i].first]=compress(rhs._odata[so+table[i].second]);
 | 
			
		||||
     }
 | 
			
		||||
  }
 | 
			
		||||
@@ -105,29 +101,30 @@ Gather_plane_extract(const Lattice<vobj> &rhs,std::vector<typename cobj::scalar_
 | 
			
		||||
  int e1=rhs._grid->_slice_nblock[dimension];
 | 
			
		||||
  int e2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
  int n1=rhs._grid->_slice_stride[dimension];
 | 
			
		||||
  int n2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
 | 
			
		||||
  if ( cbmask ==0x3){
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
 | 
			
		||||
	int o      =   n*n1;
 | 
			
		||||
	int offset = b+n*n2;
 | 
			
		||||
	int offset = b+n*e2;
 | 
			
		||||
	
 | 
			
		||||
	cobj temp =compress(rhs._odata[so+o+b]);
 | 
			
		||||
 | 
			
		||||
	extract<cobj>(temp,pointers,offset);
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
 | 
			
		||||
    assert(0); //Fixme think this is buggy
 | 
			
		||||
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    // Case of SIMD split AND checker dim cannot currently be hit, except in 
 | 
			
		||||
    // Test_cshift_red_black code.
 | 
			
		||||
    std::cout << " Dense packed buffer WARNING " <<std::endl;
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o=n*rhs._grid->_slice_stride[dimension];
 | 
			
		||||
 | 
			
		||||
	int o=n*n1;
 | 
			
		||||
	int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
 | 
			
		||||
	int offset = b+n*rhs._grid->_slice_block[dimension];
 | 
			
		||||
	int offset = b+n*e2;
 | 
			
		||||
 | 
			
		||||
	if ( ocb & cbmask ) {
 | 
			
		||||
	  cobj temp =compress(rhs._odata[so+o+b]);
 | 
			
		||||
@@ -171,10 +168,10 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo
 | 
			
		||||
    
 | 
			
		||||
  int e1=rhs._grid->_slice_nblock[dimension];
 | 
			
		||||
  int e2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
  int stride=rhs._grid->_slice_stride[dimension];
 | 
			
		||||
  
 | 
			
		||||
  if ( cbmask ==0x3 ) {
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o   =n*rhs._grid->_slice_stride[dimension];
 | 
			
		||||
	int bo  =n*rhs._grid->_slice_block[dimension];
 | 
			
		||||
@@ -182,17 +179,21 @@ PARALLEL_NESTED_LOOP2
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    std::vector<std::pair<int,int> > table;
 | 
			
		||||
    int bo=0;
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o   =n*rhs._grid->_slice_stride[dimension];
 | 
			
		||||
	int bo  =n*rhs._grid->_slice_block[dimension];
 | 
			
		||||
	int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
 | 
			
		||||
	if ( ocb & cbmask ) {
 | 
			
		||||
	  rhs._odata[so+o+b]=buffer[bo++];
 | 
			
		||||
	  table.push_back(std::pair<int,int> (so+o+b,bo++));
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    parallel_for(int i=0;i<table.size();i++){
 | 
			
		||||
       //       std::cout << "Rcv"<< table[i].first << " " << table[i].second << " " <<buffer[table[i].second]<<std::endl;
 | 
			
		||||
       rhs._odata[table[i].first]=buffer[table[i].second];
 | 
			
		||||
     }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -213,8 +214,7 @@ PARALLEL_NESTED_LOOP2
 | 
			
		||||
  int e2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
 | 
			
		||||
  if(cbmask ==0x3 ) {
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o      = n*rhs._grid->_slice_stride[dimension];
 | 
			
		||||
	int offset = b+n*rhs._grid->_slice_block[dimension];
 | 
			
		||||
@@ -222,7 +222,11 @@ PARALLEL_NESTED_LOOP2
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    assert(0); // think this is buggy FIXME
 | 
			
		||||
 | 
			
		||||
    // Case of SIMD split AND checker dim cannot currently be hit, except in 
 | 
			
		||||
    // Test_cshift_red_black code.
 | 
			
		||||
    //    std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;// think this is buggy FIXME
 | 
			
		||||
    std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
	int o      = n*rhs._grid->_slice_stride[dimension];
 | 
			
		||||
@@ -254,8 +258,7 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
 | 
			
		||||
  int e2=rhs._grid->_slice_block[dimension];
 | 
			
		||||
  int stride = rhs._grid->_slice_stride[dimension];
 | 
			
		||||
  if(cbmask == 0x3 ){
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
 
 | 
			
		||||
        int o =n*stride+b;
 | 
			
		||||
@@ -264,8 +267,7 @@ PARALLEL_NESTED_LOOP2
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
    for(int n=0;n<e1;n++){
 | 
			
		||||
    parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
      for(int b=0;b<e2;b++){
 | 
			
		||||
 
 | 
			
		||||
        int o =n*stride+b;
 | 
			
		||||
@@ -295,8 +297,8 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
 | 
			
		||||
  int e1=rhs._grid->_slice_nblock[dimension];
 | 
			
		||||
  int e2=rhs._grid->_slice_block [dimension];
 | 
			
		||||
  int stride = rhs._grid->_slice_stride[dimension];
 | 
			
		||||
PARALLEL_NESTED_LOOP2
 | 
			
		||||
  for(int n=0;n<e1;n++){
 | 
			
		||||
 | 
			
		||||
  parallel_for_nest2(int n=0;n<e1;n++){
 | 
			
		||||
  for(int b=0;b<e2;b++){
 | 
			
		||||
 | 
			
		||||
      int o  =n*stride;
 | 
			
		||||
@@ -338,8 +340,8 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
 | 
			
		||||
  // Map to always positive shift modulo global full dimension.
 | 
			
		||||
  shift = (shift+fd)%fd;
 | 
			
		||||
 | 
			
		||||
  ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
 | 
			
		||||
  // the permute type
 | 
			
		||||
  ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
 | 
			
		||||
  int permute_dim =grid->PermuteDim(dimension);
 | 
			
		||||
  int permute_type=grid->PermuteType(dimension);
 | 
			
		||||
  int permute_type_dist;
 | 
			
		||||
@@ -348,7 +350,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
 | 
			
		||||
 | 
			
		||||
    int o   = 0;
 | 
			
		||||
    int bo  = x * grid->_ostride[dimension];
 | 
			
		||||
    
 | 
			
		||||
    int cb= (cbmask==0x2)? Odd : Even;
 | 
			
		||||
 | 
			
		||||
    int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
 | 
			
		||||
@@ -361,9 +362,23 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
 | 
			
		||||
    // wrap is whether sshift > rd.
 | 
			
		||||
    //  num is sshift mod rd.
 | 
			
		||||
    // 
 | 
			
		||||
    //  shift 7
 | 
			
		||||
    //
 | 
			
		||||
    //  XoXo YcYc 
 | 
			
		||||
    //  oXoX cYcY
 | 
			
		||||
    //  XoXo YcYc
 | 
			
		||||
    //  oXoX cYcY
 | 
			
		||||
    //
 | 
			
		||||
    //  sshift -- 
 | 
			
		||||
    //
 | 
			
		||||
    //  XX YY ; 3
 | 
			
		||||
    //  XX YY ; 0
 | 
			
		||||
    //  XX YY ; 3
 | 
			
		||||
    //  XX YY ; 0
 | 
			
		||||
    //
 | 
			
		||||
    int permute_slice=0;
 | 
			
		||||
    if(permute_dim){
 | 
			
		||||
      int wrap = sshift/rd;
 | 
			
		||||
      int wrap = sshift/rd; wrap=wrap % ly;
 | 
			
		||||
      int  num = sshift%rd;
 | 
			
		||||
 | 
			
		||||
      if ( x< rd-num ) permute_slice=wrap;
 | 
			
		||||
@@ -375,7 +390,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
 | 
			
		||||
      } else {
 | 
			
		||||
	permute_type_dist = permute_type;
 | 
			
		||||
      }
 | 
			
		||||
      
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
 | 
			
		||||
 
 | 
			
		||||
@@ -74,7 +74,6 @@ template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &r
 | 
			
		||||
  sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd);
 | 
			
		||||
 | 
			
		||||
  //  std::cout << "Cshift_comms dim "<<dimension<<"cb "<<rhs.checkerboard<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;
 | 
			
		||||
 | 
			
		||||
  if ( sshift[0] == sshift[1] ) {
 | 
			
		||||
    //    std::cout << "Single pass Cshift_comms" <<std::endl;
 | 
			
		||||
    Cshift_comms(ret,rhs,dimension,shift,0x3);
 | 
			
		||||
@@ -154,10 +153,14 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
 | 
			
		||||
			   (void *)&recv_buf[0],
 | 
			
		||||
			   recv_from_rank,
 | 
			
		||||
			   bytes);
 | 
			
		||||
 | 
			
		||||
      //      for(int i=0;i<words;i++){
 | 
			
		||||
      //	std::cout << "SendRecv ["<<i<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
 | 
			
		||||
      //      }
 | 
			
		||||
      grid->Barrier();
 | 
			
		||||
      /*
 | 
			
		||||
      for(int i=0;i<send_buf.size();i++){
 | 
			
		||||
	assert(recv_buf.size()==buffer_size);
 | 
			
		||||
	assert(send_buf.size()==buffer_size);
 | 
			
		||||
	std::cout << "SendRecv_Cshift_comms ["<<i<<" "<< dimension<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
 | 
			
		||||
      }
 | 
			
		||||
      */
 | 
			
		||||
      Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
@@ -243,7 +246,14 @@ template<class vobj> void  Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
 | 
			
		||||
			     (void *)&recv_buf_extract[i][0],
 | 
			
		||||
			     recv_from_rank,
 | 
			
		||||
			     bytes);
 | 
			
		||||
 | 
			
		||||
	/*
 | 
			
		||||
	for(int w=0;w<recv_buf_extract[i].size();w++){
 | 
			
		||||
	  assert(recv_buf_extract[i].size()==buffer_size);
 | 
			
		||||
	  assert(send_buf_extract[i].size()==buffer_size);
 | 
			
		||||
	  std::cout << "SendRecv_Cshift_comms ["<<w<<" "<< dimension<<"] recv "<<recv_buf_extract[i][w]<<" send " << send_buf_extract[nbr_lane][w]  << cbmask<<std::endl;
 | 
			
		||||
	}
 | 
			
		||||
	*/	
 | 
			
		||||
	grid->Barrier();
 | 
			
		||||
	rpointers[i] = &recv_buf_extract[i][0];
 | 
			
		||||
      } else { 
 | 
			
		||||
	rpointers[i] = &send_buf_extract[nbr_lane][0];
 | 
			
		||||
 
 | 
			
		||||
@@ -39,8 +39,7 @@ namespace Grid {
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,rhs);
    conformable(lhs,rhs);
PARALLEL_FOR_LOOP
    for(int ss=0;ss<lhs._grid->oSites();ss++){
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
#ifdef STREAMING_STORES
      obj1 tmp;
      mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
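The parallel_for / parallel_for_nest2 forms that replace the PARALLEL_FOR_LOOP macro plus bare for-loop pairs throughout these hunks are thin wrappers over OpenMP pragmas; a plausible definition (an assumption sketched here, not quoted from the repository, including the GRID_OMP guard) is:

// Hypothetical macros equivalent in effect to the old
// PARALLEL_FOR_LOOP / PARALLEL_NESTED_LOOP2 followed by for(...).
#ifdef GRID_OMP
#define parallel_for       _Pragma("omp parallel for schedule(static)") for
#define parallel_for_nest2 _Pragma("omp parallel for collapse(2)")      for
#else
#define parallel_for       for
#define parallel_for_nest2 for
#endif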
@@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
    conformable(lhs,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
 | 
			
		||||
@@ -73,8 +71,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
    conformable(lhs,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
 | 
			
		||||
@@ -89,8 +86,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
    conformable(lhs,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      add(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
 | 
			
		||||
@@ -108,8 +104,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(lhs,ret);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      mult(&tmp,&lhs._odata[ss],&rhs);
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
@@ -120,8 +115,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(ret,lhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      mac(&tmp,&lhs._odata[ss],&rhs);
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
@@ -132,8 +126,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(ret,lhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      sub(&tmp,&lhs._odata[ss],&rhs);
 | 
			
		||||
@@ -147,8 +140,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
 | 
			
		||||
    ret.checkerboard = lhs.checkerboard;
 | 
			
		||||
    conformable(lhs,ret);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      add(&tmp,&lhs._odata[ss],&rhs);
 | 
			
		||||
@@ -166,8 +158,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      mult(&tmp,&lhs,&rhs._odata[ss]);
 | 
			
		||||
@@ -182,8 +173,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      mac(&tmp,&lhs,&rhs._odata[ss]);
 | 
			
		||||
@@ -198,8 +188,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      sub(&tmp,&lhs,&rhs._odata[ss]);
 | 
			
		||||
@@ -213,8 +202,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      obj1 tmp;
 | 
			
		||||
      add(&tmp,&lhs,&rhs._odata[ss]);
 | 
			
		||||
@@ -230,8 +218,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    ret.checkerboard = x.checkerboard;
 | 
			
		||||
    conformable(ret,x);
 | 
			
		||||
    conformable(x,y);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<x._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<x._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = a*x._odata[ss]+y._odata[ss];
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
@@ -245,8 +232,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    ret.checkerboard = x.checkerboard;
 | 
			
		||||
    conformable(ret,x);
 | 
			
		||||
    conformable(x,y);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<x._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<x._grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = a*x._odata[ss]+b*y._odata[ss];
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
 
 | 
			
		||||
@@ -121,8 +121,7 @@ public:
 | 
			
		||||
    assert( (cb==Odd) || (cb==Even));
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = eval(ss,expr);
 | 
			
		||||
      vstream(_odata[ss] ,tmp);
 | 
			
		||||
@@ -144,8 +143,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    assert( (cb==Odd) || (cb==Even));
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = eval(ss,expr);
 | 
			
		||||
      vstream(_odata[ss] ,tmp);
 | 
			
		||||
@@ -167,8 +165,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    assert( (cb==Odd) || (cb==Even));
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      //vobj tmp = eval(ss,expr);
 | 
			
		||||
      vstream(_odata[ss] ,eval(ss,expr));
 | 
			
		||||
@@ -191,8 +188,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
    _odata.resize(_grid->oSites());
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = eval(ss,expr);
 | 
			
		||||
      vstream(_odata[ss] ,tmp);
 | 
			
		||||
@@ -213,8 +209,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
    _odata.resize(_grid->oSites());
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
#ifdef STREAMING_STORES
 | 
			
		||||
      vobj tmp = eval(ss,expr);
 | 
			
		||||
      vstream(_odata[ss] ,tmp);
 | 
			
		||||
@@ -235,8 +230,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    checkerboard=cb;
 | 
			
		||||
 | 
			
		||||
    _odata.resize(_grid->oSites());
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
      vstream(_odata[ss] ,eval(ss,expr));
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
@@ -258,8 +252,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    	_grid = r._grid;
 | 
			
		||||
    	checkerboard = r.checkerboard;
 | 
			
		||||
    	_odata.resize(_grid->oSites());// essential
 | 
			
		||||
  		PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
	parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
            _odata[ss]=r._odata[ss];
 | 
			
		||||
        }  	
 | 
			
		||||
    }
 | 
			
		||||
@@ -269,8 +262,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    virtual ~Lattice(void) = default;
 | 
			
		||||
    
 | 
			
		||||
    template<class sobj> strong_inline Lattice<vobj> & operator = (const sobj & r){
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
            this->_odata[ss]=r;
 | 
			
		||||
        }
 | 
			
		||||
        return *this;
 | 
			
		||||
@@ -279,8 +271,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
      this->checkerboard = r.checkerboard;
 | 
			
		||||
      conformable(*this,r);
 | 
			
		||||
      
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<_grid->oSites();ss++){
 | 
			
		||||
            this->_odata[ss]=r._odata[ss];
 | 
			
		||||
        }
 | 
			
		||||
        return *this;
 | 
			
		||||
 
 | 
			
		||||
@@ -45,90 +45,87 @@ namespace Grid {
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class vfunctor,class lobj,class robj>  
 | 
			
		||||
    inline Lattice<vInteger> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<vInteger> ret(rhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	  ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<vInteger> ret(rhs._grid);
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
  }
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // compare lattice to scalar
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<class vfunctor,class lobj,class robj> 
 | 
			
		||||
  template<class vfunctor,class lobj,class robj> 
 | 
			
		||||
    inline Lattice<vInteger> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<vInteger> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites(); ss++){
 | 
			
		||||
	  ret._odata[ss]=op(lhs._odata[ss],rhs);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<vInteger> ret(lhs._grid);
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
 | 
			
		||||
      ret._odata[ss]=op(lhs._odata[ss],rhs);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
  }
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // compare scalar to lattice
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<class vfunctor,class lobj,class robj> 
 | 
			
		||||
  template<class vfunctor,class lobj,class robj> 
 | 
			
		||||
    inline Lattice<vInteger> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<vInteger> ret(rhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	  ret._odata[ss]=op(lhs._odata[ss],rhs);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<vInteger> ret(rhs._grid);
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      ret._odata[ss]=op(lhs._odata[ss],rhs);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    return ret;
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Map to functors
 | 
			
		||||
  //////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Less than
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
     return LLComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const robj & rhs) {
 | 
			
		||||
     return LSComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator < (const lobj & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
     return SLComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
 | 
			
		||||
   // Less than equal
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
     return LLComparison(vle<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const robj & rhs) {
 | 
			
		||||
     return LSComparison(vle<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator <= (const lobj & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
     return SLComparison(vle<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
 | 
			
		||||
   // Greater than 
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
     return LLComparison(vgt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const robj & rhs) {
 | 
			
		||||
     return LSComparison(vgt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
   }
 | 
			
		||||
   template<class lobj,class robj>
 | 
			
		||||
   inline Lattice<vInteger> operator > (const lobj & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
  // Less than
 | 
			
		||||
  template<class lobj,class robj>
 | 
			
		||||
    inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
    return LLComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
  }
 | 
			
		||||
  template<class lobj,class robj>
 | 
			
		||||
    inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const robj & rhs) {
 | 
			
		||||
    return LSComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
  }
 | 
			
		||||
  template<class lobj,class robj>
 | 
			
		||||
    inline Lattice<vInteger> operator < (const lobj & lhs, const Lattice<robj> & rhs) {
 | 
			
		||||
    return SLComparison(vlt<lobj,robj>(),lhs,rhs);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  // Less than equal
 | 
			
		||||
  template<class lobj,class robj>
    inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
    return LLComparison(vle<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const robj & rhs) {
    return LSComparison(vle<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator <= (const lobj & lhs, const Lattice<robj> & rhs) {
    return SLComparison(vle<lobj,robj>(),lhs,rhs);
  }

  // Greater than
  template<class lobj,class robj>
    inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
    return LLComparison(vgt<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const robj & rhs) {
    return LSComparison(vgt<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator > (const lobj & lhs, const Lattice<robj> & rhs) {
    return SLComparison(vgt<lobj,robj>(),lhs,rhs);
  }

  // Greater than equal
  template<class lobj,class robj>
    inline Lattice<vInteger> operator >= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
    return LLComparison(vge<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator >= (const Lattice<lobj> & lhs, const robj & rhs) {
    return LSComparison(vge<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator >= (const lobj & lhs, const Lattice<robj> & rhs) {
    return SLComparison(vge<lobj,robj>(),lhs,rhs);
  }

  // equal
  template<class lobj,class robj>
    inline Lattice<vInteger> operator == (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
    return LLComparison(veq<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator == (const Lattice<lobj> & lhs, const robj & rhs) {
    return LSComparison(veq<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator == (const lobj & lhs, const Lattice<robj> & rhs) {
    return SLComparison(veq<lobj,robj>(),lhs,rhs);
  }

  // not equal
  template<class lobj,class robj>
    inline Lattice<vInteger> operator != (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
    return LLComparison(vne<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator != (const Lattice<lobj> & lhs, const robj & rhs) {
    return LSComparison(vne<lobj,robj>(),lhs,rhs);
  }
  template<class lobj,class robj>
    inline Lattice<vInteger> operator != (const lobj & lhs, const Lattice<robj> & rhs) {
    return SLComparison(vne<lobj,robj>(),lhs,rhs);
  }
}
#endif
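The operators above delegate to the LLComparison/LSComparison/SLComparison helpers with per-site predicate functors (vle, vgt, vge, veq, vne) and return a Lattice<vInteger> mask that is typically consumed by a where()-style selector. Below is a minimal standalone sketch of that pattern on plain std::vector fields, just to make the mask semantics concrete; the names elementwiseCompare and where here are illustrative and are not the Grid API.

// Standalone sketch: an elementwise predicate produces an integer mask,
// and a where()-style selector blends two fields using that mask.
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

template<class T, class Pred>
std::vector<int> elementwiseCompare(const std::vector<T> &lhs,
                                    const std::vector<T> &rhs, Pred pred) {
  std::vector<int> mask(lhs.size());
  for (std::size_t ss = 0; ss < lhs.size(); ss++) mask[ss] = pred(lhs[ss], rhs[ss]) ? 1 : 0;
  return mask;
}

template<class T>
std::vector<T> where(const std::vector<int> &mask,
                     const std::vector<T> &iftrue, const std::vector<T> &iffalse) {
  std::vector<T> out(mask.size());
  for (std::size_t ss = 0; ss < mask.size(); ss++) out[ss] = mask[ss] ? iftrue[ss] : iffalse[ss];
  return out;
}

int main() {
  std::vector<double> a{1, 4, 2}, b{3, 3, 3};
  auto mask = elementwiseCompare(a, b, std::less_equal<double>()); // a <= b, site by site
  auto sel  = where(mask, a, b);                                   // per-site min(a,b)
  for (double x : sel) std::cout << x << " ";                      // prints 1 3 2
  std::cout << std::endl;
  return 0;
}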
 | 
			
		||||
 
 | 
			
		||||
@@ -34,47 +34,42 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

namespace Grid {

  /////////////////////////////////////////////////////
  // Non site, reduced locally reduced routines
  /////////////////////////////////////////////////////
  
  // localNorm2,
  template<class vobj>
    inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
    {
      Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
      parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
	ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]);
      }
      return ret;
    }
  
  // localInnerProduct
  template<class vobj>
    inline auto localInnerProduct (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs) -> Lattice<typename vobj::tensor_reduced>
    {
      Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
      parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
	ret._odata[ss]=innerProduct(lhs._odata[ss],rhs._odata[ss]);
      }
      return ret;
    }
  
  // outerProduct Scalar x Scalar -> Scalar
  //              Vector x Vector -> Matrix
  template<class ll,class rr>
    inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))>
  {
    Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))> ret(rhs._grid);
    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
      ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]);
    }
    return ret;
  }
}

#endif
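Throughout these hunks the old PARALLEL_FOR_LOOP pragma-plus-for spelling is replaced by a single parallel_for(...) wrapper around the loop header. The sketch below shows one way such a macro can be expressed with OpenMP, applied to a localNorm2-style site loop; this is an assumption about the mechanism, the actual macro lives in Grid's threading header and may be defined differently.

// Minimal sketch of a parallel_for macro, assuming OpenMP is available.
#include <cstdio>
#include <vector>

#ifdef _OPENMP
#define parallel_for _Pragma("omp parallel for") for
#else
#define parallel_for for
#endif

int main() {
  std::vector<double> ret(1024), rhs(1024, 2.0);
  parallel_for(int ss = 0; ss < (int)ret.size(); ss++) {
    ret[ss] = rhs[ss] * rhs[ss];   // e.g. a localNorm2-style per-site operation
  }
  std::printf("%f\n", ret[0]);     // 4.0
  return 0;
}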
 | 
			
		||||
 
 | 
			
		||||
@@ -37,8 +37,7 @@ namespace Grid {
 | 
			
		||||
  inline Lattice<vobj> operator -(const Lattice<vobj> &r)
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<vobj> ret(r._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<r._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<r._grid->oSites();ss++){
 | 
			
		||||
      vstream(ret._odata[ss], -r._odata[ss]);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
@@ -74,8 +73,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
  inline auto operator * (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      decltype(lhs*rhs._odata[0]) tmp=lhs*rhs._odata[ss]; 
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
	   //      ret._odata[ss]=lhs*rhs._odata[ss];
 | 
			
		||||
@@ -86,8 +84,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    inline auto operator + (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs+rhs._odata[0])>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(lhs+rhs._odata[0])> ret(rhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	decltype(lhs+rhs._odata[0]) tmp =lhs+rhs._odata[ss];  
 | 
			
		||||
	vstream(ret._odata[ss],tmp);
 | 
			
		||||
	//	ret._odata[ss]=lhs+rhs._odata[ss];
 | 
			
		||||
@@ -98,11 +95,9 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    inline auto operator - (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs-rhs._odata[0])>
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<decltype(lhs-rhs._odata[0])> ret(rhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      decltype(lhs-rhs._odata[0]) tmp=lhs-rhs._odata[ss];  
 | 
			
		||||
      vstream(ret._odata[ss],tmp);
 | 
			
		||||
      //      ret._odata[ss]=lhs-rhs._odata[ss];
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
  }
 | 
			
		||||
@@ -110,8 +105,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
      inline auto operator * (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int ss=0;ss<lhs._grid->oSites(); ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
 | 
			
		||||
	decltype(lhs._odata[0]*rhs) tmp =lhs._odata[ss]*rhs;
 | 
			
		||||
	vstream(ret._odata[ss],tmp);
 | 
			
		||||
	//            ret._odata[ss]=lhs._odata[ss]*rhs;
 | 
			
		||||
@@ -122,8 +116,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
      inline auto operator + (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]+rhs)>
 | 
			
		||||
    {
 | 
			
		||||
        Lattice<decltype(lhs._odata[0]+rhs)> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	  decltype(lhs._odata[0]+rhs) tmp=lhs._odata[ss]+rhs; 
 | 
			
		||||
	  vstream(ret._odata[ss],tmp);
 | 
			
		||||
	  //	  ret._odata[ss]=lhs._odata[ss]+rhs;
 | 
			
		||||
@@ -134,15 +127,12 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
      inline auto operator - (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]-rhs)>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(lhs._odata[0]-rhs)> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 | 
			
		||||
	  decltype(lhs._odata[0]-rhs) tmp=lhs._odata[ss]-rhs;
 | 
			
		||||
	  vstream(ret._odata[ss],tmp);
 | 
			
		||||
	  //	ret._odata[ss]=lhs._odata[ss]-rhs;
 | 
			
		||||
      }
 | 
			
		||||
      return ret;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
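Each of the scalar/lattice operators above evaluates the site result into a temporary and hands it to vstream() rather than assigning into ret._odata[ss] directly; vstream maps onto a streaming (non-temporal) store in the vectorised code. The standalone sketch below shows the same compute-then-stream shape with vstream approximated by a plain assignment; the function name scale and the toy field types are illustrative only.

// Sketch of the compute-into-a-temporary-then-stream pattern.
#include <cstddef>
#include <iostream>
#include <vector>

template<class T> inline void vstream(T &dst, const T &src) { dst = src; } // stand-in for a streaming store

template<class L, class R>
std::vector<decltype(L()*R())> scale(const L &lhs, const std::vector<R> &rhs) {
  std::vector<decltype(L()*R())> ret(rhs.size());
  for (std::size_t ss = 0; ss < rhs.size(); ss++) {
    decltype(L()*R()) tmp = lhs * rhs[ss];  // per-site temporary
    vstream(ret[ss], tmp);                  // written out once, bypassing the cache in the real code
  }
  return ret;
}

int main() {
  std::vector<double> v{1.0, 2.0, 3.0};
  auto w = scale(2.0, v);
  std::cout << w[2] << std::endl;  // 6
  return 0;
}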
 | 
			
		||||
 
 | 
			
		||||
@@ -44,22 +44,20 @@ namespace Grid {
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(peekIndex<Index>(lhs._odata[0],i))> ret(lhs._grid);
 | 
			
		||||
      ret.checkerboard=lhs.checkerboard;
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	  ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i);
 | 
			
		||||
      }
 | 
			
		||||
      return ret;
 | 
			
		||||
    };
 | 
			
		||||
    template<int Index,class vobj>
 | 
			
		||||
       auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))>
 | 
			
		||||
      auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))> ret(lhs._grid);
 | 
			
		||||
      ret.checkerboard=lhs.checkerboard;
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	  ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i,j);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i,j);
 | 
			
		||||
      }
 | 
			
		||||
      return ret;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
@@ -68,25 +66,23 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    template<int Index,class vobj> 
 | 
			
		||||
    void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0))> & rhs,int i)
 | 
			
		||||
    {
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	  pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i);
 | 
			
		||||
	}      
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i);
 | 
			
		||||
      }      
 | 
			
		||||
    }
 | 
			
		||||
    template<int Index,class vobj>
 | 
			
		||||
      void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0,0))> & rhs,int i,int j)
 | 
			
		||||
    {
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	  pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i,j);
 | 
			
		||||
	}      
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i,j);
 | 
			
		||||
      }      
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    //////////////////////////////////////////////////////
 | 
			
		||||
    // Poke a scalar object into the SIMD array
 | 
			
		||||
    //////////////////////////////////////////////////////
 | 
			
		||||
    template<class vobj,class sobj>
 | 
			
		||||
    void pokeSite(const sobj &s,Lattice<vobj> &l,std::vector<int> &site){
 | 
			
		||||
    void pokeSite(const sobj &s,Lattice<vobj> &l,const std::vector<int> &site){
 | 
			
		||||
 | 
			
		||||
      GridBase *grid=l._grid;
 | 
			
		||||
 | 
			
		||||
@@ -120,7 +116,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    // Peek a scalar object from the SIMD array
 | 
			
		||||
    //////////////////////////////////////////////////////////
 | 
			
		||||
    template<class vobj,class sobj>
 | 
			
		||||
      void peekSite(sobj &s,const Lattice<vobj> &l,std::vector<int> &site){
 | 
			
		||||
      void peekSite(sobj &s,const Lattice<vobj> &l,const std::vector<int> &site){
 | 
			
		||||
        
 | 
			
		||||
      GridBase *grid=l._grid;
 | 
			
		||||
 | 
			
		||||
@@ -131,9 +127,6 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
 | 
			
		||||
      assert( l.checkerboard == l._grid->CheckerBoard(site));
 | 
			
		||||
 | 
			
		||||
      // FIXME
 | 
			
		||||
      //      assert( sizeof(sobj)*Nsimd == sizeof(vobj));
 | 
			
		||||
 | 
			
		||||
      int rank,odx,idx;
 | 
			
		||||
      grid->GlobalCoorToRankIndex(rank,odx,idx,site);
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -40,8 +40,7 @@ namespace Grid {
 | 
			
		||||
 | 
			
		||||
    template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
 | 
			
		||||
        Lattice<vobj> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
            ret._odata[ss] = adj(lhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
@@ -49,13 +48,10 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
 | 
			
		||||
    template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
 | 
			
		||||
        Lattice<vobj> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
            ret._odata[ss] = conjugate(lhs._odata[ss]);
 | 
			
		||||
	parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	  ret._odata[ss] = conjugate(lhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
#endif
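adj() and conjugate() above are applied site by site; for matrix-valued site objects adj() is the Hermitian conjugate while conjugate() only conjugates the entries. A small standalone example on one 2x2 complex "site" to illustrate the distinction; it uses plain std::complex arrays, not Grid's tensor types.

// Elementwise conjugate vs Hermitian adjoint on a single site.
#include <complex>
#include <cstdio>

using cplx = std::complex<double>;

int main() {
  cplx m[2][2] = {{{1,2},{3,4}},{{5,6},{7,8}}};
  cplx conj_m[2][2], adj_m[2][2];
  for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) {
    conj_m[i][j] = std::conj(m[i][j]);  // conjugate(): entrywise only
    adj_m[i][j]  = std::conj(m[j][i]);  // adj(): conjugate transpose
  }
  std::printf("conj[0][1] = (%g,%g)  adj[0][1] = (%g,%g)\n",
              conj_m[0][1].real(), conj_m[0][1].imag(),
              adj_m[0][1].real(),  adj_m[0][1].imag());
  // conj[0][1] = (3,-4)   adj[0][1] = (5,-6)
  return 0;
}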
 | 
			
		||||
 
 | 
			
		||||
@@ -57,8 +57,7 @@ namespace Grid {
 | 
			
		||||
	sumarray[i]=zero;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int thr=0;thr<grid->SumArraySize();thr++){
 | 
			
		||||
      parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
 | 
			
		||||
	int nwork, mywork, myoff;
 | 
			
		||||
	GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);
 | 
			
		||||
	
 | 
			
		||||
@@ -68,7 +67,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
	}
 | 
			
		||||
	sumarray[thr]=TensorRemove(vnrm) ;
 | 
			
		||||
      }
 | 
			
		||||
    
 | 
			
		||||
      
 | 
			
		||||
      vector_type vvnrm; vvnrm=zero;  // sum across threads
 | 
			
		||||
      for(int i=0;i<grid->SumArraySize();i++){
 | 
			
		||||
	vvnrm = vvnrm+sumarray[i];
 | 
			
		||||
@@ -114,18 +113,17 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
	sumarray[i]=zero;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int thr=0;thr<grid->SumArraySize();thr++){
 | 
			
		||||
      parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
 | 
			
		||||
	int nwork, mywork, myoff;
 | 
			
		||||
	GridThread::GetWork(grid->oSites(),thr,mywork,myoff);
 | 
			
		||||
 | 
			
		||||
	
 | 
			
		||||
	vobj vvsum=zero;
 | 
			
		||||
        for(int ss=myoff;ss<mywork+myoff; ss++){
 | 
			
		||||
	  vvsum = vvsum + arg._odata[ss];
 | 
			
		||||
	}
 | 
			
		||||
	sumarray[thr]=vvsum;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      
 | 
			
		||||
      vobj vsum=zero;  // sum across threads
 | 
			
		||||
      for(int i=0;i<grid->SumArraySize();i++){
 | 
			
		||||
	vsum = vsum+sumarray[i];
 | 
			
		||||
 
 | 
			
		||||
@@ -34,6 +34,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
  //http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf ?
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////
 | 
			
		||||
  // Allow the RNG state to be less dense than the fine grid
 | 
			
		||||
@@ -69,6 +70,7 @@ namespace Grid {
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Wrap seed_seq to give common interface with random_device
 | 
			
		||||
  // Should rather wrap random_device and have a generate
 | 
			
		||||
  class fixedSeed {
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
@@ -76,20 +78,31 @@ namespace Grid {
 | 
			
		||||
 | 
			
		||||
    std::seed_seq src;
 | 
			
		||||
    
 | 
			
		||||
    template<class int_type> fixedSeed(const std::vector<int_type> &seeds) : src(seeds.begin(),seeds.end()) {};

    template< class RandomIt > void generate( RandomIt begin, RandomIt end ) {
      src.generate(begin,end);
    }
 | 
			
		||||
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  class deviceSeed {
 | 
			
		||||
  public:
 | 
			
		||||
 | 
			
		||||
    std::random_device rd;
 | 
			
		||||
 | 
			
		||||
    typedef std::random_device::result_type result_type;
 | 
			
		||||
    
 | 
			
		||||
    deviceSeed(void) : rd(){};
 | 
			
		||||
 | 
			
		||||
    template< class RandomIt > void generate( RandomIt begin, RandomIt end ) {
 | 
			
		||||
      for(RandomIt it=begin; it!=end;it++){
 | 
			
		||||
	*it = rd();
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  };
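fixedSeed wraps std::seed_seq and deviceSeed wraps std::random_device so that both expose the same generate(begin,end) interface, which lets the templated Seed() routines accept either source. The sketch below reproduces that idea in standalone form; the class names FixedSource, DeviceSource and the helper seedEngine are illustrative, not the Grid implementation.

// Two seed sources behind one generate(begin,end) interface.
#include <cstdio>
#include <random>
#include <vector>

struct FixedSource {
  std::seed_seq src;
  FixedSource(const std::vector<int> &seeds) : src(seeds.begin(), seeds.end()) {}
  template<class It> void generate(It begin, It end) { src.generate(begin, end); }
};

struct DeviceSource {
  std::random_device rd;
  template<class It> void generate(It begin, It end) { for (It it = begin; it != end; ++it) *it = rd(); }
};

template<class Source> std::mt19937 seedEngine(Source &src) {
  std::vector<std::uint32_t> init(8);
  src.generate(init.begin(), init.end());
  std::seed_seq sseq(init.begin(), init.end());
  return std::mt19937(sseq);
}

int main() {
  FixedSource  fs({1, 2, 3, 4});
  DeviceSource ds;
  std::mt19937 g1 = seedEngine(fs);   // reproducible stream
  std::mt19937 g2 = seedEngine(ds);   // hardware-seeded stream
  std::printf("%u %u\n", g1(), g2());
  return 0;
}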
 | 
			
		||||
 | 
			
		||||
  // real scalars are one component
 | 
			
		||||
  template<class scalar,class distribution,class generator> void fillScalar(scalar &s,distribution &dist,generator & gen)
 | 
			
		||||
  {
 | 
			
		||||
@@ -127,7 +140,7 @@ namespace Grid {
 | 
			
		||||
    std::vector<RngEngine>                             _generators;
 | 
			
		||||
    std::vector<std::uniform_real_distribution<RealD>> _uniform;
 | 
			
		||||
    std::vector<std::normal_distribution<RealD>>       _gaussian;
 | 
			
		||||
    std::vector<std::discrete_distribution<int32_t>>   _bernoulli;
 | 
			
		||||
 | 
			
		||||
    void GetState(std::vector<RngStateType> & saved,int gen) {
 | 
			
		||||
      saved.resize(RngStateCount);
 | 
			
		||||
@@ -155,13 +168,6 @@ namespace Grid {
 | 
			
		||||
    // FIXME ... do we require lockstep draws of randoms 
 | 
			
		||||
    // from all nodes keeping seeds consistent.
 | 
			
		||||
    // place a barrier/broadcast in the fill routine
 | 
			
		||||
    template<class source> void Seed(source &src)
 | 
			
		||||
    {
 | 
			
		||||
      typename source::result_type init = src();
 | 
			
		||||
      CartesianCommunicator::BroadcastWorld(0,(void *)&init,sizeof(init));
 | 
			
		||||
      _generators[0] = RngEngine(init);
 | 
			
		||||
      _seeded=1;
 | 
			
		||||
    }    
 | 
			
		||||
 | 
			
		||||
    GridSerialRNG() : GridRNGbase() {
 | 
			
		||||
      _generators.resize(1);
 | 
			
		||||
@@ -244,12 +250,17 @@ namespace Grid {
 | 
			
		||||
      CartesianCommunicator::BroadcastWorld(0,(void *)&l,sizeof(l));
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    template<class source> void Seed(source &src)
 | 
			
		||||
    {
 | 
			
		||||
      _generators[0] = RngEngine(src);
 | 
			
		||||
      _seeded=1;
 | 
			
		||||
    }    
 | 
			
		||||
    void SeedRandomDevice(void){
 | 
			
		||||
      deviceSeed src;
      Seed(src);
 | 
			
		||||
    }
 | 
			
		||||
    void SeedFixedIntegers(const std::vector<int> &seeds){
 | 
			
		||||
      CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
 | 
			
		||||
      fixedSeed src(seeds);
 | 
			
		||||
      Seed(src);
 | 
			
		||||
    }
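SeedFixedIntegers broadcasts the seed vector from the boss rank and then constructs the engine from a fixedSeed, so the serial RNG is identical on every rank. The single-process sketch below shows only the fixed-integers-to-engine step; the broadcast belongs to the Grid communicator and is omitted, and std::mt19937 stands in for whatever RngEngine is configured.

// Seeding an engine from a fixed list of integers.
#include <cassert>
#include <random>
#include <vector>

std::mt19937 seedFromIntegers(const std::vector<int> &seeds) {
  std::seed_seq sseq(seeds.begin(), seeds.end());
  return std::mt19937(sseq);
}

int main() {
  std::vector<int> seeds{1, 2, 3, 4};
  std::mt19937 a = seedFromIntegers(seeds);
  std::mt19937 b = seedFromIntegers(seeds);
  assert(a() == b());   // identical seeds give identical streams on every rank
  return 0;
}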
 | 
			
		||||
@@ -278,46 +289,6 @@ namespace Grid {
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    // This loop could be made faster to avoid the Amdahl bottleneck by
 | 
			
		||||
    // i)  seed generators on each timeslice, for x=y=z=0;
 | 
			
		||||
    // ii) seed generators on each z for x=y=0
 | 
			
		||||
    // iii)seed generators on each y,z for x=0
 | 
			
		||||
    // iv) seed generators on each y,z,x 
 | 
			
		||||
    // made possible by physical indexing.
 | 
			
		||||
    template<class source> void Seed(source &src)
 | 
			
		||||
    {
 | 
			
		||||
      std::vector<int> gcoor;
 | 
			
		||||
 | 
			
		||||
      int gsites = _grid->_gsites;
 | 
			
		||||
 | 
			
		||||
      typename source::result_type init = src();
 | 
			
		||||
      RngEngine pseeder(init);
 | 
			
		||||
      std::uniform_int_distribution<uint64_t> ui;
 | 
			
		||||
 | 
			
		||||
      for(int gidx=0;gidx<gsites;gidx++){
 | 
			
		||||
 | 
			
		||||
	int rank,o_idx,i_idx;
 | 
			
		||||
	_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
 | 
			
		||||
	_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
 | 
			
		||||
        
 | 
			
		||||
	int l_idx=generator_idx(o_idx,i_idx);
 | 
			
		||||
 | 
			
		||||
	const int num_rand_seed=16;
 | 
			
		||||
	std::vector<int> site_seeds(num_rand_seed);
 | 
			
		||||
	for(int i=0;i<site_seeds.size();i++){
 | 
			
		||||
	  site_seeds[i]= ui(pseeder);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_grid->Broadcast(0,(void *)&site_seeds[0],sizeof(int)*site_seeds.size());
 | 
			
		||||
 | 
			
		||||
	if( rank == _grid->ThisRank() ){
 | 
			
		||||
	  fixedSeed ssrc(site_seeds);
 | 
			
		||||
	  typename source::result_type sinit = ssrc();
 | 
			
		||||
	  _generators[l_idx] = RngEngine(sinit);
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      _seeded=1;
 | 
			
		||||
    }    
 | 
			
		||||
 | 
			
		||||
    //FIXME implement generic IO and create state save/restore
 | 
			
		||||
    //void SaveState(const std::string<char> &file);
 | 
			
		||||
@@ -336,8 +307,7 @@ namespace Grid {
 | 
			
		||||
      int words=sizeof(scalar_object)/sizeof(scalar_type);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int ss=0;ss<osites;ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<osites;ss++){
 | 
			
		||||
 | 
			
		||||
	std::vector<scalar_object> buf(Nsimd);
 | 
			
		||||
	for(int m=0;m<multiplicity;m++) {// Draw from same generator multiplicity times
 | 
			
		||||
@@ -359,11 +329,75 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
      }
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    // This loop could be made faster to avoid the Amdahl bottleneck by
 | 
			
		||||
    // i)  seed generators on each timeslice, for x=y=z=0;
 | 
			
		||||
    // ii) seed generators on each z for x=y=0
 | 
			
		||||
    // iii)seed generators on each y,z for x=0
 | 
			
		||||
    // iv) seed generators on each y,z,x 
 | 
			
		||||
    // made possible by physical indexing.
 | 
			
		||||
    template<class source> void Seed(source &src)
 | 
			
		||||
    {
 | 
			
		||||
 | 
			
		||||
      typedef typename source::result_type seed_t;
 | 
			
		||||
      std::uniform_int_distribution<seed_t> uid;
 | 
			
		||||
 | 
			
		||||
      int numseed=4;
 | 
			
		||||
      int gsites = _grid->_gsites;
 | 
			
		||||
      std::vector<seed_t> site_init(numseed);
 | 
			
		||||
      std::vector<int> gcoor;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
      // Master RngEngine
 | 
			
		||||
      std::vector<seed_t> master_init(numseed);  src.generate(master_init.begin(),master_init.end());
 | 
			
		||||
      _grid->Broadcast(0,(void *)&master_init[0],sizeof(seed_t)*numseed);
 | 
			
		||||
      fixedSeed master_seed(master_init);
 | 
			
		||||
      RngEngine master_engine(master_seed);
 | 
			
		||||
 | 
			
		||||
      // Per node RngEngine
 | 
			
		||||
      std::vector<seed_t> node_init(numseed);
 | 
			
		||||
      for(int r=0;r<_grid->ProcessorCount();r++) {
 | 
			
		||||
 | 
			
		||||
	std::vector<seed_t> rank_init(numseed);
 | 
			
		||||
	for(int i=0;i<numseed;i++) rank_init[i] = uid(master_engine);
 | 
			
		||||
 | 
			
		||||
	std::cout << GridLogMessage << "SeedSeq for rank "<<r;
 | 
			
		||||
	for(int i=0;i<numseed;i++) std::cout<<" "<<rank_init[i];
 | 
			
		||||
	std::cout <<std::endl;
 | 
			
		||||
 | 
			
		||||
	if ( r==_grid->ThisRank() ) { 
 | 
			
		||||
	  for(int i=0;i<numseed;i++) node_init[i] = rank_init[i];
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      ////////////////////////////////////////////////////
 | 
			
		||||
      // Set up a seed_seq wrapper with these 8 words
 | 
			
		||||
      // and draw for each site within node.
 | 
			
		||||
      ////////////////////////////////////////////////////
 | 
			
		||||
      fixedSeed node_seed(node_init);
 | 
			
		||||
      RngEngine node_engine(node_seed);
 | 
			
		||||
 | 
			
		||||
      for(int gidx=0;gidx<gsites;gidx++){
 | 
			
		||||
	int rank,o_idx,i_idx;
 | 
			
		||||
 | 
			
		||||
	_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
 | 
			
		||||
	_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
 | 
			
		||||
 | 
			
		||||
	if( rank == _grid->ThisRank() ){
 | 
			
		||||
	  int l_idx=generator_idx(o_idx,i_idx);
 | 
			
		||||
	  for(int i=0;i<numseed;i++)  site_init[i] = uid(node_engine);
 | 
			
		||||
	  fixedSeed site_seed(site_init);
 | 
			
		||||
	  _generators[l_idx] = RngEngine(site_seed);
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      _seeded=1;
 | 
			
		||||
    }    
 | 
			
		||||
    void SeedRandomDevice(void){
 | 
			
		||||
      deviceSeed src;
      Seed(src);
 | 
			
		||||
    }
 | 
			
		||||
    void SeedFixedIntegers(const std::vector<int> &seeds){
 | 
			
		||||
      CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
 | 
			
		||||
      fixedSeed src(seeds);
 | 
			
		||||
      Seed(src);
 | 
			
		||||
    }
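The parallel Seed() above is a two-level scheme: a master engine (seeded identically everywhere) draws per-rank seed words, a per-rank engine then draws per-site seed words, so every site generator is decorrelated yet fully reproducible from the original seeds. The sketch below reproduces that structure in a single process; ranks and sites are plain loop indices, and the counts, types and use of std::mt19937_64 are assumptions made for illustration.

// Two-level (master -> rank -> site) seeding sketch.
#include <cstdio>
#include <random>
#include <vector>

int main() {
  const int nrank = 2, nsite = 4, numseed = 4;
  std::seed_seq master_seq{7, 11, 13, 17};
  std::mt19937_64 master(master_seq);
  std::uniform_int_distribution<std::uint64_t> uid;

  for (int r = 0; r < nrank; r++) {
    // per-rank seed words, drawn in rank order so every rank can reproduce them
    std::vector<std::uint64_t> rank_init(numseed);
    for (auto &s : rank_init) s = uid(master);

    std::seed_seq rank_seq(rank_init.begin(), rank_init.end());
    std::mt19937_64 node(rank_seq);

    for (int ss = 0; ss < nsite; ss++) {
      std::vector<std::uint64_t> site_init(numseed);
      for (auto &s : site_init) s = uid(node);
      std::seed_seq site_seq(site_init.begin(), site_init.end());
      std::mt19937_64 site_engine(site_seq);
      std::printf("rank %d site %d first draw %llu\n", r, ss,
                  (unsigned long long)site_engine());
    }
  }
  return 0;
}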
 | 
			
		||||
 
 | 
			
		||||
@@ -42,8 +42,7 @@ namespace Grid {
 | 
			
		||||
      -> Lattice<decltype(trace(lhs._odata[0]))>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(trace(lhs._odata[0]))> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
            ret._odata[ss] = trace(lhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
@@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<Index>(lhs._odata[0]))>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(traceIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
	ret._odata[ss] = traceIndex<Index>(lhs._odata[ss]);
 | 
			
		||||
      }
 | 
			
		||||
      return ret;
 | 
			
		||||
 
 | 
			
		||||
@@ -51,7 +51,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
 | 
			
		||||
  template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
 | 
			
		||||
    half.checkerboard = cb;
 | 
			
		||||
    int ssh=0;
 | 
			
		||||
    //PARALLEL_FOR_LOOP
 | 
			
		||||
    //parallel_for
 | 
			
		||||
    for(int ss=0;ss<full._grid->oSites();ss++){
 | 
			
		||||
      std::vector<int> coor;
 | 
			
		||||
      int cbos;
 | 
			
		||||
@@ -68,7 +68,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
 | 
			
		||||
  template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
 | 
			
		||||
    int cb = half.checkerboard;
 | 
			
		||||
    int ssh=0;
 | 
			
		||||
    //PARALLEL_FOR_LOOP
 | 
			
		||||
    //parallel_for
 | 
			
		||||
    for(int ss=0;ss<full._grid->oSites();ss++){
 | 
			
		||||
      std::vector<int> coor;
 | 
			
		||||
      int cbos;
 | 
			
		||||
@@ -153,8 +153,7 @@ inline void blockZAXPY(Lattice<vobj> &fineZ,
 | 
			
		||||
    assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int sf=0;sf<fine->oSites();sf++){
 | 
			
		||||
  parallel_for(int sf=0;sf<fine->oSites();sf++){
 | 
			
		||||
    
 | 
			
		||||
    int sc;
 | 
			
		||||
    std::vector<int> coor_c(_ndimension);
 | 
			
		||||
@@ -186,8 +185,7 @@ template<class vobj,class CComplex>
 | 
			
		||||
 | 
			
		||||
  fine_inner = localInnerProduct(fineX,fineY);
 | 
			
		||||
  blockSum(coarse_inner,fine_inner);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<coarse->oSites();ss++){
 | 
			
		||||
  parallel_for(int ss=0;ss<coarse->oSites();ss++){
 | 
			
		||||
    CoarseInner._odata[ss] = coarse_inner._odata[ss];
 | 
			
		||||
  }
 | 
			
		||||
}
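The block routines (blockZAXPY, blockInnerProduct and friends) map each fine-grid site to its coarse block by dividing the fine coordinate by the block size in every dimension, after the assert above has checked that block_r times the coarse dimension reproduces the fine dimension. A standalone sketch of that coordinate-to-block mapping follows; the sizes, the lexicographic convention and the function name coarseIndex are illustrative assumptions.

// Fine-site -> coarse-block index mapping sketch.
#include <cstdio>
#include <vector>

int coarseIndex(const std::vector<int> &fine_coor,
                const std::vector<int> &block_r,
                const std::vector<int> &coarse_dim) {
  int sc = 0;
  for (int d = (int)fine_coor.size() - 1; d >= 0; d--) {
    int cc = fine_coor[d] / block_r[d];     // which block this fine site falls in
    sc = sc * coarse_dim[d] + cc;           // lexicographic coarse index
  }
  return sc;
}

int main() {
  std::vector<int> block_r{2, 2, 2, 2}, coarse_dim{4, 4, 4, 4};
  std::vector<int> site{5, 0, 3, 7};
  std::printf("coarse index %d\n", coarseIndex(site, block_r, coarse_dim));
  return 0;
}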
 | 
			
		||||
@@ -333,9 +331,6 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
  typedef typename vvobj::scalar_object ssobj;
 | 
			
		||||
 | 
			
		||||
  sobj s;
 | 
			
		||||
  ssobj ss;
 | 
			
		||||
 | 
			
		||||
  GridBase *ig = in._grid;
 | 
			
		||||
  GridBase *og = out._grid;
 | 
			
		||||
 | 
			
		||||
@@ -347,10 +342,13 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
 | 
			
		||||
  for(int d=0;d<no;d++){
 | 
			
		||||
    assert(ig->_processors[d]  == og->_processors[d]);
 | 
			
		||||
    assert(ig->_ldimensions[d] == og->_ldimensions[d]);
 | 
			
		||||
    assert(ig->lSites() == og->lSites());
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  //PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int idx=0;idx<ig->lSites();idx++){
 | 
			
		||||
  parallel_for(int idx=0;idx<ig->lSites();idx++){
 | 
			
		||||
    sobj s;
 | 
			
		||||
    ssobj ss;
 | 
			
		||||
 | 
			
		||||
    std::vector<int> lcoor(ni);
 | 
			
		||||
    ig->LocalIndexToLocalCoor(idx,lcoor);
 | 
			
		||||
    peekLocalSite(s,in,lcoor);
 | 
			
		||||
@@ -364,7 +362,6 @@ template<class vobj>
 | 
			
		||||
void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
  sobj s;
 | 
			
		||||
 | 
			
		||||
  GridBase *lg = lowDim._grid;
 | 
			
		||||
  GridBase *hg = higherDim._grid;
 | 
			
		||||
@@ -386,17 +383,16 @@ void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // the above should guarantee that the operations are local
 | 
			
		||||
  // Guido: check the threading here
 | 
			
		||||
  //PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
  parallel_for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
    sobj s;
 | 
			
		||||
    std::vector<int> lcoor(nl);
 | 
			
		||||
    std::vector<int> hcoor(nh);
 | 
			
		||||
    lg->LocalIndexToLocalCoor(idx,lcoor);
 | 
			
		||||
    dl=0;
 | 
			
		||||
    int ddl=0;
 | 
			
		||||
    hcoor[orthog] = slice;
 | 
			
		||||
    for(int d=0;d<nh;d++){
 | 
			
		||||
      if ( d!=orthog ) { 
 | 
			
		||||
	hcoor[d]=lcoor[dl++];
 | 
			
		||||
	hcoor[d]=lcoor[ddl++];
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    peekLocalSite(s,lowDim,lcoor);
 | 
			
		||||
@@ -408,7 +404,6 @@ template<class vobj>
 | 
			
		||||
void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, int orthog)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
  sobj s;
 | 
			
		||||
 | 
			
		||||
  GridBase *lg = lowDim._grid;
 | 
			
		||||
  GridBase *hg = higherDim._grid;
 | 
			
		||||
@@ -429,16 +424,16 @@ void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, in
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
  // the above should guarantee that the operations are local
 | 
			
		||||
  //PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
  parallel_for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
    sobj s;
 | 
			
		||||
    std::vector<int> lcoor(nl);
 | 
			
		||||
    std::vector<int> hcoor(nh);
 | 
			
		||||
    lg->LocalIndexToLocalCoor(idx,lcoor);
 | 
			
		||||
    dl=0;
 | 
			
		||||
    int ddl=0;
 | 
			
		||||
    hcoor[orthog] = slice;
 | 
			
		||||
    for(int d=0;d<nh;d++){
 | 
			
		||||
      if ( d!=orthog ) { 
 | 
			
		||||
	hcoor[d]=lcoor[dl++];
 | 
			
		||||
	hcoor[d]=lcoor[ddl++];
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
    peekLocalSite(s,higherDim,hcoor);
 | 
			
		||||
@@ -452,7 +447,6 @@ template<class vobj>
 | 
			
		||||
void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
  sobj s;
 | 
			
		||||
 | 
			
		||||
  GridBase *lg = lowDim._grid;
 | 
			
		||||
  GridBase *hg = higherDim._grid;
 | 
			
		||||
@@ -469,8 +463,8 @@ void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // the above should guarantee that the operations are local
 | 
			
		||||
  //PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
  parallel_for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
    sobj s;
 | 
			
		||||
    std::vector<int> lcoor(nl);
 | 
			
		||||
    std::vector<int> hcoor(nh);
 | 
			
		||||
    lg->LocalIndexToLocalCoor(idx,lcoor);
 | 
			
		||||
@@ -488,7 +482,6 @@ template<class vobj>
 | 
			
		||||
void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
 | 
			
		||||
{
 | 
			
		||||
  typedef typename vobj::scalar_object sobj;
 | 
			
		||||
  sobj s;
 | 
			
		||||
 | 
			
		||||
  GridBase *lg = lowDim._grid;
 | 
			
		||||
  GridBase *hg = higherDim._grid;
 | 
			
		||||
@@ -505,8 +498,8 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slic
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // the above should guarantee that the operations are local
 | 
			
		||||
  //PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
  parallel_for(int idx=0;idx<lg->lSites();idx++){
 | 
			
		||||
    sobj s;
 | 
			
		||||
    std::vector<int> lcoor(nl);
 | 
			
		||||
    std::vector<int> hcoor(nh);
 | 
			
		||||
    lg->LocalIndexToLocalCoor(idx,lcoor);
 | 
			
		||||
@@ -574,8 +567,7 @@ typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>
 | 
			
		||||
    in_grid->iCoorFromIindex(in_icoor[lane], lane);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
 | 
			
		||||
  parallel_for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
 | 
			
		||||
    //Assemble vector of pointers to output elements
 | 
			
		||||
    std::vector<sobj*> out_ptrs(in_nsimd);
 | 
			
		||||
 | 
			
		||||
@@ -623,8 +615,7 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
 | 
			
		||||
  std::vector<SobjOut> in_slex_conv(in_grid->lSites());
 | 
			
		||||
  unvectorizeToLexOrdArray(in_slex_conv, in);
 | 
			
		||||
    
 | 
			
		||||
  PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
 | 
			
		||||
  parallel_for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
 | 
			
		||||
    std::vector<int> out_ocoor(ndim);
 | 
			
		||||
    out_grid->oCoorFromOindex(out_ocoor, out_oidx);
 | 
			
		||||
 | 
			
		||||
@@ -642,10 +633,6 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
 | 
			
		||||
    merge(out._odata[out_oidx], ptrs, 0);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
}
 | 
			
		||||
#endif
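InsertSlice and ExtractSlice walk every local site of the lower-dimensional lattice and build the matching higher-dimensional coordinate by copying each direction except the orthogonal one, which is pinned to the requested slice; that is the lcoor to hcoor loop with the ddl counter above. A small standalone sketch of that mapping; sliceCoor is an illustrative name.

// lcoor -> hcoor mapping used when inserting/extracting a slice.
#include <cstdio>
#include <vector>

std::vector<int> sliceCoor(const std::vector<int> &lcoor, int slice, int orthog) {
  std::vector<int> hcoor(lcoor.size() + 1);
  int ddl = 0;
  for (int d = 0; d < (int)hcoor.size(); d++) {
    if (d != orthog) hcoor[d] = lcoor[ddl++];  // copy the transverse directions
    else             hcoor[d] = slice;         // pin the orthogonal direction
  }
  return hcoor;
}

int main() {
  std::vector<int> lcoor{1, 2, 3};                 // 3d coordinate
  std::vector<int> hcoor = sliceCoor(lcoor, 5, 2); // embed at x2 = 5 in 4d
  std::printf("%d %d %d %d\n", hcoor[0], hcoor[1], hcoor[2], hcoor[3]); // 1 2 5 3
  return 0;
}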
 | 
			
		||||
 
 | 
			
		||||
@@ -40,27 +40,24 @@ namespace Grid {
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<class vobj>
 | 
			
		||||
    inline Lattice<vobj> transpose(const Lattice<vobj> &lhs){
 | 
			
		||||
        Lattice<vobj> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
            ret._odata[ss] = transpose(lhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
    };
 | 
			
		||||
    Lattice<vobj> ret(lhs._grid);
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss] = transpose(lhs._odata[ss]);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
  };
 | 
			
		||||
    
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    // Index level dependent transpose
 | 
			
		||||
    ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
    template<int Index,class vobj>
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Index level dependent transpose
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template<int Index,class vobj>
 | 
			
		||||
    inline auto TransposeIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))>
 | 
			
		||||
    {
 | 
			
		||||
      Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
        for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
            ret._odata[ss] = transposeIndex<Index>(lhs._odata[ss]);
 | 
			
		||||
        }
 | 
			
		||||
        return ret;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
  {
 | 
			
		||||
    Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
 | 
			
		||||
    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss] = transposeIndex<Index>(lhs._odata[ss]);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
  };
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
 
 | 
			
		||||
@@ -37,8 +37,7 @@ namespace Grid {
 | 
			
		||||
    Lattice<obj> ret(rhs._grid);
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss]=pow(rhs._odata[ss],y);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
@@ -47,8 +46,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    Lattice<obj> ret(rhs._grid);
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss]=mod(rhs._odata[ss],y);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
@@ -58,8 +56,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    Lattice<obj> ret(rhs._grid);
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss]=div(rhs._odata[ss],y);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
@@ -69,8 +66,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
    Lattice<obj> ret(rhs._grid);
 | 
			
		||||
    ret.checkerboard = rhs.checkerboard;
 | 
			
		||||
    conformable(ret,rhs);
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
    for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 | 
			
		||||
      ret._odata[ss]=Exponentiate(rhs._odata[ss],alpha, Nexp);
 | 
			
		||||
    }
 | 
			
		||||
    return ret;
 | 
			
		||||
 
 | 
			
		||||
@@ -56,8 +56,7 @@ inline void whereWolf(Lattice<vobj> &ret,const Lattice<iobj> &predicate,Lattice<
 | 
			
		||||
  std::vector<scalar_object> truevals (Nsimd);
 | 
			
		||||
  std::vector<scalar_object> falsevals(Nsimd);
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<iftrue._grid->oSites(); ss++){
 | 
			
		||||
  parallel_for(int ss=0;ss<iftrue._grid->oSites(); ss++){
 | 
			
		||||
 | 
			
		||||
    extract(iftrue._odata[ss]   ,truevals);
 | 
			
		||||
    extract(iffalse._odata[ss]  ,falsevals);
 | 
			
		||||
 
 | 
			
		||||
@@ -29,9 +29,10 @@ See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/GridCore.h>
 | 
			
		||||
 | 
			
		||||
#include <cxxabi.h>
 | 
			
		||||
#include <memory>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
@@ -35,37 +35,27 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#endif
 | 
			
		||||
#include <arpa/inet.h>
 | 
			
		||||
#include <algorithm>
 | 
			
		||||
// 64bit endian swap is a portability pain
 | 
			
		||||
#ifndef __has_builtin         // Optional of course.
 | 
			
		||||
#define __has_builtin(x) 0  // Compatibility with non-clang compilers.
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#if HAVE_DECL_BE64TOH 
 | 
			
		||||
#undef Grid_ntohll
 | 
			
		||||
#define Grid_ntohll be64toh
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#if HAVE_DECL_NTOHLL
 | 
			
		||||
#undef  Grid_ntohll
 | 
			
		||||
#define Grid_ntohll ntohll
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#ifndef Grid_ntohll
 | 
			
		||||
inline uint32_t byte_reverse32(uint32_t f) { 
 | 
			
		||||
      f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; 
 | 
			
		||||
      return f;
 | 
			
		||||
}
 | 
			
		||||
inline uint64_t byte_reverse64(uint64_t f) { 
 | 
			
		||||
  uint64_t g;
 | 
			
		||||
  g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; 
 | 
			
		||||
  g = g << 32;
 | 
			
		||||
  f = f >> 32;
 | 
			
		||||
  g|= ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ; 
 | 
			
		||||
  return g;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#if BYTE_ORDER == BIG_ENDIAN 
 | 
			
		||||
 | 
			
		||||
#define Grid_ntohll(A) (A)
 | 
			
		||||
 | 
			
		||||
#else 
 | 
			
		||||
 | 
			
		||||
#if __has_builtin(__builtin_bswap64)
 | 
			
		||||
#define Grid_ntohll(A) __builtin_bswap64(A)
 | 
			
		||||
inline uint64_t Grid_ntohll(uint64_t A) { return A; }
 | 
			
		||||
#else
 | 
			
		||||
#error
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
inline uint64_t Grid_ntohll(uint64_t A) { 
 | 
			
		||||
  return byte_reverse64(A);
 | 
			
		||||
}
 | 
			
		||||
#endif
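The byte_reverse64 helper above builds a portable 64-bit byte swap out of two 32-bit reversals, for platforms where be64toh/ntohll are not declared. The standalone snippet below checks that construction against a simple shift-based reference; it is a verification sketch, not a replacement for the code above, and reference_bswap64 is an illustrative name.

// Check a byte_reverse64-style swap against a shift-based reference.
#include <cassert>
#include <cstdint>
#include <cstdio>

static inline uint32_t byte_reverse32(uint32_t f) {
  return ((f & 0xFF) << 24) | ((f & 0xFF00) << 8) |
         ((f & 0xFF0000) >> 8) | ((f & 0xFF000000UL) >> 24);
}
static inline uint64_t byte_reverse64(uint64_t f) {
  uint64_t g = byte_reverse32((uint32_t)(f & 0xFFFFFFFFULL)); // low half -> high half, reversed
  g <<= 32;
  g |= byte_reverse32((uint32_t)(f >> 32));                   // high half -> low half, reversed
  return g;
}
static inline uint64_t reference_bswap64(uint64_t x) {
  uint64_t r = 0;
  for (int i = 0; i < 8; i++) r |= ((x >> (8 * i)) & 0xFF) << (8 * (7 - i));
  return r;
}

int main() {
  uint64_t v = 0x0102030405060708ULL;
  assert(byte_reverse64(v) == reference_bswap64(v));
  std::printf("%016llx\n", (unsigned long long)byte_reverse64(v));  // 0807060504030201
  return 0;
}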
 | 
			
		||||
 | 
			
		||||
namespace Grid { 
 | 
			
		||||
@@ -195,7 +185,7 @@ class BinaryIO {
 | 
			
		||||
      std::vector<int> site({x,y,z,t});
 | 
			
		||||
 | 
			
		||||
      if (grid->IsBoss()) {
 | 
			
		||||
        fin.read((char *)&file_object, sizeof(file_object));
 | 
			
		||||
        fin.read((char *)&file_object, sizeof(file_object));assert( fin.fail()==0);
 | 
			
		||||
        bytes += sizeof(file_object);
 | 
			
		||||
        if (ieee32big) be32toh_v((void *)&file_object, sizeof(file_object));
 | 
			
		||||
        if (ieee32) le32toh_v((void *)&file_object, sizeof(file_object));
 | 
			
		||||
@@ -211,11 +201,13 @@ class BinaryIO {
 | 
			
		||||
    std::cout<<GridLogPerformance<<"readObjectSerial: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
 | 
			
		||||
       << (double)bytes/ (double)timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
 | 
			
		||||
    grid->Broadcast(0,(void *)&csum,sizeof(csum));
 | 
			
		||||
    return csum;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  template<class vobj,class fobj,class munger> 
 | 
			
		||||
  static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
 | 
			
		||||
  static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
 | 
			
		||||
					   const std::string & format)
 | 
			
		||||
  {
 | 
			
		||||
    typedef typename vobj::scalar_object sobj;
 | 
			
		||||
 | 
			
		||||
@@ -231,7 +223,7 @@ class BinaryIO {
 | 
			
		||||
    //////////////////////////////////////////////////
 | 
			
		||||
    std::cout<< GridLogMessage<< "Serial write I/O "<< file<<std::endl;
 | 
			
		||||
    GridStopWatch timer; timer.Start();
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    std::ofstream fout;
 | 
			
		||||
    if ( grid->IsBoss() ) {
 | 
			
		||||
      fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
 | 
			
		||||
@@ -255,23 +247,24 @@ class BinaryIO {
 | 
			
		||||
      
 | 
			
		||||
      if ( grid->IsBoss() ) {
 | 
			
		||||
  
 | 
			
		||||
	if(ieee32big) htobe32_v((void *)&file_object,sizeof(file_object));
	if(ieee32)    htole32_v((void *)&file_object,sizeof(file_object));
	if(ieee64big) htobe64_v((void *)&file_object,sizeof(file_object));
	if(ieee64)    htole64_v((void *)&file_object,sizeof(file_object));

	// NB could gather an xstrip as an optimisation.
	fout.write((char *)&file_object,sizeof(file_object));assert( fout.fail()==0);
	bytes+=sizeof(file_object);
 | 
			
		||||
      }
 | 
			
		||||
    }}}}
 | 
			
		||||
    timer.Stop();
 | 
			
		||||
    std::cout<<GridLogPerformance<<"writeObjectSerial: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
 | 
			
		||||
       << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
 | 
			
		||||
	     << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
    
 | 
			
		||||
    grid->Broadcast(0,(void *)&csum,sizeof(csum));
 | 
			
		||||
    return csum;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  
 | 
			
		||||
  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG ¶llel,std::string file,int offset)
 | 
			
		||||
  {
 | 
			
		||||
    typedef typename GridSerialRNG::RngStateType RngStateType;
 | 
			
		||||
@@ -305,23 +298,23 @@ class BinaryIO {
 | 
			
		||||
      int l_idx=parallel.generator_idx(o_idx,i_idx);
 | 
			
		||||
 | 
			
		||||
      if( rank == grid->ThisRank() ){
 | 
			
		||||
  //  std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
 | 
			
		||||
  parallel.GetState(saved,l_idx);
 | 
			
		||||
	//  std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
 | 
			
		||||
	parallel.GetState(saved,l_idx);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      grid->Broadcast(rank,(void *)&saved[0],bytes);
 | 
			
		||||
 | 
			
		||||
      if ( grid->IsBoss() ) {
 | 
			
		||||
  Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
  fout.write((char *)&saved[0],bytes);
 | 
			
		||||
	Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
	fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ( grid->IsBoss() ) {
 | 
			
		||||
      serial.GetState(saved,0);
 | 
			
		||||
      Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
      fout.write((char *)&saved[0],bytes);
 | 
			
		||||
      fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
 | 
			
		||||
    }
 | 
			
		||||
    grid->Broadcast(0,(void *)&csum,sizeof(csum));
 | 
			
		||||
    return csum;
 | 
			
		||||
@@ -355,20 +348,20 @@ class BinaryIO {
 | 
			
		||||
      int l_idx=parallel.generator_idx(o_idx,i_idx);
 | 
			
		||||
 | 
			
		||||
      if ( grid->IsBoss() ) {
 | 
			
		||||
  fin.read((char *)&saved[0],bytes);
 | 
			
		||||
  Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
	fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
 | 
			
		||||
	Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      grid->Broadcast(0,(void *)&saved[0],bytes);
 | 
			
		||||
 | 
			
		||||
      if( rank == grid->ThisRank() ){
 | 
			
		||||
  parallel.SetState(saved,l_idx);
 | 
			
		||||
	parallel.SetState(saved,l_idx);
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if ( grid->IsBoss() ) {
 | 
			
		||||
      fin.read((char *)&saved[0],bytes);
 | 
			
		||||
      fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
 | 
			
		||||
      serial.SetState(saved,0);
 | 
			
		||||
      Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 | 
			
		||||
    }
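The RNG and gauge I/O routines accumulate a Uint32Checksum over every buffer they read or write, and this commit adds assert(fin.fail()==0)/assert(fout.fail()==0) after each stream transfer. Below is a sketch of a simple word-wise 32-bit checksum over a buffer in that spirit; the real Uint32Checksum may combine words differently, so treat the accumulation rule as an assumption.

// Word-wise 32-bit checksum sketch over a buffer.
#include <cstdint>
#include <cstdio>
#include <vector>

void uint32Checksum(const uint32_t *buf, std::size_t bytes, uint32_t &csum) {
  std::size_t words = bytes / sizeof(uint32_t);
  for (std::size_t i = 0; i < words; i++) csum = csum + buf[i];  // simple additive accumulation
}

int main() {
  std::vector<uint32_t> saved{0xDEADBEEF, 0x12345678, 0x0};
  uint32_t csum = 0;
  uint32Checksum(saved.data(), saved.size() * sizeof(uint32_t), csum);
  std::printf("csum %08x\n", csum);
  return 0;
}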
 | 
			
		||||
@@ -380,7 +373,8 @@ class BinaryIO {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  template<class vobj,class fobj,class munger>
 | 
			
		||||
  static inline uint32_t readObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
 | 
			
		||||
  static inline uint32_t readObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
 | 
			
		||||
					    const std::string &format)
 | 
			
		||||
  {
 | 
			
		||||
    typedef typename vobj::scalar_object sobj;
 | 
			
		||||
 | 
			
		||||
@@ -415,15 +409,15 @@ class BinaryIO {
 | 
			
		||||
 | 
			
		||||
      if ( d == 0 ) parallel[d] = 0;
 | 
			
		||||
      if (parallel[d]) {
 | 
			
		||||
  range[d] = grid->_ldimensions[d];
 | 
			
		||||
  start[d] = grid->_processor_coor[d]*range[d];
 | 
			
		||||
  ioproc[d]= grid->_processor_coor[d];
 | 
			
		||||
	range[d] = grid->_ldimensions[d];
 | 
			
		||||
	start[d] = grid->_processor_coor[d]*range[d];
 | 
			
		||||
	ioproc[d]= grid->_processor_coor[d];
 | 
			
		||||
      } else {
 | 
			
		||||
  range[d] = grid->_gdimensions[d];
 | 
			
		||||
  start[d] = 0;
 | 
			
		||||
  ioproc[d]= 0;
 | 
			
		||||
 | 
			
		||||
  if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
 | 
			
		||||
	range[d] = grid->_gdimensions[d];
 | 
			
		||||
	start[d] = 0;
 | 
			
		||||
	ioproc[d]= 0;
 | 
			
		||||
	
 | 
			
		||||
	if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
 | 
			
		||||
      }
 | 
			
		||||
      slice_vol = slice_vol * range[d];
 | 
			
		||||
    }
 | 
			
		||||
@@ -434,9 +428,9 @@ class BinaryIO {
 | 
			
		||||
      std::cout<< std::dec ;
 | 
			
		||||
      std::cout<< GridLogMessage<< "Parallel read I/O to "<< file << " with " <<tmp<< " IOnodes for subslice ";
 | 
			
		||||
      for(int d=0;d<grid->_ndimension;d++){
 | 
			
		||||
  std::cout<< range[d];
 | 
			
		||||
  if( d< grid->_ndimension-1 ) 
 | 
			
		||||
    std::cout<< " x ";
 | 
			
		||||
	std::cout<< range[d];
 | 
			
		||||
	if( d< grid->_ndimension-1 ) 
 | 
			
		||||
	  std::cout<< " x ";
 | 
			
		||||
      }
 | 
			
		||||
      std::cout << std::endl;
 | 
			
		||||
    }
 | 
			
		||||
@@ -472,8 +466,8 @@ class BinaryIO {
 | 
			
		||||
      Lexicographic::CoorFromIndex(tsite,tlex,range);
 | 
			
		||||
 | 
			
		||||
      for(int d=0;d<nd;d++){
 | 
			
		||||
  lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
 | 
			
		||||
  gsite[d] = tsite[d]+start[d];               // global site
 | 
			
		||||
	lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
 | 
			
		||||
	gsite[d] = tsite[d]+start[d];               // global site
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      /////////////////////////
 | 
			
		||||
@@ -488,28 +482,28 @@ class BinaryIO {
 | 
			
		||||
      ////////////////////////////////
 | 
			
		||||
      if (myrank == iorank) {
 | 
			
		||||
  
 | 
			
		||||
  fin.seekg(offset+g_idx*sizeof(fileObj));
 | 
			
		||||
  fin.read((char *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  bytes+=sizeof(fileObj);
 | 
			
		||||
	fin.seekg(offset+g_idx*sizeof(fileObj));
 | 
			
		||||
	fin.read((char *)&fileObj,sizeof(fileObj));assert( fin.fail()==0);
 | 
			
		||||
	bytes+=sizeof(fileObj);
 | 
			
		||||
  
 | 
			
		||||
  if(ieee32big) be32toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee32)    le32toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee64big) be64toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee64)    le64toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  
 | 
			
		||||
  munge(fileObj,siteObj,csum);
 | 
			
		||||
 | 
			
		||||
	if(ieee32big) be32toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee32)    le32toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee64big) be64toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee64)    le64toh_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	
 | 
			
		||||
	munge(fileObj,siteObj,csum);
 | 
			
		||||
	
 | 
			
		||||
      } 
 | 
			
		||||
 | 
			
		||||
      
 | 
			
		||||
      // Possibly do transport through pt2pt 
 | 
			
		||||
      if ( rank != iorank ) { 
 | 
			
		||||
  if ( (myrank == rank) || (myrank==iorank) ) {
 | 
			
		||||
    grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,iorank,rank,sizeof(siteObj));
 | 
			
		||||
  }
 | 
			
		||||
	if ( (myrank == rank) || (myrank==iorank) ) {
 | 
			
		||||
	  grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,iorank,rank,sizeof(siteObj));
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      // Poke at destination
 | 
			
		||||
      if ( myrank == rank ) {
 | 
			
		||||
    pokeLocalSite(siteObj,Umu,lsite);
 | 
			
		||||
	pokeLocalSite(siteObj,Umu,lsite);
 | 
			
		||||
      }
 | 
			
		||||
      grid->Barrier(); // necessary?
 | 
			
		||||
    }
 | 
			
		||||
@@ -520,7 +514,7 @@ class BinaryIO {
 | 
			
		||||
 | 
			
		||||
    timer.Stop();
 | 
			
		||||
    std::cout<<GridLogPerformance<<"readObjectParallel: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
 | 
			
		||||
       << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
	     << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
    
 | 
			
		||||
    return csum;
 | 
			
		||||
  }
 | 
			
		||||
@@ -529,7 +523,8 @@ class BinaryIO {
 | 
			
		||||
  // Parallel writer
 | 
			
		||||
  //////////////////////////////////////////////////////////
 | 
			
		||||
  template<class vobj,class fobj,class munger>
 | 
			
		||||
  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
 | 
			
		||||
  static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
 | 
			
		||||
					     const std::string & format)
 | 
			
		||||
  {
 | 
			
		||||
    typedef typename vobj::scalar_object sobj;
 | 
			
		||||
    GridBase *grid = Umu._grid;
 | 
			
		||||
@@ -558,15 +553,15 @@ class BinaryIO {
 | 
			
		||||
      if ( d!= grid->_ndimension-1 ) parallel[d] = 0;
 | 
			
		||||
 | 
			
		||||
      if (parallel[d]) {
 | 
			
		||||
  range[d] = grid->_ldimensions[d];
 | 
			
		||||
  start[d] = grid->_processor_coor[d]*range[d];
 | 
			
		||||
  ioproc[d]= grid->_processor_coor[d];
 | 
			
		||||
	range[d] = grid->_ldimensions[d];
 | 
			
		||||
	start[d] = grid->_processor_coor[d]*range[d];
 | 
			
		||||
	ioproc[d]= grid->_processor_coor[d];
 | 
			
		||||
      } else {
 | 
			
		||||
  range[d] = grid->_gdimensions[d];
 | 
			
		||||
  start[d] = 0;
 | 
			
		||||
  ioproc[d]= 0;
 | 
			
		||||
	range[d] = grid->_gdimensions[d];
 | 
			
		||||
	start[d] = 0;
 | 
			
		||||
	ioproc[d]= 0;
 | 
			
		||||
 | 
			
		||||
  if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
 | 
			
		||||
	if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      slice_vol = slice_vol * range[d];
 | 
			
		||||
@@ -577,13 +572,13 @@ class BinaryIO {
 | 
			
		||||
      grid->GlobalSum(tmp);
 | 
			
		||||
      std::cout<< GridLogMessage<< "Parallel write I/O from "<< file << " with " <<tmp<< " IOnodes for subslice ";
 | 
			
		||||
      for(int d=0;d<grid->_ndimension;d++){
 | 
			
		||||
  std::cout<< range[d];
 | 
			
		||||
  if( d< grid->_ndimension-1 ) 
 | 
			
		||||
    std::cout<< " x ";
 | 
			
		||||
	std::cout<< range[d];
 | 
			
		||||
	if( d< grid->_ndimension-1 ) 
 | 
			
		||||
	  std::cout<< " x ";
 | 
			
		||||
      }
 | 
			
		||||
      std::cout << std::endl;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    GridStopWatch timer; timer.Start();
 | 
			
		||||
    uint64_t bytes=0;
 | 
			
		||||
 | 
			
		||||
@@ -619,8 +614,8 @@ class BinaryIO {
 | 
			
		||||
      Lexicographic::CoorFromIndex(tsite,tlex,range);
 | 
			
		||||
 | 
			
		||||
      for(int d=0;d<nd;d++){
 | 
			
		||||
  lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
 | 
			
		||||
  gsite[d] = tsite[d]+start[d];               // global site
 | 
			
		||||
	lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
 | 
			
		||||
	gsite[d] = tsite[d]+start[d];               // global site
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@@ -640,36 +635,36 @@ class BinaryIO {
 | 
			
		||||
 | 
			
		||||
      // Pair of nodes may need to do pt2pt send
 | 
			
		||||
      if ( rank != iorank ) { // comms is necessary
 | 
			
		||||
  if ( (myrank == rank) || (myrank==iorank) ) { // and we have to do it
 | 
			
		||||
    // Send to IOrank 
 | 
			
		||||
    grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
 | 
			
		||||
  }
 | 
			
		||||
	if ( (myrank == rank) || (myrank==iorank) ) { // and we have to do it
 | 
			
		||||
	  // Send to IOrank 
 | 
			
		||||
	  grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
 | 
			
		||||
      grid->Barrier(); // necessary?
 | 
			
		||||
 | 
			
		||||
      if (myrank == iorank) {
 | 
			
		||||
  
 | 
			
		||||
  munge(siteObj,fileObj,csum);
 | 
			
		||||
 | 
			
		||||
  if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee32)    htole32_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  if(ieee64)    htole64_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  
 | 
			
		||||
  fout.seekp(offset+g_idx*sizeof(fileObj));
 | 
			
		||||
  fout.write((char *)&fileObj,sizeof(fileObj));
 | 
			
		||||
  bytes+=sizeof(fileObj);
 | 
			
		||||
	munge(siteObj,fileObj,csum);
 | 
			
		||||
	
 | 
			
		||||
	if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee32)    htole32_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	if(ieee64)    htole64_v((void *)&fileObj,sizeof(fileObj));
 | 
			
		||||
	
 | 
			
		||||
	fout.seekp(offset+g_idx*sizeof(fileObj));
 | 
			
		||||
	fout.write((char *)&fileObj,sizeof(fileObj));assert( fout.fail()==0);
 | 
			
		||||
	bytes+=sizeof(fileObj);
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    grid->GlobalSum(csum);
 | 
			
		||||
    grid->GlobalSum(bytes);
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    timer.Stop();
 | 
			
		||||
    std::cout<<GridLogPerformance<<"writeObjectParallel: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
 | 
			
		||||
       << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
 | 
			
		||||
	     << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 | 
			
		||||
    
 | 
			
		||||
    return csum;
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -242,7 +242,6 @@ class NerscIO : public BinaryIO {
  static inline unsigned int writeHeader(NerscField &field,std::string file)
  {
    std::ofstream fout(file,std::ios::out|std::ios::in);

    fout.seekp(0,std::ios::beg);
    dump_nersc_header(field, fout);
    field.data_start = fout.tellp();
@@ -264,10 +263,13 @@ static inline int readHeader(std::string file,GridBase *grid,  NerscField &field
  getline(fin,line); // read one line and insist is 

  removeWhitespace(line);
  std::cout << GridLogMessage << "* " << line << std::endl;

  assert(line==std::string("BEGIN_HEADER"));

  do {
    getline(fin,line); // read one line
    std::cout << GridLogMessage << "* "<<line<< std::endl;
    int eq = line.find("=");
    if(eq >0) {
      std::string key=line.substr(0,eq);
@@ -322,6 +324,8 @@ static inline int readHeader(std::string file,GridBase *grid,  NerscField &field
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Now the meat: the object readers
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define PARALLEL_READ
#define PARALLEL_WRITE

template<class vsimd>
static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,NerscField& header,std::string file)
@@ -345,25 +349,41 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
  // munger is a function of <floating point, Real, data_type>
  if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
    if ( ieee32 || ieee32big ) {
      //      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F> 
	csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3F> 
#ifdef PARALLEL_READ
      csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3F> 
	(Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
#else
      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F> 
	(Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
#endif
    }
    if ( ieee64 || ieee64big ) {
      //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D> 
#ifdef PARALLEL_READ
      csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3D> 
      	(Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
#else 
      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D> 
      	(Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
#endif
    }
  } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
    if ( ieee32 || ieee32big ) {
      //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
#ifdef PARALLEL_READ
      csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
#else
      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
#endif
    }
    if ( ieee64 || ieee64big ) {
      //      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
#ifdef PARALLEL_READ
      csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
#else
      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
#endif
    }
  } else {
    assert(0);
@@ -371,12 +391,17 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,

  NerscStatistics<GaugeField>(Umu,clone);

  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<<            csum<< std::dec
	                                                  <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl;
  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" plaquette "<<clone.plaquette
	                                                  <<" header    "<<header.plaquette<<std::endl;
  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" link_trace "<<clone.link_trace
	                                                  <<" header    "<<header.link_trace<<std::endl;
  assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
  assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );

  assert(csum == header.checksum );

  std::cout<<GridLogMessage <<"Read NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
}

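For reference, a minimal sketch of how this reader is typically driven from user code; the grid setup mirrors the pattern used in Grid's test programs, and the file name is an illustrative assumption rather than part of this commit:

```cpp
#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

int main(int argc, char **argv) {
  Grid_init(&argc, &argv);

  // Four-dimensional grid matching the geometry recorded in the NERSC header.
  GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(
      GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());

  LatticeGaugeField Umu(UGrid);
  NerscField header;

  // Reads the header, then the payload (parallel or serial path depending on
  // PARALLEL_READ), and asserts checksum/plaquette/link-trace consistency.
  NerscIO::readConfiguration(Umu, header, std::string("ckpoint_lat.4000"));

  Grid_finalize();
  return 0;
}
```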
template<class vsimd>
@@ -416,19 +441,11 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
    Nersc3x2unmunger<fobj2D,sobj> munge;
    BinaryIO::Uint32Checksum<vobj,fobj2D>(Umu, munge,header.checksum);
    offset = writeHeader(header,file);
#ifdef PARALLEL_WRITE
    csum=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
#else
    csum=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);

    std::string file1 = file+"para";
    int offset1 = writeHeader(header,file1);
    int csum1=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);
    //int csum1=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);

    std::cout << GridLogMessage << " TESTING PARALLEL WRITE offsets " << offset1 << " "<< offset << std::endl;
    std::cout << GridLogMessage << " TESTING PARALLEL WRITE csums   " << csum1 << " "<<std::hex<< csum << std::dec<< std::endl;

    assert(offset1==offset);  
    assert(csum1==csum);  
#endif

  } else { 
    header.floating_point = std::string("IEEE64BIG");
@@ -436,8 +453,11 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
    NerscSimpleUnmunger<fobj3D,sobj> munge;
    BinaryIO::Uint32Checksum<vobj,fobj3D>(Umu, munge,header.checksum);
    offset = writeHeader(header,file);
    //    csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
#ifdef PARALLEL_WRITE
    csum=BinaryIO::writeObjectParallel<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
#else
    csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
#endif
  }

  std::cout<<GridLogMessage <<"Written NERSC Configuration "<<file<< " checksum "<<std::hex<<csum<< std::dec<<" plaq "<< header.plaquette <<std::endl;
@@ -511,8 +531,6 @@ static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel
  // munger is a function of <floating point, Real, data_type>
  uint32_t csum=BinaryIO::readRNGSerial(serial,parallel,file,offset);

  std::cerr<<" Csum "<< csum << " "<< header.checksum <<std::endl;

  assert(csum == header.checksum );

  std::cout<<GridLogMessage <<"Read NERSC RNG file "<<file<< " format "<< data_type <<std::endl;
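The serial and parallel writers are expected to produce bit-identical output, which is why the IEEE32BIG branch above writes the same field through both paths during testing and asserts that the offsets and 32-bit checksums agree. As far as I understand the NERSC convention, the checksum is a wrap-around sum over the data words; a stand-alone sketch of that idea (not Grid's actual `Uint32Checksum`) is:

```cpp
#include <cstdint>
#include <cstddef>

// Illustrative NERSC-style checksum: unsigned 32-bit wrap-around sum of words.
static uint32_t uint32_sum_checksum(const void *data, size_t bytes) {
  const uint32_t *words = static_cast<const uint32_t *>(data);
  uint32_t csum = 0;
  for (size_t i = 0; i < bytes / sizeof(uint32_t); i++) {
    csum += words[i];  // unsigned overflow wraps mod 2^32, as intended
  }
  return csum;
}
```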
@@ -26,8 +26,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    *************************************************************************************/
    /*  END LEGAL */

#include <Grid/Grid.h>
#include <Grid/PerfCount.h>
#include <Grid/GridCore.h>
#include <Grid/perfmon/PerfCount.h>

namespace Grid {

@@ -172,7 +172,7 @@ public:
    const char * name = PerformanceCounterConfigs[PCT].name;
    fd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
    if (fd == -1) {
      fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
      fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
      perror("Error is");
    }
    int norm = PerformanceCounterConfigs[PCT].normalisation;
@@ -181,7 +181,7 @@ public:
    name = PerformanceCounterConfigs[norm].name;
    cyclefd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
    if (cyclefd == -1) {
      fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
      fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
      perror("Error is");
    }
#endif
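The only functional change in the two hunks above is the explicit `(long long)` cast so the `%llx` conversion matches the 64-bit `pe.config` field. For context, `perf_event_open` has no glibc wrapper and is reached through `syscall`; a minimal self-contained example of opening and reading a hardware cycle counter, independent of Grid's wrapper class, looks roughly like this:

```cpp
#include <cstdio>
#include <cstring>
#include <cstdint>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

// perf_event_open has no libc stub, so invoke it via syscall(2).
static long perf_event_open(perf_event_attr *attr, pid_t pid, int cpu,
                            int group_fd, unsigned long flags) {
  return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main() {
  perf_event_attr pe;
  std::memset(&pe, 0, sizeof(pe));
  pe.size = sizeof(pe);
  pe.type = PERF_TYPE_HARDWARE;
  pe.config = PERF_COUNT_HW_CPU_CYCLES;
  pe.disabled = 1;
  pe.exclude_kernel = 1;

  int fd = perf_event_open(&pe, 0, -1, -1, 0);  // this process, any CPU, no group
  if (fd == -1) {
    std::fprintf(stderr, "Error opening leader %llx\n", (long long)pe.config);
    return 1;
  }
  ioctl(fd, PERF_EVENT_IOC_RESET, 0);
  ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
  /* ... code to be measured ... */
  ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

  uint64_t cycles = 0;
  read(fd, &cycles, sizeof(cycles));
  std::printf("cycles: %llu\n", (unsigned long long)cycles);
  close(fd);
  return 0;
}
```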
@@ -1,11 +1,9 @@
#include <Grid/Grid.h>
#include <Grid/PerfCount.h>
#include <Grid/Stat.h>

#include <Grid/GridCore.h>
#include <Grid/perfmon/PerfCount.h>
#include <Grid/perfmon/Stat.h>

namespace Grid { 


bool PmuStat::pmu_initialized=false;

@@ -29,8 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#ifndef GRID_QCD_H
#define GRID_QCD_H
#ifndef GRID_QCD_BASE_H
#define GRID_QCD_BASE_H
namespace Grid{

namespace QCD {
@@ -62,7 +62,6 @@ namespace QCD {
    #define SpinIndex    1
    #define LorentzIndex 0


    // Also should make these a named enum type
    static const int DaggerNo=0;
    static const int DaggerYes=1;
@@ -494,26 +493,5 @@ namespace QCD {
} // Grid


#include <Grid/qcd/utils/SpaceTimeGrid.h>
#include <Grid/qcd/spin/Dirac.h>
#include <Grid/qcd/spin/TwoSpinor.h>
#include <Grid/qcd/utils/LinalgUtils.h>
#include <Grid/qcd/utils/CovariantCshift.h>

// Include representations 	
#include <Grid/qcd/utils/SUn.h>
#include <Grid/qcd/utils/SUnAdjoint.h>
#include <Grid/qcd/utils/SUnTwoIndex.h>
#include <Grid/qcd/representations/hmc_types.h>

#include <Grid/qcd/action/Actions.h>

#include <Grid/qcd/smearing/Smearing.h>

#include <Grid/qcd/hmc/integrators/Integrator.h>
#include <Grid/qcd/hmc/integrators/Integrator_algorithm.h>
#include <Grid/qcd/hmc/HMC.h>



#endif

lib/qcd/action/Action.h (new file, 50 lines)
@@ -0,0 +1,50 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/Actions.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: neo <cossu@post.kek.jp>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_QCD_ACTION_H
#define GRID_QCD_ACTION_H

////////////////////////////////////////////
// Abstract base interface
////////////////////////////////////////////
#include <Grid/qcd/action/ActionCore.h>
////////////////////////////////////////////////////////////////////////
// Fermion actions; prevent coupling fermion.cc files to other headers
////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/Fermion.h>
////////////////////////////////////////
// Pseudo fermion combinations for HMC
////////////////////////////////////////
#include <Grid/qcd/action/pseudofermion/PseudoFermion.h>

#endif
@@ -150,4 +150,5 @@ using ActionSet = std::vector<ActionLevel<GaugeField, R> >;

}
}

#endif

lib/qcd/action/ActionCore.h (new file, 45 lines)
@@ -0,0 +1,45 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/ActionCore.h
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: neo <cossu@post.kek.jp>
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#ifndef QCD_ACTION_CORE
#define QCD_ACTION_CORE

#include <Grid/qcd/action/ActionBase.h>
#include <Grid/qcd/action/ActionParams.h>

////////////////////////////////////////////
// Gauge Actions
////////////////////////////////////////////
#include <Grid/qcd/action/gauge/Gauge.h>
////////////////////////////////////////////
// Fermion prereqs
////////////////////////////////////////////
#include <Grid/qcd/action/fermion/FermionCore.h>

#endif
@@ -45,6 +45,10 @@ namespace QCD {
      WilsonImplParams() : overlapCommsCompute(false) {};
    };

    struct StaggeredImplParams {
      StaggeredImplParams()  {};
    };

    struct OneFlavourRationalParams { 
      RealD  lo;
      RealD  hi;
@@ -30,8 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    /*  END LEGAL */

#include <Grid/Eigen/Dense>
#include <Grid/Grid.h>

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
namespace QCD {
@@ -57,10 +57,23 @@ void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
{
  int Ls=this->Ls;

  this->DW(psi,this->tmp(),DaggerNo);
  FermionField tmp_f(this->FermionGrid());
  this->DW(psi,tmp_f,DaggerNo);

  for(int s=0;s<Ls;s++){
    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
  }
}
template<class Impl>  
void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
{
  int Ls=this->Ls;

  FermionField tmp_f(this->FermionGrid());
  this->DW(psi,tmp_f,DaggerYes);

  for(int s=0;s<Ls;s++){
    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
  }
}

@@ -107,17 +120,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
}


template<class Impl>  
void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
{
  int Ls=this->Ls;

  this->DW(psi,this->tmp(),DaggerYes);

  for(int s=0;s<Ls;s++){
    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
  }
}
template<class Impl>  
void CayleyFermion5D<Impl>::M5D   (const FermionField &psi, FermionField &chi)
{
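Dminus and DminusDag now build their Wilson-operator intermediate in a function-local FermionField rather than the shared this->tmp() scratch buffer, which keeps the routines re-entrant; the old DminusDag definition lower in the file is removed. The per-slice update they perform, chi_s = psi_s - c_s (D_W psi)_s, is an axpby restricted to one fifth-dimension coordinate. A generic sketch of such a slice-wise axpby on flat arrays (the layout and names are illustrative assumptions, not Grid's axpby_ssp internals):

```cpp
#include <complex>
#include <vector>

using Coeff = std::complex<double>;

// chi[s] = a*psi[s] + b*tmp[sp] on a single 5th-dimension slice, mirroring the
// axpby_ssp(chi, a, psi, b, tmp, s, sp) call pattern above. chi, psi and tmp
// are flat arrays of Ls slices, each of slice_volume entries.
void axpby_slice(std::vector<Coeff> &chi, Coeff a, const std::vector<Coeff> &psi,
                 Coeff b, const std::vector<Coeff> &tmp, int s, int sp,
                 int slice_volume) {
  for (int i = 0; i < slice_volume; i++) {
    chi[s * slice_volume + i] =
        a * psi[s * slice_volume + i] + b * tmp[sp * slice_volume + i];
  }
}
```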
@@ -168,7 +170,6 @@ void CayleyFermion5D<Impl>::Mooee       (const FermionField &psi, FermionField &
 | 
			
		||||
  lower[0]   =-mass*lower[0];
 | 
			
		||||
  M5D(psi,psi,chi,lower,diag,upper);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeDag    (const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
@@ -190,7 +191,12 @@ void CayleyFermion5D<Impl>::MooeeDag    (const FermionField &psi, FermionField &
 | 
			
		||||
      lower[s]=-cee[s-1];
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Conjugate the terms 
 | 
			
		||||
  for (int s=0;s<Ls;s++){
 | 
			
		||||
    diag[s] =conjugate(diag[s]);
 | 
			
		||||
    upper[s]=conjugate(upper[s]);
 | 
			
		||||
    lower[s]=conjugate(lower[s]);
 | 
			
		||||
  }
 | 
			
		||||
  M5Ddag(psi,psi,chi,lower,diag,upper);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -212,9 +218,23 @@ void CayleyFermion5D<Impl>::MeooeDag5D    (const FermionField &psi, FermionField
 | 
			
		||||
  int Ls=this->Ls;
 | 
			
		||||
  std::vector<Coeff_t> diag =bs;
 | 
			
		||||
  std::vector<Coeff_t> upper=cs;
 | 
			
		||||
  std::vector<Coeff_t> lower=cs;
 | 
			
		||||
  upper[Ls-1]=-mass*upper[Ls-1];
 | 
			
		||||
  lower[0]   =-mass*lower[0];
 | 
			
		||||
  std::vector<Coeff_t> lower=cs; 
 | 
			
		||||
 | 
			
		||||
  for (int s=0;s<Ls;s++){
 | 
			
		||||
    if ( s== 0 ) {
 | 
			
		||||
      upper[s] = cs[s+1];
 | 
			
		||||
      lower[s] =-mass*cs[Ls-1];
 | 
			
		||||
    } else if ( s==(Ls-1) ) { 
 | 
			
		||||
      upper[s] =-mass*cs[0];
 | 
			
		||||
      lower[s] = cs[s-1];
 | 
			
		||||
    } else { 
 | 
			
		||||
      upper[s] = cs[s+1];
 | 
			
		||||
      lower[s] = cs[s-1];
 | 
			
		||||
    }
 | 
			
		||||
    upper[s] = conjugate(upper[s]);
 | 
			
		||||
    lower[s] = conjugate(lower[s]);
 | 
			
		||||
    diag[s]  = conjugate(diag[s]);
 | 
			
		||||
  }
 | 
			
		||||
  M5Ddag(psi,psi,Din,lower,diag,upper);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -300,7 +320,7 @@ void CayleyFermion5D<Impl>::MDeriv  (GaugeField &mat,const FermionField &U,const
 | 
			
		||||
    this->DhopDeriv(mat,U,Din,dag);
 | 
			
		||||
  } else {
 | 
			
		||||
    //      U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
 | 
			
		||||
    Meooe5D(U,Din);
 | 
			
		||||
    MeooeDag5D(U,Din);
 | 
			
		||||
    this->DhopDeriv(mat,Din,V,dag);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
@@ -315,7 +335,7 @@ void CayleyFermion5D<Impl>::MoeDeriv(GaugeField &mat,const FermionField &U,const
 | 
			
		||||
    this->DhopDerivOE(mat,U,Din,dag);
 | 
			
		||||
  } else {
 | 
			
		||||
    //      U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
 | 
			
		||||
      Meooe5D(U,Din);
 | 
			
		||||
      MeooeDag5D(U,Din);
 | 
			
		||||
      this->DhopDerivOE(mat,Din,V,dag);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
@@ -330,7 +350,7 @@ void CayleyFermion5D<Impl>::MeoDeriv(GaugeField &mat,const FermionField &U,const
 | 
			
		||||
    this->DhopDerivEO(mat,U,Din,dag);
 | 
			
		||||
  } else {
 | 
			
		||||
    //      U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
 | 
			
		||||
    Meooe5D(U,Din);
 | 
			
		||||
    MeooeDag5D(U,Din);
 | 
			
		||||
    this->DhopDerivEO(mat,Din,V,dag);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 
 | 
			
		||||
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_CAYLEY_FERMION_H
#define  GRID_QCD_CAYLEY_FERMION_H

#include <Grid/qcd/action/fermion/WilsonFermion5D.h>

namespace Grid {

  namespace QCD {
@@ -192,7 +194,9 @@ template void CayleyFermion5D< A >::M5Ddag(const FermionField &psi,const Fermion
template void CayleyFermion5D< A >::MooeeInv    (const FermionField &psi, FermionField &chi); \
template void CayleyFermion5D< A >::MooeeInvDag (const FermionField &psi, FermionField &chi);

#define CAYLEY_DPERP_CACHE
#undef  CAYLEY_DPERP_DENSE
#define  CAYLEY_DPERP_CACHE
#undef  CAYLEY_DPERP_LINALG
#define CAYLEY_DPERP_VEC

#endif
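The CAYLEY_DPERP_* selectors above decide which fifth-dimension (Dperp) implementation file contributes its explicit instantiations: cache-friendly, dense-matrix, linalg (axpby_ssp based) or vectorised. A minimal self-contained sketch of this compile-time selection pattern, with placeholder names rather than Grid's:

```cpp
#include <cstdio>

// Exactly one variant of the same routine is compiled in, chosen by a macro,
// analogous to the CAYLEY_DPERP_* guards around INSTANTIATE_DPERP below.
#define USE_CACHE_VARIANT

#if defined(USE_DENSE_VARIANT)
static void MooeeInvSketch() { std::puts("dense-matrix variant"); }
#elif defined(USE_CACHE_VARIANT)
static void MooeeInvSketch() { std::puts("cache-friendly variant"); }
#else
static void MooeeInvSketch() { std::puts("linalg variant"); }
#endif

int main() { MooeeInvSketch(); }
```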
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
@@ -54,8 +55,8 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
 | 
			
		||||
  // Flops = 6.0*(Nc*Ns) *Ls*vol
 | 
			
		||||
  M5Dcalls++;
 | 
			
		||||
  M5Dtime-=usecond();
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
 | 
			
		||||
  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
    for(int s=0;s<Ls;s++){
 | 
			
		||||
      auto tmp = psi._odata[0];
 | 
			
		||||
      if ( s==0 ) {
 | 
			
		||||
@@ -98,8 +99,8 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 | 
			
		||||
  // Flops = 6.0*(Nc*Ns) *Ls*vol
 | 
			
		||||
  M5Dcalls++;
 | 
			
		||||
  M5Dtime-=usecond();
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
 | 
			
		||||
  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
    auto tmp = psi._odata[0];
 | 
			
		||||
    for(int s=0;s<Ls;s++){
 | 
			
		||||
      if ( s==0 ) {
 | 
			
		||||
@@ -137,8 +138,7 @@ void CayleyFermion5D<Impl>::MooeeInv    (const FermionField &psi, FermionField &
 | 
			
		||||
  MooeeInvCalls++;
 | 
			
		||||
  MooeeInvTime-=usecond();
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 | 
			
		||||
    auto tmp = psi._odata[0];
 | 
			
		||||
 | 
			
		||||
    // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls  = 12*Ls * (9) = 108*Ls flops
 | 
			
		||||
@@ -181,11 +181,22 @@ void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &
  assert(psi.checkerboard == psi.checkerboard);
  chi.checkerboard=psi.checkerboard;

  std::vector<Coeff_t> ueec(Ls);
  std::vector<Coeff_t> deec(Ls);
  std::vector<Coeff_t> leec(Ls);
  std::vector<Coeff_t> ueemc(Ls);
  std::vector<Coeff_t> leemc(Ls);
  for(int s=0;s<ueec.size();s++){
    ueec[s] = conjugate(uee[s]);
    deec[s] = conjugate(dee[s]);
    leec[s] = conjugate(lee[s]);
    ueemc[s]= conjugate(ueem[s]);
    leemc[s]= conjugate(leem[s]);
  }
  MooeeInvCalls++;
  MooeeInvTime-=usecond();

PARALLEL_FOR_LOOP
  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls

    auto tmp = psi._odata[0];

@@ -193,25 +204,25 @@ PARALLEL_FOR_LOOP
    chi[ss]=psi[ss];
    for (int s=1;s<Ls;s++){
                            spProj5m(tmp,chi[ss+s-1]);
      chi[ss+s] = psi[ss+s]-uee[s-1]*tmp;
      chi[ss+s] = psi[ss+s]-ueec[s-1]*tmp;
    }
    // U_m^{-\dagger} 
    for (int s=0;s<Ls-1;s++){
                                   spProj5p(tmp,chi[ss+s]);
      chi[ss+Ls-1] = chi[ss+Ls-1] - ueem[s]*tmp;
      chi[ss+Ls-1] = chi[ss+Ls-1] - ueemc[s]*tmp;
    }

    // L_m^{-\dagger} D^{-dagger}
    for (int s=0;s<Ls-1;s++){
      spProj5m(tmp,chi[ss+Ls-1]);
      chi[ss+s] = (1.0/dee[s])*chi[ss+s]-(leem[s]/dee[Ls-1])*tmp;
      chi[ss+s] = (1.0/deec[s])*chi[ss+s]-(leemc[s]/deec[Ls-1])*tmp;
    }	
    chi[ss+Ls-1]= (1.0/dee[Ls-1])*chi[ss+Ls-1];
    chi[ss+Ls-1]= (1.0/deec[Ls-1])*chi[ss+Ls-1];
  
    // Apply L^{-dagger}
    for (int s=Ls-2;s>=0;s--){
      spProj5p(tmp,chi[ss+s+1]);
      chi[ss+s] = chi[ss+s] - lee[s]*tmp;
      chi[ss+s] = chi[ss+s] - leec[s]*tmp;
    }
  }

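For the daggered even-even inverse, the LDU coefficients of the Cayley matrix are now conjugated once up front (ueec, deec, leec, ueemc, leemc) and those tables are used inside the site loop, rather than applying the un-conjugated coefficients as before. A small sketch of that hoisting idea (std::conj stands in for Grid's conjugate, and the coefficient type is assumed complex):

```cpp
#include <complex>
#include <cstddef>
#include <vector>

// Precompute the conjugated coefficient table once per call, as the hunk above
// does, rather than conjugating inside the per-site loop.
std::vector<std::complex<double>>
conjugate_all(const std::vector<std::complex<double>> &c) {
  std::vector<std::complex<double>> out(c.size());
  for (std::size_t s = 0; s < c.size(); s++) out[s] = std::conj(c[s]);
  return out;
}
```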
 
 | 
			
		||||
@@ -30,7 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
 | 
			
		||||
#include <Grid/Eigen/Dense>
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
@@ -38,20 +39,17 @@ namespace QCD {
 | 
			
		||||
  /*
 | 
			
		||||
   * Dense matrix versions of routines
 | 
			
		||||
   */
 | 
			
		||||
 | 
			
		||||
  /*
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
  this->MooeeInternal(psi,chi,DaggerYes,InverseYes);
 | 
			
		||||
}
 | 
			
		||||
  
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeInv(const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
  this->MooeeInternal(psi,chi,DaggerNo,InverseYes);
 | 
			
		||||
}
 | 
			
		||||
  */
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv)
 | 
			
		||||
{
 | 
			
		||||
@@ -125,9 +123,20 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#ifdef CAYLEY_DPERP_DENSE
 | 
			
		||||
INSTANTIATE_DPERP(GparityWilsonImplF);
 | 
			
		||||
INSTANTIATE_DPERP(GparityWilsonImplD);
 | 
			
		||||
INSTANTIATE_DPERP(WilsonImplF);
 | 
			
		||||
INSTANTIATE_DPERP(WilsonImplD);
 | 
			
		||||
INSTANTIATE_DPERP(ZWilsonImplF);
 | 
			
		||||
INSTANTIATE_DPERP(ZWilsonImplD);
 | 
			
		||||
 | 
			
		||||
template void CayleyFermion5D<GparityWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
template void CayleyFermion5D<GparityWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
template void CayleyFermion5D<WilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
template void CayleyFermion5D<WilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
template void CayleyFermion5D<ZWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
template void CayleyFermion5D<ZWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
}}
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
@@ -47,17 +48,18 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
 | 
			
		||||
				std::vector<Coeff_t> &diag,
 | 
			
		||||
				std::vector<Coeff_t> &upper)
 | 
			
		||||
{
 | 
			
		||||
  Coeff_t one(1.0);
 | 
			
		||||
  int Ls=this->Ls;
 | 
			
		||||
  for(int s=0;s<Ls;s++){
 | 
			
		||||
    if ( s==0 ) {
 | 
			
		||||
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
 | 
			
		||||
      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,Ls-1);
 | 
			
		||||
      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,Ls-1);
 | 
			
		||||
    } else if ( s==(Ls-1)) { 
 | 
			
		||||
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,0);
 | 
			
		||||
      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,s-1);
 | 
			
		||||
      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,s-1);
 | 
			
		||||
    } else {
 | 
			
		||||
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
 | 
			
		||||
      axpby_ssp_pplus(chi,1.0,chi,lower[s],psi,s,s-1);
 | 
			
		||||
      axpby_ssp_pplus(chi,one,chi,lower[s],psi,s,s-1);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
@@ -69,17 +71,18 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 | 
			
		||||
				   std::vector<Coeff_t> &diag,
 | 
			
		||||
				   std::vector<Coeff_t> &upper)
 | 
			
		||||
{
 | 
			
		||||
  Coeff_t one(1.0);
 | 
			
		||||
  int Ls=this->Ls;
 | 
			
		||||
  for(int s=0;s<Ls;s++){
 | 
			
		||||
    if ( s==0 ) {
 | 
			
		||||
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
 | 
			
		||||
      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,Ls-1);
 | 
			
		||||
      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,Ls-1);
 | 
			
		||||
    } else if ( s==(Ls-1)) { 
 | 
			
		||||
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,0);
 | 
			
		||||
      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
 | 
			
		||||
      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
 | 
			
		||||
    } else {
 | 
			
		||||
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
 | 
			
		||||
      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
 | 
			
		||||
      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
@@ -87,62 +90,68 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeInv    (const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
  Coeff_t one(1.0);
 | 
			
		||||
  Coeff_t czero(0.0);
 | 
			
		||||
  chi.checkerboard=psi.checkerboard;
 | 
			
		||||
  int Ls=this->Ls;
 | 
			
		||||
  // Apply (L^{\prime})^{-1}
 | 
			
		||||
  axpby_ssp (chi,1.0,psi,     0.0,psi,0,0);      // chi[0]=psi[0]
 | 
			
		||||
  axpby_ssp (chi,one,psi,     czero,psi,0,0);      // chi[0]=psi[0]
 | 
			
		||||
  for (int s=1;s<Ls;s++){
 | 
			
		||||
    axpby_ssp_pplus(chi,1.0,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
 | 
			
		||||
    axpby_ssp_pplus(chi,one,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
 | 
			
		||||
  }
 | 
			
		||||
  // L_m^{-1} 
 | 
			
		||||
  for (int s=0;s<Ls-1;s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
 | 
			
		||||
    axpby_ssp_pminus(chi,1.0,chi,-leem[s],chi,Ls-1,s);
 | 
			
		||||
    axpby_ssp_pminus(chi,one,chi,-leem[s],chi,Ls-1,s);
 | 
			
		||||
  }
 | 
			
		||||
  // U_m^{-1} D^{-1}
 | 
			
		||||
  for (int s=0;s<Ls-1;s++){
 | 
			
		||||
    // Chi[s] + 1/d chi[s] 
 | 
			
		||||
    axpby_ssp_pplus(chi,1.0/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
 | 
			
		||||
    axpby_ssp_pplus(chi,one/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
 | 
			
		||||
  }	
 | 
			
		||||
  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable 
 | 
			
		||||
  axpby_ssp(chi,one/dee[Ls-1],chi,czero,chi,Ls-1,Ls-1); // Modest avoidable 
 | 
			
		||||
  
 | 
			
		||||
  // Apply U^{-1}
 | 
			
		||||
  for (int s=Ls-2;s>=0;s--){
 | 
			
		||||
    axpby_ssp_pminus (chi,1.0,chi,-uee[s],chi,s,s+1);  // chi[Ls]
 | 
			
		||||
    axpby_ssp_pminus (chi,one,chi,-uee[s],chi,s,s+1);  // chi[Ls]
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
 | 
			
		||||
{
 | 
			
		||||
  Coeff_t one(1.0);
 | 
			
		||||
  Coeff_t czero(0.0);
 | 
			
		||||
  chi.checkerboard=psi.checkerboard;
 | 
			
		||||
  int Ls=this->Ls;
 | 
			
		||||
  // Apply (U^{\prime})^{-dagger}
 | 
			
		||||
  axpby_ssp (chi,1.0,psi,     0.0,psi,0,0);      // chi[0]=psi[0]
 | 
			
		||||
  axpby_ssp (chi,one,psi,     czero,psi,0,0);      // chi[0]=psi[0]
 | 
			
		||||
  for (int s=1;s<Ls;s++){
 | 
			
		||||
    axpby_ssp_pminus(chi,1.0,psi,-uee[s-1],chi,s,s-1);
 | 
			
		||||
    axpby_ssp_pminus(chi,one,psi,-conjugate(uee[s-1]),chi,s,s-1);
 | 
			
		||||
  }
 | 
			
		||||
  // U_m^{-\dagger} 
 | 
			
		||||
  for (int s=0;s<Ls-1;s++){
 | 
			
		||||
    axpby_ssp_pplus(chi,1.0,chi,-ueem[s],chi,Ls-1,s);
 | 
			
		||||
    axpby_ssp_pplus(chi,one,chi,-conjugate(ueem[s]),chi,Ls-1,s);
 | 
			
		||||
  }
 | 
			
		||||
  // L_m^{-\dagger} D^{-dagger}
 | 
			
		||||
  for (int s=0;s<Ls-1;s++){
 | 
			
		||||
    axpby_ssp_pminus(chi,1.0/dee[s],chi,-leem[s]/dee[Ls-1],chi,s,Ls-1);
 | 
			
		||||
    axpby_ssp_pminus(chi,one/conjugate(dee[s]),chi,-conjugate(leem[s]/dee[Ls-1]),chi,s,Ls-1);
 | 
			
		||||
  }	
 | 
			
		||||
  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable 
 | 
			
		||||
  axpby_ssp(chi,one/conjugate(dee[Ls-1]),chi,czero,chi,Ls-1,Ls-1); // Modest avoidable 
 | 
			
		||||
  
 | 
			
		||||
  // Apply L^{-dagger}
 | 
			
		||||
  for (int s=Ls-2;s>=0;s--){
 | 
			
		||||
    axpby_ssp_pplus (chi,1.0,chi,-lee[s],chi,s,s+1);  // chi[Ls]
 | 
			
		||||
    axpby_ssp_pplus (chi,one,chi,-conjugate(lee[s]),chi,s,s+1);  // chi[Ls]
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#ifdef CAYLEY_DPERP_LINALG
 | 
			
		||||
  INSTANTIATE(WilsonImplF);
 | 
			
		||||
  INSTANTIATE(WilsonImplD);
 | 
			
		||||
  INSTANTIATE(GparityWilsonImplF);
 | 
			
		||||
  INSTANTIATE(GparityWilsonImplD);
 | 
			
		||||
  INSTANTIATE_DPERP(WilsonImplF);
 | 
			
		||||
  INSTANTIATE_DPERP(WilsonImplD);
 | 
			
		||||
  INSTANTIATE_DPERP(GparityWilsonImplF);
 | 
			
		||||
  INSTANTIATE_DPERP(GparityWilsonImplD);
 | 
			
		||||
  INSTANTIATE_DPERP(ZWilsonImplF);
 | 
			
		||||
  INSTANTIATE_DPERP(ZWilsonImplD);
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -30,11 +30,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    /*  END LEGAL */


#include <Grid/Grid.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>


namespace Grid {
namespace QCD {  /*
namespace QCD {  
  /*
   * Dense matrix versions of routines
   */
template<class Impl>
@@ -91,8 +93,7 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
 | 
			
		||||
 | 
			
		||||
  assert(Nc==3);
 | 
			
		||||
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 | 
			
		||||
  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 | 
			
		||||
#if 0
 | 
			
		||||
      alignas(64) SiteHalfSpinor hp;
 | 
			
		||||
      alignas(64) SiteHalfSpinor hm;
 | 
			
		||||
@@ -232,8 +233,7 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 | 
			
		||||
 | 
			
		||||
  M5Dcalls++;
 | 
			
		||||
  M5Dtime-=usecond();
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 | 
			
		||||
  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 | 
			
		||||
#if 0
 | 
			
		||||
    alignas(64) SiteHalfSpinor hp;
 | 
			
		||||
    alignas(64) SiteHalfSpinor hm;
 | 
			
		||||
@@ -792,13 +792,11 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
 | 
			
		||||
  MooeeInvTime-=usecond();
 | 
			
		||||
 | 
			
		||||
  if ( switcheroo<Coeff_t>::iscomplex() ) {
 | 
			
		||||
  PARALLEL_FOR_LOOP
 | 
			
		||||
    for(auto site=0;site<vol;site++){
 | 
			
		||||
    parallel_for(auto site=0;site<vol;site++){
 | 
			
		||||
      MooeeInternalZAsm(psi,chi,LLs,site,*_Matp,*_Matm);
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
  PARALLEL_FOR_LOOP
 | 
			
		||||
    for(auto site=0;site<vol;site++){
 | 
			
		||||
    parallel_for(auto site=0;site<vol;site++){
 | 
			
		||||
      MooeeInternalAsm(psi,chi,LLs,site,*_Matp,*_Matm);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
 
 | 
			
		||||
@@ -26,7 +26,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
  namespace QCD {
 | 
			
		||||
 
 | 
			
		||||
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_CONTINUED_FRACTION_H
 | 
			
		||||
#define  GRID_QCD_CONTINUED_FRACTION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
  namespace QCD {
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_DOMAIN_WALL_FERMION_H
 | 
			
		||||
#define  GRID_QCD_DOMAIN_WALL_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -2,16 +2,11 @@
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/Actions.h
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 | 
			
		||||
Author: neo <cossu@post.kek.jp>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
@@ -30,67 +25,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef GRID_QCD_ACTIONS_H
 | 
			
		||||
#define GRID_QCD_ACTIONS_H
 | 
			
		||||
 | 
			
		||||
// * Linear operators             (Hermitian and non-hermitian)  .. my LinearOperator
 | 
			
		||||
// * System solvers               (Hermitian and non-hermitian)  .. my OperatorFunction
 | 
			
		||||
// * MultiShift System solvers    (Hermitian and non-hermitian)  .. my OperatorFunction
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Abstract base interface
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
#include <Grid/qcd/action/ActionBase.h>
 | 
			
		||||
#include <Grid/qcd/action/ActionParams.h>
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Utility functions
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
#include <Grid/qcd/action/gauge/GaugeImpl.h>
 | 
			
		||||
#include <Grid/qcd/utils/WilsonLoops.h>
 | 
			
		||||
 | 
			
		||||
#include <Grid/qcd/action/fermion/WilsonCompressor.h>     //used by all wilson type fermions
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionOperatorImpl.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionOperator.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/WilsonKernels.h>        //used by all wilson type fermions
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Gauge Actions
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
#include <Grid/qcd/action/gauge/WilsonGaugeAction.h>
 | 
			
		||||
#include <Grid/qcd/action/gauge/PlaqPlusRectangleAction.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
typedef WilsonGaugeAction<PeriodicGimplR>          WilsonGaugeActionR;
 | 
			
		||||
typedef WilsonGaugeAction<PeriodicGimplF>          WilsonGaugeActionF;
 | 
			
		||||
typedef WilsonGaugeAction<PeriodicGimplD>          WilsonGaugeActionD;
 | 
			
		||||
typedef PlaqPlusRectangleAction<PeriodicGimplR>    PlaqPlusRectangleActionR;
 | 
			
		||||
typedef PlaqPlusRectangleAction<PeriodicGimplF>    PlaqPlusRectangleActionF;
 | 
			
		||||
typedef PlaqPlusRectangleAction<PeriodicGimplD>    PlaqPlusRectangleActionD;
 | 
			
		||||
typedef IwasakiGaugeAction<PeriodicGimplR>         IwasakiGaugeActionR;
 | 
			
		||||
typedef IwasakiGaugeAction<PeriodicGimplF>         IwasakiGaugeActionF;
 | 
			
		||||
typedef IwasakiGaugeAction<PeriodicGimplD>         IwasakiGaugeActionD;
 | 
			
		||||
typedef SymanzikGaugeAction<PeriodicGimplR>        SymanzikGaugeActionR;
 | 
			
		||||
typedef SymanzikGaugeAction<PeriodicGimplF>        SymanzikGaugeActionF;
 | 
			
		||||
typedef SymanzikGaugeAction<PeriodicGimplD>        SymanzikGaugeActionD;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
typedef WilsonGaugeAction<ConjugateGimplR>          ConjugateWilsonGaugeActionR;
 | 
			
		||||
typedef WilsonGaugeAction<ConjugateGimplF>          ConjugateWilsonGaugeActionF;
 | 
			
		||||
typedef WilsonGaugeAction<ConjugateGimplD>          ConjugateWilsonGaugeActionD;
 | 
			
		||||
typedef PlaqPlusRectangleAction<ConjugateGimplR>    ConjugatePlaqPlusRectangleActionR;
 | 
			
		||||
typedef PlaqPlusRectangleAction<ConjugateGimplF>    ConjugatePlaqPlusRectangleActionF;
 | 
			
		||||
typedef PlaqPlusRectangleAction<ConjugateGimplD>    ConjugatePlaqPlusRectangleActionD;
 | 
			
		||||
typedef IwasakiGaugeAction<ConjugateGimplR>         ConjugateIwasakiGaugeActionR;
 | 
			
		||||
typedef IwasakiGaugeAction<ConjugateGimplF>         ConjugateIwasakiGaugeActionF;
 | 
			
		||||
typedef IwasakiGaugeAction<ConjugateGimplD>         ConjugateIwasakiGaugeActionD;
 | 
			
		||||
typedef SymanzikGaugeAction<ConjugateGimplR>        ConjugateSymanzikGaugeActionR;
 | 
			
		||||
typedef SymanzikGaugeAction<ConjugateGimplF>        ConjugateSymanzikGaugeActionF;
 | 
			
		||||
typedef SymanzikGaugeAction<ConjugateGimplD>        ConjugateSymanzikGaugeActionD;
 | 
			
		||||
 | 
			
		||||
}}
#ifndef  GRID_QCD_FERMION_H
#define  GRID_QCD_FERMION_H

////////////////////////////////////////////////////////////////////////////////////////////////////
// Explicit template instantiation is still required in the .cc files
@@ -107,36 +43,6 @@ typedef SymanzikGaugeAction<ConjugateGimplD>        ConjugateSymanzikGaugeAction
// for EVERY .cc file. This define centralises the list and restores global push of impl cases
////////////////////////////////////////////////////////////////////////////////////////////////////

#define FermOp4dVecTemplateInstantiate(A) \
  template class A<WilsonImplF>;		\
  template class A<WilsonImplD>;		\
  template class A<ZWilsonImplF>;		\
  template class A<ZWilsonImplD>;		\
  template class A<GparityWilsonImplF>;		\
  template class A<GparityWilsonImplD>;

#define AdjointFermOpTemplateInstantiate(A) \
  template class A<WilsonAdjImplF>; \
  template class A<WilsonAdjImplD>;

#define TwoIndexFermOpTemplateInstantiate(A) \
  template class A<WilsonTwoIndexSymmetricImplF>; \
  template class A<WilsonTwoIndexSymmetricImplD>;

#define FermOp5dVecTemplateInstantiate(A) \
  template class A<DomainWallVec5dImplF>;	\
  template class A<DomainWallVec5dImplD>;	\
  template class A<ZDomainWallVec5dImplF>;	\
  template class A<ZDomainWallVec5dImplD>;

#define FermOpTemplateInstantiate(A) \
 FermOp4dVecTemplateInstantiate(A) \
 FermOp5dVecTemplateInstantiate(A)

#define GparityFermOpTemplateInstantiate(A)
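
// (added example, not part of the commit) a concrete fermion operator .cc file pulls in this
// header and closes with one of these one-liners to instantiate every supported impl, e.g.
//   FermOpTemplateInstantiate(WilsonFermion);
//   AdjointFermOpTemplateInstantiate(WilsonFermion);
// exactly as ImprovedStaggeredFermion.cc below ends with FermOpStaggeredTemplateInstantiate.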

////////////////////////////
// Fermion operators / actions
////////////////////////////
@@ -144,9 +50,9 @@ typedef SymanzikGaugeAction<ConjugateGimplD>        ConjugateSymanzikGaugeAction
#include <Grid/qcd/action/fermion/WilsonFermion.h>       // 4d wilson like
#include <Grid/qcd/action/fermion/WilsonTMFermion.h>       // 4d wilson like
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>     // 5d base used by all 5d overlap types

//#include <Grid/qcd/action/fermion/CloverFermion.h>

#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion.h>
#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>     // Cayley types
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
@@ -157,14 +63,16 @@ typedef SymanzikGaugeAction<ConjugateGimplD>        ConjugateSymanzikGaugeAction
#include <Grid/qcd/action/fermion/ShamirZolotarevFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h>

#include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>               // Continued fraction
#include <Grid/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h>

#include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>                 // Partial fraction
#include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h>
///////////////////////////////////////////////////////////////////////////////
// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
///////////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/g5HermitianLinop.h>

////////////////////////////////////////////////////////////////////////////////////////////////////
// More maintainable to maintain the following typedef list centrally, as more "impl" targets
@@ -268,27 +176,19 @@ typedef MobiusFermion<GparityWilsonImplR> GparityMobiusFermionR;
typedef MobiusFermion<GparityWilsonImplF> GparityMobiusFermionF;
typedef MobiusFermion<GparityWilsonImplD> GparityMobiusFermionD;

typedef ImprovedStaggeredFermion<StaggeredImplR> ImprovedStaggeredFermionR;
typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;

typedef ImprovedStaggeredFermion5D<StaggeredImplR> ImprovedStaggeredFermion5DR;
typedef ImprovedStaggeredFermion5D<StaggeredImplF> ImprovedStaggeredFermion5DF;
typedef ImprovedStaggeredFermion5D<StaggeredImplD> ImprovedStaggeredFermion5DD;

typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplR> ImprovedStaggeredFermionVec5dR;
typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplF> ImprovedStaggeredFermionVec5dF;
typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplD> ImprovedStaggeredFermionVec5dD;

  }}
///////////////////////////////////////////////////////////////////////////////
// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
///////////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/g5HermitianLinop.h>

////////////////////////////////////////
// Pseudo fermion combinations for HMC
////////////////////////////////////////
#include <Grid/qcd/action/pseudofermion/EvenOddSchurDifferentiable.h>

#include <Grid/qcd/action/pseudofermion/TwoFlavour.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavourRatio.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOdd.h>
#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOddRatio.h>

#include <Grid/qcd/action/pseudofermion/OneFlavourRational.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRational.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRationalRatio.h>

#endif

 lib/qcd/action/fermion/FermionCore.h (new file, 80 lines)
@@ -0,0 +1,80 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */

#ifndef  GRID_QCD_FERMION_CORE_H
#define  GRID_QCD_FERMION_CORE_H

#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/ActionCore.h>

////////////////////////////////////////////
// Fermion prereqs
////////////////////////////////////////////
#include <Grid/qcd/action/fermion/WilsonCompressor.h>     //used by all wilson type fermions
#include <Grid/qcd/action/fermion/FermionOperatorImpl.h>
#include <Grid/qcd/action/fermion/FermionOperator.h>
#include <Grid/qcd/action/fermion/WilsonKernels.h>        //used by all wilson type fermions
#include <Grid/qcd/action/fermion/StaggeredKernels.h>     //used by all staggered type fermions

#define FermOpStaggeredTemplateInstantiate(A) \
  template class A<StaggeredImplF>; \
  template class A<StaggeredImplD>;

#define FermOpStaggeredVec5dTemplateInstantiate(A) \
  template class A<StaggeredVec5dImplF>; \
  template class A<StaggeredVec5dImplD>;

#define FermOp4dVecTemplateInstantiate(A) \
  template class A<WilsonImplF>;		\
  template class A<WilsonImplD>;		\
  template class A<ZWilsonImplF>;		\
  template class A<ZWilsonImplD>;		\
  template class A<GparityWilsonImplF>;		\
  template class A<GparityWilsonImplD>;

#define AdjointFermOpTemplateInstantiate(A) \
  template class A<WilsonAdjImplF>; \
  template class A<WilsonAdjImplD>;

#define TwoIndexFermOpTemplateInstantiate(A) \
  template class A<WilsonTwoIndexSymmetricImplF>; \
  template class A<WilsonTwoIndexSymmetricImplD>;

#define FermOp5dVecTemplateInstantiate(A) \
  template class A<DomainWallVec5dImplF>;	\
  template class A<DomainWallVec5dImplD>;	\
  template class A<ZDomainWallVec5dImplF>;	\
  template class A<ZDomainWallVec5dImplD>;

#define FermOpTemplateInstantiate(A) \
 FermOp4dVecTemplateInstantiate(A) \
 FermOp5dVecTemplateInstantiate(A)

#define GparityFermOpTemplateInstantiate(A)

#endif
 | 
			
		||||
@@ -194,8 +194,7 @@ namespace QCD {
 | 
			
		||||
      GaugeLinkField tmp(mat._grid);
 | 
			
		||||
      tmp = zero;
 | 
			
		||||
      
 | 
			
		||||
      PARALLEL_FOR_LOOP
 | 
			
		||||
      for(int sss=0;sss<tmp._grid->oSites();sss++){
 | 
			
		||||
      parallel_for(int sss=0;sss<tmp._grid->oSites();sss++){
 | 
			
		||||
	int sU=sss;
 | 
			
		||||
	for(int s=0;s<Ls;s++){
 | 
			
		||||
	  int sF = s+Ls*sU;
 | 
			
		||||
@@ -235,11 +234,13 @@ class DomainWallVec5dImpl :  public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
 | 
			
		||||
  typedef Lattice<SiteSpinor> FermionField;
 | 
			
		||||
  typedef Lattice<SitePropagator> PropagatorField;
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////
 | 
			
		||||
  // Make the doubled gauge field a *scalar*
 | 
			
		||||
  /////////////////////////////////////////////////
 | 
			
		||||
  typedef iImplDoubledGaugeField<typename Simd::scalar_type>  SiteDoubledGaugeField;  // This is a scalar
 | 
			
		||||
  typedef iImplGaugeField<typename Simd::scalar_type>         SiteScalarGaugeField;  // scalar
 | 
			
		||||
  typedef iImplGaugeLink<typename Simd::scalar_type>          SiteScalarGaugeLink;  // scalar
 | 
			
		||||
      
 | 
			
		||||
  typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
 | 
			
		||||
      
 | 
			
		||||
  typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
 | 
			
		||||
@@ -271,11 +272,11 @@ class DomainWallVec5dImpl :  public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
 | 
			
		||||
      
 | 
			
		||||
  inline void DoubleStore(GridBase *GaugeGrid, DoubledGaugeField &Uds,const GaugeField &Umu) 
 | 
			
		||||
  {
 | 
			
		||||
    SiteScalarGaugeField ScalarUmu;
 | 
			
		||||
    SiteScalarGaugeField  ScalarUmu;
 | 
			
		||||
    SiteDoubledGaugeField ScalarUds;
 | 
			
		||||
    
 | 
			
		||||
    GaugeLinkField U(Umu._grid);
 | 
			
		||||
    GaugeField Uadj(Umu._grid);
 | 
			
		||||
    GaugeField  Uadj(Umu._grid);
 | 
			
		||||
    for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
      U = PeekIndex<LorentzIndex>(Umu, mu);
 | 
			
		||||
      U = adj(Cshift(U, mu, -1));
 | 
			
		||||
@@ -333,7 +334,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
 | 
			
		||||
 typedef iImplPropagator<Simd> SitePropagator;
 | 
			
		||||
 typedef iImplHalfSpinor<Simd> SiteHalfSpinor;
 | 
			
		||||
 typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
 | 
			
		||||
 
 | 
			
		||||
 | 
			
		||||
 typedef Lattice<SiteSpinor> FermionField;
 | 
			
		||||
 typedef Lattice<SitePropagator> PropagatorField;
 | 
			
		||||
 typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
 | 
			
		||||
@@ -356,7 +357,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
 | 
			
		||||
		      StencilImpl &St) {
 | 
			
		||||
 | 
			
		||||
  typedef SiteHalfSpinor vobj;
 | 
			
		||||
   typedef typename SiteHalfSpinor::scalar_object sobj;
 | 
			
		||||
  typedef typename SiteHalfSpinor::scalar_object sobj;
 | 
			
		||||
	
 | 
			
		||||
   vobj vtmp;
 | 
			
		||||
   sobj stmp;
 | 
			
		||||
@@ -445,8 +446,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
 | 
			
		||||
       Uconj = where(coor==neglink,-Uconj,Uconj);
 | 
			
		||||
     }
 | 
			
		||||
	  
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
     for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
     parallel_for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
       Uds[ss](0)(mu) = U[ss]();
 | 
			
		||||
       Uds[ss](1)(mu) = Uconj[ss]();
 | 
			
		||||
     }
 | 
			
		||||
@@ -459,8 +459,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
       Utmp = where(coor==0,Uconj,Utmp);
 | 
			
		||||
     }
 | 
			
		||||
	  
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
     for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
     parallel_for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
       Uds[ss](0)(mu+4) = Utmp[ss]();
 | 
			
		||||
     }
 | 
			
		||||
	  
 | 
			
		||||
@@ -469,8 +468,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
       Utmp = where(coor==0,U,Utmp);
 | 
			
		||||
     }
 | 
			
		||||
	  
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
     for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
     parallel_for(auto ss=U.begin();ss<U.end();ss++){
 | 
			
		||||
       Uds[ss](1)(mu+4) = Utmp[ss]();
 | 
			
		||||
     }
 | 
			
		||||
	  
 | 
			
		||||
@@ -484,8 +482,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
   GaugeLinkField link(mat._grid);
 | 
			
		||||
   // use lorentz for flavour as hack.
 | 
			
		||||
   auto tmp = TraceIndex<SpinIndex>(outerProduct(Btilde, A));
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
   for (auto ss = tmp.begin(); ss < tmp.end(); ss++) {
 | 
			
		||||
   parallel_for(auto ss = tmp.begin(); ss < tmp.end(); ss++) {
 | 
			
		||||
     link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1));
 | 
			
		||||
   }
 | 
			
		||||
   PokeIndex<LorentzIndex>(mat, link, mu);
 | 
			
		||||
@@ -498,8 +495,7 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
	
 | 
			
		||||
   GaugeLinkField tmp(mat._grid);
 | 
			
		||||
   tmp = zero;
 | 
			
		||||
PARALLEL_FOR_LOOP
 | 
			
		||||
   for (int ss = 0; ss < tmp._grid->oSites(); ss++) {
 | 
			
		||||
   parallel_for(int ss = 0; ss < tmp._grid->oSites(); ss++) {
 | 
			
		||||
     for (int s = 0; s < Ls; s++) {
 | 
			
		||||
       int sF = s + Ls * ss;
 | 
			
		||||
       auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde[sF], Atilde[sF]));
 | 
			
		||||
@@ -512,6 +508,323 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Single flavour one component spinors with colour index
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template <class S, class Representation = FundamentalRepresentation >
 | 
			
		||||
  class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
 | 
			
		||||
 | 
			
		||||
    public:
 | 
			
		||||
 | 
			
		||||
    typedef RealD  _Coeff_t ;
 | 
			
		||||
    static const int Dimension = Representation::Dimension;
 | 
			
		||||
    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;
 | 
			
		||||
      
 | 
			
		||||
    //Necessary?
 | 
			
		||||
    constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}
 | 
			
		||||
    
 | 
			
		||||
    const bool LsVectorised=false;
 | 
			
		||||
    typedef _Coeff_t Coeff_t;
 | 
			
		||||
 | 
			
		||||
    INHERIT_GIMPL_TYPES(Gimpl);
 | 
			
		||||
      
 | 
			
		||||
    template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
 | 
			
		||||
    template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
 | 
			
		||||
    template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
 | 
			
		||||
    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
 | 
			
		||||
    template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
 | 
			
		||||
    
 | 
			
		||||
    typedef iImplScalar<Simd>            SiteComplex;
 | 
			
		||||
    typedef iImplSpinor<Simd>            SiteSpinor;
 | 
			
		||||
    typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
 | 
			
		||||
    typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
 | 
			
		||||
    typedef iImplPropagator<Simd>        SitePropagator;
 | 
			
		||||
    
 | 
			
		||||
    typedef Lattice<SiteComplex>           ComplexField;
 | 
			
		||||
    typedef Lattice<SiteSpinor>            FermionField;
 | 
			
		||||
    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
 | 
			
		||||
    typedef Lattice<SitePropagator> PropagatorField;
 | 
			
		||||
    
 | 
			
		||||
    typedef SimpleCompressor<SiteSpinor> Compressor;
 | 
			
		||||
    typedef StaggeredImplParams ImplParams;
 | 
			
		||||
    typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;
 | 
			
		||||
    
 | 
			
		||||
    ImplParams Params;
 | 
			
		||||
    
 | 
			
		||||
    StaggeredImpl(const ImplParams &p = ImplParams()) : Params(p){};
 | 
			
		||||
      
 | 
			
		||||
    inline void multLink(SiteSpinor &phi,
 | 
			
		||||
			 const SiteDoubledGaugeField &U,
 | 
			
		||||
			 const SiteSpinor &chi,
 | 
			
		||||
			 int mu){
 | 
			
		||||
      mult(&phi(), &U(mu), &chi());
 | 
			
		||||
    }
 | 
			
		||||
    inline void multLinkAdd(SiteSpinor &phi,
 | 
			
		||||
			    const SiteDoubledGaugeField &U,
 | 
			
		||||
			    const SiteSpinor &chi,
 | 
			
		||||
			    int mu){
 | 
			
		||||
      mac(&phi(), &U(mu), &chi());
 | 
			
		||||
    }
 | 
			
		||||
      
 | 
			
		||||
    template <class ref>
 | 
			
		||||
    inline void loadLinkElement(Simd &reg, ref &memory) {
 | 
			
		||||
      reg = memory;
 | 
			
		||||
    }
 | 
			
		||||
      
 | 
			
		||||
    inline void DoubleStore(GridBase *GaugeGrid,
 | 
			
		||||
			    DoubledGaugeField &UUUds, // for Naik term
 | 
			
		||||
			    DoubledGaugeField &Uds,
 | 
			
		||||
			    const GaugeField &Uthin,
 | 
			
		||||
			    const GaugeField &Ufat) {
 | 
			
		||||
      conformable(Uds._grid, GaugeGrid);
 | 
			
		||||
      conformable(Uthin._grid, GaugeGrid);
 | 
			
		||||
      conformable(Ufat._grid, GaugeGrid);
 | 
			
		||||
      GaugeLinkField U(GaugeGrid);
 | 
			
		||||
      GaugeLinkField UU(GaugeGrid);
 | 
			
		||||
      GaugeLinkField UUU(GaugeGrid);
 | 
			
		||||
      GaugeLinkField Udag(GaugeGrid);
 | 
			
		||||
      GaugeLinkField UUUdag(GaugeGrid);
 | 
			
		||||
      for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
	// Staggered Phase.
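	// (added note) these are the standard Kogut-Susskind phases
	//   eta_x(n)=1, eta_y(n)=(-1)^x, eta_z(n)=(-1)^(x+y), eta_t(n)=(-1)^(x+y+z),
	// applied multiplicatively below to both the one-hop (fat) and three-hop (Naik) links.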
 | 
			
		||||
	Lattice<iScalar<vInteger> > coor(GaugeGrid);
 | 
			
		||||
	Lattice<iScalar<vInteger> > x(GaugeGrid); LatticeCoordinate(x,0);
 | 
			
		||||
	Lattice<iScalar<vInteger> > y(GaugeGrid); LatticeCoordinate(y,1);
 | 
			
		||||
	Lattice<iScalar<vInteger> > z(GaugeGrid); LatticeCoordinate(z,2);
 | 
			
		||||
	Lattice<iScalar<vInteger> > t(GaugeGrid); LatticeCoordinate(t,3);
 | 
			
		||||
 | 
			
		||||
	Lattice<iScalar<vInteger> > lin_z(GaugeGrid); lin_z=x+y;
 | 
			
		||||
	Lattice<iScalar<vInteger> > lin_t(GaugeGrid); lin_t=x+y+z;
 | 
			
		||||
 | 
			
		||||
	ComplexField phases(GaugeGrid);	phases=1.0;
 | 
			
		||||
 | 
			
		||||
	if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
 | 
			
		||||
	if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
 | 
			
		||||
	if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);
 | 
			
		||||
 | 
			
		||||
	// 1 hop based on fat links
 | 
			
		||||
	U      = PeekIndex<LorentzIndex>(Ufat, mu);
 | 
			
		||||
	Udag   = adj( Cshift(U, mu, -1));
 | 
			
		||||
 | 
			
		||||
	U    = U    *phases;
 | 
			
		||||
	Udag = Udag *phases;
 | 
			
		||||
 | 
			
		||||
	PokeIndex<LorentzIndex>(Uds, U, mu);
 | 
			
		||||
	PokeIndex<LorentzIndex>(Uds, Udag, mu + 4);
 | 
			
		||||
 | 
			
		||||
	// 3 hop based on thin links. Crazy huh ?
 | 
			
		||||
	U  = PeekIndex<LorentzIndex>(Uthin, mu);
 | 
			
		||||
	UU = Gimpl::CovShiftForward(U,mu,U);
 | 
			
		||||
	UUU= Gimpl::CovShiftForward(U,mu,UU);
 | 
			
		||||
	
 | 
			
		||||
	UUUdag = adj( Cshift(UUU, mu, -3));
 | 
			
		||||
 | 
			
		||||
	UUU    = UUU    *phases;
 | 
			
		||||
	UUUdag = UUUdag *phases;
 | 
			
		||||
 | 
			
		||||
	PokeIndex<LorentzIndex>(UUUds, UUU, mu);
 | 
			
		||||
	PokeIndex<LorentzIndex>(UUUds, UUUdag, mu+4);
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
 | 
			
		||||
      GaugeLinkField link(mat._grid);
 | 
			
		||||
      link = TraceIndex<SpinIndex>(outerProduct(Btilde,A)); 
 | 
			
		||||
      PokeIndex<LorentzIndex>(mat,link,mu);
 | 
			
		||||
    }   
 | 
			
		||||
      
 | 
			
		||||
    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
 | 
			
		||||
      assert (0); 
 | 
			
		||||
      // Must never hit
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Single flavour one component spinors with colour index. 5d vec
 | 
			
		||||
  /////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  template <class S, class Representation = FundamentalRepresentation >
 | 
			
		||||
  class StaggeredVec5dImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
 | 
			
		||||
 | 
			
		||||
    public:
 | 
			
		||||
 | 
			
		||||
    typedef RealD  _Coeff_t ;
 | 
			
		||||
    static const int Dimension = Representation::Dimension;
 | 
			
		||||
    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;
 | 
			
		||||
      
 | 
			
		||||
    //Necessary?
 | 
			
		||||
    constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}
 | 
			
		||||
    
 | 
			
		||||
    const bool LsVectorised=true;
 | 
			
		||||
 | 
			
		||||
    typedef _Coeff_t Coeff_t;
 | 
			
		||||
 | 
			
		||||
    INHERIT_GIMPL_TYPES(Gimpl);
 | 
			
		||||
 | 
			
		||||
    template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
 | 
			
		||||
    template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
 | 
			
		||||
    template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
 | 
			
		||||
    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
 | 
			
		||||
    template <typename vtype> using iImplGaugeField        = iVector<iScalar<iMatrix<vtype, Dimension> >, Nd>;
 | 
			
		||||
    template <typename vtype> using iImplGaugeLink         = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
 | 
			
		||||
    template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
 | 
			
		||||
 | 
			
		||||
    // Make the doubled gauge field a *scalar*
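    // (added note) with LsVectorised=true the SIMD lanes run along the fifth dimension,
    // so the 4d links are held as un-vectorised scalar site data and are broadcast into
    // SIMD registers with vsplat in loadLinkElement/multLink further down.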
 | 
			
		||||
    typedef iImplDoubledGaugeField<typename Simd::scalar_type>  SiteDoubledGaugeField;  // This is a scalar
 | 
			
		||||
    typedef iImplGaugeField<typename Simd::scalar_type>         SiteScalarGaugeField;  // scalar
 | 
			
		||||
    typedef iImplGaugeLink<typename Simd::scalar_type>          SiteScalarGaugeLink;  // scalar
 | 
			
		||||
    typedef iImplPropagator<Simd>        SitePropagator;
 | 
			
		||||
 | 
			
		||||
    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
 | 
			
		||||
    typedef Lattice<SitePropagator> PropagatorField;
 | 
			
		||||
    
 | 
			
		||||
    typedef iImplScalar<Simd>            SiteComplex;
 | 
			
		||||
    typedef iImplSpinor<Simd>            SiteSpinor;
 | 
			
		||||
    typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    typedef Lattice<SiteComplex>           ComplexField;
 | 
			
		||||
    typedef Lattice<SiteSpinor>            FermionField;
 | 
			
		||||
    
 | 
			
		||||
    typedef SimpleCompressor<SiteSpinor> Compressor;
 | 
			
		||||
    typedef StaggeredImplParams ImplParams;
 | 
			
		||||
    typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;
 | 
			
		||||
    
 | 
			
		||||
    ImplParams Params;
 | 
			
		||||
    
 | 
			
		||||
    StaggeredVec5dImpl(const ImplParams &p = ImplParams()) : Params(p){};
 | 
			
		||||
 | 
			
		||||
    template <class ref>
 | 
			
		||||
    inline void loadLinkElement(Simd &reg, ref &memory) {
 | 
			
		||||
      vsplat(reg, memory);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
 | 
			
		||||
			 const SiteHalfSpinor &chi, int mu) {
 | 
			
		||||
      SiteGaugeLink UU;
 | 
			
		||||
      for (int i = 0; i < Dimension; i++) {
 | 
			
		||||
	for (int j = 0; j < Dimension; j++) {
 | 
			
		||||
	  vsplat(UU()()(i, j), U(mu)()(i, j));
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      mult(&phi(), &UU(), &chi());
 | 
			
		||||
    }
 | 
			
		||||
    inline void multLinkAdd(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
 | 
			
		||||
			    const SiteHalfSpinor &chi, int mu) {
 | 
			
		||||
      SiteGaugeLink UU;
 | 
			
		||||
      for (int i = 0; i < Dimension; i++) {
 | 
			
		||||
	for (int j = 0; j < Dimension; j++) {
 | 
			
		||||
	  vsplat(UU()()(i, j), U(mu)()(i, j));
 | 
			
		||||
	}
 | 
			
		||||
      }
 | 
			
		||||
      mac(&phi(), &UU(), &chi());
 | 
			
		||||
    }
 | 
			
		||||
      
 | 
			
		||||
    inline void DoubleStore(GridBase *GaugeGrid,
 | 
			
		||||
			    DoubledGaugeField &UUUds, // for Naik term
 | 
			
		||||
			    DoubledGaugeField &Uds,
 | 
			
		||||
			    const GaugeField &Uthin,
 | 
			
		||||
			    const GaugeField &Ufat) 
 | 
			
		||||
    {
 | 
			
		||||
 | 
			
		||||
      GridBase * InputGrid = Uthin._grid;
 | 
			
		||||
      conformable(InputGrid,Ufat._grid);
 | 
			
		||||
 | 
			
		||||
      GaugeLinkField U(InputGrid);
 | 
			
		||||
      GaugeLinkField UU(InputGrid);
 | 
			
		||||
      GaugeLinkField UUU(InputGrid);
 | 
			
		||||
      GaugeLinkField Udag(InputGrid);
 | 
			
		||||
      GaugeLinkField UUUdag(InputGrid);
 | 
			
		||||
 | 
			
		||||
      for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
	// Staggered Phase.
 | 
			
		||||
	Lattice<iScalar<vInteger> > coor(InputGrid);
 | 
			
		||||
	Lattice<iScalar<vInteger> > x(InputGrid); LatticeCoordinate(x,0);
 | 
			
		||||
	Lattice<iScalar<vInteger> > y(InputGrid); LatticeCoordinate(y,1);
 | 
			
		||||
	Lattice<iScalar<vInteger> > z(InputGrid); LatticeCoordinate(z,2);
 | 
			
		||||
	Lattice<iScalar<vInteger> > t(InputGrid); LatticeCoordinate(t,3);
 | 
			
		||||
 | 
			
		||||
	Lattice<iScalar<vInteger> > lin_z(InputGrid); lin_z=x+y;
 | 
			
		||||
	Lattice<iScalar<vInteger> > lin_t(InputGrid); lin_t=x+y+z;
 | 
			
		||||
 | 
			
		||||
	ComplexField phases(InputGrid);	phases=1.0;
 | 
			
		||||
 | 
			
		||||
	if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
 | 
			
		||||
	if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
 | 
			
		||||
	if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);
 | 
			
		||||
 | 
			
		||||
	// 1 hop based on fat links
 | 
			
		||||
	U      = PeekIndex<LorentzIndex>(Ufat, mu);
 | 
			
		||||
	Udag   = adj( Cshift(U, mu, -1));
 | 
			
		||||
 | 
			
		||||
	U    = U    *phases;
 | 
			
		||||
	Udag = Udag *phases;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
	for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
 | 
			
		||||
	  SiteScalarGaugeLink   ScalarU;
 | 
			
		||||
	  SiteDoubledGaugeField ScalarUds;
 | 
			
		||||
	  
 | 
			
		||||
	  std::vector<int> lcoor;
 | 
			
		||||
	  GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
 | 
			
		||||
	  peekLocalSite(ScalarUds, Uds, lcoor);
 | 
			
		||||
 | 
			
		||||
	  peekLocalSite(ScalarU, U, lcoor);
 | 
			
		||||
	  ScalarUds(mu) = ScalarU();
 | 
			
		||||
 | 
			
		||||
	  peekLocalSite(ScalarU, Udag, lcoor);
 | 
			
		||||
	  ScalarUds(mu + 4) = ScalarU();
 | 
			
		||||
 | 
			
		||||
	  pokeLocalSite(ScalarUds, Uds, lcoor);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 3 hop based on thin links. Crazy huh ?
 | 
			
		||||
	U  = PeekIndex<LorentzIndex>(Uthin, mu);
 | 
			
		||||
	UU = Gimpl::CovShiftForward(U,mu,U);
 | 
			
		||||
	UUU= Gimpl::CovShiftForward(U,mu,UU);
 | 
			
		||||
	
 | 
			
		||||
	UUUdag = adj( Cshift(UUU, mu, -3));
 | 
			
		||||
 | 
			
		||||
	UUU    = UUU    *phases;
 | 
			
		||||
	UUUdag = UUUdag *phases;
 | 
			
		||||
 | 
			
		||||
	for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
 | 
			
		||||
 | 
			
		||||
	  SiteScalarGaugeLink  ScalarU;
 | 
			
		||||
	  SiteDoubledGaugeField ScalarUds;
 | 
			
		||||
	  
 | 
			
		||||
	  std::vector<int> lcoor;
 | 
			
		||||
	  GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
 | 
			
		||||
      
 | 
			
		||||
	  peekLocalSite(ScalarUds, UUUds, lcoor);
 | 
			
		||||
 | 
			
		||||
	  peekLocalSite(ScalarU, UUU, lcoor);
 | 
			
		||||
	  ScalarUds(mu) = ScalarU();
 | 
			
		||||
 | 
			
		||||
	  peekLocalSite(ScalarU, UUUdag, lcoor);
 | 
			
		||||
	  ScalarUds(mu + 4) = ScalarU();
 | 
			
		||||
	  
 | 
			
		||||
	  pokeLocalSite(ScalarUds, UUUds, lcoor);
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
 | 
			
		||||
      assert(0);
 | 
			
		||||
    }   
 | 
			
		||||
      
 | 
			
		||||
    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
 | 
			
		||||
      assert (0); 
 | 
			
		||||
    }
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 typedef WilsonImpl<vComplex,  FundamentalRepresentation > WilsonImplR;   // Real.. whichever prec
 | 
			
		||||
 typedef WilsonImpl<vComplexF, FundamentalRepresentation > WilsonImplF;  // Float
 | 
			
		||||
 typedef WilsonImpl<vComplexD, FundamentalRepresentation > WilsonImplD;  // Double
 | 
			
		||||
@@ -540,6 +853,14 @@ PARALLEL_FOR_LOOP
 | 
			
		||||
 typedef GparityWilsonImpl<vComplexF, Nc> GparityWilsonImplF;  // Float
 | 
			
		||||
 typedef GparityWilsonImpl<vComplexD, Nc> GparityWilsonImplD;  // Double
 | 
			
		||||
 | 
			
		||||
 typedef StaggeredImpl<vComplex,  FundamentalRepresentation > StaggeredImplR;   // Real.. whichever prec
 | 
			
		||||
 typedef StaggeredImpl<vComplexF, FundamentalRepresentation > StaggeredImplF;  // Float
 | 
			
		||||
 typedef StaggeredImpl<vComplexD, FundamentalRepresentation > StaggeredImplD;  // Double
 | 
			
		||||
 | 
			
		||||
 typedef StaggeredVec5dImpl<vComplex,  FundamentalRepresentation > StaggeredVec5dImplR;   // Real.. whichever prec
 | 
			
		||||
 typedef StaggeredVec5dImpl<vComplexF, FundamentalRepresentation > StaggeredVec5dImplF;  // Float
 | 
			
		||||
 typedef StaggeredVec5dImpl<vComplexD, FundamentalRepresentation > StaggeredVec5dImplD;  // Double
 | 
			
		||||
 | 
			
		||||
}}
 | 
			
		||||
 | 
			
		||||
#endif

 lib/qcd/action/fermion/ImprovedStaggeredFermion.cc (new file, 403 lines)
@@ -0,0 +1,403 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi, Peter Boyle
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
const std::vector<int> 
 | 
			
		||||
ImprovedStaggeredFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3});
 | 
			
		||||
const std::vector<int> 
 | 
			
		||||
ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////
 | 
			
		||||
// Constructor and gauge import
 | 
			
		||||
/////////////////////////////////
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, 
 | 
			
		||||
							 RealD _mass,
 | 
			
		||||
							 const ImplParams &p)
 | 
			
		||||
    : Kernels(p),
 | 
			
		||||
      _grid(&Fgrid),
 | 
			
		||||
      _cbgrid(&Hgrid),
 | 
			
		||||
      Stencil(&Fgrid, npoint, Even, directions, displacements),
 | 
			
		||||
      StencilEven(&Hgrid, npoint, Even, directions, displacements),  // source is Even
 | 
			
		||||
      StencilOdd(&Hgrid, npoint, Odd, directions, displacements),  // source is Odd
 | 
			
		||||
      mass(_mass),
 | 
			
		||||
      Lebesgue(_grid),
 | 
			
		||||
      LebesgueEvenOdd(_cbgrid),
 | 
			
		||||
      Umu(&Fgrid),
 | 
			
		||||
      UmuEven(&Hgrid),
 | 
			
		||||
      UmuOdd(&Hgrid),
 | 
			
		||||
      UUUmu(&Fgrid),
 | 
			
		||||
      UUUmuEven(&Hgrid),
 | 
			
		||||
      UUUmuOdd(&Hgrid) ,
 | 
			
		||||
      _tmp(&Hgrid)
 | 
			
		||||
{
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
 | 
			
		||||
							 GridRedBlackCartesian &Hgrid, RealD _mass,
 | 
			
		||||
							 RealD _c1, RealD _c2,RealD _u0,
 | 
			
		||||
							 const ImplParams &p)
 | 
			
		||||
  : ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,p)
 | 
			
		||||
{
 | 
			
		||||
  c1=_c1;
 | 
			
		||||
  c2=_c2;
 | 
			
		||||
  u0=_u0;
 | 
			
		||||
  ImportGauge(_Uthin,_Ufat);
 | 
			
		||||
}
 | 
			
		||||
template <class Impl>
 | 
			
		||||
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin,GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
 | 
			
		||||
							 GridRedBlackCartesian &Hgrid, RealD _mass,
 | 
			
		||||
							 const ImplParams &p)
 | 
			
		||||
  : ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,p)
 | 
			
		||||
{
 | 
			
		||||
  ImportGaugeSimple(_Utriple,_Ufat);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////////
 | 
			
		||||
  // Momentum space propagator should be 
 | 
			
		||||
  // https://arxiv.org/pdf/hep-lat/9712010.pdf
 | 
			
		||||
  //
 | 
			
		||||
  // mom space action.
 | 
			
		||||
  //   gamma_mu i ( c1 sin pmu + c2 sin 3 pmu ) + m
 | 
			
		||||
  //
 | 
			
		||||
  // must track through staggered flavour/spin reduction in literature to 
 | 
			
		||||
  // turn to free propagator for the one component chi field, a la page 4/5
 | 
			
		||||
  // of above link to implement a Fourier-based solver.
 | 
			
		||||
  ////////////////////////////////////////////////////////////
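  // (added restatement, not in the commit) with the default coefficients used by this class,
  //   D(p) = i sum_mu gamma_mu ( c1 sin p_mu + c2 sin 3 p_mu ) + m ,  c1 = 9/8, c2 = -1/24,
  // which is the free operator one would invert for such a Fourier-based solver.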
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin) 
 | 
			
		||||
{
 | 
			
		||||
  ImportGauge(_Uthin,_Uthin);
 | 
			
		||||
};
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const GaugeField &_Utriple,const GaugeField &_Ufat) 
 | 
			
		||||
{
 | 
			
		||||
  /////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Trivial import; phases and fattening and such like preapplied
 | 
			
		||||
  /////////////////////////////////////////////////////////////////
 | 
			
		||||
  GaugeLinkField U(GaugeGrid());
 | 
			
		||||
 | 
			
		||||
  for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(_Utriple, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, U, mu );
 | 
			
		||||
 | 
			
		||||
    U = adj( Cshift(U, mu, -3));
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, -U, mu+4 );
 | 
			
		||||
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(_Ufat, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, U, mu);
 | 
			
		||||
 | 
			
		||||
    U = adj( Cshift(U, mu, -1));
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, -U, mu+4);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
  pickCheckerboard(Even, UmuEven,  Umu);
 | 
			
		||||
  pickCheckerboard(Odd,  UmuOdd ,  Umu);
 | 
			
		||||
  pickCheckerboard(Even, UUUmuEven,UUUmu);
 | 
			
		||||
  pickCheckerboard(Odd,  UUUmuOdd, UUUmu);
 | 
			
		||||
}
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat) 
 | 
			
		||||
{
 | 
			
		||||
  GaugeLinkField U(GaugeGrid());
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  // Double Store should take two fields for Naik and one hop separately.
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat );
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  // Apply scale factors to get the right fermion Kinetic term
 | 
			
		||||
  // Could pass coeffs into the double store to save work.
 | 
			
		||||
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) ) 
 | 
			
		||||
  ////////////////////////////////////////////////////////
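  // (added note) the loop below folds the +/-0.5 and the tadpole factors directly into the links:
  //   one-hop (fat) links are scaled by  +/-0.5*c1/u0,
  //   three-hop (Naik) links by          +/-0.5*c2/u0^3.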
 | 
			
		||||
  for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(Umu, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu );
 | 
			
		||||
    
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(Umu, mu+4);
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);
 | 
			
		||||
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(UUUmu, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu );
 | 
			
		||||
    
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  std::cout << " Umu " << Umu._odata[0]<<std::endl;
 | 
			
		||||
  std::cout << " UUUmu " << UUUmu._odata[0]<<std::endl;
 | 
			
		||||
  pickCheckerboard(Even, UmuEven, Umu);
 | 
			
		||||
  pickCheckerboard(Odd,  UmuOdd , Umu);
 | 
			
		||||
  pickCheckerboard(Even, UUUmuEven, UUUmu);
 | 
			
		||||
  pickCheckerboard(Odd,   UUUmuOdd, UUUmu);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
/////////////////////////////
 | 
			
		||||
// Implement the interface
 | 
			
		||||
/////////////////////////////
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
RealD ImprovedStaggeredFermion<Impl>::M(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Dhop(in, out, DaggerNo);
 | 
			
		||||
  return axpy_norm(out, mass, in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
RealD ImprovedStaggeredFermion<Impl>::Mdag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Dhop(in, out, DaggerYes);
 | 
			
		||||
  return axpy_norm(out, mass, in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::Meooe(const FermionField &in, FermionField &out) {
 | 
			
		||||
  if (in.checkerboard == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerNo);
 | 
			
		||||
  } else {
 | 
			
		||||
    DhopOE(in, out, DaggerNo);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  if (in.checkerboard == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerYes);
 | 
			
		||||
  } else {
 | 
			
		||||
    DhopOE(in, out, DaggerYes);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  typename FermionField::scalar_type scal(mass);
 | 
			
		||||
  out = scal * in;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Mooee(in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  out = (1.0 / (mass)) * in;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::MooeeInvDag(const FermionField &in,
 | 
			
		||||
                                      FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  MooeeInv(in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
///////////////////////////////////
 | 
			
		||||
// Internal
 | 
			
		||||
///////////////////////////////////
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, 
 | 
			
		||||
						   GaugeField & mat,
 | 
			
		||||
						   const FermionField &A, const FermionField &B, int dag) {
 | 
			
		||||
  assert((dag == DaggerNo) || (dag == DaggerYes));
 | 
			
		||||
 | 
			
		||||
  Compressor compressor;
 | 
			
		||||
 | 
			
		||||
  FermionField Btilde(B._grid);
 | 
			
		||||
  FermionField Atilde(B._grid);
 | 
			
		||||
  Atilde = A;
 | 
			
		||||
 | 
			
		||||
  st.HaloExchange(B, compressor);
 | 
			
		||||
 | 
			
		||||
  for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
    ////////////////////////
 | 
			
		||||
    // Call the single hop
 | 
			
		||||
    ////////////////////////
 | 
			
		||||
    PARALLEL_FOR_LOOP
 | 
			
		||||
    for (int sss = 0; sss < B._grid->oSites(); sss++) {
 | 
			
		||||
      Kernels::DhopDir(st, U, UUU, st.CommBuf(), sss, sss, B, Btilde, mu,1);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    // Force in three link terms
 | 
			
		||||
    //
 | 
			
		||||
    //    Impl::InsertForce4D(mat, Btilde, Atilde, mu);
 | 
			
		||||
    //
 | 
			
		||||
    // dU_ac(x)/dt = i p_ab U_bc(x)
 | 
			
		||||
    //
 | 
			
		||||
    // => dS_f/dt = dS_f/dU_ac(x) . dU_ac(x)/dt =  i p_ab U_bc(x) dS_f/dU_ac(x) 
 | 
			
		||||
    //
 | 
			
		||||
    // One link: form fragments S_f = A U B 
 | 
			
		||||
    //
 | 
			
		||||
    //         write Btilde = U(x) B(x+mu)
 | 
			
		||||
    //
 | 
			
		||||
    // mat+= TraceIndex<SpinIndex>(outerProduct(Btilde,A)); 
 | 
			
		||||
    // 
 | 
			
		||||
    // Three link: form fragments S_f = A UUU B 
 | 
			
		||||
    //
 | 
			
		||||
    // mat+= outer ( A, UUUB) <-- Best take DhopDeriv with one link or identity matrix
 | 
			
		||||
    // mat+= outer ( AU, UUB) <-- and then use covariant cshift?
 | 
			
		||||
    // mat+= outer ( AUU, UB) <-- Returned from call to DhopDir
 | 
			
		||||
 | 
			
		||||
    assert(0);// need to figure out the force interface with a blasted three link term.
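    // (added note) i.e. until the three-link (Naik) piece has its own force-insertion path,
    // DerivInternal deliberately aborts rather than return an incomplete derivative.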
 | 
			
		||||
    
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {
 | 
			
		||||
 | 
			
		||||
  conformable(U._grid, _grid);
 | 
			
		||||
  conformable(U._grid, V._grid);
 | 
			
		||||
  conformable(U._grid, mat._grid);
 | 
			
		||||
 | 
			
		||||
  mat.checkerboard = U.checkerboard;
 | 
			
		||||
 | 
			
		||||
  DerivInternal(Stencil, Umu, UUUmu, mat, U, V, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {
 | 
			
		||||
 | 
			
		||||
  conformable(U._grid, _cbgrid);
 | 
			
		||||
  conformable(U._grid, V._grid);
 | 
			
		||||
  conformable(U._grid, mat._grid);
 | 
			
		||||
 | 
			
		||||
  assert(V.checkerboard == Even);
 | 
			
		||||
  assert(U.checkerboard == Odd);
 | 
			
		||||
  mat.checkerboard = Odd;
 | 
			
		||||
 | 
			
		||||
  DerivInternal(StencilEven, UmuOdd, UUUmuOdd, mat, U, V, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {
 | 
			
		||||
 | 
			
		||||
  conformable(U._grid, _cbgrid);
 | 
			
		||||
  conformable(U._grid, V._grid);
 | 
			
		||||
  conformable(U._grid, mat._grid);
 | 
			
		||||
 | 
			
		||||
  assert(V.checkerboard == Odd);
 | 
			
		||||
  assert(U.checkerboard == Even);
 | 
			
		||||
  mat.checkerboard = Even;
 | 
			
		||||
 | 
			
		||||
  DerivInternal(StencilOdd, UmuEven, UUUmuEven, mat, U, V, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
 | 
			
		||||
  conformable(in._grid, _grid);  // verifies full grid
 | 
			
		||||
  conformable(in._grid, out._grid);
 | 
			
		||||
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(Stencil, Lebesgue, Umu, UUUmu, in, out, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
 | 
			
		||||
  conformable(in._grid, _cbgrid);    // verifies half grid
 | 
			
		||||
  conformable(in._grid, out._grid);  // drops the cb check
 | 
			
		||||
 | 
			
		||||
  assert(in.checkerboard == Even);
 | 
			
		||||
  out.checkerboard = Odd;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(StencilEven, LebesgueEvenOdd, UmuOdd, UUUmuOdd, in, out, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopEO(const FermionField &in, FermionField &out, int dag) {
 | 
			
		||||
  conformable(in._grid, _cbgrid);    // verifies half grid
 | 
			
		||||
  conformable(in._grid, out._grid);  // drops the cb check
 | 
			
		||||
 | 
			
		||||
  assert(in.checkerboard == Odd);
 | 
			
		||||
  out.checkerboard = Even;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(StencilOdd, LebesgueEvenOdd, UmuEven, UUUmuEven, in, out, dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
 | 
			
		||||
  DhopDir(in, out, dir, disp);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) {
 | 
			
		||||
 | 
			
		||||
  Compressor compressor;
 | 
			
		||||
  Stencil.HaloExchange(in, compressor);
 | 
			
		||||
 | 
			
		||||
  PARALLEL_FOR_LOOP
 | 
			
		||||
  for (int sss = 0; sss < in._grid->oSites(); sss++) {
 | 
			
		||||
    Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sss, sss, in, out, dir, disp);
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
 | 
			
		||||
						  DoubledGaugeField &U,
 | 
			
		||||
						  DoubledGaugeField &UUU,
 | 
			
		||||
						  const FermionField &in,
 | 
			
		||||
						  FermionField &out, int dag) {
 | 
			
		||||
  assert((dag == DaggerNo) || (dag == DaggerYes));
 | 
			
		||||
 | 
			
		||||
  Compressor compressor;
 | 
			
		||||
  st.HaloExchange(in, compressor);
 | 
			
		||||
 | 
			
		||||
  if (dag == DaggerYes) {
 | 
			
		||||
    PARALLEL_FOR_LOOP
 | 
			
		||||
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
 | 
			
		||||
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    PARALLEL_FOR_LOOP
 | 
			
		||||
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
 | 
			
		||||
      Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion);
 | 
			
		||||
 | 
			
		||||
  //AdjointFermOpTemplateInstantiate(ImprovedStaggeredFermion);
 | 
			
		||||
  //TwoIndexFermOpTemplateInstantiate(ImprovedStaggeredFermion);
 | 
			
		||||
 | 
			
		||||
}}
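
A minimal usage sketch of the new operator (an illustration added here, not code from the commit; the grids, gauge fields and mass are assumed to be set up in the usual Grid way):

  ImprovedStaggeredFermionR Ds(Uthin, Ufat, Grid, RBGrid, mass); // defaults: c1=9/8, c2=-1/24, u0=1
  FermionField src(&Grid), result(&Grid);
  Ds.M(src, result);              // result = Dhop(src) + mass*src
  Ds.Dhop(src, result, DaggerNo); // hopping term only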

 lib/qcd/action/fermion/ImprovedStaggeredFermion.h (new file, 167 lines)
@@ -0,0 +1,167 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/ImprovedStaggered.h
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi, Peter Boyle
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#ifndef GRID_QCD_IMPR_STAG_FERMION_H
 | 
			
		||||
#define GRID_QCD_IMPR_STAG_FERMION_H
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
class ImprovedStaggeredFermionStatic {
 | 
			
		||||
 public:
 | 
			
		||||
  static const std::vector<int> directions;
 | 
			
		||||
  static const std::vector<int> displacements;
 | 
			
		||||
  static const int npoint = 16;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedStaggeredFermionStatic {
 | 
			
		||||
 public:
 | 
			
		||||
  INHERIT_IMPL_TYPES(Impl);
 | 
			
		||||
  typedef StaggeredKernels<Impl> Kernels;
 | 
			
		||||
 | 
			
		||||
  FermionField _tmp;
 | 
			
		||||
  FermionField &tmp(void) { return _tmp; }
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Implement the abstract base
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  GridBase *GaugeGrid(void) { return _grid; }
 | 
			
		||||
  GridBase *GaugeRedBlackGrid(void) { return _cbgrid; }
 | 
			
		||||
  GridBase *FermionGrid(void) { return _grid; }
 | 
			
		||||
  GridBase *FermionRedBlackGrid(void) { return _cbgrid; }
 | 
			
		||||
 | 
			
		||||
  //////////////////////////////////////////////////////////////////
 | 
			
		||||
  // override multiply; cut number routines if pass dagger argument
 | 
			
		||||
  // and also make interface more uniformly consistent
 | 
			
		||||
  //////////////////////////////////////////////////////////////////
 | 
			
		||||
  RealD M(const FermionField &in, FermionField &out);
 | 
			
		||||
  RealD Mdag(const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
  /////////////////////////////////////////////////////////
 | 
			
		||||
  // half checkerboard operations
 | 
			
		||||
  /////////////////////////////////////////////////////////
 | 
			
		||||
  void Meooe(const FermionField &in, FermionField &out);
 | 
			
		||||
  void MeooeDag(const FermionField &in, FermionField &out);
 | 
			
		||||
  void Mooee(const FermionField &in, FermionField &out);
 | 
			
		||||
  void MooeeDag(const FermionField &in, FermionField &out);
 | 
			
		||||
  void MooeeInv(const FermionField &in, FermionField &out);
 | 
			
		||||
  void MooeeInvDag(const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
  ////////////////////////
 | 
			
		||||
  // Derivative interface
 | 
			
		||||
  ////////////////////////
 | 
			
		||||
  // Interface calls an internal routine
 | 
			
		||||
  void DhopDeriv  (GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
 | 
			
		||||
  void DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
 | 
			
		||||
  void DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // non-hermitian hopping term; half cb or both
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  void Dhop  (const FermionField &in, FermionField &out, int dag);
 | 
			
		||||
  void DhopOE(const FermionField &in, FermionField &out, int dag);
 | 
			
		||||
  void DhopEO(const FermionField &in, FermionField &out, int dag);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Multigrid assistance; force term uses too
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  void Mdir(const FermionField &in, FermionField &out, int dir, int disp);
 | 
			
		||||
  void DhopDir(const FermionField &in, FermionField &out, int dir, int disp);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Extra methods added by derived
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  void DerivInternal(StencilImpl &st, 
 | 
			
		||||
		     DoubledGaugeField &U,DoubledGaugeField &UUU,
 | 
			
		||||
		     GaugeField &mat, 
 | 
			
		||||
		     const FermionField &A, const FermionField &B, int dag);
 | 
			
		||||
 | 
			
		||||
  void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
 | 
			
		||||
                    const FermionField &in, FermionField &out, int dag);
 | 
			
		||||
 | 
			
		||||
  // Constructor
 | 
			
		||||
  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
 | 
			
		||||
			   GridRedBlackCartesian &Hgrid, RealD _mass,
 | 
			
		||||
			   RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0,
 | 
			
		||||
			   const ImplParams &p = ImplParams());
 | 
			
		||||
 | 
			
		||||
  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
 | 
			
		||||
			   GridRedBlackCartesian &Hgrid, RealD _mass,
 | 
			
		||||
			   const ImplParams &p = ImplParams());
 | 
			
		||||
 | 
			
		||||
  ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass,
 | 
			
		||||
			   const ImplParams &p = ImplParams());
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  // DoubleStore impl dependent
 | 
			
		||||
  void ImportGaugeSimple(const GaugeField &_Utriple, const GaugeField &_Ufat);
 | 
			
		||||
  void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);
 | 
			
		||||
  void ImportGauge(const GaugeField &_Uthin);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
  // Data members required to support the functionality
 | 
			
		||||
  ///////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
  //    protected:
 | 
			
		||||
 public:
 | 
			
		||||
  // any other parameters of action ???
 | 
			
		||||
 | 
			
		||||
  RealD mass;
 | 
			
		||||
  RealD u0;
 | 
			
		||||
  RealD c1;
 | 
			
		||||
  RealD c2;
 | 
			
		||||
 | 
			
		||||
  GridBase *_grid;
 | 
			
		||||
  GridBase *_cbgrid;
 | 
			
		||||
 | 
			
		||||
  // Defines the stencils for even and odd
 | 
			
		||||
  StencilImpl Stencil;
 | 
			
		||||
  StencilImpl StencilEven;
 | 
			
		||||
  StencilImpl StencilOdd;
 | 
			
		||||
 | 
			
		||||
  // Copy of the gauge field , with even and odd subsets
 | 
			
		||||
  DoubledGaugeField Umu;
 | 
			
		||||
  DoubledGaugeField UmuEven;
 | 
			
		||||
  DoubledGaugeField UmuOdd;
 | 
			
		||||
 | 
			
		||||
  DoubledGaugeField UUUmu;
 | 
			
		||||
  DoubledGaugeField UUUmuEven;
 | 
			
		||||
  DoubledGaugeField UUUmuOdd;
 | 
			
		||||
 | 
			
		||||
  LebesgueOrder Lebesgue;
 | 
			
		||||
  LebesgueOrder LebesgueEvenOdd;
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
 | 
			
		||||
typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
}
 | 
			
		||||
#endif
 | 
			
		||||
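Editor's note: the header above only declares the interface of the 4d operator. A minimal usage sketch follows, assuming the grids (UGrid, UrbGrid) and the thin/fat gauge fields (Uthin, Ufat) have been set up by the caller; the variable names are illustrative and not part of this commit.

  // Construct the 4d improved staggered operator with the default Naik
  // coefficients (c1 = 9/8, c2 = -1/24, u0 = 1) and apply the full operator.
  RealD mass = 0.1;
  ImprovedStaggeredFermionD Ds(Uthin, Ufat, *UGrid, *UrbGrid, mass);
  ImprovedStaggeredFermionD::FermionField src(UGrid), result(UGrid);
  Ds.M(src, result);   // result = mass*src + Dhop(src); declared to return a RealD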
							
								
								
									
355 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc (Normal file)
@@ -0,0 +1,355 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
 | 
			
		||||
#include <Grid/perfmon/PerfCount.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
  
 | 
			
		||||
// S-direction is INNERMOST and takes no part in the parity.
 | 
			
		||||
const std::vector<int> 
 | 
			
		||||
ImprovedStaggeredFermion5DStatic::directions({1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4});
 | 
			
		||||
const std::vector<int> 
 | 
			
		||||
ImprovedStaggeredFermion5DStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});
 | 
			
		||||
 | 
			
		||||
  // 5d lattice for DWF.
 | 
			
		||||
template<class Impl>
 | 
			
		||||
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin,GaugeField &_Ufat,
 | 
			
		||||
							     GridCartesian         &FiveDimGrid,
 | 
			
		||||
							     GridRedBlackCartesian &FiveDimRedBlackGrid,
 | 
			
		||||
							     GridCartesian         &FourDimGrid,
 | 
			
		||||
							     GridRedBlackCartesian &FourDimRedBlackGrid,
 | 
			
		||||
							     RealD _mass,
 | 
			
		||||
							     RealD _c1,RealD _c2, RealD _u0,
 | 
			
		||||
							     const ImplParams &p) :
 | 
			
		||||
  Kernels(p),
 | 
			
		||||
  _FiveDimGrid        (&FiveDimGrid),
 | 
			
		||||
  _FiveDimRedBlackGrid(&FiveDimRedBlackGrid),
 | 
			
		||||
  _FourDimGrid        (&FourDimGrid),
 | 
			
		||||
  _FourDimRedBlackGrid(&FourDimRedBlackGrid),
 | 
			
		||||
  Stencil    (&FiveDimGrid,npoint,Even,directions,displacements),
 | 
			
		||||
  StencilEven(&FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even
 | 
			
		||||
  StencilOdd (&FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd
 | 
			
		||||
  mass(_mass),
 | 
			
		||||
  c1(_c1),
 | 
			
		||||
  c2(_c2),
 | 
			
		||||
  u0(_u0),
 | 
			
		||||
  Umu(&FourDimGrid),
 | 
			
		||||
  UmuEven(&FourDimRedBlackGrid),
 | 
			
		||||
  UmuOdd (&FourDimRedBlackGrid),
 | 
			
		||||
  UUUmu(&FourDimGrid),
 | 
			
		||||
  UUUmuEven(&FourDimRedBlackGrid),
 | 
			
		||||
  UUUmuOdd(&FourDimRedBlackGrid),
 | 
			
		||||
  Lebesgue(&FourDimGrid),
 | 
			
		||||
  LebesgueEvenOdd(&FourDimRedBlackGrid),
 | 
			
		||||
  _tmp(&FiveDimRedBlackGrid)
 | 
			
		||||
{
 | 
			
		||||
 | 
			
		||||
  // some assertions
 | 
			
		||||
  assert(FiveDimGrid._ndimension==5);
 | 
			
		||||
  assert(FourDimGrid._ndimension==4);
 | 
			
		||||
  assert(FourDimRedBlackGrid._ndimension==4);
 | 
			
		||||
  assert(FiveDimRedBlackGrid._ndimension==5);
 | 
			
		||||
  assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction
 | 
			
		||||
 | 
			
		||||
  // extent of fifth dim and not spread out
 | 
			
		||||
  Ls=FiveDimGrid._fdimensions[0];
 | 
			
		||||
  assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
 | 
			
		||||
  assert(FiveDimGrid._processors[0]         ==1);
 | 
			
		||||
  assert(FiveDimRedBlackGrid._processors[0] ==1);
 | 
			
		||||
 | 
			
		||||
  // Other dimensions must match the decomposition of the four-D fields 
 | 
			
		||||
  for(int d=0;d<4;d++){
 | 
			
		||||
    assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
 | 
			
		||||
    assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);
 | 
			
		||||
    assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);
 | 
			
		||||
 | 
			
		||||
    assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
 | 
			
		||||
    assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
 | 
			
		||||
    assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);
 | 
			
		||||
 | 
			
		||||
    assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
 | 
			
		||||
    assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);
 | 
			
		||||
    assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  if (Impl::LsVectorised) { 
 | 
			
		||||
 | 
			
		||||
    int nsimd = Simd::Nsimd();
 | 
			
		||||
    
 | 
			
		||||
    // Dimension zero of the five-d is the Ls direction
 | 
			
		||||
    assert(FiveDimGrid._simd_layout[0]        ==nsimd);
 | 
			
		||||
    assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd);
 | 
			
		||||
 | 
			
		||||
    for(int d=0;d<4;d++){
 | 
			
		||||
      assert(FourDimGrid._simd_layout[d]==1);
 | 
			
		||||
      assert(FourDimRedBlackGrid._simd_layout[d]==1);
 | 
			
		||||
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==1);
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
  } else {
 | 
			
		||||
    
 | 
			
		||||
    // Dimension zero of the five-d is the Ls direction
 | 
			
		||||
    assert(FiveDimRedBlackGrid._simd_layout[0]==1);
 | 
			
		||||
    assert(FiveDimGrid._simd_layout[0]        ==1);
 | 
			
		||||
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Allocate the required comms buffer
 | 
			
		||||
  ImportGauge(_Uthin,_Ufat);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin) 
 | 
			
		||||
{
 | 
			
		||||
  ImportGauge(_Uthin,_Uthin);
 | 
			
		||||
};
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat)
 | 
			
		||||
{
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  // Double Store should take two fields for Naik and one hop separately.
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat );
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  // Apply scale factors to get the right fermion Kinetic term
 | 
			
		||||
  // Could pass coeffs into the double store to save work.
 | 
			
		||||
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) ) 
 | 
			
		||||
  ////////////////////////////////////////////////////////
 | 
			
		||||
  for (int mu = 0; mu < Nd; mu++) {
 | 
			
		||||
 | 
			
		||||
    auto U = PeekIndex<LorentzIndex>(Umu, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu );
 | 
			
		||||
    
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(Umu, mu+4);
 | 
			
		||||
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);
 | 
			
		||||
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(UUUmu, mu);
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu );
 | 
			
		||||
    
 | 
			
		||||
    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
 | 
			
		||||
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  pickCheckerboard(Even, UmuEven, Umu);
 | 
			
		||||
  pickCheckerboard(Odd,  UmuOdd , Umu);
 | 
			
		||||
  pickCheckerboard(Even, UUUmuEven, UUUmu);
 | 
			
		||||
  pickCheckerboard(Odd,  UUUmuOdd, UUUmu);
 | 
			
		||||
}
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,int dir5,int disp)
 | 
			
		||||
{
 | 
			
		||||
  int dir = dir5-1; // Maps to the ordering above in "directions" that is passed to stencil
 | 
			
		||||
                    // we drop off the innermost fifth dimension
 | 
			
		||||
 | 
			
		||||
  Compressor compressor;
 | 
			
		||||
  Stencil.HaloExchange(in,compressor);
 | 
			
		||||
 | 
			
		||||
  parallel_for(int ss=0;ss<Umu._grid->oSites();ss++){
 | 
			
		||||
    for(int s=0;s<Ls;s++){
 | 
			
		||||
      int sU=ss;
 | 
			
		||||
      int sF = s+Ls*sU; 
 | 
			
		||||
      Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sF, sU, in, out, dir, disp);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DerivInternal(StencilImpl & st,
 | 
			
		||||
            DoubledGaugeField & U,
 | 
			
		||||
            DoubledGaugeField & UUU,
 | 
			
		||||
            GaugeField &mat,
 | 
			
		||||
            const FermionField &A,
 | 
			
		||||
            const FermionField &B,
 | 
			
		||||
            int dag)
 | 
			
		||||
{
 | 
			
		||||
  // No force terms in multi-rhs solver staggered
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopDeriv(GaugeField &mat,
 | 
			
		||||
				      const FermionField &A,
 | 
			
		||||
				      const FermionField &B,
 | 
			
		||||
				      int dag)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopDerivEO(GaugeField &mat,
 | 
			
		||||
					const FermionField &A,
 | 
			
		||||
					const FermionField &B,
 | 
			
		||||
					int dag)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopDerivOE(GaugeField &mat,
 | 
			
		||||
					const FermionField &A,
 | 
			
		||||
					const FermionField &B,
 | 
			
		||||
					int dag)
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
 | 
			
		||||
						    DoubledGaugeField & U,DoubledGaugeField & UUU,
 | 
			
		||||
						    const FermionField &in, FermionField &out,int dag)
 | 
			
		||||
{
 | 
			
		||||
  Compressor compressor;
 | 
			
		||||
  int LLs = in._grid->_rdimensions[0];
 | 
			
		||||
  st.HaloExchange(in,compressor);
 | 
			
		||||
  
 | 
			
		||||
  // Dhop takes the 4d grid from U, and makes a 5d index for fermion
 | 
			
		||||
  if (dag == DaggerYes) {
 | 
			
		||||
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
 | 
			
		||||
      int sU=ss;
 | 
			
		||||
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU,in, out);
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
 | 
			
		||||
      int sU=ss;
 | 
			
		||||
      Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out);
 | 
			
		||||
    }
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
 | 
			
		||||
{
 | 
			
		||||
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
 | 
			
		||||
  conformable(in._grid,out._grid); // drops the cb check
 | 
			
		||||
 | 
			
		||||
  assert(in.checkerboard==Even);
 | 
			
		||||
  out.checkerboard = Odd;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(StencilEven,LebesgueEvenOdd,UmuOdd,UUUmuOdd,in,out,dag);
 | 
			
		||||
}
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
 | 
			
		||||
{
 | 
			
		||||
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
 | 
			
		||||
  conformable(in._grid,out._grid); // drops the cb check
 | 
			
		||||
 | 
			
		||||
  assert(in.checkerboard==Odd);
 | 
			
		||||
  out.checkerboard = Even;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(StencilOdd,LebesgueEvenOdd,UmuEven,UUUmuEven,in,out,dag);
 | 
			
		||||
}
 | 
			
		||||
template<class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
 | 
			
		||||
{
 | 
			
		||||
  conformable(in._grid,FermionGrid()); // verifies full grid
 | 
			
		||||
  conformable(in._grid,out._grid);
 | 
			
		||||
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
 | 
			
		||||
  DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/////////////////////////////////////////////////////////////////////////
 | 
			
		||||
// Implement the general interface. Here we use SAME mass on all slices
 | 
			
		||||
/////////////////////////////////////////////////////////////////////////
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
 | 
			
		||||
  DhopDir(in, out, dir, disp);
 | 
			
		||||
}
 | 
			
		||||
template <class Impl>
 | 
			
		||||
RealD ImprovedStaggeredFermion5D<Impl>::M(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Dhop(in, out, DaggerNo);
 | 
			
		||||
  return axpy_norm(out, mass, in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
RealD ImprovedStaggeredFermion5D<Impl>::Mdag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Dhop(in, out, DaggerYes);
 | 
			
		||||
  return axpy_norm(out, mass, in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::Meooe(const FermionField &in, FermionField &out) {
 | 
			
		||||
  if (in.checkerboard == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerNo);
 | 
			
		||||
  } else {
 | 
			
		||||
    DhopOE(in, out, DaggerNo);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  if (in.checkerboard == Odd) {
 | 
			
		||||
    DhopEO(in, out, DaggerYes);
 | 
			
		||||
  } else {
 | 
			
		||||
    DhopOE(in, out, DaggerYes);
 | 
			
		||||
  }
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::Mooee(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  typename FermionField::scalar_type scal(mass);
 | 
			
		||||
  out = scal * in;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  Mooee(in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  out = (1.0 / (mass)) * in;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void ImprovedStaggeredFermion5D<Impl>::MooeeInvDag(const FermionField &in,
 | 
			
		||||
                                      FermionField &out) {
 | 
			
		||||
  out.checkerboard = in.checkerboard;
 | 
			
		||||
  MooeeInv(in, out);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D);
 | 
			
		||||
FermOpStaggeredVec5dTemplateInstantiate(ImprovedStaggeredFermion5D);
 | 
			
		||||
  
 | 
			
		||||
}}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
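Editor's note: the ImportGauge scale factors in the file above (+0.5*c1/u0 and -0.5*c1/u0 on the forward/backward one-link field, and +/-0.5*c2/u0^3 on the three-link field) correspond to the usual tree-level improved staggered derivative. As a sketch of the standard form, written in the notation of the comments rather than taken verbatim from this commit:

  D_\mu\chi(x) = \frac{c_1}{2u_0}\left[U_\mu(x)\,\chi(x+\hat\mu) - U^\dagger_\mu(x-\hat\mu)\,\chi(x-\hat\mu)\right]
               + \frac{c_2}{2u_0^{3}}\left[U^{(3)}_\mu(x)\,\chi(x+3\hat\mu) - U^{(3)\dagger}_\mu(x-3\hat\mu)\,\chi(x-3\hat\mu)\right]

with c1 = 9/8 and c2 = -1/24 by default, U^(3) the triple-link product packed into UUUmu by Impl::DoubleStore, and the powers of 1/u0 acting as tadpole-improvement factors.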
							
								
								
									
167 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h (Normal file)
@@ -0,0 +1,167 @@
 | 
			
		||||
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#ifndef  GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H
 | 
			
		||||
#define  GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // This is the 4d red black case appropriate to support
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
    class ImprovedStaggeredFermion5DStatic { 
 | 
			
		||||
    public:
 | 
			
		||||
      // S-direction is INNERMOST and takes no part in the parity.
 | 
			
		||||
      static const std::vector<int> directions;
 | 
			
		||||
      static const std::vector<int> displacements;
 | 
			
		||||
      const int npoint = 16;
 | 
			
		||||
    };
 | 
			
		||||
 | 
			
		||||
    template<class Impl>
 | 
			
		||||
    class ImprovedStaggeredFermion5D :  public StaggeredKernels<Impl>, public ImprovedStaggeredFermion5DStatic 
 | 
			
		||||
    {
 | 
			
		||||
    public:
 | 
			
		||||
      INHERIT_IMPL_TYPES(Impl);
 | 
			
		||||
      typedef StaggeredKernels<Impl> Kernels;
 | 
			
		||||
 | 
			
		||||
      FermionField _tmp;
 | 
			
		||||
      FermionField &tmp(void) { return _tmp; }
 | 
			
		||||
 | 
			
		||||
      ///////////////////////////////////////////////////////////////
 | 
			
		||||
      // Implement the abstract base
 | 
			
		||||
      ///////////////////////////////////////////////////////////////
 | 
			
		||||
      GridBase *GaugeGrid(void)              { return _FourDimGrid ;}
 | 
			
		||||
      GridBase *GaugeRedBlackGrid(void)      { return _FourDimRedBlackGrid ;}
 | 
			
		||||
      GridBase *FermionGrid(void)            { return _FiveDimGrid;}
 | 
			
		||||
      GridBase *FermionRedBlackGrid(void)    { return _FiveDimRedBlackGrid;}
 | 
			
		||||
 | 
			
		||||
      // full checkerboard operations; leave unimplemented as abstract for now
 | 
			
		||||
      RealD  M    (const FermionField &in, FermionField &out);
 | 
			
		||||
      RealD  Mdag (const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
      // half checkerboard operations
 | 
			
		||||
      void   Meooe       (const FermionField &in, FermionField &out);
 | 
			
		||||
      void   Mooee       (const FermionField &in, FermionField &out);
 | 
			
		||||
      void   MooeeInv    (const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
      void   MeooeDag    (const FermionField &in, FermionField &out);
 | 
			
		||||
      void   MooeeDag    (const FermionField &in, FermionField &out);
 | 
			
		||||
      void   MooeeInvDag (const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
      void   Mdir   (const FermionField &in, FermionField &out,int dir,int disp);
 | 
			
		||||
      void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);
 | 
			
		||||
 | 
			
		||||
      // These can be overridden by fancy 5d chiral action
 | 
			
		||||
      void DhopDeriv  (GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
 | 
			
		||||
      void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
 | 
			
		||||
      void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
 | 
			
		||||
 | 
			
		||||
      // Implement hopping term non-hermitian hopping term; half cb or both
 | 
			
		||||
      void Dhop  (const FermionField &in, FermionField &out,int dag);
 | 
			
		||||
      void DhopOE(const FermionField &in, FermionField &out,int dag);
 | 
			
		||||
      void DhopEO(const FermionField &in, FermionField &out,int dag);
 | 
			
		||||
 | 
			
		||||
    
 | 
			
		||||
    ///////////////////////////////////////////////////////////////
 | 
			
		||||
    // New methods added 
 | 
			
		||||
    ///////////////////////////////////////////////////////////////
 | 
			
		||||
    void DerivInternal(StencilImpl & st,
 | 
			
		||||
		       DoubledGaugeField & U,
 | 
			
		||||
		       DoubledGaugeField & UUU,
 | 
			
		||||
		       GaugeField &mat,
 | 
			
		||||
		       const FermionField &A,
 | 
			
		||||
		       const FermionField &B,
 | 
			
		||||
		       int dag);
 | 
			
		||||
    
 | 
			
		||||
    void DhopInternal(StencilImpl & st,
 | 
			
		||||
		      LebesgueOrder &lo,
 | 
			
		||||
		      DoubledGaugeField &U,
 | 
			
		||||
		      DoubledGaugeField &UUU,
 | 
			
		||||
		      const FermionField &in, 
 | 
			
		||||
		      FermionField &out,
 | 
			
		||||
		      int dag);
 | 
			
		||||
    
 | 
			
		||||
    // Constructors
 | 
			
		||||
    ImprovedStaggeredFermion5D(GaugeField &_Uthin,
 | 
			
		||||
			       GaugeField &_Ufat,
 | 
			
		||||
			       GridCartesian         &FiveDimGrid,
 | 
			
		||||
			       GridRedBlackCartesian &FiveDimRedBlackGrid,
 | 
			
		||||
			       GridCartesian         &FourDimGrid,
 | 
			
		||||
			       GridRedBlackCartesian &FourDimRedBlackGrid,
 | 
			
		||||
			       double _mass,
 | 
			
		||||
			       RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0,RealD _u0=1.0,
 | 
			
		||||
			       const ImplParams &p= ImplParams());
 | 
			
		||||
    
 | 
			
		||||
    // DoubleStore
 | 
			
		||||
    void ImportGauge(const GaugeField &_U);
 | 
			
		||||
    void ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat);
 | 
			
		||||
    
 | 
			
		||||
    ///////////////////////////////////////////////////////////////
 | 
			
		||||
    // Data members required to support the functionality
 | 
			
		||||
    ///////////////////////////////////////////////////////////////
 | 
			
		||||
  public:
 | 
			
		||||
    
 | 
			
		||||
    GridBase *_FourDimGrid;
 | 
			
		||||
    GridBase *_FourDimRedBlackGrid;
 | 
			
		||||
    GridBase *_FiveDimGrid;
 | 
			
		||||
    GridBase *_FiveDimRedBlackGrid;
 | 
			
		||||
    
 | 
			
		||||
    RealD mass;
 | 
			
		||||
    RealD c1;
 | 
			
		||||
    RealD c2;
 | 
			
		||||
    RealD u0;
 | 
			
		||||
    int Ls;
 | 
			
		||||
    
 | 
			
		||||
    //Defines the stencils for even and odd
 | 
			
		||||
    StencilImpl Stencil; 
 | 
			
		||||
    StencilImpl StencilEven; 
 | 
			
		||||
    StencilImpl StencilOdd; 
 | 
			
		||||
    
 | 
			
		||||
    // Copy of the gauge field , with even and odd subsets
 | 
			
		||||
    DoubledGaugeField Umu;
 | 
			
		||||
    DoubledGaugeField UmuEven;
 | 
			
		||||
    DoubledGaugeField UmuOdd;
 | 
			
		||||
 | 
			
		||||
    DoubledGaugeField UUUmu;
 | 
			
		||||
    DoubledGaugeField UUUmuEven;
 | 
			
		||||
    DoubledGaugeField UUUmuOdd;
 | 
			
		||||
    
 | 
			
		||||
    LebesgueOrder Lebesgue;
 | 
			
		||||
    LebesgueOrder LebesgueEvenOdd;
 | 
			
		||||
    
 | 
			
		||||
    // Comms buffer
 | 
			
		||||
    std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> >  comm_buf;
 | 
			
		||||
    
 | 
			
		||||
  };
 | 
			
		||||
 | 
			
		||||
}}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
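Editor's note: in both staggered 5d files above the fifth dimension is a stack of right-hand sides; it is kept innermost and excluded from the checkerboarding ("S-direction is INNERMOST and takes no part in the parity"), which is why DhopInternal and DhopDir loop over 4d gauge sites and derive the fermion index from them. A hypothetical illustration of that index packing, mirroring the loops in ImprovedStaggeredFermion5D.cc:

  // sU runs over the (outer) sites of the 4d gauge grid, s over the Ls copies.
  for (int sU = 0; sU < UGrid->oSites(); sU++) {
    for (int s = 0; s < Ls; s++) {
      int sF = s + Ls * sU;   // fermion index: fifth dimension innermost
      // the kernel then combines U._odata[sU] with in._odata[sF]
    }
  }

It also makes clear why the DhopDeriv* routines simply assert(0): there is no force term in this multi-RHS use of the operator.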
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_MOBIUS_FERMION_H
 | 
			
		||||
#define  GRID_QCD_MOBIUS_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H
 | 
			
		||||
#define  GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef OVERLAP_WILSON_CAYLEY_TANH_FERMION_H
 | 
			
		||||
#define OVERLAP_WILSON_CAYLEY_TANH_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H
 | 
			
		||||
#define  OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H
 | 
			
		||||
#define OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H
 | 
			
		||||
#define OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H
 | 
			
		||||
#define OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H
 | 
			
		||||
#define OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -26,7 +26,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
  namespace QCD {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_PARTIAL_FRACTION_H
 | 
			
		||||
#define  GRID_QCD_PARTIAL_FRACTION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
  namespace QCD {
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_SCALED_SHAMIR_FERMION_H
 | 
			
		||||
#define  GRID_QCD_SCALED_SHAMIR_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
#ifndef  GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H
 | 
			
		||||
#define  GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H
 | 
			
		||||
 | 
			
		||||
#include <Grid/Grid.h>
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
276 lib/qcd/action/fermion/StaggeredKernels.cc (Normal file)
@@ -0,0 +1,276 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/StaggeredKernels.cc
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi, Peter Boyle
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#include <Grid/qcd/action/fermion/FermionCore.h>
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
int StaggeredKernelsStatic::Opt= StaggeredKernelsStatic::OptGeneric;
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};
 | 
			
		||||
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
// Generic implementation; move to different file?
 | 
			
		||||
////////////////////////////////////////////
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
 | 
			
		||||
					   SiteSpinor *buf, int sF,
 | 
			
		||||
					   int sU, const FermionField &in, SiteSpinor &out,int threeLink) {
 | 
			
		||||
  const SiteSpinor *chi_p;
 | 
			
		||||
  SiteSpinor chi;
 | 
			
		||||
  SiteSpinor Uchi;
 | 
			
		||||
  StencilEntry *SE;
 | 
			
		||||
  int ptype;
 | 
			
		||||
  int skew = 0;
 | 
			
		||||
  if (threeLink) skew=8;
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Xp
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
 | 
			
		||||
  SE = st.GetEntry(ptype, Xp+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLink(Uchi, U._odata[sU], *chi_p, Xp);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Yp
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Yp+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Yp);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Zp
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Zp+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zp);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Tp
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Tp+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tp);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Xm
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Xm+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Xm);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Ym
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Ym+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Ym);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Zm
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Zm+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zm);
 | 
			
		||||
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  // Tm
 | 
			
		||||
  ///////////////////////////
 | 
			
		||||
  SE = st.GetEntry(ptype, Tm+skew, sF);
 | 
			
		||||
  if (SE->_is_local) {
 | 
			
		||||
    if (SE->_permute) {
 | 
			
		||||
      chi_p = &chi;
 | 
			
		||||
      permute(chi,  in._odata[SE->_offset], ptype);
 | 
			
		||||
    } else {
 | 
			
		||||
      chi_p = &in._odata[SE->_offset];
 | 
			
		||||
    }
 | 
			
		||||
  } else {
 | 
			
		||||
    chi_p = &buf[SE->_offset];
 | 
			
		||||
  }
 | 
			
		||||
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tm);
 | 
			
		||||
 | 
			
		||||
  vstream(out, Uchi);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
 | 
			
		||||
						  SiteSpinor *buf, int LLs, int sU,
 | 
			
		||||
						  const FermionField &in, FermionField &out) {
 | 
			
		||||
  SiteSpinor naik;
 | 
			
		||||
  SiteSpinor naive;
 | 
			
		||||
  int oneLink  =0;
 | 
			
		||||
  int threeLink=1;
 | 
			
		||||
  int dag=1;
 | 
			
		||||
  switch(Opt) {
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  //FIXME; move the sign into the Asm routine
 | 
			
		||||
  case OptInlineAsm:
 | 
			
		||||
    DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
 | 
			
		||||
    for(int s=0;s<LLs;s++) {
 | 
			
		||||
      int sF=s+LLs*sU;
 | 
			
		||||
      out._odata[sF]=-out._odata[sF];
 | 
			
		||||
    }
 | 
			
		||||
    break;
 | 
			
		||||
#endif
 | 
			
		||||
  case OptHandUnroll:
 | 
			
		||||
    DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
 | 
			
		||||
    break;
 | 
			
		||||
  case OptGeneric:
 | 
			
		||||
    for(int s=0;s<LLs;s++){
 | 
			
		||||
       int sF=s+LLs*sU;
 | 
			
		||||
       DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
 | 
			
		||||
       DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
 | 
			
		||||
       out._odata[sF] =-naive-naik; 
 | 
			
		||||
     }
 | 
			
		||||
    break;
 | 
			
		||||
  default:
 | 
			
		||||
    std::cout<<"Oops Opt = "<<Opt<<std::endl;
 | 
			
		||||
    assert(0);
 | 
			
		||||
    break;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
 | 
			
		||||
				      SiteSpinor *buf, int LLs,
 | 
			
		||||
				      int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
  int oneLink  =0;
 | 
			
		||||
  int threeLink=1;
 | 
			
		||||
  SiteSpinor naik;
 | 
			
		||||
  SiteSpinor naive;
 | 
			
		||||
  int dag=0;
 | 
			
		||||
  switch(Opt) {
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  case OptInlineAsm:
 | 
			
		||||
    DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
 | 
			
		||||
    break;
 | 
			
		||||
#endif
 | 
			
		||||
  case OptHandUnroll:
 | 
			
		||||
    DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
 | 
			
		||||
    break;
 | 
			
		||||
  case OptGeneric:
 | 
			
		||||
    for(int s=0;s<LLs;s++){
 | 
			
		||||
      int sF=LLs*sU+s;
 | 
			
		||||
      //      assert(sF<in._odata.size());
 | 
			
		||||
      //      assert(sU< U._odata.size());
 | 
			
		||||
      //      assert(sF>=0);      assert(sU>=0);
 | 
			
		||||
      DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
 | 
			
		||||
      DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
 | 
			
		||||
      out._odata[sF] =naive+naik;
 | 
			
		||||
    }
 | 
			
		||||
    break;
 | 
			
		||||
  default:
 | 
			
		||||
    std::cout<<"Oops Opt = "<<Opt<<std::endl;
 | 
			
		||||
    assert(0);
 | 
			
		||||
    break;
 | 
			
		||||
  }
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopDir( StencilImpl &st, DoubledGaugeField &U,  DoubledGaugeField &UUU, SiteSpinor *buf, int sF,
 | 
			
		||||
				      int sU, const FermionField &in, FermionField &out, int dir, int disp) 
 | 
			
		||||
{
 | 
			
		||||
  // Disp should be either +1,-1,+3,-3
 | 
			
		||||
  // What about "dag" ?
 | 
			
		||||
  // Because we work out pU . dS/dU 
 | 
			
		||||
  // U
 | 
			
		||||
  assert(0);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
FermOpStaggeredTemplateInstantiate(StaggeredKernels);
 | 
			
		||||
FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels);
 | 
			
		||||
 | 
			
		||||
}}
 | 
			
		||||
 | 
			
		||||
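Editor's note: StaggeredKernels dispatches on the static Opt flag defined to OptGeneric above and declared in the header that follows. A minimal sketch of switching to the hand-unrolled kernels at run time (OptInlineAsm additionally needs an AVX512 build), assuming Grid has already been initialised:

  // Select the hand-unrolled staggered kernels instead of the generic C++ path.
  StaggeredKernelsStatic::Opt = StaggeredKernelsStatic::OptHandUnroll;

In the generic path each site is assembled from two DhopSiteDepth calls, one with the one-link field U and one with the three-link field UUU, and the two contributions are summed (and negated for the daggered operator).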
							
								
								
									
83 lib/qcd/action/fermion/StaggeredKernels.h (Normal file)
@@ -0,0 +1,83 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
Grid physics library, www.github.com/paboyle/Grid
 | 
			
		||||
 | 
			
		||||
Source file: ./lib/qcd/action/fermion/StaggeredKernels.h
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Azusa Yamaguchi, Peter Boyle
 | 
			
		||||
 | 
			
		||||
This program is free software; you can redistribute it and/or modify
 | 
			
		||||
it under the terms of the GNU General Public License as published by
 | 
			
		||||
the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
(at your option) any later version.
 | 
			
		||||
 | 
			
		||||
This program is distributed in the hope that it will be useful,
 | 
			
		||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
You should have received a copy of the GNU General Public License along
 | 
			
		||||
with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
See the full license in the file "LICENSE" in the top level distribution
 | 
			
		||||
directory
 | 
			
		||||
*************************************************************************************/
 | 
			
		||||
/*  END LEGAL */
 | 
			
		||||
#ifndef GRID_QCD_STAGGERED_KERNELS_H
 | 
			
		||||
#define GRID_QCD_STAGGERED_KERNELS_H
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
  // Helper routines that implement Staggered stencil for a single site.
 | 
			
		||||
  ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | 
			
		||||
class StaggeredKernelsStatic { 
 | 
			
		||||
 public:
 | 
			
		||||
  enum { OptGeneric, OptHandUnroll, OptInlineAsm };
 | 
			
		||||
  // S-direction is INNERMOST and takes no part in the parity.
 | 
			
		||||
  static int Opt;  // these are a temporary hack
 | 
			
		||||
};
 | 
			
		||||
 
 | 
			
		||||
template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , public StaggeredKernelsStatic { 
 | 
			
		||||
 public:
 | 
			
		||||
   
 | 
			
		||||
  INHERIT_IMPL_TYPES(Impl);
 | 
			
		||||
  typedef FermionOperator<Impl> Base;
 | 
			
		||||
   
 | 
			
		||||
public:
 | 
			
		||||
    
 | 
			
		||||
   void DhopDir(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
 | 
			
		||||
		      int sF, int sU, const FermionField &in, FermionField &out, int dir,int disp);
 | 
			
		||||
 | 
			
		||||
   void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
 | 
			
		||||
		     int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
   void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
 | 
			
		||||
		     int sF, int sU, const FermionField &in, SiteSpinor&out,int threeLink);
 | 
			
		||||
 | 
			
		||||
   void DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,SiteSpinor * buf,
 | 
			
		||||
		     int LLs, int sU, const FermionField &in, FermionField &out, int dag);
 | 
			
		||||
 | 
			
		||||
   void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU, SiteSpinor * buf,
 | 
			
		||||
			 int LLs, int sU, const FermionField &in, FermionField &out);
 | 
			
		||||
      
 | 
			
		||||
   void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
 | 
			
		||||
		int sF, int sU, const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
   void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, 
 | 
			
		||||
                   int LLs, int sU, const FermionField &in, FermionField &out);
 | 
			
		||||
  
 | 
			
		||||
public:
 | 
			
		||||
 | 
			
		||||
  StaggeredKernels(const ImplParams &p = ImplParams());
 | 
			
		||||
 | 
			
		||||
};
 | 
			
		||||
    
 | 
			
		||||
}}
 | 
			
		||||
 | 
			
		||||
#endif
 | 
			
		||||
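Editor's note: the skew offset used by DhopSiteDepth (skew = 8 for the three-link hop) relies on the stencil tables holding all eight one-link points first and the eight Naik points after them, as in the 5d tables shown earlier in this commit. For the 4d operator the analogous tables would look like the following, given purely as an illustration of the ordering convention rather than as the definitions from this commit:

  // Illustrative only: one-link entries (skew 0) precede three-link entries (skew 8).
  const std::vector<int> directions   ({0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3});
  const std::vector<int> displacements({1,1,1,1, -1,-1,-1,-1, 3,3,3,3, -3,-3,-3,-3});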
							
								
								
									
920 lib/qcd/action/fermion/StaggeredKernelsAsm.cc (Normal file)
@@ -0,0 +1,920 @@
 | 
			
		||||
/*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/StaggeredKernelsAsm.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid.h>
 | 
			
		||||
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
#include <simd/Intel512common.h>
 | 
			
		||||
#include <simd/Intel512avx.h>
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
// Interleave operations from two directions
 | 
			
		||||
// This looks just like a 2 spin multiply and reuse same sequence from the Wilson
 | 
			
		||||
// Kernel. But the spin index becomes a mu index instead.
 | 
			
		||||
#define Chi_00 %zmm0
 | 
			
		||||
#define Chi_01 %zmm1
 | 
			
		||||
#define Chi_02 %zmm2
 | 
			
		||||
#define Chi_10 %zmm3
 | 
			
		||||
#define Chi_11 %zmm4
 | 
			
		||||
#define Chi_12 %zmm5
 | 
			
		||||
#define Chi_20 %zmm6
 | 
			
		||||
#define Chi_21 %zmm7
 | 
			
		||||
#define Chi_22 %zmm8
 | 
			
		||||
#define Chi_30 %zmm9
 | 
			
		||||
#define Chi_31 %zmm10
 | 
			
		||||
#define Chi_32 %zmm11
 | 
			
		||||
 | 
			
		||||
#define UChi_00 %zmm12
 | 
			
		||||
#define UChi_01 %zmm13
 | 
			
		||||
#define UChi_02 %zmm14
 | 
			
		||||
#define UChi_10 %zmm15
 | 
			
		||||
#define UChi_11 %zmm16
 | 
			
		||||
#define UChi_12 %zmm17
 | 
			
		||||
#define UChi_20 %zmm18
 | 
			
		||||
#define UChi_21 %zmm19
 | 
			
		||||
#define UChi_22 %zmm20
 | 
			
		||||
#define UChi_30 %zmm21
 | 
			
		||||
#define UChi_31 %zmm22
 | 
			
		||||
#define UChi_32 %zmm23
 | 
			
		||||
 | 
			
		||||
#define pChi_00 %%zmm0
 | 
			
		||||
#define pChi_01 %%zmm1
 | 
			
		||||
#define pChi_02 %%zmm2
 | 
			
		||||
#define pChi_10 %%zmm3
 | 
			
		||||
#define pChi_11 %%zmm4
 | 
			
		||||
#define pChi_12 %%zmm5
 | 
			
		||||
#define pChi_20 %%zmm6
 | 
			
		||||
#define pChi_21 %%zmm7
 | 
			
		||||
#define pChi_22 %%zmm8
 | 
			
		||||
#define pChi_30 %%zmm9
 | 
			
		||||
#define pChi_31 %%zmm10
 | 
			
		||||
#define pChi_32 %%zmm11
 | 
			
		||||
 | 
			
		||||
#define pUChi_00 %%zmm12
 | 
			
		||||
#define pUChi_01 %%zmm13
 | 
			
		||||
#define pUChi_02 %%zmm14
 | 
			
		||||
#define pUChi_10 %%zmm15
 | 
			
		||||
#define pUChi_11 %%zmm16
 | 
			
		||||
#define pUChi_12 %%zmm17
 | 
			
		||||
#define pUChi_20 %%zmm18
 | 
			
		||||
#define pUChi_21 %%zmm19
 | 
			
		||||
#define pUChi_22 %%zmm20
 | 
			
		||||
#define pUChi_30 %%zmm21
 | 
			
		||||
#define pUChi_31 %%zmm22
 | 
			
		||||
#define pUChi_32 %%zmm23
 | 
			
		||||
 | 
			
		||||
#define T0 %zmm24
 | 
			
		||||
#define T1 %zmm25
 | 
			
		||||
#define T2 %zmm26
 | 
			
		||||
#define T3 %zmm27
 | 
			
		||||
 | 
			
		||||
#define Z00 %zmm26
 | 
			
		||||
#define Z10 %zmm27
 | 
			
		||||
#define Z0 Z00
 | 
			
		||||
#define Z1 %zmm28
 | 
			
		||||
#define Z2 %zmm29
 | 
			
		||||
 | 
			
		||||
#define Z3 %zmm30
 | 
			
		||||
#define Z4 %zmm31
 | 
			
		||||
#define Z5 Chi_31
 | 
			
		||||
#define Z6 Chi_32
 | 
			
		||||
 | 
			
		||||
#define MULT_ADD_LS(g0,g1,g2,g3)					\
 | 
			
		||||
  asm ( "movq %0, %%r8 \n\t"					\
 | 
			
		||||
	"movq %1, %%r9 \n\t"						\
 | 
			
		||||
        "movq %2, %%r10 \n\t"						\
 | 
			
		||||
        "movq %3, %%r11 \n\t" :  : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\
 | 
			
		||||
  asm (									\
 | 
			
		||||
  VSHUF(Chi_00,T0)      VSHUF(Chi_10,T1)				\
 | 
			
		||||
  VSHUF(Chi_20,T2)      VSHUF(Chi_30,T3)				\
 | 
			
		||||
  VMADDSUBIDUP(0,%r8,T0,UChi_00) VMADDSUBIDUP(0,%r9,T1,UChi_10)		\
 | 
			
		||||
  VMADDSUBIDUP(3,%r8,T0,UChi_01) VMADDSUBIDUP(3,%r9,T1,UChi_11)		\
 | 
			
		||||
  VMADDSUBIDUP(6,%r8,T0,UChi_02) VMADDSUBIDUP(6,%r9,T1,UChi_12)		\
 | 
			
		||||
  VMADDSUBIDUP(0,%r10,T2,UChi_20) VMADDSUBIDUP(0,%r11,T3,UChi_30)		\
 | 
			
		||||
  VMADDSUBIDUP(3,%r10,T2,UChi_21) VMADDSUBIDUP(3,%r11,T3,UChi_31)		\
 | 
			
		||||
  VMADDSUBIDUP(6,%r10,T2,UChi_22) VMADDSUBIDUP(6,%r11,T3,UChi_32)		\
 | 
			
		||||
  VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \
 | 
			
		||||
  VSHUF(Chi_01,T0)	  VSHUF(Chi_11,T1)				\
 | 
			
		||||
  VSHUF(Chi_21,T2)	  VSHUF(Chi_31,T3)				\
 | 
			
		||||
  VMADDSUBIDUP(1,%r8,T0,UChi_00)     VMADDSUBIDUP(1,%r9,T1,UChi_10)	\
 | 
			
		||||
  VMADDSUBIDUP(4,%r8,T0,UChi_01)     VMADDSUBIDUP(4,%r9,T1,UChi_11)	\
 | 
			
		||||
  VMADDSUBIDUP(7,%r8,T0,UChi_02)     VMADDSUBIDUP(7,%r9,T1,UChi_12)	\
 | 
			
		||||
  VMADDSUBIDUP(1,%r10,T2,UChi_20)     VMADDSUBIDUP(1,%r11,T3,UChi_30)	\
 | 
			
		||||
  VMADDSUBIDUP(4,%r10,T2,UChi_21)     VMADDSUBIDUP(4,%r11,T3,UChi_31)	\
 | 
			
		||||
  VMADDSUBIDUP(7,%r10,T2,UChi_22)     VMADDSUBIDUP(7,%r11,T3,UChi_32)	\
 | 
			
		||||
  VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \
 | 
			
		||||
  VSHUF(Chi_02,T0)    VSHUF(Chi_12,T1)					\
 | 
			
		||||
  VSHUF(Chi_22,T2)    VSHUF(Chi_32,T3)					\
 | 
			
		||||
  VMADDSUBIDUP(2,%r8,T0,UChi_00)     VMADDSUBIDUP(2,%r9,T1,UChi_10)     \
 | 
			
		||||
  VMADDSUBIDUP(5,%r8,T0,UChi_01)     VMADDSUBIDUP(5,%r9,T1,UChi_11)     \
 | 
			
		||||
  VMADDSUBIDUP(8,%r8,T0,UChi_02)     VMADDSUBIDUP(8,%r9,T1,UChi_12)     \
 | 
			
		||||
  VMADDSUBIDUP(2,%r10,T2,UChi_20)     VMADDSUBIDUP(2,%r11,T3,UChi_30)     \
 | 
			
		||||
  VMADDSUBIDUP(5,%r10,T2,UChi_21)     VMADDSUBIDUP(5,%r11,T3,UChi_31)     \
 | 
			
		||||
  VMADDSUBIDUP(8,%r10,T2,UChi_22)     VMADDSUBIDUP(8,%r11,T3,UChi_32)     \
 | 
			
		||||
  VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) );
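// Scalar sketch of the complex multiply-accumulate that each
// VSHUF + VMADDSUBIDUP / VMADDSUBRDUP pair above plausibly implements
// (assumed decomposition; the exact lane and operand ordering of the
// intrinsics differs): with a link element u = ur + i*ui and a spinor
// element x = xr + i*xi, acc += u*x splits into an imaginary-part pass
// with an add/sub pattern and a real-part pass with plain adds.
static inline void cmadd_two_pass(double &acc_re, double &acc_im,
                                  double ur, double ui,
                                  double xr, double xi) {
  acc_re -= ui * xi;   // "IDUP" pass: duplicated imaginary part, subtract on re
  acc_im += ui * xr;   //              add on im (operand re/im swapped)
  acc_re += ur * xr;   // "RDUP" pass: duplicated real part, plain adds
  acc_im += ur * xi;
}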
 | 
			
		||||
 | 
			
		||||
#define MULT_LS(g0,g1,g2,g3)					\
 | 
			
		||||
  asm ( "movq %0, %%r8 \n\t"					\
 | 
			
		||||
	"movq %1, %%r9 \n\t"						\
 | 
			
		||||
        "movq %2, %%r10 \n\t"						\
 | 
			
		||||
        "movq %3, %%r11 \n\t" :  : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\
 | 
			
		||||
  asm (									\
 | 
			
		||||
  VSHUF(Chi_00,T0)      VSHUF(Chi_10,T1)				\
 | 
			
		||||
  VSHUF(Chi_20,T2)      VSHUF(Chi_30,T3)				\
 | 
			
		||||
  VMULIDUP(0,%r8,T0,UChi_00) VMULIDUP(0,%r9,T1,UChi_10)		\
 | 
			
		||||
  VMULIDUP(3,%r8,T0,UChi_01) VMULIDUP(3,%r9,T1,UChi_11)		\
 | 
			
		||||
  VMULIDUP(6,%r8,T0,UChi_02) VMULIDUP(6,%r9,T1,UChi_12)		\
 | 
			
		||||
  VMULIDUP(0,%r10,T2,UChi_20) VMULIDUP(0,%r11,T3,UChi_30)		\
 | 
			
		||||
  VMULIDUP(3,%r10,T2,UChi_21) VMULIDUP(3,%r11,T3,UChi_31)		\
 | 
			
		||||
  VMULIDUP(6,%r10,T2,UChi_22) VMULIDUP(6,%r11,T3,UChi_32)		\
 | 
			
		||||
  VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \
 | 
			
		||||
  VSHUF(Chi_01,T0)	  VSHUF(Chi_11,T1)				\
 | 
			
		||||
  VSHUF(Chi_21,T2)	  VSHUF(Chi_31,T3)				\
 | 
			
		||||
  VMADDSUBIDUP(1,%r8,T0,UChi_00)     VMADDSUBIDUP(1,%r9,T1,UChi_10)	\
 | 
			
		||||
  VMADDSUBIDUP(4,%r8,T0,UChi_01)     VMADDSUBIDUP(4,%r9,T1,UChi_11)	\
 | 
			
		||||
  VMADDSUBIDUP(7,%r8,T0,UChi_02)     VMADDSUBIDUP(7,%r9,T1,UChi_12)	\
 | 
			
		||||
  VMADDSUBIDUP(1,%r10,T2,UChi_20)     VMADDSUBIDUP(1,%r11,T3,UChi_30)	\
 | 
			
		||||
  VMADDSUBIDUP(4,%r10,T2,UChi_21)     VMADDSUBIDUP(4,%r11,T3,UChi_31)	\
 | 
			
		||||
  VMADDSUBIDUP(7,%r10,T2,UChi_22)     VMADDSUBIDUP(7,%r11,T3,UChi_32)	\
 | 
			
		||||
  VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \
 | 
			
		||||
  VSHUF(Chi_02,T0)    VSHUF(Chi_12,T1)					\
 | 
			
		||||
  VSHUF(Chi_22,T2)    VSHUF(Chi_32,T3)					\
 | 
			
		||||
  VMADDSUBIDUP(2,%r8,T0,UChi_00)     VMADDSUBIDUP(2,%r9,T1,UChi_10)     \
 | 
			
		||||
  VMADDSUBIDUP(5,%r8,T0,UChi_01)     VMADDSUBIDUP(5,%r9,T1,UChi_11)     \
 | 
			
		||||
  VMADDSUBIDUP(8,%r8,T0,UChi_02)     VMADDSUBIDUP(8,%r9,T1,UChi_12)     \
 | 
			
		||||
  VMADDSUBIDUP(2,%r10,T2,UChi_20)     VMADDSUBIDUP(2,%r11,T3,UChi_30)     \
 | 
			
		||||
  VMADDSUBIDUP(5,%r10,T2,UChi_21)     VMADDSUBIDUP(5,%r11,T3,UChi_31)     \
 | 
			
		||||
  VMADDSUBIDUP(8,%r10,T2,UChi_22)     VMADDSUBIDUP(8,%r11,T3,UChi_32)     \
 | 
			
		||||
  VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \
 | 
			
		||||
  VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \
 | 
			
		||||
  VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \
 | 
			
		||||
  VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \
 | 
			
		||||
  VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \
 | 
			
		||||
  VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) );
 | 
			
		||||
 | 
			
		||||
#define MULT_ADD_XYZTa(g0,g1)					\
 | 
			
		||||
  asm ( "movq %0, %%r8 \n\t"					\
 | 
			
		||||
	"movq %1, %%r9 \n\t"	 :  : "r"(g0), "r"(g1) : "%r8","%r9");\
 | 
			
		||||
	   __asm__ (						\
 | 
			
		||||
	   VSHUF(Chi_00,T0)				\
 | 
			
		||||
	   VSHUF(Chi_10,T1)						\
 | 
			
		||||
	   VMOVIDUP(0,%r8,Z0 )						\
 | 
			
		||||
           VMOVIDUP(3,%r8,Z1 )						\
 | 
			
		||||
           VMOVIDUP(6,%r8,Z2 )						\
 | 
			
		||||
           VMADDSUB(Z0,T0,UChi_00)					\
 | 
			
		||||
	   VMADDSUB(Z1,T0,UChi_01)					\
 | 
			
		||||
	   VMADDSUB(Z2,T0,UChi_02)					\
 | 
			
		||||
									\
 | 
			
		||||
	   VMOVIDUP(0,%r9,Z0 )						\
 | 
			
		||||
           VMOVIDUP(3,%r9,Z1 )						\
 | 
			
		||||
           VMOVIDUP(6,%r9,Z2 )						\
 | 
			
		||||
           VMADDSUB(Z0,T1,UChi_10)					\
 | 
			
		||||
           VMADDSUB(Z1,T1,UChi_11)            \
 | 
			
		||||
           VMADDSUB(Z2,T1,UChi_12)            \
 | 
			
		||||
	   							\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVRDUP(0,%r8,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(3,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(6,%r8,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_00,UChi_00)/*rr * ir = ri rr*/	\
 | 
			
		||||
           VMADDSUB(Z4,Chi_00,UChi_01)				\
 | 
			
		||||
           VMADDSUB(Z5,Chi_00,UChi_02)				\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVRDUP(0,%r9,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(3,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(6,%r9,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_10,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_10,UChi_11)\
 | 
			
		||||
           VMADDSUB(Z5,Chi_10,UChi_12)				\
 | 
			
		||||
	   							\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVIDUP(1,%r8,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(4,%r8,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(7,%r8,Z2 )					\
 | 
			
		||||
	   VSHUF(Chi_01,T0)					\
 | 
			
		||||
           VMADDSUB(Z0,T0,UChi_00)				\
 | 
			
		||||
           VMADDSUB(Z1,T0,UChi_01)				\
 | 
			
		||||
           VMADDSUB(Z2,T0,UChi_02)				\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVIDUP(1,%r9,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(4,%r9,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(7,%r9,Z2 )					\
 | 
			
		||||
	   VSHUF(Chi_11,T1)					\
 | 
			
		||||
           VMADDSUB(Z0,T1,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z1,T1,UChi_11)				\
 | 
			
		||||
           VMADDSUB(Z2,T1,UChi_12)				\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVRDUP(1,%r8,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(4,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(7,%r8,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_01,UChi_00)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_01,UChi_01)				\
 | 
			
		||||
           VMADDSUB(Z5,Chi_01,UChi_02)				\
 | 
			
		||||
								\
 | 
			
		||||
	   VMOVRDUP(1,%r9,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(4,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(7,%r9,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_11,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_11,UChi_11)				\
 | 
			
		||||
           VMADDSUB(Z5,Chi_11,UChi_12)				\
 | 
			
		||||
	   							\
 | 
			
		||||
	   VSHUF(Chi_02,T0)					\
 | 
			
		||||
	   VSHUF(Chi_12,T1)					\
 | 
			
		||||
	   VMOVIDUP(2,%r8,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(5,%r8,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(8,%r8,Z2 )					\
 | 
			
		||||
           VMADDSUB(Z0,T0,UChi_00)				\
 | 
			
		||||
           VMADDSUB(Z1,T0,UChi_01)			      \
 | 
			
		||||
           VMADDSUB(Z2,T0,UChi_02)			      \
 | 
			
		||||
	   VMOVIDUP(2,%r9,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(5,%r9,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(8,%r9,Z2 )					\
 | 
			
		||||
           VMADDSUB(Z0,T1,UChi_10)			      \
 | 
			
		||||
           VMADDSUB(Z1,T1,UChi_11)			      \
 | 
			
		||||
           VMADDSUB(Z2,T1,UChi_12)			      \
 | 
			
		||||
	   /*55*/					      \
 | 
			
		||||
	   VMOVRDUP(2,%r8,Z3 )		  \
 | 
			
		||||
	   VMOVRDUP(5,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(8,%r8,Z5 )				      \
 | 
			
		||||
           VMADDSUB(Z3,Chi_02,UChi_00)			      \
 | 
			
		||||
           VMADDSUB(Z4,Chi_02,UChi_01)			      \
 | 
			
		||||
           VMADDSUB(Z5,Chi_02,UChi_02)			      \
 | 
			
		||||
	   VMOVRDUP(2,%r9,Z3 )		  \
 | 
			
		||||
	   VMOVRDUP(5,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(8,%r9,Z5 )				      \
 | 
			
		||||
           VMADDSUB(Z3,Chi_12,UChi_10)			      \
 | 
			
		||||
           VMADDSUB(Z4,Chi_12,UChi_11)			      \
 | 
			
		||||
           VMADDSUB(Z5,Chi_12,UChi_12)			      \
 | 
			
		||||
	   /*61 insns*/							);
 | 
			
		||||
 | 
			
		||||
#define MULT_ADD_XYZT(g0,g1)					\
 | 
			
		||||
  asm ( "movq %0, %%r8 \n\t"					\
 | 
			
		||||
	"movq %1, %%r9 \n\t"	 :  : "r"(g0), "r"(g1) : "%r8","%r9");\
 | 
			
		||||
  __asm__ (							  \
 | 
			
		||||
  VSHUFMEM(0,%r8,Z00)		   VSHUFMEM(0,%r9,Z10)			\
 | 
			
		||||
  VRDUP(Chi_00,T0)           VIDUP(Chi_00,Chi_00)	          \
 | 
			
		||||
   VRDUP(Chi_10,T1)           VIDUP(Chi_10,Chi_10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z1)        VMUL(Z10,Chi_10,Z2)		  \
 | 
			
		||||
   VSHUFMEM(3,%r8,Z00)	      VSHUFMEM(3,%r9,Z10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z3)        VMUL(Z10,Chi_10,Z4)		  \
 | 
			
		||||
   VSHUFMEM(6,%r8,Z00)	      VSHUFMEM(6,%r9,Z10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z5)        VMUL(Z10,Chi_10,Z6)		  \
 | 
			
		||||
   VMADDMEM(0,%r8,T0,UChi_00)  VMADDMEM(0,%r9,T1,UChi_10)		  \
 | 
			
		||||
   VMADDMEM(3,%r8,T0,UChi_01)  VMADDMEM(3,%r9,T1,UChi_11)		  \
 | 
			
		||||
   VMADDMEM(6,%r8,T0,UChi_02)  VMADDMEM(6,%r9,T1,UChi_12)		  \
 | 
			
		||||
   VSHUFMEM(1,%r8,Z00)	      VSHUFMEM(1,%r9,Z10)		  \
 | 
			
		||||
   VRDUP(Chi_01,T0)           VIDUP(Chi_01,Chi_01)		  \
 | 
			
		||||
   VRDUP(Chi_11,T1)           VIDUP(Chi_11,Chi_11)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z1)       VMADD(Z10,Chi_11,Z2)		  \
 | 
			
		||||
   VSHUFMEM(4,%r8,Z00)	      VSHUFMEM(4,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z3)       VMADD(Z10,Chi_11,Z4)		  \
 | 
			
		||||
   VSHUFMEM(7,%r8,Z00)	      VSHUFMEM(7,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z5)       VMADD(Z10,Chi_11,Z6)		  \
 | 
			
		||||
   VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10)	  \
 | 
			
		||||
   VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11)	  \
 | 
			
		||||
   VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12)	  \
 | 
			
		||||
   VSHUFMEM(2,%r8,Z00)	      VSHUFMEM(2,%r9,Z10)			\
 | 
			
		||||
   VRDUP(Chi_02,T0)           VIDUP(Chi_02,Chi_02)			\
 | 
			
		||||
   VRDUP(Chi_12,T1)           VIDUP(Chi_12,Chi_12)			\
 | 
			
		||||
   VMADD(Z00,Chi_02,Z1)       VMADD(Z10,Chi_12,Z2)		  \
 | 
			
		||||
   VSHUFMEM(5,%r8,Z00)	      VSHUFMEM(5,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_02,Z3)       VMADD(Z10,Chi_12,Z4)		  \
 | 
			
		||||
   VSHUFMEM(8,%r8,Z00)	      VSHUFMEM(8,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_02,Z5)       VMADD(Z10,Chi_12,Z6)		  \
 | 
			
		||||
   VMADDSUBMEM(2,%r8,T0,Z1)   VMADDSUBMEM(2,%r9,T1,Z2)		  \
 | 
			
		||||
   VMADDSUBMEM(5,%r8,T0,Z3)   VMADDSUBMEM(5,%r9,T1,Z4)	          \
 | 
			
		||||
   VMADDSUBMEM(8,%r8,T0,Z5)   VMADDSUBMEM(8,%r9,T1,Z6)	       \
 | 
			
		||||
   VADD(Z1,UChi_00,UChi_00)   VADD(Z2,UChi_10,UChi_10)	       \
 | 
			
		||||
   VADD(Z3,UChi_01,UChi_01)   VADD(Z4,UChi_11,UChi_11)	       \
 | 
			
		||||
   VADD(Z5,UChi_02,UChi_02)   VADD(Z6,UChi_12,UChi_12) );
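// The XYZT variants above keep extra partial sums (Z1..Z6) that are only
// folded into the UChi accumulators by the final VADDs, presumably to expose
// more independent dependency chains to the core. A scalar analogue of the
// split-accumulator idea (hypothetical helper):
static inline double dot3_split(const double a[3], const double b[3]) {
  double p0 = a[0] * b[0];   // chain 0
  double p1 = a[1] * b[1];   // chain 1, independent of p0
  p0 += a[2] * b[2];
  return p0 + p1;            // folded at the end, like the trailing VADDs
}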
 | 
			
		||||
 | 
			
		||||
#define MULT_XYZT(g0,g1)					\
 | 
			
		||||
    asm ( "movq %0, %%r8 \n\t"						\
 | 
			
		||||
	"movq %1, %%r9 \n\t" :  : "r"(g0), "r"(g1) : "%r8","%r9" ); \
 | 
			
		||||
	   __asm__ (						\
 | 
			
		||||
	   VSHUF(Chi_00,T0)				\
 | 
			
		||||
	   VSHUF(Chi_10,T1)						\
 | 
			
		||||
	   VMOVIDUP(0,%r8,Z0 )						\
 | 
			
		||||
           VMOVIDUP(3,%r8,Z1 )						\
 | 
			
		||||
           VMOVIDUP(6,%r8,Z2 )						\
 | 
			
		||||
	   /*6*/							\
 | 
			
		||||
           VMUL(Z0,T0,UChi_00)            \
 | 
			
		||||
           VMUL(Z1,T0,UChi_01)            \
 | 
			
		||||
           VMUL(Z2,T0,UChi_02)            \
 | 
			
		||||
	   VMOVIDUP(0,%r9,Z0 )						\
 | 
			
		||||
           VMOVIDUP(3,%r9,Z1 )						\
 | 
			
		||||
           VMOVIDUP(6,%r9,Z2 )						\
 | 
			
		||||
           VMUL(Z0,T1,UChi_10)            \
 | 
			
		||||
           VMUL(Z1,T1,UChi_11)            \
 | 
			
		||||
           VMUL(Z2,T1,UChi_12)            \
 | 
			
		||||
	   VMOVRDUP(0,%r8,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(3,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(6,%r8,Z5 )					\
 | 
			
		||||
	   /*18*/						\
 | 
			
		||||
           VMADDSUB(Z3,Chi_00,UChi_00)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_00,UChi_01)\
 | 
			
		||||
           VMADDSUB(Z5,Chi_00,UChi_02) \
 | 
			
		||||
	   VMOVRDUP(0,%r9,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(3,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(6,%r9,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_10,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_10,UChi_11)\
 | 
			
		||||
           VMADDSUB(Z5,Chi_10,UChi_12)				\
 | 
			
		||||
	   VMOVIDUP(1,%r8,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(4,%r8,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(7,%r8,Z2 )					\
 | 
			
		||||
	   /*28*/						\
 | 
			
		||||
	   VSHUF(Chi_01,T0)					\
 | 
			
		||||
           VMADDSUB(Z0,T0,UChi_00)      \
 | 
			
		||||
           VMADDSUB(Z1,T0,UChi_01)       \
 | 
			
		||||
           VMADDSUB(Z2,T0,UChi_02)        \
 | 
			
		||||
	   VMOVIDUP(1,%r9,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(4,%r9,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(7,%r9,Z2 )					\
 | 
			
		||||
	   VSHUF(Chi_11,T1)					\
 | 
			
		||||
           VMADDSUB(Z0,T1,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z1,T1,UChi_11)				\
 | 
			
		||||
           VMADDSUB(Z2,T1,UChi_12)        \
 | 
			
		||||
	   VMOVRDUP(1,%r8,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(4,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(7,%r8,Z5 )					\
 | 
			
		||||
           /*38*/						\
 | 
			
		||||
           VMADDSUB(Z3,Chi_01,UChi_00)    \
 | 
			
		||||
           VMADDSUB(Z4,Chi_01,UChi_01)    \
 | 
			
		||||
           VMADDSUB(Z5,Chi_01,UChi_02)    \
 | 
			
		||||
	   VMOVRDUP(1,%r9,Z3 )					\
 | 
			
		||||
	   VMOVRDUP(4,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(7,%r9,Z5 )					\
 | 
			
		||||
           VMADDSUB(Z3,Chi_11,UChi_10)				\
 | 
			
		||||
           VMADDSUB(Z4,Chi_11,UChi_11)    \
 | 
			
		||||
           VMADDSUB(Z5,Chi_11,UChi_12)				\
 | 
			
		||||
	   /*48*/						\
 | 
			
		||||
	   VSHUF(Chi_02,T0)					\
 | 
			
		||||
	   VSHUF(Chi_12,T1)					\
 | 
			
		||||
	   VMOVIDUP(2,%r8,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(5,%r8,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(8,%r8,Z2 )					\
 | 
			
		||||
           VMADDSUB(Z0,T0,UChi_00)				\
 | 
			
		||||
           VMADDSUB(Z1,T0,UChi_01)			      \
 | 
			
		||||
           VMADDSUB(Z2,T0,UChi_02)			      \
 | 
			
		||||
	   VMOVIDUP(2,%r9,Z0 )					\
 | 
			
		||||
	   VMOVIDUP(5,%r9,Z1 )					\
 | 
			
		||||
	   VMOVIDUP(8,%r9,Z2 )					\
 | 
			
		||||
           VMADDSUB(Z0,T1,UChi_10)			      \
 | 
			
		||||
           VMADDSUB(Z1,T1,UChi_11)			      \
 | 
			
		||||
           VMADDSUB(Z2,T1,UChi_12)			      \
 | 
			
		||||
	   /*55*/					      \
 | 
			
		||||
	   VMOVRDUP(2,%r8,Z3 )		  \
 | 
			
		||||
	   VMOVRDUP(5,%r8,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(8,%r8,Z5 )				      \
 | 
			
		||||
           VMADDSUB(Z3,Chi_02,UChi_00)			      \
 | 
			
		||||
           VMADDSUB(Z4,Chi_02,UChi_01)			      \
 | 
			
		||||
           VMADDSUB(Z5,Chi_02,UChi_02)			      \
 | 
			
		||||
	   VMOVRDUP(2,%r9,Z3 )		  \
 | 
			
		||||
	   VMOVRDUP(5,%r9,Z4 )					\
 | 
			
		||||
	   VMOVRDUP(8,%r9,Z5 )				      \
 | 
			
		||||
           VMADDSUB(Z3,Chi_12,UChi_10)			      \
 | 
			
		||||
           VMADDSUB(Z4,Chi_12,UChi_11)			      \
 | 
			
		||||
           VMADDSUB(Z5,Chi_12,UChi_12)			      \
 | 
			
		||||
	   /*61 insns*/							);
 | 
			
		||||
 | 
			
		||||
#define MULT_XYZTa(g0,g1)					\
 | 
			
		||||
  asm ( "movq %0, %%r8 \n\t"					\
 | 
			
		||||
	"movq %1, %%r9 \n\t" :  : "r"(g0), "r"(g1) : "%r8","%r9" ); \
 | 
			
		||||
  __asm__ (							  \
 | 
			
		||||
   VSHUFMEM(0,%r8,Z00)		   VSHUFMEM(0,%r9,Z10)	  \
 | 
			
		||||
   VRDUP(Chi_00,T0)           VIDUP(Chi_00,Chi_00)	          \
 | 
			
		||||
   VRDUP(Chi_10,T1)           VIDUP(Chi_10,Chi_10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z1)        VMUL(Z10,Chi_10,Z2)		  \
 | 
			
		||||
   VSHUFMEM(3,%r8,Z00)	      VSHUFMEM(3,%r9,Z10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z3)        VMUL(Z10,Chi_10,Z4)		  \
 | 
			
		||||
   VSHUFMEM(6,%r8,Z00)	      VSHUFMEM(6,%r9,Z10)		  \
 | 
			
		||||
   VMUL(Z00,Chi_00,Z5)        VMUL(Z10,Chi_10,Z6)		  \
 | 
			
		||||
   VMULMEM(0,%r8,T0,UChi_00)  VMULMEM(0,%r9,T1,UChi_10)		  \
 | 
			
		||||
   VMULMEM(3,%r8,T0,UChi_01)  VMULMEM(3,%r9,T1,UChi_11)		  \
 | 
			
		||||
   VMULMEM(6,%r8,T0,UChi_02)  VMULMEM(6,%r9,T1,UChi_12)		  \
 | 
			
		||||
   VSHUFMEM(1,%r8,Z00)	      VSHUFMEM(1,%r9,Z10)		  \
 | 
			
		||||
   VRDUP(Chi_01,T0)           VIDUP(Chi_01,Chi_01)		  \
 | 
			
		||||
   VRDUP(Chi_11,T1)           VIDUP(Chi_11,Chi_11)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z1)       VMADD(Z10,Chi_11,Z2)		  \
 | 
			
		||||
   VSHUFMEM(4,%r8,Z00)	      VSHUFMEM(4,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z3)       VMADD(Z10,Chi_11,Z4)		  \
 | 
			
		||||
   VSHUFMEM(7,%r8,Z00)	      VSHUFMEM(7,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_01,Z5)       VMADD(Z10,Chi_11,Z6)		  \
 | 
			
		||||
   VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10)	  \
 | 
			
		||||
   VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11)	  \
 | 
			
		||||
   VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12)	  \
 | 
			
		||||
   VSHUFMEM(2,%r8,Z00)	      VSHUFMEM(2,%r9,Z10)			\
 | 
			
		||||
   VRDUP(Chi_02,T0)           VIDUP(Chi_02,Chi_02)			\
 | 
			
		||||
   VRDUP(Chi_12,T1)           VIDUP(Chi_12,Chi_12)			\
 | 
			
		||||
   VMADD(Z00,Chi_02,Z1)       VMADD(Z10,Chi_12,Z2)		  \
 | 
			
		||||
   VSHUFMEM(5,%r8,Z00)	      VSHUFMEM(5,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_02,Z3)       VMADD(Z10,Chi_12,Z4)		  \
 | 
			
		||||
   VSHUFMEM(8,%r8,Z00)	      VSHUFMEM(8,%r9,Z10)		  \
 | 
			
		||||
   VMADD(Z00,Chi_02,Z5)       VMADD(Z10,Chi_12,Z6)		  \
 | 
			
		||||
   VMADDSUBMEM(2,%r8,T0,Z1)   VMADDSUBMEM(2,%r9,T1,Z2)		  \
 | 
			
		||||
   VMADDSUBMEM(5,%r8,T0,Z3)   VMADDSUBMEM(5,%r9,T1,Z4)	          \
 | 
			
		||||
   VMADDSUBMEM(8,%r8,T0,Z5)   VMADDSUBMEM(8,%r9,T1,Z6)	       \
 | 
			
		||||
   VADD(Z1,UChi_00,UChi_00)   VADD(Z2,UChi_10,UChi_10)	       \
 | 
			
		||||
   VADD(Z3,UChi_01,UChi_01)   VADD(Z4,UChi_11,UChi_11)	       \
 | 
			
		||||
   VADD(Z5,UChi_02,UChi_02)   VADD(Z6,UChi_12,UChi_12) );
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define LOAD_CHI(a0,a1,a2,a3)						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_00)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_01)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_02)						\
 | 
			
		||||
       : : "r" (a0) : "%r8" );						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_10)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_11)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_12)						\
 | 
			
		||||
       : : "r" (a1) : "%r8" );						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_20)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_21)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_22)						\
 | 
			
		||||
       : : "r" (a2) : "%r8" );						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_30)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_31)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_32)						\
 | 
			
		||||
       : : "r" (a3) : "%r8" );						
 | 
			
		||||
 | 
			
		||||
#define LOAD_CHIa(a0,a1)						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_00)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_01)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_02)						\
 | 
			
		||||
       : : "r" (a0) : "%r8" );						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VLOAD(0,%%r8,pChi_10)						\
 | 
			
		||||
       VLOAD(1,%%r8,pChi_11)						\
 | 
			
		||||
       VLOAD(2,%%r8,pChi_12)						\
 | 
			
		||||
       : : "r" (a1) : "%r8" );						
 | 
			
		||||
 | 
			
		||||
#define PF_CHI(a0)							
 | 
			
		||||
#define PF_CHIa(a0)							\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VPREFETCH1(0,%%r8)						\
 | 
			
		||||
       VPREFETCH1(1,%%r8)						\
 | 
			
		||||
       VPREFETCH1(2,%%r8)						\
 | 
			
		||||
       : : "r" (a0) : "%r8" );						\
 | 
			
		||||
 | 
			
		||||
#define PF_GAUGE_XYZT(a0)							
 | 
			
		||||
#define PF_GAUGE_XYZTa(a0)						\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VPREFETCH1(0,%%r8)						\
 | 
			
		||||
       VPREFETCH1(1,%%r8)						\
 | 
			
		||||
       VPREFETCH1(2,%%r8)						\
 | 
			
		||||
       VPREFETCH1(3,%%r8)						\
 | 
			
		||||
       VPREFETCH1(4,%%r8)						\
 | 
			
		||||
       VPREFETCH1(5,%%r8)						\
 | 
			
		||||
       VPREFETCH1(6,%%r8)						\
 | 
			
		||||
       VPREFETCH1(7,%%r8)						\
 | 
			
		||||
       VPREFETCH1(8,%%r8)						\
 | 
			
		||||
       : : "r" (a0) : "%r8" );						\
 | 
			
		||||
 | 
			
		||||
#define PF_GAUGE_LS(a0)							
 | 
			
		||||
#define PF_GAUGE_LSa(a0)							\
 | 
			
		||||
  asm (									\
 | 
			
		||||
       "movq %0, %%r8 \n\t"						\
 | 
			
		||||
       VPREFETCH1(0,%%r8)						\
 | 
			
		||||
       VPREFETCH1(1,%%r8)						\
 | 
			
		||||
       : : "r" (a0) : "%r8" );						\
 | 
			
		||||
  
 | 
			
		||||
 | 
			
		||||
#define REDUCE(out)					\
 | 
			
		||||
  asm (							\
 | 
			
		||||
  VADD(UChi_00,UChi_10,UChi_00)				\
 | 
			
		||||
  VADD(UChi_01,UChi_11,UChi_01)				\
 | 
			
		||||
  VADD(UChi_02,UChi_12,UChi_02)				\
 | 
			
		||||
  VADD(UChi_30,UChi_20,UChi_30)				\
 | 
			
		||||
  VADD(UChi_31,UChi_21,UChi_31)				\
 | 
			
		||||
  VADD(UChi_32,UChi_22,UChi_32)				\
 | 
			
		||||
  VADD(UChi_00,UChi_30,UChi_00)				\
 | 
			
		||||
  VADD(UChi_01,UChi_31,UChi_01)				\
 | 
			
		||||
  VADD(UChi_02,UChi_32,UChi_02)				);	\
 | 
			
		||||
  asm (								\
 | 
			
		||||
       VSTORE(0,%0,pUChi_00)					\
 | 
			
		||||
       VSTORE(1,%0,pUChi_01)					\
 | 
			
		||||
       VSTORE(2,%0,pUChi_02)					\
 | 
			
		||||
       : : "r" (out) : "memory" );
 | 
			
		||||
 | 
			
		||||
#define REDUCEa(out)					\
 | 
			
		||||
  asm (							\
 | 
			
		||||
  VADD(UChi_00,UChi_10,UChi_00)				\
 | 
			
		||||
  VADD(UChi_01,UChi_11,UChi_01)				\
 | 
			
		||||
  VADD(UChi_02,UChi_12,UChi_02)	);			\
 | 
			
		||||
  asm (							\
 | 
			
		||||
  VSTORE(0,%0,pUChi_00)					\
 | 
			
		||||
  VSTORE(1,%0,pUChi_01)					\
 | 
			
		||||
  VSTORE(2,%0,pUChi_02)					\
 | 
			
		||||
  : : "r" (out) : "memory" );
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR(dir)			\
 | 
			
		||||
      permute##dir(Chi_0,Chi_0);\
 | 
			
		||||
      permute##dir(Chi_1,Chi_1);\
 | 
			
		||||
      permute##dir(Chi_2,Chi_2);
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, 
 | 
			
		||||
					 DoubledGaugeField &U,
 | 
			
		||||
					 DoubledGaugeField &UUU,
 | 
			
		||||
					 SiteSpinor *buf, int LLs,
 | 
			
		||||
					 int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
  assert(0);
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
//#define CONDITIONAL_MOVE(l,o,out) if ( l ) { out = (uint64_t) &in._odata[o] ; } else { out =(uint64_t) &buf[o]; }
 | 
			
		||||
 | 
			
		||||
#define CONDITIONAL_MOVE(l,o,out) { const SiteSpinor *ptr = l? in_p : buf; out = (uint64_t) &ptr[o]; }
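// Scalar sketch of the address selection CONDITIONAL_MOVE performs: a local
// neighbour is read from the field itself, a remote one from the halo
// receive buffer, both indexed by the stencil offset (hypothetical helper
// with plain pointers instead of SiteSpinor).
static inline const char *pick_neighbour(int is_local, long offset,
                                         const char *field,
                                         const char *halo_buf,
                                         long site_bytes) {
  const char *base = is_local ? field : halo_buf;
  return base + offset * site_bytes;
}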
 | 
			
		||||
 | 
			
		||||
#define PREPARE_XYZT(X,Y,Z,T,skew,UU)			\
 | 
			
		||||
  PREPARE(X,Y,Z,T,skew,UU);				\
 | 
			
		||||
  PF_GAUGE_XYZT(gauge0);					\
 | 
			
		||||
  PF_GAUGE_XYZT(gauge1);					\
 | 
			
		||||
  PF_GAUGE_XYZT(gauge2);					\
 | 
			
		||||
  PF_GAUGE_XYZT(gauge3);					
 | 
			
		||||
 | 
			
		||||
#define PREPARE_LS(X,Y,Z,T,skew,UU)			\
 | 
			
		||||
  PREPARE(X,Y,Z,T,skew,UU);				\
 | 
			
		||||
  PF_GAUGE_LS(gauge0);					\
 | 
			
		||||
  PF_GAUGE_LS(gauge1);					\
 | 
			
		||||
  PF_GAUGE_LS(gauge2);					\
 | 
			
		||||
  PF_GAUGE_LS(gauge3);					
 | 
			
		||||
 | 
			
		||||
#define PREPARE(X,Y,Z,T,skew,UU)					\
 | 
			
		||||
  SE0=st.GetEntry(ptype,X+skew,sF);					\
 | 
			
		||||
  o0 = SE0->_offset;							\
 | 
			
		||||
  l0 = SE0->_is_local;							\
 | 
			
		||||
  p0 = SE0->_permute;							\
 | 
			
		||||
  CONDITIONAL_MOVE(l0,o0,addr0);					\
 | 
			
		||||
  PF_CHI(addr0);							\
 | 
			
		||||
  									\
 | 
			
		||||
  SE1=st.GetEntry(ptype,Y+skew,sF);					\
 | 
			
		||||
  o1 = SE1->_offset;							\
 | 
			
		||||
  l1 = SE1->_is_local;							\
 | 
			
		||||
  p1 = SE1->_permute;							\
 | 
			
		||||
  CONDITIONAL_MOVE(l1,o1,addr1);					\
 | 
			
		||||
  PF_CHI(addr1);							\
 | 
			
		||||
  									\
 | 
			
		||||
  SE2=st.GetEntry(ptype,Z+skew,sF);					\
 | 
			
		||||
  o2 = SE2->_offset;							\
 | 
			
		||||
  l2 = SE2->_is_local;							\
 | 
			
		||||
  p2 = SE2->_permute;							\
 | 
			
		||||
  CONDITIONAL_MOVE(l2,o2,addr2);					\
 | 
			
		||||
  PF_CHI(addr2);							\
 | 
			
		||||
  									\
 | 
			
		||||
  SE3=st.GetEntry(ptype,T+skew,sF);					\
 | 
			
		||||
  o3 = SE3->_offset;							\
 | 
			
		||||
  l3 = SE3->_is_local;							\
 | 
			
		||||
  p3 = SE3->_permute;							\
 | 
			
		||||
  CONDITIONAL_MOVE(l3,o3,addr3);					\
 | 
			
		||||
  PF_CHI(addr3);							\
 | 
			
		||||
  									\
 | 
			
		||||
  gauge0 =(uint64_t)&UU._odata[sU]( X );				\
 | 
			
		||||
  gauge1 =(uint64_t)&UU._odata[sU]( Y );				\
 | 
			
		||||
  gauge2 =(uint64_t)&UU._odata[sU]( Z );				\
 | 
			
		||||
  gauge3 =(uint64_t)&UU._odata[sU]( T ); 
 | 
			
		||||
  
 | 
			
		||||
  // This is the single precision 5th direction vectorised kernel
 | 
			
		||||
#include <simd/Intel512single.h>
 | 
			
		||||
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, 
 | 
			
		||||
								    DoubledGaugeField &U,
 | 
			
		||||
								    DoubledGaugeField &UUU,
 | 
			
		||||
								    SiteSpinor *buf, int LLs,
 | 
			
		||||
								    int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  uint64_t gauge0,gauge1,gauge2,gauge3;
 | 
			
		||||
  uint64_t addr0,addr1,addr2,addr3;
 | 
			
		||||
  const SiteSpinor *in_p; in_p = &in._odata[0];
 | 
			
		||||
 | 
			
		||||
  int o0,o1,o2,o3; // offsets
 | 
			
		||||
  int l0,l1,l2,l3; // local 
 | 
			
		||||
  int p0,p1,p2,p3; // perm
 | 
			
		||||
  int ptype;
 | 
			
		||||
  StencilEntry *SE0;
 | 
			
		||||
  StencilEntry *SE1;
 | 
			
		||||
  StencilEntry *SE2;
 | 
			
		||||
  StencilEntry *SE3;
 | 
			
		||||
 | 
			
		||||
   for(int s=0;s<LLs;s++){
 | 
			
		||||
 | 
			
		||||
    int sF=s+LLs*sU;
 | 
			
		||||
    // Xp, Yp, Zp, Tp
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,0,U);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_LS(gauge0,gauge1,gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,0,U);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,8,UUU);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,8,UUU);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
 | 
			
		||||
 | 
			
		||||
    addr0 = (uint64_t) &out._odata[sF];
 | 
			
		||||
    REDUCE(addr0);
 | 
			
		||||
   }
 | 
			
		||||
#else 
 | 
			
		||||
    assert(0);
 | 
			
		||||
#endif
 | 
			
		||||
   
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#include <simd/Intel512double.h>
 | 
			
		||||
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, 
 | 
			
		||||
								    DoubledGaugeField &U,
 | 
			
		||||
								    DoubledGaugeField &UUU,
 | 
			
		||||
								    SiteSpinor *buf, int LLs,
 | 
			
		||||
								    int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  uint64_t gauge0,gauge1,gauge2,gauge3;
 | 
			
		||||
  uint64_t addr0,addr1,addr2,addr3;
 | 
			
		||||
  const SiteSpinor *in_p; in_p = &in._odata[0];
 | 
			
		||||
 | 
			
		||||
  int o0,o1,o2,o3; // offsets
 | 
			
		||||
  int l0,l1,l2,l3; // local 
 | 
			
		||||
  int p0,p1,p2,p3; // perm
 | 
			
		||||
  int ptype;
 | 
			
		||||
  StencilEntry *SE0;
 | 
			
		||||
  StencilEntry *SE1;
 | 
			
		||||
  StencilEntry *SE2;
 | 
			
		||||
  StencilEntry *SE3;
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<LLs;s++){
 | 
			
		||||
    int sF=s+LLs*sU;
 | 
			
		||||
    // Xp, Yp, Zp, Tp
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,0,U);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_LS(gauge0,gauge1,gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,0,U);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,8,UUU);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,8,UUU);
 | 
			
		||||
    LOAD_CHI(addr0,addr1,addr2,addr3);
 | 
			
		||||
    MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
 | 
			
		||||
 | 
			
		||||
    addr0 = (uint64_t) &out._odata[sF];
 | 
			
		||||
    REDUCE(addr0);
 | 
			
		||||
  }
 | 
			
		||||
#else 
 | 
			
		||||
  assert(0);
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
   
 | 
			
		||||
   
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR3 __asm__ (	\
 | 
			
		||||
  VPERM3(Chi_00,Chi_00)	\
 | 
			
		||||
  VPERM3(Chi_01,Chi_01)	\
 | 
			
		||||
  VPERM3(Chi_02,Chi_02)	);
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR2 __asm__ (	\
 | 
			
		||||
  VPERM2(Chi_10,Chi_10)	\
 | 
			
		||||
  VPERM2(Chi_11,Chi_11)	\
 | 
			
		||||
  VPERM2(Chi_12,Chi_12) );
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR1 __asm__ (	\
 | 
			
		||||
  VPERM1(Chi_00,Chi_00)	\
 | 
			
		||||
  VPERM1(Chi_01,Chi_01)	\
 | 
			
		||||
  VPERM1(Chi_02,Chi_02)	);
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR0 __asm__ (			\
 | 
			
		||||
  VPERM0(Chi_10,Chi_10)	\
 | 
			
		||||
  VPERM0(Chi_11,Chi_11)	\
 | 
			
		||||
  VPERM0(Chi_12,Chi_12) );
 | 
			
		||||
 | 
			
		||||
#define PERMUTE01 \
 | 
			
		||||
  if ( p0 ) { PERMUTE_DIR3; }\
 | 
			
		||||
  if ( p1 ) { PERMUTE_DIR2; }
 | 
			
		||||
 | 
			
		||||
#define PERMUTE23 \
 | 
			
		||||
  if ( p2 ) { PERMUTE_DIR1; }\
 | 
			
		||||
  if ( p3 ) { PERMUTE_DIR0; }
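// When a stencil neighbour falls inside the same SIMD vector, its lanes must
// be reshuffled before use; the _permute flags gathered in PREPARE (p0..p3)
// select which PERMUTE_DIRn pattern to apply. A rough scalar stand-in for one
// such shuffle, assuming the coarsest pattern exchanges the two vector halves
// (the actual VPERMn granularities differ per direction):
template <int W>
static inline void swap_halves(double (&v)[W]) {
  for (int i = 0; i < W / 2; i++) {
    double t = v[i]; v[i] = v[i + W / 2]; v[i + W / 2] = t;
  }
}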
 | 
			
		||||
 | 
			
		||||
  // This is the single precision kernel without 5th-direction vectorisation
 | 
			
		||||
 | 
			
		||||
#include <simd/Intel512single.h>
 | 
			
		||||
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, 
 | 
			
		||||
								    DoubledGaugeField &U,
 | 
			
		||||
								    DoubledGaugeField &UUU,
 | 
			
		||||
								    SiteSpinor *buf, int LLs,
 | 
			
		||||
								    int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  uint64_t gauge0,gauge1,gauge2,gauge3;
 | 
			
		||||
  uint64_t addr0,addr1,addr2,addr3;
 | 
			
		||||
  const SiteSpinor *in_p; in_p = &in._odata[0];
 | 
			
		||||
 | 
			
		||||
  int o0,o1,o2,o3; // offsets
 | 
			
		||||
  int l0,l1,l2,l3; // local 
 | 
			
		||||
  int p0,p1,p2,p3; // perm
 | 
			
		||||
  int ptype;
 | 
			
		||||
  StencilEntry *SE0;
 | 
			
		||||
  StencilEntry *SE1;
 | 
			
		||||
  StencilEntry *SE2;
 | 
			
		||||
  StencilEntry *SE3;
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<LLs;s++){
 | 
			
		||||
    
 | 
			
		||||
    int sF=s+LLs*sU;
 | 
			
		||||
    // Xp, Yp, Zp, Tp
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,0,U);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,0,U);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,8,UUU);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
    
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,8,UUU);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
 | 
			
		||||
    addr0 = (uint64_t) &out._odata[sF];
 | 
			
		||||
    REDUCEa(addr0);
 | 
			
		||||
  }
 | 
			
		||||
#else 
 | 
			
		||||
  assert(0);
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#include <simd/Intel512double.h>
 | 
			
		||||
template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, 
 | 
			
		||||
								    DoubledGaugeField &U,
 | 
			
		||||
								    DoubledGaugeField &UUU,
 | 
			
		||||
								    SiteSpinor *buf, int LLs,
 | 
			
		||||
								    int sU, const FermionField &in, FermionField &out) 
 | 
			
		||||
{
 | 
			
		||||
#ifdef AVX512
 | 
			
		||||
  uint64_t gauge0,gauge1,gauge2,gauge3;
 | 
			
		||||
  uint64_t addr0,addr1,addr2,addr3;
 | 
			
		||||
  const SiteSpinor *in_p; in_p = &in._odata[0];
 | 
			
		||||
 | 
			
		||||
  int o0,o1,o2,o3; // offsets
 | 
			
		||||
  int l0,l1,l2,l3; // local 
 | 
			
		||||
  int p0,p1,p2,p3; // perm
 | 
			
		||||
  int ptype;
 | 
			
		||||
  StencilEntry *SE0;
 | 
			
		||||
  StencilEntry *SE1;
 | 
			
		||||
  StencilEntry *SE2;
 | 
			
		||||
  StencilEntry *SE3;
 | 
			
		||||
 | 
			
		||||
  for(int s=0;s<LLs;s++){
 | 
			
		||||
    
 | 
			
		||||
    int sF=s+LLs*sU;
 | 
			
		||||
    // Xp, Yp, Zp, Tp
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,0,U);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
    
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,0,U);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
    
 | 
			
		||||
    PREPARE(Xp,Yp,Zp,Tp,8,UUU);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
    
 | 
			
		||||
    PREPARE(Xm,Ym,Zm,Tm,8,UUU);
 | 
			
		||||
    LOAD_CHIa(addr0,addr1);
 | 
			
		||||
    PERMUTE01;
 | 
			
		||||
    MULT_ADD_XYZT(gauge0,gauge1);
 | 
			
		||||
    LOAD_CHIa(addr2,addr3);
 | 
			
		||||
    PERMUTE23;
 | 
			
		||||
    MULT_ADD_XYZT(gauge2,gauge3);  
 | 
			
		||||
    
 | 
			
		||||
    addr0 = (uint64_t) &out._odata[sF];
 | 
			
		||||
    REDUCEa(addr0);
 | 
			
		||||
  }
 | 
			
		||||
#else 
 | 
			
		||||
  assert(0);
 | 
			
		||||
#endif
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
#define KERNEL_INSTANTIATE(CLASS,FUNC,IMPL)			    \
 | 
			
		||||
  template void CLASS<IMPL>::FUNC(StencilImpl &st, LebesgueOrder &lo,	\
 | 
			
		||||
				  DoubledGaugeField &U,			\
 | 
			
		||||
				  DoubledGaugeField &UUU,		\
 | 
			
		||||
				  SiteSpinor *buf, int LLs,		\
 | 
			
		||||
				  int sU, const FermionField &in, FermionField &out);
 | 
			
		||||
 | 
			
		||||
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplD);
 | 
			
		||||
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplF);
 | 
			
		||||
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredVec5dImplD);
 | 
			
		||||
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredVec5dImplF);
 | 
			
		||||
 | 
			
		||||
}}
322	lib/qcd/action/fermion/StaggeredKernelsHand.cc	Normal file
@@ -0,0 +1,322 @@
 | 
			
		||||
    /*************************************************************************************
 | 
			
		||||
 | 
			
		||||
    Grid physics library, www.github.com/paboyle/Grid 
 | 
			
		||||
 | 
			
		||||
    Source file: ./lib/qcd/action/fermion/StaggeredKernelsHand.cc
 | 
			
		||||
 | 
			
		||||
    Copyright (C) 2015
 | 
			
		||||
 | 
			
		||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
Author: paboyle <paboyle@ph.ed.ac.uk>
 | 
			
		||||
 | 
			
		||||
    This program is free software; you can redistribute it and/or modify
 | 
			
		||||
    it under the terms of the GNU General Public License as published by
 | 
			
		||||
    the Free Software Foundation; either version 2 of the License, or
 | 
			
		||||
    (at your option) any later version.
 | 
			
		||||
 | 
			
		||||
    This program is distributed in the hope that it will be useful,
 | 
			
		||||
    but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
    GNU General Public License for more details.
 | 
			
		||||
 | 
			
		||||
    You should have received a copy of the GNU General Public License along
 | 
			
		||||
    with this program; if not, write to the Free Software Foundation, Inc.,
 | 
			
		||||
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 | 
			
		||||
 | 
			
		||||
    See the full license in the file "LICENSE" in the top level distribution directory
 | 
			
		||||
    *************************************************************************************/
 | 
			
		||||
    /*  END LEGAL */
 | 
			
		||||
#include <Grid.h>
 | 
			
		||||
 | 
			
		||||
#define REGISTER
 | 
			
		||||
 | 
			
		||||
#define LOAD_CHI(b)		\
 | 
			
		||||
  const SiteSpinor & ref (b[offset]);	\
 | 
			
		||||
    Chi_0=ref()()(0);\
 | 
			
		||||
    Chi_1=ref()()(1);\
 | 
			
		||||
    Chi_2=ref()()(2);
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
// To splat or not to splat depends on the implementation
 | 
			
		||||
#define MULT(A,UChi)				\
 | 
			
		||||
  auto & ref(U._odata[sU](A));			\
 | 
			
		||||
   Impl::loadLinkElement(U_00,ref()(0,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_10,ref()(1,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_20,ref()(2,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_01,ref()(0,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_11,ref()(1,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_21,ref()(2,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_02,ref()(0,2));     \
 | 
			
		||||
   Impl::loadLinkElement(U_12,ref()(1,2));     \
 | 
			
		||||
   Impl::loadLinkElement(U_22,ref()(2,2));     \
 | 
			
		||||
    UChi ## _0  = U_00*Chi_0;	       \
 | 
			
		||||
    UChi ## _1  = U_10*Chi_0;\
 | 
			
		||||
    UChi ## _2  = U_20*Chi_0;\
 | 
			
		||||
    UChi ## _0 += U_01*Chi_1;\
 | 
			
		||||
    UChi ## _1 += U_11*Chi_1;\
 | 
			
		||||
    UChi ## _2 += U_21*Chi_1;\
 | 
			
		||||
    UChi ## _0 += U_02*Chi_2;\
 | 
			
		||||
    UChi ## _1 += U_12*Chi_2;\
 | 
			
		||||
    UChi ## _2 += U_22*Chi_2;
 | 
			
		||||
 | 
			
		||||
#define MULT_ADD(A,UChi)				\
 | 
			
		||||
  auto & ref(U._odata[sU](A));			\
 | 
			
		||||
   Impl::loadLinkElement(U_00,ref()(0,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_10,ref()(1,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_20,ref()(2,0));      \
 | 
			
		||||
   Impl::loadLinkElement(U_01,ref()(0,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_11,ref()(1,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_21,ref()(2,1));      \
 | 
			
		||||
   Impl::loadLinkElement(U_02,ref()(0,2));     \
 | 
			
		||||
   Impl::loadLinkElement(U_12,ref()(1,2));     \
 | 
			
		||||
   Impl::loadLinkElement(U_22,ref()(2,2));     \
 | 
			
		||||
    UChi ## _0 += U_00*Chi_0;	       \
 | 
			
		||||
    UChi ## _1 += U_10*Chi_0;\
 | 
			
		||||
    UChi ## _2 += U_20*Chi_0;\
 | 
			
		||||
    UChi ## _0 += U_01*Chi_1;\
 | 
			
		||||
    UChi ## _1 += U_11*Chi_1;\
 | 
			
		||||
    UChi ## _2 += U_21*Chi_1;\
 | 
			
		||||
    UChi ## _0 += U_02*Chi_2;\
 | 
			
		||||
    UChi ## _1 += U_12*Chi_2;\
 | 
			
		||||
    UChi ## _2 += U_22*Chi_2;
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define PERMUTE_DIR(dir)			\
 | 
			
		||||
      permute##dir(Chi_0,Chi_0);\
 | 
			
		||||
      permute##dir(Chi_1,Chi_1);\
 | 
			
		||||
      permute##dir(Chi_2,Chi_2);
 | 
			
		||||
 | 
			
		||||
namespace Grid {
 | 
			
		||||
namespace QCD {
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
 | 
			
		||||
					  SiteSpinor *buf, int LLs,
 | 
			
		||||
					  int sU, const FermionField &in, FermionField &out, int dag) 
 | 
			
		||||
{
 | 
			
		||||
  SiteSpinor naik; 
 | 
			
		||||
  SiteSpinor naive;
 | 
			
		||||
  int oneLink  =0;
 | 
			
		||||
  int threeLink=1;
 | 
			
		||||
  int skew(0);
 | 
			
		||||
  Real scale(1.0);
 | 
			
		||||
  
 | 
			
		||||
  if(dag) scale = -1.0;
 | 
			
		||||
  
 | 
			
		||||
  for(int s=0;s<LLs;s++){
 | 
			
		||||
    int sF=s+LLs*sU;
 | 
			
		||||
    DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink);
 | 
			
		||||
    DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
 | 
			
		||||
    out._odata[sF] =scale*(naive+naik);
 | 
			
		||||
  }
 | 
			
		||||
}
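// The adjoint of the staggered hopping term only flips its sign, which is why
// the dagger case is handled by scale = -1.0 rather than a separate kernel;
// each fifth-dimension slice combines the one-link ("naive") and three-link
// ("naik") contributions. Scalar sketch of the combination (hypothetical
// helper):
static inline double combine_hop(double naive, double naik, bool dag) {
  const double scale = dag ? -1.0 : 1.0;   // Ddag = -D for the hopping term
  return scale * (naive + naik);
}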
 | 
			
		||||
 | 
			
		||||
template <class Impl>
 | 
			
		||||
void StaggeredKernels<Impl>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
 | 
			
		||||
					       SiteSpinor *buf, int sF,
 | 
			
		||||
					       int sU, const FermionField &in, SiteSpinor &out,int threeLink) 
 | 
			
		||||
{
 | 
			
		||||
  typedef typename Simd::scalar_type S;
 | 
			
		||||
  typedef typename Simd::vector_type V;
 | 
			
		||||
 | 
			
		||||
  REGISTER Simd even_0; // 12 regs on knc
 | 
			
		||||
  REGISTER Simd even_1;
 | 
			
		||||
  REGISTER Simd even_2;
 | 
			
		||||
  REGISTER Simd odd_0; // 12 regs on knc
 | 
			
		||||
  REGISTER Simd odd_1;
 | 
			
		||||
  REGISTER Simd odd_2;
 | 
			
		||||
 | 
			
		||||
  REGISTER Simd Chi_0;    // two spinor; 6 regs
 | 
			
		||||
  REGISTER Simd Chi_1;
 | 
			
		||||
  REGISTER Simd Chi_2;
 | 
			
		||||
 | 
			
		||||
  REGISTER Simd U_00;  // two rows of U matrix
 | 
			
		||||
  REGISTER Simd U_10;
 | 
			
		||||
  REGISTER Simd U_20;  
 | 
			
		||||
  REGISTER Simd U_01;
 | 
			
		||||
  REGISTER Simd U_11;
 | 
			
		||||
  REGISTER Simd U_21;  // 2 reg left.
 | 
			
		||||
  REGISTER Simd U_02;
 | 
			
		||||
  REGISTER Simd U_12;
 | 
			
		||||
  REGISTER Simd U_22; 
 | 
			
		||||
 | 
			
		||||
  int skew = 0;
 | 
			
		||||
  if (threeLink) skew=8;
 | 
			
		||||
 | 
			
		||||
  int offset,local,perm, ptype;
 | 
			
		||||
  StencilEntry *SE;
 | 
			
		||||
 | 
			
		||||
  // Xp
 | 
			
		||||
  SE=st.GetEntry(ptype,Xp+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT(Xp,even);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  // Yp
 | 
			
		||||
  SE=st.GetEntry(ptype,Yp+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT(Yp,odd);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
  // Zp
 | 
			
		||||
  SE=st.GetEntry(ptype,Zp+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Zp,even);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Tp
 | 
			
		||||
  SE=st.GetEntry(ptype,Tp+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Tp,odd);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  // Xm
 | 
			
		||||
  SE=st.GetEntry(ptype,Xm+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Xm,even);
 | 
			
		||||
  }
 | 
			
		||||
  
 | 
			
		||||
  
 | 
			
		||||
  // Ym
 | 
			
		||||
  SE=st.GetEntry(ptype,Ym+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Ym,odd);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Zm
 | 
			
		||||
  SE=st.GetEntry(ptype,Zm+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Zm,even);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  // Tm
 | 
			
		||||
  SE=st.GetEntry(ptype,Tm+skew,sF);
 | 
			
		||||
  offset = SE->_offset;
 | 
			
		||||
  local  = SE->_is_local;
 | 
			
		||||
  perm   = SE->_permute;
 | 
			
		||||
  
 | 
			
		||||
  if ( local ) {
 | 
			
		||||
    LOAD_CHI(in._odata);
 | 
			
		||||
    if ( perm) {
 | 
			
		||||
      PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
 | 
			
		||||
    }
 | 
			
		||||
  } else { 
 | 
			
		||||
    LOAD_CHI(buf);
 | 
			
		||||
  }
 | 
			
		||||
  {
 | 
			
		||||
    MULT_ADD(Tm,odd);
 | 
			
		||||
  }
 | 
			
		||||
 | 
			
		||||
  vstream(out()()(0),even_0+odd_0);
 | 
			
		||||
  vstream(out()()(1),even_1+odd_1);
 | 
			
		||||
  vstream(out()()(2),even_2+odd_2);
 | 
			
		||||
 | 
			
		||||
}

#define DHOP_SITE_HAND_INSTANTIATE(IMPL)				\
  template void StaggeredKernels<IMPL>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, \
						     DoubledGaugeField &U,DoubledGaugeField &UUU, \
						     SiteSpinor *buf, int LLs, \
						     int sU, const FermionField &in, FermionField &out, int dag);

#define DHOP_SITE_DEPTH_HAND_INSTANTIATE(IMPL)				\
  template void StaggeredKernels<IMPL>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, \
							  SiteSpinor *buf, int sF, \
							  int sU, const FermionField &in, SiteSpinor &out,int threeLink) ;

DHOP_SITE_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplF);

DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplF);

}}
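Each instantiation line is a plain macro expansion into an explicit template instantiation; for example, DHOP_SITE_HAND_INSTANTIATE(StaggeredImplD) expands (whitespace aside) to:

  template void StaggeredKernels<StaggeredImplD>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo,
                                                               DoubledGaugeField &U, DoubledGaugeField &UUU,
                                                               SiteSpinor *buf, int LLs,
                                                               int sU, const FermionField &in, FermionField &out, int dag);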
@@ -171,6 +171,8 @@ namespace QCD {
  class WilsonStencil : public CartesianStencil<vobj,cobj> {
  public:

    typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

    WilsonStencil(GridBase *grid,
		int npoints,
		int checkerboard,
@@ -179,78 +181,77 @@ namespace QCD {
      {    };

    template < class compressor>
    std::thread HaloExchangeOptBegin(const Lattice<vobj> &source,compressor &compress) {
      this->Mergers.resize(0);
      this->Packets.resize(0);
      this->HaloGatherOpt(source,compress);
      return std::thread([&] { this->Communicate(); });
    void HaloExchangeOpt(const Lattice<vobj> &source,compressor &compress)
    {
      std::vector<std::vector<CommsRequest_t> > reqs;
      HaloExchangeOptGather(source,compress);
      this->CommunicateBegin(reqs);
      this->calls++;
      this->CommunicateComplete(reqs);
      this->CommsMerge();
    }

    template < class compressor>
    void HaloExchangeOpt(const Lattice<vobj> &source,compressor &compress)
    void HaloExchangeOptGather(const Lattice<vobj> &source,compressor &compress)
    {
      auto thr = this->HaloExchangeOptBegin(source,compress);
      this->HaloExchangeOptComplete(thr);
      this->calls++;
      this->Mergers.resize(0);
      this->Packets.resize(0);
      this->HaloGatherOpt(source,compress);
    }

    void HaloExchangeOptComplete(std::thread &thr)
    {
	this->CommsMerge(); // spins
	this->jointime-=usecond();
	thr.join();
	this->jointime+=usecond();
    }

    template < class compressor>
    void HaloGatherOpt(const Lattice<vobj> &source,compressor &compress)
    {
	// conformable(source._grid,_grid);
	assert(source._grid==this->_grid);
	this->halogtime-=usecond();
      this->_grid->StencilBarrier();
      // conformable(source._grid,_grid);
      assert(source._grid==this->_grid);
      this->halogtime-=usecond();

      this->u_comm_offset=0;

      int dag = compress.dag;

      WilsonXpCompressor<cobj,vobj> XpCompress;
      WilsonYpCompressor<cobj,vobj> YpCompress;
      WilsonZpCompressor<cobj,vobj> ZpCompress;
      WilsonTpCompressor<cobj,vobj> TpCompress;
      WilsonXmCompressor<cobj,vobj> XmCompress;
      WilsonYmCompressor<cobj,vobj> YmCompress;
      WilsonZmCompressor<cobj,vobj> ZmCompress;
      WilsonTmCompressor<cobj,vobj> TmCompress;

	assert (this->comm_buf.size() == this->_unified_buffer_size );
	this->u_comm_offset=0;

	int dag = compress.dag;
	static std::vector<int> dirs(Nd*2);
	for(int mu=0;mu<Nd;mu++){
	  if ( dag ) {
	    dirs[mu]  =mu;
	    dirs[mu+4]=mu+Nd;
	  } else {
	    dirs[mu]  =mu+Nd;
	    dirs[mu+Nd]=mu;
	  }
	}

	WilsonXpCompressor<cobj,vobj> XpCompress;
	this->HaloGatherDir(source,XpCompress,dirs[0]);

	WilsonYpCompressor<cobj,vobj> YpCompress;
	this->HaloGatherDir(source,YpCompress,dirs[1]);

	WilsonZpCompressor<cobj,vobj> ZpCompress;
	this->HaloGatherDir(source,ZpCompress,dirs[2]);

	WilsonTpCompressor<cobj,vobj> TpCompress;
	this->HaloGatherDir(source,TpCompress,dirs[3]);

	WilsonXmCompressor<cobj,vobj> XmCompress;
	this->HaloGatherDir(source,XmCompress,dirs[4]);

	WilsonYmCompressor<cobj,vobj> YmCompress;
	this->HaloGatherDir(source,YmCompress,dirs[5]);

	WilsonZmCompressor<cobj,vobj> ZmCompress;
	this->HaloGatherDir(source,ZmCompress,dirs[6]);

	WilsonTmCompressor<cobj,vobj> TmCompress;
	this->HaloGatherDir(source,TmCompress,dirs[7]);

	assert(this->u_comm_offset==this->_unified_buffer_size);
	this->halogtime+=usecond();
      // Gather all comms buffers
      //    for(int point = 0 ; point < _npoints; point++) {
      //      compress.Point(point);
      //      HaloGatherDir(source,compress,point,face_idx);
      //    }
      int face_idx=0;
      if ( dag ) {
	//	std::cout << " Optimised Dagger compress " <<std::endl;
	this->HaloGatherDir(source,XpCompress,Xp,face_idx);
	this->HaloGatherDir(source,YpCompress,Yp,face_idx);
	this->HaloGatherDir(source,ZpCompress,Zp,face_idx);
	this->HaloGatherDir(source,TpCompress,Tp,face_idx);
	this->HaloGatherDir(source,XmCompress,Xm,face_idx);
	this->HaloGatherDir(source,YmCompress,Ym,face_idx);
	this->HaloGatherDir(source,ZmCompress,Zm,face_idx);
	this->HaloGatherDir(source,TmCompress,Tm,face_idx);
      } else {
	this->HaloGatherDir(source,XmCompress,Xp,face_idx);
	this->HaloGatherDir(source,YmCompress,Yp,face_idx);
	this->HaloGatherDir(source,ZmCompress,Zp,face_idx);
	this->HaloGatherDir(source,TmCompress,Tp,face_idx);
	this->HaloGatherDir(source,XpCompress,Xm,face_idx);
	this->HaloGatherDir(source,YpCompress,Ym,face_idx);
	this->HaloGatherDir(source,ZpCompress,Zm,face_idx);
	this->HaloGatherDir(source,TpCompress,Tm,face_idx);
      }
      this->face_table_computed=1;
      assert(this->u_comm_offset==this->_unified_buffer_size);
      this->halogtime+=usecond();
    }

  };
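The net effect of this hunk is that the thread-based begin/complete pair is replaced by phases a caller can interleave with compute. A minimal sketch of the intended call sequence, as the blocking HaloExchangeOpt above performs it (member names taken from the class; the stencil instance is hypothetical, for illustration only):

  WilsonStencil<vobj,cobj> st(/* grid, npoints, checkerboard, ... */);
  std::vector<std::vector<CommsRequest_t> > reqs;
  st.HaloExchangeOptGather(src, compress);  // pack the faces into send buffers
  st.CommunicateBegin(reqs);                // post sends/receives
  //  ... interior compute can overlap here ...
  st.CommunicateComplete(reqs);             // wait for the halo to arrive
  st.CommsMerge();                          // scatter received faces into place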

@@ -1,3 +1,4 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid
@@ -29,15 +30,14 @@ See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/WilsonFermion.h>

namespace Grid {
namespace QCD {

const std::vector<int> WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2,
                                                        3});
const std::vector<int> WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1,
                                                           -1, -1});
const std::vector<int> WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3});
const std::vector<int> WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1});
int WilsonFermionStatic::HandOptDslash;

/////////////////////////////////
@@ -52,10 +52,8 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
      _grid(&Fgrid),
      _cbgrid(&Hgrid),
      Stencil(&Fgrid, npoint, Even, directions, displacements),
      StencilEven(&Hgrid, npoint, Even, directions,
                  displacements),  // source is Even
      StencilOdd(&Hgrid, npoint, Odd, directions,
                 displacements),  // source is Odd
      StencilEven(&Hgrid, npoint, Even, directions,displacements),  // source is Even
      StencilOdd(&Hgrid, npoint, Odd, directions,displacements),  // source is Odd
      mass(_mass),
      Lebesgue(_grid),
      LebesgueEvenOdd(_cbgrid),
@@ -113,86 +111,84 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  }
}

  template <class Impl>
  void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
    out.checkerboard = in.checkerboard;
    typename FermionField::scalar_type scal(4.0 + mass);
    out = scal * in;
  }
template <class Impl>
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(4.0 + mass);
  out = scal * in;
}

  template <class Impl>
  void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
    out.checkerboard = in.checkerboard;
    Mooee(in, out);
  }
template <class Impl>
void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

  template<class Impl>
  void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
    out.checkerboard = in.checkerboard;
    out = (1.0/(4.0+mass))*in;
template<class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0/(4.0+mass))*in;
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in,out);
}
template<class Impl>
void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m)
{
  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;
  typedef Lattice<iSinglet<vector_type> > LatComplex;

  // what type LatticeComplex
  conformable(_grid,out._grid);

  Gamma::Algebra Gmu [] = {
    Gamma::Algebra::GammaX,
    Gamma::Algebra::GammaY,
    Gamma::Algebra::GammaZ,
    Gamma::Algebra::GammaT
  };

  std::vector<int> latt_size   = _grid->_fdimensions;

  FermionField   num  (_grid); num  = zero;
  LatComplex    wilson(_grid); wilson= zero;
  LatComplex     one  (_grid); one = ScalComplex(1.0,0.0);

  LatComplex denom(_grid); denom= zero;
  LatComplex kmu(_grid);
  ScalComplex ci(0.0,1.0);
  // momphase = n * 2pi / L
  for(int mu=0;mu<Nd;mu++) {

    LatticeCoordinate(kmu,mu);

    RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

    kmu = TwoPiL * kmu;

    wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

    num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);    // derivative term

    denom=denom + sin(kmu)*sin(kmu);
  }

  template<class Impl>
  void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out) {
    out.checkerboard = in.checkerboard;
    MooeeInv(in,out);
  }

  template<class Impl>
  void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m) {

    // what type LatticeComplex
    conformable(_grid,out._grid);

    typedef typename FermionField::vector_type vector_type;
    typedef typename FermionField::scalar_type ScalComplex;

    typedef Lattice<iSinglet<vector_type> > LatComplex;

    Gamma::Algebra Gmu [] = {
      Gamma::Algebra::GammaX,
      Gamma::Algebra::GammaY,
      Gamma::Algebra::GammaZ,
      Gamma::Algebra::GammaT
    };

    std::vector<int> latt_size   = _grid->_fdimensions;

    FermionField   num  (_grid); num  = zero;
    LatComplex    wilson(_grid); wilson= zero;
    LatComplex     one  (_grid); one = ScalComplex(1.0,0.0);

    LatComplex denom(_grid); denom= zero;
    LatComplex kmu(_grid);
    ScalComplex ci(0.0,1.0);
    // momphase = n * 2pi / L
    for(int mu=0;mu<Nd;mu++) {

      LatticeCoordinate(kmu,mu);

      RealD TwoPiL =  M_PI * 2.0/ latt_size[mu];

      kmu = TwoPiL * kmu;

      wilson = wilson + 2.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term

      num = num - sin(kmu)*ci*(Gamma(Gmu[mu])*in);    // derivative term

      denom=denom + sin(kmu)*sin(kmu);
    }

    wilson = wilson + _m;     // 2 sin^2 k/2 + m

    num   = num + wilson*in;     // -i gmu sin k + 2 sin^2 k/2 + m

    denom= denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

    denom= one/denom;

    out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

  }

  wilson = wilson + _m;     // 2 sin^2 k/2 + m

  num   = num + wilson*in;     // -i gmu sin k + 2 sin^2 k/2 + m

  denom= denom+wilson*wilson; // sin^2 k + (2 sin^2 k/2 + m)^2

  denom= one/denom;

  out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

}
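In formulas, the free-field propagator that these loops implement (matching the inline comments, with the lattice momenta taken from the momphase comment) is

\[
  \psi_{\rm out}(k) \;=\;
  \frac{-i\sum_\mu \gamma_\mu \sin k_\mu \;+\; 2\sum_\mu \sin^2\!\tfrac{k_\mu}{2} \;+\; m}
       {\sum_\mu \sin^2 k_\mu \;+\; \Big(2\sum_\mu \sin^2\!\tfrac{k_\mu}{2} \;+\; m\Big)^{2}}
  \;\psi_{\rm in}(k),
  \qquad k_\mu = \frac{2\pi n_\mu}{L_\mu}.
\]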

///////////////////////////////////
// Internal
@@ -222,10 +218,8 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
    ////////////////////////
    // Call the single hop
    ////////////////////////
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < B._grid->oSites(); sss++) {
      Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu,
                               gamma);
    parallel_for (int sss = 0; sss < B._grid->oSites(); sss++) {
      Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma);
    }

    //////////////////////////////////////////////////
@@ -276,8 +270,7 @@ void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U,
}

template <class Impl>
void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out,
                               int dag) {
void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _grid);  // verifies full grid
  conformable(in._grid, out._grid);

@@ -287,8 +280,7 @@ void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out,
                                 int dag) {
void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

@@ -299,8 +291,7 @@ void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,
                                 int dag) {
void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

@@ -311,14 +302,12 @@ void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::Mdir(const FermionField &in, FermionField &out,
                               int dir, int disp) {
void WilsonFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template <class Impl>
void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out,
                                  int dir, int disp) {
void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) {
  int skip = (disp == 1) ? 0 : 1;
  int dirdisp = dir + skip * 4;
  int gamma = dir + (1 - skip) * 4;
@@ -327,16 +316,13 @@ void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out,
};

template <class Impl>
void WilsonFermion<Impl>::DhopDirDisp(const FermionField &in, FermionField &out,
                                      int dirdisp, int gamma, int dag) {
void WilsonFermion<Impl>::DhopDirDisp(const FermionField &in, FermionField &out,int dirdisp, int gamma, int dag) {
  Compressor compressor(dag);

  Stencil.HaloExchange(in, compressor);

  PARALLEL_FOR_LOOP
  for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DiracOptDhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out,
                             dirdisp, gamma);
  parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma);
  }
};

@@ -351,16 +337,12 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
  st.HaloExchange(in, compressor);

  if (dag == DaggerYes) {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in,
                                   out);
    parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
    }
  } else {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DiracOptDhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in,
                                out);
    parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
    }
  }
};
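Throughout this hunk the kernels lose their DiracOpt prefix and the two-line PARALLEL_FOR_LOOP / for pair collapses into a single parallel_for. The converted loops can be read as ordinary thread-parallel site loops; a minimal OpenMP-style equivalent of the pattern (illustrative only, not Grid's actual macro definition; nsites stands for in._grid->oSites()):

  #pragma omp parallel for
  for (int sss = 0; sss < nsites; sss++) {
    Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
  }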

@@ -29,8 +29,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>
#include <Grid/PerfCount.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
#include <Grid/perfmon/PerfCount.h>

namespace Grid {
namespace QCD {
@@ -63,71 +64,55 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
  LebesgueEvenOdd(_FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
{
  // some assertions
  assert(FiveDimGrid._ndimension==5);
  assert(FourDimGrid._ndimension==4);
  assert(FourDimRedBlackGrid._ndimension==4);
  assert(FiveDimRedBlackGrid._ndimension==5);
  assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction

  // extent of fifth dim and not spread out
  Ls=FiveDimGrid._fdimensions[0];
  assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
  assert(FiveDimGrid._processors[0]         ==1);
  assert(FiveDimRedBlackGrid._processors[0] ==1);

  // Other dimensions must match the decomposition of the four-D fields
  for(int d=0;d<4;d++){

    assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
    assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);
    assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);

    assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
    assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
    assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);

    assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);
    assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
  }

  if (Impl::LsVectorised) {

    int nsimd = Simd::Nsimd();

    // some assertions
    assert(FiveDimGrid._ndimension==5);
    assert(FiveDimRedBlackGrid._ndimension==5);
    assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction
    assert(FourDimGrid._ndimension==4);

    // Dimension zero of the five-d is the Ls direction
    Ls=FiveDimGrid._fdimensions[0];
    assert(FiveDimGrid._processors[0]         ==1);
    assert(FiveDimGrid._simd_layout[0]        ==nsimd);

    assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
    assert(FiveDimRedBlackGrid._processors[0] ==1);
    assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd);

    // Other dimensions must match the decomposition of the four-D fields
    for(int d=0;d<4;d++){
      assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
      assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);

      assert(FourDimGrid._simd_layout[d]==1);
      assert(FourDimRedBlackGrid._simd_layout[d]==1);
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==1);

      assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
      assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
      assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    }

  } else {

    // some assertions
    assert(FiveDimGrid._ndimension==5);
    assert(FourDimGrid._ndimension==4);
    assert(FiveDimRedBlackGrid._ndimension==5);
    assert(FourDimRedBlackGrid._ndimension==4);
    assert(FiveDimRedBlackGrid._checker_dim==1);

    // Dimension zero of the five-d is the Ls direction
    Ls=FiveDimGrid._fdimensions[0];
    assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
    assert(FiveDimRedBlackGrid._processors[0] ==1);
    assert(FiveDimRedBlackGrid._simd_layout[0]==1);
    assert(FiveDimGrid._processors[0]         ==1);
    assert(FiveDimGrid._simd_layout[0]        ==1);

    // Other dimensions must match the decomposition of the four-D fields
    for(int d=0;d<4;d++){
      assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);
      assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);

      assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);
      assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);

      assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);

      assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
      assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
      assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    }

  }

  // Allocate the required comms buffer
@@ -182,34 +167,37 @@ void WilsonFermion5D<Impl>::Report(void)
    std::vector<int> latt = GridDefaultLatt();
    RealD volume = Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
    RealD NP = _FourDimGrid->_Nprocessors;
    RealD NN = _FourDimGrid->NodeCount();

  if ( DhopCalls > 0 ) {
    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of Dhop Calls     : " << DhopCalls   << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Communication time : " << DhopCommTime<< " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls           : " << DhopCommTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Compute time       : " << DhopComputeTime << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls        : " << DhopComputeTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of DhopEO Calls   : " << DhopCalls   << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D TotalTime   /Calls        : " << DhopTotalTime   / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime    /Calls        : " << DhopCommTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D FaceTime    /Calls        : " << DhopFaceTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime1/Calls        : " << DhopComputeTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime2/Calls        : " << DhopComputeTime2/ DhopCalls << " us" << std::endl;

    // Average the compute time
    _FourDimGrid->GlobalSum(DhopComputeTime);
    DhopComputeTime/=NP;
    RealD mflops = 1344*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NN << std::endl;

    RealD Fullmflops = 1344*volume*DhopCalls/(DhopComputeTime+DhopCommTime)/2; // 2 for red black counting
    RealD Fullmflops = 1344*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call (full)         : " << Fullmflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;

    std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;

   }
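To make the flop-rate formula above concrete, a purely illustrative plug-in of numbers (not measured values): with Ls = 16, a 16^4 global lattice, DhopCalls = 100 and an averaged DhopComputeTime of 2.0e6 us,

  mflops = 1344 * (16 * 16^4) * 100 / (2.0e6) / 2 ≈ 3.5e4 MFlop/s, i.e. roughly 35 GFlop/s summed over ranks.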

  if ( DerivCalls > 0 ) {
    std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of Deriv Calls    : " <<DerivCalls <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Communication time : " <<DerivCommTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls           : " <<DerivCommTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Compute time       : " <<DerivComputeTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls        : " <<DerivComputeTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Total Dhop Compute time  : " <<DerivDhopComputeTime <<" us"<<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Dhop ComputeTime/Calls   : " <<DerivDhopComputeTime/DerivCalls<<" us" <<std::endl;

    RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
@@ -232,6 +220,9 @@ void WilsonFermion5D<Impl>::ZeroCounters(void) {
  DhopCalls       = 0;
  DhopCommTime    = 0;
  DhopComputeTime = 0;
  DhopComputeTime2= 0;
  DhopFaceTime    = 0;
  DhopTotalTime   = 0;

  DerivCalls       = 0;
  DerivCommTime    = 0;
@@ -272,12 +263,11 @@ void WilsonFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,in
  assert(dirdisp<=7);
  assert(dirdisp>=0);

PARALLEL_FOR_LOOP
  for(int ss=0;ss<Umu._grid->oSites();ss++){
  parallel_for(int ss=0;ss<Umu._grid->oSites();ss++){
    for(int s=0;s<Ls;s++){
      int sU=ss;
      int sF = s+Ls*sU;
      Kernels::DiracOptDhopDir(Stencil,Umu,Stencil.CommBuf(),sF,sU,in,out,dirdisp,gamma);
      Kernels::DhopDir(Stencil,Umu,Stencil.CommBuf(),sF,sU,in,out,dirdisp,gamma);
    }
  }
};
@@ -320,8 +310,7 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
    ////////////////////////

    DerivDhopComputeTime -= usecond();
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < U._grid->oSites(); sss++) {
    parallel_for (int sss = 0; sss < U._grid->oSites(); sss++) {
      for (int s = 0; s < Ls; s++) {
        int sU = sss;
        int sF = s + Ls * sU;
@@ -329,7 +318,7 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
        assert(sF < B._grid->oSites());
        assert(sU < U._grid->oSites());

        Kernels::DiracOptDhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma);
        Kernels::DhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma);

        ////////////////////////////
        // spin trace outer product
@@ -396,6 +385,86 @@ template<class Impl>
void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
					 DoubledGaugeField & U,
					 const FermionField &in, FermionField &out,int dag)
{
  DhopTotalTime-=usecond();
#ifdef GRID_OMP
  if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
    DhopInternalOverlappedComms(st,lo,U,in,out,dag);
  else
#endif
    DhopInternalSerialComms(st,lo,U,in,out,dag);
  DhopTotalTime+=usecond();
}

template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, LebesgueOrder &lo,
							DoubledGaugeField & U,
							const FermionField &in, FermionField &out,int dag)
{
#ifdef GRID_OMP
  //  assert((dag==DaggerNo) ||(dag==DaggerYes));
  typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

  Compressor compressor(dag);

  int LLs = in._grid->_rdimensions[0];
  int len =  U._grid->oSites();

  DhopFaceTime-=usecond();
  st.HaloExchangeOptGather(in,compressor);
  DhopFaceTime+=usecond();
  std::vector<std::vector<CommsRequest_t> > reqs;

#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me = omp_get_thread_num();
    int myoff, mywork;

    GridThread::GetWork(len,me-1,mywork,myoff,nthreads-1);
    int sF = LLs * myoff;

    if ( me == 0 ) {
      DhopCommTime-=usecond();
      st.CommunicateBegin(reqs);
      st.CommunicateComplete(reqs);
      DhopCommTime+=usecond();
    } else {
      // Interior links in stencil
      if ( me==1 ) DhopComputeTime-=usecond();
      if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      else                  Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      if ( me==1 ) DhopComputeTime+=usecond();
    }
  }

  DhopFaceTime-=usecond();
  st.CommsMerge();
  DhopFaceTime+=usecond();

#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me = omp_get_thread_num();
    int myoff, mywork;

    GridThread::GetWork(len,me,mywork,myoff,nthreads);
    int sF = LLs * myoff;

    // Exterior links in stencil
    if ( me==0 ) DhopComputeTime2-=usecond();
    if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1);
    else                  Kernels::DhopSite   (st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1);
    if ( me==0 ) DhopComputeTime2+=usecond();
  }// end parallel region
#else
  assert(0);
#endif
}
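The structure of the overlapped path above is: gather the faces, then inside one OpenMP region let thread 0 drive the communication while the remaining threads sweep the interior sites, then merge the received faces and let all threads sweep the exterior sites. A stripped-down, self-contained sketch of the same division of labour (illustrative only; do_comms/do_interior/do_exterior are stand-ins, not Grid functions, and at least two OpenMP threads are assumed, as the code above effectively requires):

  #include <omp.h>

  // Stand-ins for the real work; purely illustrative.
  static void do_comms() {}
  static void do_interior(int /*site*/) {}
  static void do_exterior(int /*site*/) {}

  void overlapped_halo_update(int nsites) {
    // Phase 1: thread 0 drives the halo exchange while the other
    // threads sweep sites whose stencil is entirely local.
    #pragma omp parallel
    {
      int nthr = omp_get_num_threads();
      int me   = omp_get_thread_num();
      if (me == 0) {
        do_comms();                      // post and complete the halo exchange
      } else {
        int workers = nthr - 1;          // remaining threads share the interior
        int chunk   = (nsites + workers - 1) / workers;
        int lo = (me - 1) * chunk;
        int hi = lo + chunk < nsites ? lo + chunk : nsites;
        for (int s = lo; s < hi; s++) do_interior(s);
      }
    }
    // Phase 2: halo data has arrived; everyone sweeps the boundary sites.
    #pragma omp parallel for
    for (int s = 0; s < nsites; s++) do_exterior(s);
  }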
template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalSerialComms(StencilImpl & st, LebesgueOrder &lo,
					 DoubledGaugeField & U,
					 const FermionField &in, FermionField &out,int dag)
{
  //  assert((dag==DaggerNo) ||(dag==DaggerYes));
  Compressor compressor(dag);
@@ -403,45 +472,23 @@ void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
  int LLs = in._grid->_rdimensions[0];

  DhopCommTime-=usecond();
  st.HaloExchange(in,compressor);
  st.HaloExchangeOpt(in,compressor);
  DhopCommTime+=usecond();

  DhopComputeTime-=usecond();
  // Dhop takes the 4d grid from U, and makes a 5d index for fermion
  if (dag == DaggerYes) {
    PARALLEL_FOR_LOOP
    for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DiracOptDhopSiteDag(st, lo, U, st.CommBuf(), sF, sU, LLs, 1, in, out);
    }
#ifdef AVX512
  } else if (stat.is_init() ) {

    int nthreads;
    stat.start();
#pragma omp parallel
    {
#pragma omp master
    nthreads = omp_get_num_threads();
    int mythread = omp_get_thread_num();
    stat.enter(mythread);
#pragma omp for nowait
    for(int ss=0;ss<U._grid->oSites();ss++) {
      int sU=ss;
      int sF=LLs*sU;
      Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
    stat.exit(mythread);
    }
    stat.accum(nthreads);
#endif
  } else {
    PARALLEL_FOR_LOOP
    for (int ss = 0; ss < U._grid->oSites(); ss++) {
  if (dag == DaggerYes) {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DiracOptDhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
      Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
  } else {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
  }
  DhopComputeTime+=usecond();

@@ -31,7 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_WILSON_FERMION_5D_H
#define  GRID_QCD_WILSON_FERMION_5D_H

#include <Grid/Stat.h>
#include <Grid/perfmon/Stat.h>

namespace Grid {
namespace QCD {
@@ -82,6 +82,9 @@ namespace QCD {
     double DhopCalls;
     double DhopCommTime;
     double DhopComputeTime;
     double DhopComputeTime2;
     double DhopFaceTime;
     double DhopTotalTime;

     double DerivCalls;
     double DerivCommTime;
@@ -145,6 +148,20 @@ namespace QCD {
		      const FermionField &in,
		      FermionField &out,
		      int dag);

    void DhopInternalOverlappedComms(StencilImpl & st,
				     LebesgueOrder &lo,
				     DoubledGaugeField &U,
				     const FermionField &in,
				     FermionField &out,
				     int dag);

    void DhopInternalSerialComms(StencilImpl & st,
				 LebesgueOrder &lo,
				 DoubledGaugeField &U,
				 const FermionField &in,
				 FermionField &out,
				 int dag);

    // Constructors
    WilsonFermion5D(GaugeField &_Umu,