Mirror of https://github.com/paboyle/Grid.git, synced 2025-04-04 11:15:55 +01:00

Commit 4b759b8f2a: Merge branch 'feature/hmc_generalise' into feature/scalar_adjointFT

.gitignore (vendored, 1 line changed)
@@ -92,6 +92,7 @@ build*/*
 #####################
 *.xcodeproj/*
 build.sh
+.vscode

 # Eigen source #
 ################
.travis.yml
@@ -104,5 +104,5 @@ script:
 - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto
 - make -j4
 - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi
-- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then mpirun -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi

benchmarks/Benchmark_comms.cc
@@ -48,9 +48,9 @@ int main (int argc, char ** argv)
 std::cout<<GridLogMessage << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl;
 std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
 std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;
-int maxlat=16;
-for(int lat=4;lat<=maxlat;lat+=2){
-for(int Ls=1;Ls<=16;Ls*=2){
+int maxlat=24;
+for(int lat=4;lat<=maxlat;lat+=4){
+for(int Ls=8;Ls<=32;Ls*=2){

 std::vector<int> latt_size ({lat*mpi_layout[0],
 lat*mpi_layout[1],
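The same scan-bound change is applied to all four benchmark sections of this file: maxlat goes from 16 to 24, the lattice step from 2 to 4, and Ls now runs over {8,16,32} instead of {1,2,4,8,16}. A quick count of the timed points per section (my arithmetic, not stated in the source):

    // Old scan: lat in {4,6,...,16}, Ls in {1,2,4,8,16}  -> 7*5 = 35 points
    // New scan: lat in {4,8,...,24}, Ls in {8,16,32}     -> 6*3 = 18 points, at larger volumes
    int old_pts = 0, new_pts = 0;
    for (int lat = 4; lat <= 16; lat += 2) for (int Ls = 1; Ls <= 16; Ls *= 2) old_pts++;
    for (int lat = 4; lat <= 24; lat += 4) for (int Ls = 8; Ls <= 32; Ls *= 2) new_pts++;
    // old_pts == 35, new_pts == 18

Fewer but larger points keep the bandwidth curve while shortening the run.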
@@ -124,8 +124,8 @@ int main (int argc, char ** argv)
 std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;


-for(int lat=4;lat<=maxlat;lat+=2){
-for(int Ls=1;Ls<=16;Ls*=2){
+for(int lat=4;lat<=maxlat;lat+=4){
+for(int Ls=8;Ls<=32;Ls*=2){

 std::vector<int> latt_size ({lat,lat,lat,lat});

@@ -194,14 +194,14 @@ int main (int argc, char ** argv)
 }


-Nloop=100;
+Nloop=10;
 std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
 std::cout<<GridLogMessage << "= Benchmarking concurrent STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl;
 std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
 std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

-for(int lat=4;lat<=maxlat;lat+=2){
-for(int Ls=1;Ls<=16;Ls*=2){
+for(int lat=4;lat<=maxlat;lat+=4){
+for(int Ls=8;Ls<=32;Ls*=2){

 std::vector<int> latt_size ({lat*mpi_layout[0],
 lat*mpi_layout[1],
@@ -281,8 +281,8 @@ int main (int argc, char ** argv)
 std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
 std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

-for(int lat=4;lat<=maxlat;lat+=2){
-for(int Ls=1;Ls<=16;Ls*=2){
+for(int lat=4;lat<=maxlat;lat+=4){
+for(int Ls=8;Ls<=32;Ls*=2){

 std::vector<int> latt_size ({lat*mpi_layout[0],
 lat*mpi_layout[1],
@@ -324,8 +324,8 @@ int main (int argc, char ** argv)
 (void *)&rbuf[mu][0],
 recv_from_rank,
 bytes);
-// Grid.StencilSendToRecvFromComplete(requests);
-// requests.resize(0);
+Grid.StencilSendToRecvFromComplete(requests);
+requests.resize(0);

 comm_proc = mpi_layout[mu]-1;

benchmarks/Benchmark_dwf.cc
@@ -37,27 +37,27 @@ struct scal {
 d internal;
 };

 Gamma::Algebra Gmu [] = {
 Gamma::Algebra::GammaX,
 Gamma::Algebra::GammaY,
 Gamma::Algebra::GammaZ,
 Gamma::Algebra::GammaT
 };

 typedef WilsonFermion5D<DomainWallVec5dImplR> WilsonFermion5DR;
 typedef WilsonFermion5D<DomainWallVec5dImplF> WilsonFermion5DF;
 typedef WilsonFermion5D<DomainWallVec5dImplD> WilsonFermion5DD;


 int main (int argc, char ** argv)
 {
 Grid_init(&argc,&argv);


 int threads = GridThread::GetThreads();
 std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;

 std::vector<int> latt4 = GridDefaultLatt();
-const int Ls=8;
+const int Ls=16;
 GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
@@ -72,34 +72,66 @@ int main (int argc, char ** argv)
 std::vector<int> seeds4({1,2,3,4});
 std::vector<int> seeds5({5,6,7,8});

+std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
 GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
+std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
 GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
+std::cout << GridLogMessage << "Initialised RNGs" << std::endl;

 LatticeFermion src (FGrid); random(RNG5,src);
+#if 0
+src = zero;
+{
+std::vector<int> origin({0,0,0,latt4[2]-1,0});
+SpinColourVectorF tmp;
+tmp=zero;
+tmp()(0)(0)=Complex(-2.0,0.0);
+std::cout << " source site 0 " << tmp<<std::endl;
+pokeSite(tmp,src,origin);
+}
+#else
+RealD N2 = 1.0/::sqrt(norm2(src));
+src = src*N2;
+#endif

 LatticeFermion result(FGrid); result=zero;
 LatticeFermion ref(FGrid); ref=zero;
+LatticeFermion refDag(FGrid); refDag=zero;
 LatticeFermion tmp(FGrid);
 LatticeFermion err(FGrid);

+std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
 LatticeGaugeField Umu(UGrid);
-random(RNG4,Umu);
-LatticeGaugeField Umu5d(FGrid);
+SU3::HotConfiguration(RNG4,Umu);
+std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
+#if 0
+Umu=1.0;
+for(int mu=0;mu<Nd;mu++){
+LatticeColourMatrix ttmp(UGrid);
+ttmp = PeekIndex<LorentzIndex>(Umu,mu);
+// if (mu !=2 ) ttmp = 0;
+// ttmp = ttmp* pow(10.0,mu);
+PokeIndex<LorentzIndex>(Umu,ttmp,mu);
+}
+std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
+#endif

+////////////////////////////////////
+// Naive wilson implementation
+////////////////////////////////////
 // replicate across fifth dimension
+LatticeGaugeField Umu5d(FGrid);
+std::vector<LatticeColourMatrix> U(4,FGrid);
 for(int ss=0;ss<Umu._grid->oSites();ss++){
 for(int s=0;s<Ls;s++){
 Umu5d._odata[Ls*ss+s] = Umu._odata[ss];
 }
 }

-////////////////////////////////////
-// Naive wilson implementation
-////////////////////////////////////
-std::vector<LatticeColourMatrix> U(4,FGrid);
 for(int mu=0;mu<Nd;mu++){
 U[mu] = PeekIndex<LorentzIndex>(Umu5d,mu);
 }
+std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;

 if (1)
 {
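Note the gauge-field initialisation change above: random(RNG4,Umu) fills the link matrices with unconstrained complex numbers, whereas SU3::HotConfiguration draws every link from the SU(3) group. A minimal sketch of the new form, using only names that appear in this diff:

    LatticeGaugeField Umu(UGrid);
    SU3::HotConfiguration(RNG4, Umu);   // "hot start": each link is a random SU(3) matrix,
                                        // so adj(U)*U == 1 and the Dslash norm checks are physical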
@@ -121,6 +153,7 @@ int main (int argc, char ** argv)

 RealD NP = UGrid->_Nprocessors;

+std::cout << GridLogMessage << "Creating action operator " << std::endl;
 DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);

 std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
@@ -136,10 +169,11 @@ int main (int argc, char ** argv)
 if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
 std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

-int ncall =100;
+int ncall =1000;
 if (1) {
 FGrid->Barrier();
 Dw.ZeroCounters();
+Dw.Dhop(src,result,0);
 double t0=usecond();
 for(int i=0;i<ncall;i++){
 __SSC_START;
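The two changes here follow the usual benchmarking recipe: more repetitions (ncall 100 to 1000) to reduce timer noise, and one untimed warm-up Dhop so first-touch page faults and communication setup do not pollute the measurement. The pattern, sketched with the names used in this file:

    Dw.Dhop(src, result, 0);            // warm-up call, excluded from timing
    double t0 = usecond();
    for (int i = 0; i < ncall; i++) {
      Dw.Dhop(src, result, 0);          // timed region: ncall applications
    }
    double t1 = usecond();
    double mflops = flops / (t1 - t0);  // usecond() is microseconds, so this is Mflop/s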
@@ -153,36 +187,45 @@ int main (int argc, char ** argv)
 double flops=1344*volume*ncall;

 std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+// std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
+// std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
 std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
 std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
 err = ref-result;
 std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;

+/*
+if(( norm2(err)>1.0e-4) ) {
+std::cout << "RESULT\n " << result<<std::endl;
+std::cout << "REF \n " << ref <<std::endl;
+std::cout << "ERR \n " << err <<std::endl;
+FGrid->Barrier();
+exit(-1);
+}
+*/
 assert (norm2(err)< 1.0e-4 );
 Dw.Report();
 }

 if (1) { // Naive wilson dag implementation
-ref = zero;
+refDag = zero;
 for (int mu = 0; mu < Nd; mu++) {
 // ref = src - Gamma(Gamma::GammaX)* src ; // 1+gamma_x
 tmp = U[mu] * Cshift(src, mu + 1, 1);
-for (int i = 0; i < ref._odata.size(); i++) {
-ref._odata[i] += tmp._odata[i] + Gamma(Gmu[mu]) * tmp._odata[i];
+for (int i = 0; i < refDag._odata.size(); i++) {
+refDag._odata[i] += tmp._odata[i] + Gamma(Gmu[mu]) * tmp._odata[i];
 }

 tmp = adj(U[mu]) * src;
 tmp = Cshift(tmp, mu + 1, -1);
-for (int i = 0; i < ref._odata.size(); i++) {
-ref._odata[i] += tmp._odata[i] - Gamma(Gmu[mu]) * tmp._odata[i];
+for (int i = 0; i < refDag._odata.size(); i++) {
+refDag._odata[i] += tmp._odata[i] - Gamma(Gmu[mu]) * tmp._odata[i];
 }
 }
-ref = -0.5 * ref;
+refDag = -0.5 * refDag;
 }

-if (1)
-{
+if (1) {

 std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
 std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::Dhop "<<std::endl;
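The figure of 1344 flops per site used above is the standard Wilson-Dslash accounting. One way to break it down (my derivation; the source does not spell it out):

    // Complex mul = 6 flops, complex add = 2 flops.
    const int su3_matvec    = 9*6 + 6*2;  // SU(3) matrix times colour vector: 66 flops
    const int spin_project  = 6*2;        // 4-spinor -> half spinor: 6 complex adds
    const int reconstruct   = 12*2;       // accumulate into the 4-spinor: 12 complex adds
    const int per_direction = spin_project + 2*su3_matvec + reconstruct;  // 168 flops
    static_assert(12 + 132 + 24 == 168, "per-direction count");
    static_assert(8 * 168 == 1344, "eight directions give 1344 flops per site");

Multiplying by the 5d volume (volume = Ls * V4 here) and ncall gives the total used in the Mflop/s print.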
@@ -201,20 +244,12 @@ int main (int argc, char ** argv)

 WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5);

-for(int x=0;x<latt4[0];x++){
-for(int y=0;y<latt4[1];y++){
-for(int z=0;z<latt4[2];z++){
-for(int t=0;t<latt4[3];t++){
-for(int s=0;s<Ls;s++){
-std::vector<int> site({s,x,y,z,t});
-SpinColourVector tmp;
-peekSite(tmp,src,site);
-pokeSite(tmp,ssrc,site);
-}}}}}
+localConvert(src,ssrc);
 std::cout<<GridLogMessage<< "src norms "<< norm2(src)<<" " <<norm2(ssrc)<<std::endl;
 FGrid->Barrier();
-double t0=usecond();
+sDw.Dhop(ssrc,sresult,0);
 sDw.ZeroCounters();
+double t0=usecond();
 for(int i=0;i<ncall;i++){
 __SSC_START;
 sDw.Dhop(ssrc,sresult,0);
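The deleted quintuple loop and the new localConvert call do the same job, copying the source field between the scalar 4d layout and the s-vectorised layout, but localConvert does it as one data-parallel conversion instead of a peekSite/pokeSite pair per lattice point. A sketch of the equivalence, with the coordinate order {s,x,y,z,t} used by the deleted code:

    // Equivalent site-by-site form of localConvert(src, ssrc) for these grids:
    // for each site = {s,x,y,z,t}:
    //   SpinColourVector tmp;
    //   peekSite(tmp, src,  site);   // read from the FGrid layout
    //   pokeSite(tmp, ssrc, site);   // write into the sFGrid layout
    localConvert(src, ssrc);          // one call, parallel over sites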
@@ -228,73 +263,63 @@ int main (int argc, char ** argv)
 std::cout<<GridLogMessage << "Called Dw s_inner "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
 std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
 std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
-sDw.Report();

-if(0){
-for(int i=0;i< PerformanceCounter::NumTypes(); i++ ){
-sDw.Dhop(ssrc,sresult,0);
-PerformanceCounter Counter(i);
-Counter.Start();
-sDw.Dhop(ssrc,sresult,0);
-Counter.Stop();
-Counter.Report();
-}
-}

 std::cout<<GridLogMessage<< "res norms "<< norm2(result)<<" " <<norm2(sresult)<<std::endl;
+sDw.Report();
 RealD sum=0;
-for(int x=0;x<latt4[0];x++){
-for(int y=0;y<latt4[1];y++){
-for(int z=0;z<latt4[2];z++){
-for(int t=0;t<latt4[3];t++){
-for(int s=0;s<Ls;s++){
-std::vector<int> site({s,x,y,z,t});
-SpinColourVector normal, simd;
-peekSite(normal,result,site);
-peekSite(simd,sresult,site);
-sum=sum+norm2(normal-simd);
-if (norm2(normal-simd) > 1.0e-6 ) {
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" "<<norm2(normal-simd)<<std::endl;
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" normal "<<normal<<std::endl;
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" simd "<<simd<<std::endl;
+err=zero;
+localConvert(sresult,err);
+err = err - ref;
+sum = norm2(err);
+std::cout<<GridLogMessage<<" difference between normal ref and simd is "<<sum<<std::endl;
+if(sum > 1.0e-4 ){
+std::cout<< "sD REF\n " <<ref << std::endl;
+std::cout<< "sD ERR \n " <<err <<std::endl;
 }
-}}}}}
-std::cout<<GridLogMessage<<" difference between normal and simd is "<<sum<<std::endl;
-assert (sum< 1.0e-4 );
+err=zero;
+localConvert(sresult,err);
+err = err - result;
+sum = norm2(err);
+std::cout<<GridLogMessage<<" difference between normal result and simd is "<<sum<<std::endl;
+if(sum > 1.0e-4 ){
+std::cout<< "sD REF\n " <<result << std::endl;
+std::cout<< "sD ERR \n " << err <<std::endl;
+}
+assert(sum < 1.0e-4);

 // Check Dag
 std::cout << GridLogMessage << "Compare WilsonFermion5D<DomainWallVec5dImplR>::Dhop to naive wilson implementation Dag to verify correctness" << std::endl;
 sDw.Dhop(ssrc,sresult,1);
-sum=0;
-for(int x=0;x<latt4[0];x++){
-for(int y=0;y<latt4[1];y++){
-for(int z=0;z<latt4[2];z++){
-for(int t=0;t<latt4[3];t++){
-for(int s=0;s<Ls;s++){
-std::vector<int> site({s,x,y,z,t});
-SpinColourVector normal, simd;
-peekSite(normal,ref,site);
-peekSite(simd,sresult,site);
-sum=sum+norm2(normal-simd);
-if (norm2(normal-simd) > 1.0e-6 ) {
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" "<<norm2(normal-simd)<<std::endl;
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" normal "<<normal<<std::endl;
-std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" simd "<<simd<<std::endl;
+err=zero;
+localConvert(sresult,err);
+err = err - refDag;
+sum = norm2(err);
+std::cout<<GridLogMessage<<" difference between normal dag ref and simd is "<<sum<<std::endl;
+if(sum > 1.0e-4 ){
+std::cout<< "sD REF\n " <<result << std::endl;
+std::cout<< "sD ERR \n " << err <<std::endl;
 }
-}}}}}
-std::cout<<GridLogMessage<<" difference between normal and simd is "<<sum<<std::endl;
-assert (sum< 1.0e-4 );
+assert(sum < 1.0e-4);



-if (1) {
+if(1){
+std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
+std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
+std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
+if ( sizeof(Real)==4 ) std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
+if ( sizeof(Real)==8 ) std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
+if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric )
+std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
+if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll)
+std::cout << GridLogMessage<< "* Using Nc=3 WilsonKernels" <<std::endl;
+if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm )
+std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
+std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

 LatticeFermion sr_eo(sFGrid);

 LatticeFermion ssrc_e (sFrbGrid);
 LatticeFermion ssrc_o (sFrbGrid);
 LatticeFermion sr_e (sFrbGrid);
@@ -302,33 +327,23 @@ int main (int argc, char ** argv)

 pickCheckerboard(Even,ssrc_e,ssrc);
 pickCheckerboard(Odd,ssrc_o,ssrc);
-setCheckerboard(sr_eo,ssrc_o);
-setCheckerboard(sr_eo,ssrc_e);
+// setCheckerboard(sr_eo,ssrc_o);
+// setCheckerboard(sr_eo,ssrc_e);

 sr_e = zero;
 sr_o = zero;

-std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
-std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
-std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
-if ( sizeof(Real)==4 ) std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
-if ( sizeof(Real)==8 ) std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
-if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
-if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3 WilsonKernels" <<std::endl;
-if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
-std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

 FGrid->Barrier();
+sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
 sDw.ZeroCounters();
-sDw.stat.init("DhopEO");
+// sDw.stat.init("DhopEO");
 double t0=usecond();
 for (int i = 0; i < ncall; i++) {
 sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
 }
 double t1=usecond();
 FGrid->Barrier();
-sDw.stat.print();
+// sDw.stat.print();

 double volume=Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
 double flops=(1344.0*volume*ncall)/2;
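For reference, pickCheckerboard extracts one parity of a full field into a red-black grid and setCheckerboard scatters it back; the (now commented-out) lines above were a lossless round trip, and the diagnostic branches below still rely on these semantics. A minimal sketch with the fields of this benchmark:

    pickCheckerboard(Even, ssrc_e, ssrc);  // even-parity sites of ssrc
    pickCheckerboard(Odd,  ssrc_o, ssrc);  // odd-parity sites of ssrc
    setCheckerboard(sr_eo, ssrc_e);        // write the even half back...
    setCheckerboard(sr_eo, ssrc_o);        // ...and the odd half: sr_eo now equals ssrc

Note also that DhopEO acts on a single parity, i.e. half the sites, which is why the flop count just above is (1344.0*volume*ncall)/2.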
@@ -343,19 +358,27 @@ int main (int argc, char ** argv)

 pickCheckerboard(Even,ssrc_e,sresult);
 pickCheckerboard(Odd ,ssrc_o,sresult);

+// Check even part
 ssrc_e = ssrc_e - sr_e;
 RealD error = norm2(ssrc_e);

 std::cout<<GridLogMessage << "sE norm diff "<< norm2(ssrc_e)<< " vec nrm: "<<norm2(sr_e) <<std::endl;
-ssrc_o = ssrc_o - sr_o;

+// Check odd part
+ssrc_o = ssrc_o - sr_o;
 error+= norm2(ssrc_o);
 std::cout<<GridLogMessage << "sO norm diff "<< norm2(ssrc_o)<< " vec nrm: "<<norm2(sr_o) <<std::endl;

 if(error>1.0e-4) {
 setCheckerboard(ssrc,ssrc_o);
 setCheckerboard(ssrc,ssrc_e);
-std::cout<< ssrc << std::endl;
+std::cout<< "DIFF\n " <<ssrc << std::endl;
+setCheckerboard(ssrc,sr_o);
+setCheckerboard(ssrc,sr_e);
+std::cout<< "CBRESULT\n " <<ssrc << std::endl;
+std::cout<< "RESULT\n " <<sresult<< std::endl;
 }
+assert(error<1.0e-4);

 // Check the dag
 std::cout << GridLogMessage << "Compare WilsonFermion5D<DomainWallVec5dImplR>::DhopEO to Dhop to verify correctness" << std::endl;
@@ -367,36 +390,53 @@ int main (int argc, char ** argv)

 pickCheckerboard(Even,ssrc_e,sresult);
 pickCheckerboard(Odd ,ssrc_o,sresult);

 ssrc_e = ssrc_e - sr_e;
 error = norm2(ssrc_e);

 std::cout<<GridLogMessage << "sE norm diff "<< norm2(ssrc_e)<< " vec nrm: "<<norm2(sr_e) <<std::endl;
 ssrc_o = ssrc_o - sr_o;

 error+= norm2(ssrc_o);
 std::cout<<GridLogMessage << "sO norm diff "<< norm2(ssrc_o)<< " vec nrm: "<<norm2(sr_o) <<std::endl;

 if(error>1.0e-4) {
 setCheckerboard(ssrc,ssrc_o);
 setCheckerboard(ssrc,ssrc_e);
 std::cout<< ssrc << std::endl;
 }

+}

 }

+if (1) { // Naive wilson dag implementation
+ref = zero;
+for(int mu=0;mu<Nd;mu++){
+// ref = src - Gamma(Gamma::Algebra::GammaX)* src ; // 1+gamma_x
+tmp = U[mu]*Cshift(src,mu+1,1);
+for(int i=0;i<ref._odata.size();i++){
+ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
 }

+tmp =adj(U[mu])*src;
+tmp =Cshift(tmp,mu+1,-1);
+for(int i=0;i<ref._odata.size();i++){
+ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
+}
+}
+ref = -0.5*ref;
+}

 Dw.Dhop(src,result,1);
 std::cout << GridLogMessage << "Compare DomainWallFermionR::Dhop to naive wilson implementation Dag to verify correctness" << std::endl;
 std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
-std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
+std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
 err = ref-result;
-std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-assert(norm2(err)<1.0e-4);
+std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
+if((norm2(err)>1.0e-4)){
+std::cout<< "DAG RESULT\n " <<ref << std::endl;
+std::cout<< "DAG sRESULT\n " <<result << std::endl;
+std::cout<< "DAG ERR \n " << err <<std::endl;
+}
 LatticeFermion src_e (FrbGrid);
 LatticeFermion src_o (FrbGrid);
 LatticeFermion r_e (FrbGrid);
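The naive dag reference added above (tmp plus Gamma*tmp on the forward hop, tmp minus Gamma*tmp on the backward hop, all scaled by -0.5) is the adjoint of the Wilson hopping term. Under the sign conventions this code implies,

\[
D_{\mathrm{hop}}^{\dagger}\,\psi(x) \;=\; -\tfrac{1}{2}\sum_{\mu=0}^{3}\Big[(1+\gamma_\mu)\,U_\mu(x)\,\psi(x+\hat\mu) \;+\; (1-\gamma_\mu)\,U_\mu^{\dagger}(x-\hat\mu)\,\psi(x-\hat\mu)\Big],
\]

with the undaggered Dhop identical except that the signs of the two gamma terms are flipped.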
@@ -404,13 +444,18 @@ int main (int argc, char ** argv)
 LatticeFermion r_eo (FGrid);


-std::cout<<GridLogMessage << "Calling Deo and Doe and assert Deo+Doe == Dunprec"<<std::endl;
+std::cout<<GridLogMessage << "Calling Deo and Doe and //assert Deo+Doe == Dunprec"<<std::endl;
 pickCheckerboard(Even,src_e,src);
 pickCheckerboard(Odd,src_o,src);

 std::cout<<GridLogMessage << "src_e"<<norm2(src_e)<<std::endl;
 std::cout<<GridLogMessage << "src_o"<<norm2(src_o)<<std::endl;


+// S-direction is INNERMOST and takes no part in the parity.
+static int Opt; // these are a temporary hack
+static int Comms; // these are a temporary hack

 std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
 std::cout << GridLogMessage<< "* Benchmarking DomainWallFermionR::DhopEO "<<std::endl;
 std::cout << GridLogMessage<< "* Vectorising space-time by "<<vComplex::Nsimd()<<std::endl;
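The new comment states the key layout fact: for the 5d vectorised implementation the s-coordinate is innermost and excluded from the red-black decomposition, so parity is a purely 4d notion. A hypothetical helper, not part of the diff, making that explicit with the coordinate order {s,x,y,z,t} used elsewhere in this file:

    // Hypothetical illustration: even/odd parity of a 5d site
    int parity5d(const std::vector<int>& site) {           // site = {s,x,y,z,t}
      return (site[1] + site[2] + site[3] + site[4]) & 1;  // s (site[0]) plays no part
    }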
@@ -423,6 +468,7 @@ int main (int argc, char ** argv)
 {
 Dw.ZeroCounters();
 FGrid->Barrier();
+Dw.DhopEO(src_o,r_e,DaggerNo);
 double t0=usecond();
 for(int i=0;i<ncall;i++){
 Dw.DhopEO(src_o,r_e,DaggerNo);
@@ -450,14 +496,20 @@ int main (int argc, char ** argv)

 err = r_eo-result;
 std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-assert(norm2(err)<1.0e-4);
+if((norm2(err)>1.0e-4)){
+std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
+std::cout<< "Deo REF\n " <<result << std::endl;
+std::cout<< "Deo ERR \n " << err <<std::endl;
+}

 pickCheckerboard(Even,src_e,err);
 pickCheckerboard(Odd,src_o,err);
 std::cout<<GridLogMessage << "norm diff even "<< norm2(src_e)<<std::endl;
 std::cout<<GridLogMessage << "norm diff odd "<< norm2(src_o)<<std::endl;
-assert(norm2(src_e)<1.0e-4);
-assert(norm2(src_o)<1.0e-4);
+//assert(norm2(src_e)<1.0e-4);
+//assert(norm2(src_o)<1.0e-4);

 Grid_finalize();
 }

@@ -66,7 +66,8 @@ int main (int argc, char ** argv)

 Vec tsum; tsum = zero;

-GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+GridParallelRNG pRNG(&Grid);
+pRNG.SeedFixedIntegers(std::vector<int>({56,17,89,101}));

 std::vector<double> stop(threads);
 Vector<Vec> sum(threads);
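SeedRandomDevice is replaced by fixed integer seeds throughout this commit, which makes benchmark runs reproducible across machines and reruns. The pattern, exactly as it now appears here:

    GridParallelRNG pRNG(&Grid);
    pRNG.SeedFixedIntegers(std::vector<int>({56,17,89,101}));  // deterministic stream;
    // any fixed seed vector works: later hunks use {1,2,3,4}, and {45,12,81,9} in comments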
@@ -77,8 +78,7 @@ int main (int argc, char ** argv)
 }

 double start=usecond();
-PARALLEL_FOR_LOOP
-for(int t=0;t<threads;t++){
+parallel_for(int t=0;t<threads;t++){

 sum[t] = x[t]._odata[0];
 for(int i=0;i<Nloop;i++){
@@ -65,7 +65,7 @@ int main (int argc, char ** argv)

 uint64_t Nloop=NLOOP;

-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeVec z(&Grid); //random(pRNG,z);
 LatticeVec x(&Grid); //random(pRNG,x);
@@ -100,7 +100,7 @@ int main (int argc, char ** argv)
 int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];
 GridCartesian Grid(latt_size,simd_layout,mpi_layout);

-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeVec z(&Grid); //random(pRNG,z);
 LatticeVec x(&Grid); //random(pRNG,x);
@@ -138,7 +138,7 @@ int main (int argc, char ** argv)

 GridCartesian Grid(latt_size,simd_layout,mpi_layout);

-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeVec z(&Grid); //random(pRNG,z);
 LatticeVec x(&Grid); //random(pRNG,x);
@@ -173,7 +173,7 @@ int main (int argc, char ** argv)
 uint64_t Nloop=NLOOP;
 GridCartesian Grid(latt_size,simd_layout,mpi_layout);

-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
 LatticeVec z(&Grid); //random(pRNG,z);
 LatticeVec x(&Grid); //random(pRNG,x);
 LatticeVec y(&Grid); //random(pRNG,y);
benchmarks/Benchmark_staggered.cc (new file, 134 lines)
@@ -0,0 +1,134 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid
+
+Source file: ./benchmarks/Benchmark_staggered.cc
+
+Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#include <Grid/Grid.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+int main (int argc, char ** argv)
+{
+Grid_init(&argc,&argv);
+
+std::vector<int> latt_size = GridDefaultLatt();
+std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+std::vector<int> mpi_layout = GridDefaultMpi();
+GridCartesian Grid(latt_size,simd_layout,mpi_layout);
+GridRedBlackCartesian RBGrid(latt_size,simd_layout,mpi_layout);
+
+int threads = GridThread::GetThreads();
+std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
+std::cout<<GridLogMessage << "Grid floating point word size is REALF"<< sizeof(RealF)<<std::endl;
+std::cout<<GridLogMessage << "Grid floating point word size is REALD"<< sizeof(RealD)<<std::endl;
+std::cout<<GridLogMessage << "Grid floating point word size is REAL"<< sizeof(Real)<<std::endl;
+
+std::vector<int> seeds({1,2,3,4});
+GridParallelRNG pRNG(&Grid);
+pRNG.SeedFixedIntegers(seeds);
+// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
+
+typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
+typename ImprovedStaggeredFermionR::ImplParams params;
+
+FermionField src (&Grid); random(pRNG,src);
+FermionField result(&Grid); result=zero;
+FermionField ref(&Grid); ref=zero;
+FermionField tmp(&Grid); tmp=zero;
+FermionField err(&Grid); tmp=zero;
+LatticeGaugeField Umu(&Grid); random(pRNG,Umu);
+std::vector<LatticeColourMatrix> U(4,&Grid);
+
+double volume=1;
+for(int mu=0;mu<Nd;mu++){
+volume=volume*latt_size[mu];
+}
+
+// Only one non-zero (y)
+#if 0
+Umu=zero;
+Complex cone(1.0,0.0);
+for(int nn=0;nn<Nd;nn++){
+random(pRNG,U[nn]);
+if(1) {
+if (nn!=2) { U[nn]=zero; std::cout<<GridLogMessage << "zeroing gauge field in dir "<<nn<<std::endl; }
+// else { U[nn]= cone;std::cout<<GridLogMessage << "unit gauge field in dir "<<nn<<std::endl; }
+else { std::cout<<GridLogMessage << "random gauge field in dir "<<nn<<std::endl; }
+}
+PokeIndex<LorentzIndex>(Umu,U[nn],nn);
+}
+#endif
+
+for(int mu=0;mu<Nd;mu++){
+U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
+}
+ref = zero;
+/*
+{ // Naive wilson implementation
+ref = zero;
+for(int mu=0;mu<Nd;mu++){
+// ref = src + Gamma(Gamma::GammaX)* src ; // 1-gamma_x
+tmp = U[mu]*Cshift(src,mu,1);
+for(int i=0;i<ref._odata.size();i++){
+ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
+}
+
+tmp =adj(U[mu])*src;
+tmp =Cshift(tmp,mu,-1);
+for(int i=0;i<ref._odata.size();i++){
+ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
+}
+}
+}
+ref = -0.5*ref;
+*/
+
+RealD mass=0.1;
+RealD c1=9.0/8.0;
+RealD c2=-1.0/24.0;
+RealD u0=1.0;
+ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
+
+std::cout<<GridLogMessage << "Calling Ds"<<std::endl;
+int ncall=1000;
+double t0=usecond();
+for(int i=0;i<ncall;i++){
+Ds.Dhop(src,result,0);
+}
+double t1=usecond();
+double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // 16*66 + 90 == 1146 flops per site
+
+std::cout<<GridLogMessage << "Called Ds"<<std::endl;
+std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
+std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
+err = ref-result;
+std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
+
+Grid_finalize();
+}
@@ -55,7 +55,7 @@ int main (int argc, char ** argv)
 std::vector<int> latt_size ({lat*mpi_layout[0],lat*mpi_layout[1],lat*mpi_layout[2],lat*mpi_layout[3]});
 int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];
 GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeColourMatrix z(&Grid);// random(pRNG,z);
 LatticeColourMatrix x(&Grid);// random(pRNG,x);
@@ -88,7 +88,7 @@ int main (int argc, char ** argv)
 int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];

 GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeColourMatrix z(&Grid); //random(pRNG,z);
 LatticeColourMatrix x(&Grid); //random(pRNG,x);
@@ -119,7 +119,7 @@ int main (int argc, char ** argv)
 int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];

 GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeColourMatrix z(&Grid); //random(pRNG,z);
 LatticeColourMatrix x(&Grid); //random(pRNG,x);
@@ -150,7 +150,7 @@ int main (int argc, char ** argv)
 int vol = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];

 GridCartesian Grid(latt_size,simd_layout,mpi_layout);
-// GridParallelRNG pRNG(&Grid); pRNG.SeedRandomDevice();
+// GridParallelRNG pRNG(&Grid); pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeColourMatrix z(&Grid); //random(pRNG,z);
 LatticeColourMatrix x(&Grid); //random(pRNG,x);
@@ -69,7 +69,7 @@ int main (int argc, char ** argv)
 std::vector<int> seeds({1,2,3,4});
 GridParallelRNG pRNG(&Grid);
 pRNG.SeedFixedIntegers(seeds);
-// pRNG.SeedRandomDevice();
+// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});

 LatticeFermion src (&Grid); random(pRNG,src);
 LatticeFermion result(&Grid); result=zero;
bootstrap.sh
@@ -1,4 +1,4 @@
 #!/usr/bin/env bash

 EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.2.9.tar.bz2'

configure.ac
@@ -339,7 +339,7 @@ AM_CONDITIONAL(BUILD_COMMS_NONE, [ test "${comms_type}X" == "noneX" ])
 ############### RNG selection
 AC_ARG_ENABLE([rng],[AC_HELP_STRING([--enable-rng=ranlux48|mt19937|sitmo],\
 [Select Random Number Generator to be used])],\
-[ac_RNG=${enable_rng}],[ac_RNG=ranlux48])
+[ac_RNG=${enable_rng}],[ac_RNG=sitmo])

 case ${ac_RNG} in
 ranlux48)
@@ -419,6 +419,8 @@ AC_CONFIG_FILES(tests/hadrons/Makefile)
 AC_CONFIG_FILES(tests/hmc/Makefile)
 AC_CONFIG_FILES(tests/solver/Makefile)
 AC_CONFIG_FILES(tests/qdpxx/Makefile)
+AC_CONFIG_FILES(tests/smearing/Makefile)
+AC_CONFIG_FILES(tests/testu01/Makefile)
 AC_CONFIG_FILES(benchmarks/Makefile)
 AC_CONFIG_FILES(extras/Makefile)
 AC_CONFIG_FILES(extras/Hadrons/Makefile)
lib/Grid.h (60 lines changed)
@@ -38,60 +38,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_H
 #define GRID_H

-///////////////////
-// Std C++ dependencies
-///////////////////
-#include <cassert>
-#include <complex>
-#include <vector>
-#include <iostream>
-#include <iomanip>
-#include <random>
-#include <functional>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <signal.h>
-#include <ctime>
-#include <sys/time.h>
-#include <chrono>
-
-///////////////////
-// Grid headers
-///////////////////
-#include "Config.h"
-#include <Grid/DisableWarnings.h>
-#include <Grid/Timer.h>
-#include <Grid/PerfCount.h>
-#include <Grid/Log.h>
-#include <Grid/AlignedAllocator.h>
-#include <Grid/Simd.h>
-#include <Grid/serialisation/Serialisation.h>
-#include <Grid/Threads.h>
-#include <Grid/Lexicographic.h>
-#include <Grid/Init.h>
-#include <Grid/Communicator.h>
-#include <Grid/Cartesian.h>
-#include <Grid/Tensors.h>
-#include <Grid/Lattice.h>
-#include <Grid/Cshift.h>
-#include <Grid/Stencil.h>
-#include <Grid/Algorithms.h>
-
-#include <Grid/FFT.h>
-
-#include <Grid/qcd/QCD.h>
-#include <Grid/parallelIO/IldgIOtypes.h>
-#include <Grid/parallelIO/BinaryIO.h>
-#include <Grid/parallelIO/IldgIO.h>
-#include <Grid/parallelIO/NerscIO.h>
-
-#include <Grid/qcd/hmc/checkpointers/CheckPointers.h>
-#include <Grid/qcd/hmc/HMCModules.h>
-#include <Grid/qcd/modules/mods.h>
-#include <Grid/qcd/hmc/HMCResourceManager.h>
-#include <Grid/qcd/hmc/GenericHMCrunner.h>
-#include <Grid/qcd/hmc/HMCRunnerModule.h>
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/Action.h>
+#include <Grid/qcd/smearing/Smearing.h>
+#include <Grid/qcd/hmc/HMC_aggregate.h>


 #endif
lib/GridCore.h (new file, 81 lines)
@@ -0,0 +1,81 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid
+
+Source file: ./lib/Grid.h
+
+Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+//
+// Grid.h
+// simd
+//
+// Created by Peter Boyle on 09/05/2014.
+// Copyright (c) 2014 University of Edinburgh. All rights reserved.
+//

+#ifndef GRID_BASE_H
+#define GRID_BASE_H

+///////////////////
+// Std C++ dependencies
+///////////////////
+#include <cassert>
+#include <complex>
+#include <vector>
+#include <iostream>
+#include <iomanip>
+#include <random>
+#include <functional>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <ctime>
+#include <sys/time.h>
+#include <chrono>

+///////////////////
+// Grid headers
+///////////////////
+#include "Config.h"

+#include <Grid/perfmon/Timer.h>
+#include <Grid/perfmon/PerfCount.h>
+#include <Grid/log/Log.h>
+#include <Grid/allocator/AlignedAllocator.h>
+#include <Grid/simd/Simd.h>
+#include <Grid/serialisation/Serialisation.h>
+#include <Grid/threads/Threads.h>
+#include <Grid/util/Util.h>
+#include <Grid/communicator/Communicator.h>
+#include <Grid/cartesian/Cartesian.h>
+#include <Grid/tensors/Tensors.h>
+#include <Grid/lattice/Lattice.h>
+#include <Grid/cshift/Cshift.h>
+#include <Grid/stencil/Stencil.h>
+#include <Grid/parallelIO/BinaryIO.h>
+#include <Grid/algorithms/Algorithms.h>

+#endif
lib/qcd/hmc/HMC.cc renamed to lib/GridQCDcore.h
@@ -2,12 +2,12 @@

 Grid physics library, www.github.com/paboyle/Grid

-Source file: ./lib/qcd/hmc/HMC.cc
+Source file: ./lib/Grid.h

 Copyright (C) 2015

 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: neo <cossu@post.kek.jp>
+Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
 Author: paboyle <paboyle@ph.ed.ac.uk>

 This program is free software; you can redistribute it and/or modify
@@ -27,10 +27,16 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#ifndef GRID_QCD_CORE_H
+#define GRID_QCD_CORE_H

-namespace Grid{
-namespace QCD{
+/////////////////////////
+// Core Grid QCD headers
+/////////////////////////
+#include <Grid/GridCore.h>
+#include <Grid/qcd/QCD.h>
+#include <Grid/qcd/spin/Spin.h>
+#include <Grid/qcd/utils/Utils.h>
+#include <Grid/qcd/representations/Representations.h>

-}
-}
+#endif
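Taken together, the last three files split the old monolithic Grid.h into layers: GridCore.h holds the data-parallel core, GridQCDcore.h adds the QCD types on top of it, and Grid.h keeps its old meaning by stacking actions, smearing and HMC on top of both. A sketch of the client-side view, assuming only the headers shown in this diff:

    #include <Grid/GridCore.h>     // lattices, tensors, comms, cshift, stencils, algorithms
    #include <Grid/GridQCDcore.h>  // adds QCD: spin, colour, utils, representations
    #include <Grid/Grid.h>         // adds actions, smearing, HMC; includes both of the above

In practice a translation unit needs only the highest layer it uses, since each layer includes the ones below it.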
Binary file not shown.
@ -1,154 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/Old/Tensor_peek.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MATH_PEEK_H
#define GRID_MATH_PEEK_H
namespace Grid {

//////////////////////////////////////////////////////////////////////////////
// Peek on a specific index; returns a scalar in that index, tensor inherits rest
//////////////////////////////////////////////////////////////////////////////
// If we hit the right index, return scalar with no further recursion

//template<int Level> inline ComplexF peekIndex(const ComplexF arg) { return arg;}
//template<int Level> inline ComplexD peekIndex(const ComplexD arg) { return arg;}
//template<int Level> inline RealF peekIndex(const RealF arg) { return arg;}
//template<int Level> inline RealD peekIndex(const RealD arg) { return arg;}
#if 0
// Scalar peek, no indices
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
auto peekIndex(const iScalar<vtype> &arg) -> iScalar<vtype>
{
  return arg;
}
// Vector peek, one index
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
auto peekIndex(const iVector<vtype,N> &arg,int i) -> iScalar<vtype> // Index matches
{
  iScalar<vtype> ret;     // return scalar
  ret._internal = arg._internal[i];
  return ret;
}
// Matrix peek, two indices
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) -> iScalar<vtype>
{
  iScalar<vtype> ret;     // return scalar
  ret._internal = arg._internal[i][j];
  return ret;
}

/////////////
// No match peek for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
/////////////
// scalar
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iScalar<vtype> &arg) -> iScalar<decltype(peekIndex<Level>(arg._internal))>
{
  iScalar<decltype(peekIndex<Level>(arg._internal))> ret;
  ret._internal= peekIndex<Level>(arg._internal);
  return ret;
}
template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iScalar<vtype> &arg,int i) -> iScalar<decltype(peekIndex<Level>(arg._internal,i))>
{
  iScalar<decltype(peekIndex<Level>(arg._internal,i))> ret;
  ret._internal=peekIndex<Level>(arg._internal,i);
  return ret;
}
template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iScalar<vtype> &arg,int i,int j) -> iScalar<decltype(peekIndex<Level>(arg._internal,i,j))>
{
  iScalar<decltype(peekIndex<Level>(arg._internal,i,j))> ret;
  ret._internal=peekIndex<Level>(arg._internal,i,j);
  return ret;
}
// vector
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iVector<vtype,N> &arg) -> iVector<decltype(peekIndex<Level>(arg._internal[0])),N>
{
  iVector<decltype(peekIndex<Level>(arg._internal[0])),N> ret;
  for(int ii=0;ii<N;ii++){
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii]);
  }
  return ret;
}
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iVector<vtype,N> &arg,int i) -> iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N>
{
  iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N> ret;
  for(int ii=0;ii<N;ii++){
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i);
  }
  return ret;
}
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iVector<vtype,N> &arg,int i,int j) -> iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N>
{
  iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N> ret;
  for(int ii=0;ii<N;ii++){
    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i,j);
  }
  return ret;
}

// matrix
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iMatrix<vtype,N> &arg) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N>
{
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N> ret;
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj]);// Could avoid this because peeking a scalar is dumb
  }}
  return ret;
}
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iMatrix<vtype,N> &arg,int i) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N>
{
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N> ret;
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i);
  }}
  return ret;
}
template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N>
{
  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N> ret;
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i,j);
  }}
  return ret;
}
#endif

}
#endif
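The deleted header above hinges on enable_if dispatch over a compile-time TensorLevel: when the template Level matches the tensor's level the index is consumed and a scalar is returned at that level; otherwise the call forwards element-wise into the nested type. A minimal, self-contained sketch of that pattern, using toy types (Leaf, iVec, iScal) rather than Grid's actual iScalar/iVector headers:

// Toy illustration of the peekIndex recursion; not Grid's real types.
#include <cstdio>
#include <type_traits>

struct Leaf { double v; static constexpr int TensorLevel = 0; };

template<class vtype,int N> struct iVec {
  vtype e[N];
  static constexpr int TensorLevel = vtype::TensorLevel + 1;
};
template<class vtype> struct iScal {
  vtype e;
  static constexpr int TensorLevel = vtype::TensorLevel + 1;
};

// Level matches: consume the index, return a scalar at this level.
template<int Level,class vtype,int N,
         typename std::enable_if<iVec<vtype,N>::TensorLevel==Level>::type* = nullptr>
iScal<vtype> peekIndex(const iVec<vtype,N> &arg,int i){
  iScal<vtype> ret; ret.e = arg.e[i]; return ret;
}
// No match: recurse into the nested type, keeping this level's shape.
template<int Level,class vtype,int N,
         typename std::enable_if<iVec<vtype,N>::TensorLevel!=Level>::type* = nullptr>
auto peekIndex(const iVec<vtype,N> &arg,int i)
  -> iVec<decltype(peekIndex<Level>(arg.e[0],i)),N>
{
  iVec<decltype(peekIndex<Level>(arg.e[0],i)),N> ret;
  for(int ii=0;ii<N;ii++) ret.e[ii]=peekIndex<Level>(arg.e[ii],i);
  return ret;
}

int main(){
  iVec<iVec<Leaf,3>,2> t;                            // level-2 tensor of level-1 vectors
  for(int a=0;a<2;a++) for(int b=0;b<3;b++) t.e[a].e[b].v = 10*a+b;
  auto inner = peekIndex<1>(t,2);                    // peek index 2 at the inner level
  printf("%g %g\n", inner.e[0].e.v, inner.e[1].e.v); // prints 2 12
  return 0;
}

The outer call fails the Level test, forwards through both outer elements, and the inner call strips one index, yielding an iVec<iScal<Leaf>,2>; this is the "no match forwards, match terminates" shape the deleted 9-routine family implemented for scalar, vector and matrix.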
@ -1,127 +0,0 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/Old/Tensor_poke.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_MATH_POKE_H
#define GRID_MATH_POKE_H
namespace Grid {

//////////////////////////////////////////////////////////////////////////////
// Poke a specific index;
//////////////////////////////////////////////////////////////////////////////
#if 0
// Scalar poke
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
void pokeIndex(iScalar<vtype> &ret, const iScalar<vtype> &arg)
{
  ret._internal = arg._internal;
}
// Vector poke, one index
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
void pokeIndex(iVector<vtype,N> &ret, const iScalar<vtype> &arg,int i)
{
  ret._internal[i] = arg._internal;
}
//Matrix poke, two indices
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
void pokeIndex(iMatrix<vtype,N> &ret, const iScalar<vtype> &arg,int i,int j)
{
  ret._internal[i][j] = arg._internal;
}

/////////////
// No match poke for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
/////////////
// scalar
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal))> &arg)
{
  pokeIndex<Level>(ret._internal,arg._internal);
}
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0))> &arg, int i)
{
  pokeIndex<Level>(ret._internal,arg._internal,i);
}
template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0,0))> &arg,int i,int j)
{
  pokeIndex<Level>(ret._internal,arg._internal,i,j);
}

// Vector
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iVector<vtype,N> &ret, iVector<decltype(peekIndex<Level>(ret._internal)),N> &arg)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii]);
  }
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i);
  }
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg,int i,int j)
{
  for(int ii=0;ii<N;ii++){
    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i,j);
  }
}

// Matrix
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal)),N> &arg)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj]);
  }}
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i);
  }}
}
template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg, int i,int j)
{
  for(int ii=0;ii<N;ii++){
  for(int jj=0;jj<N;jj++){
    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i,j);
  }}
}
#endif

}
#endif
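Poke is the write-through inverse of peek: the matched level stores through one index, the unmatched level forwards into every element one level down. A hedged, self-contained sketch with the same toy types as before (the argument-type constraint via decltype(peekIndex...) in the real header is simplified here to a free template parameter):

// Toy illustration of the pokeIndex recursion; not Grid's real types.
#include <cassert>
#include <type_traits>

struct Leaf { double v; static constexpr int TensorLevel = 0; };
template<class vtype,int N> struct iVec {
  vtype e[N];
  static constexpr int TensorLevel = vtype::TensorLevel + 1;
};
template<class vtype> struct iScal {
  vtype e;
  static constexpr int TensorLevel = vtype::TensorLevel + 1;
};

// Level matches: poke the scalar into slot i.
template<int Level,class vtype,int N,
         typename std::enable_if<iVec<vtype,N>::TensorLevel==Level>::type* = nullptr>
void pokeIndex(iVec<vtype,N> &ret,const iScal<vtype> &arg,int i){
  ret.e[i] = arg.e;
}
// No match: forward the poke into every element one level down.
template<int Level,class vtype,class atype,int N,
         typename std::enable_if<iVec<vtype,N>::TensorLevel!=Level>::type* = nullptr>
void pokeIndex(iVec<vtype,N> &ret,const iVec<atype,N> &arg,int i){
  for(int ii=0;ii<N;ii++) pokeIndex<Level>(ret.e[ii],arg.e[ii],i);
}

int main(){
  iVec<iVec<Leaf,3>,2> t{};            // zero-initialised nested tensor
  iVec<iScal<Leaf>,2> slice;           // one inner component per outer element
  slice.e[0].e.v = 5.0; slice.e[1].e.v = 7.0;
  pokeIndex<1>(t,slice,2);             // write the slice into inner index 2
  assert(t.e[0].e[2].v==5.0 && t.e[1].e[2].v==7.0);
  return 0;
}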
@ -42,15 +42,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/algorithms/iterative/ConjugateResidual.h>
 #include <Grid/algorithms/iterative/NormalEquations.h>
 #include <Grid/algorithms/iterative/SchurRedBlack.h>
 
 #include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
 
 // Lanczos support
 #include <Grid/algorithms/iterative/MatrixUtils.h>
 #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
 
 #include <Grid/algorithms/CoarsenedMatrix.h>
+#include <Grid/algorithms/FFT.h>
 
 // Eigen/lanczos
 // EigCg
@ -267,8 +267,7 @@ namespace Grid {
     SimpleCompressor<siteVector> compressor;
     Stencil.HaloExchange(in,compressor);
 
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<Grid()->oSites();ss++){
+    parallel_for(int ss=0;ss<Grid()->oSites();ss++){
       siteVector res = zero;
       siteVector nbr;
       int ptype;
@ -380,8 +379,7 @@ PARALLEL_FOR_LOOP
       Subspace.ProjectToSubspace(oProj,oblock);
       //      blockProject(iProj,iblock,Subspace.subspace);
       //      blockProject(oProj,oblock,Subspace.subspace);
-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<Grid()->oSites();ss++){
+      parallel_for(int ss=0;ss<Grid()->oSites();ss++){
        for(int j=0;j<nbasis;j++){
          if( disp!= 0 ) {
            A[p]._odata[ss](j,i) = oProj._odata[ss](j);
@ -427,7 +425,7 @@ PARALLEL_FOR_LOOP
       A[p]=zero;
     }
 
-    GridParallelRNG  RNG(Grid()); RNG.SeedRandomDevice();
+    GridParallelRNG  RNG(Grid()); RNG.SeedFixedIntegers(std::vector<int>({55,72,19,17,34}));
     Lattice<iScalar<CComplex> > val(Grid()); random(RNG,val);
 
     Complex one(1.0);
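Two things change in this hunk: the two-line PARALLEL_FOR_LOOP-plus-for idiom collapses into a single parallel_for construct, and the coarse-grid RNG switches from a random device to fixed seed integers, which makes the coarsening reproducible across runs. A hedged sketch of how such a parallel_for macro can subsume the pragma-plus-loop pair (the macro definitions below are illustrative and assume OpenMP; they are not copied from Grid's Threads.h):

// Sketch: fusing an OpenMP pragma macro with the loop it decorates.
#include <cstdio>
#ifdef _OPENMP
#define PARALLEL_FOR_LOOP _Pragma("omp parallel for")
#else
#define PARALLEL_FOR_LOOP
#endif
#define parallel_for PARALLEL_FOR_LOOP for

int main(){
  const int oSites = 16;
  double res[oSites];
  // Old style: macro line immediately before the loop.
  PARALLEL_FOR_LOOP
  for(int ss=0;ss<oSites;ss++) res[ss]=0.0;
  // New style: one construct, same expansion.
  parallel_for(int ss=0;ss<oSites;ss++) res[ss]+=1.0;
  printf("res[0]=%f\n",res[0]);
  return 0;
}

Fusing the two removes a recurring source of bugs where the pragma line and the loop drift apart during edits.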
@ -25,7 +25,7 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
 
 namespace Grid {
 double MultiShiftFunction::approx(double x)
@ -1,7 +1,7 @@
 
 
 
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
 
 namespace Grid {
 
@ -13,9 +13,10 @@ void *PointerCache::Insert(void *ptr,size_t bytes) {
 
   if (bytes < 4096 ) return NULL;
 
-#ifdef _OPENMP
+#ifdef GRID_OMP
   assert(omp_in_parallel()==0);
 #endif
 
   void * ret = NULL;
   int v = -1;
@ -6,9 +6,9 @@
 
     Copyright (C) 2015
 
 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 Author: paboyle <paboyle@ph.ed.ac.uk>
 Author: Guido Cossu <guido.cossu@ed.ac.uk>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@ -53,7 +53,7 @@ public:
 
   // Physics Grid information.
   std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
-  std::vector<int> _fdimensions;// Global dimensions of array prior to cb removal
+  std::vector<int> _fdimensions;// (full) Global dimensions of array prior to cb removal
   std::vector<int> _gdimensions;// Global dimensions of array after cb removal
   std::vector<int> _ldimensions;// local dimensions of array with processor images removed
   std::vector<int> _rdimensions;// Reduced local dimensions with simd lane images and processor images removed
@ -122,7 +122,6 @@ public:
     Lexicographic::CoorFromIndex(coor,Oindex,_rdimensions);
   }
 
-
   inline void InOutCoorToLocalCoor (std::vector<int> &ocoor, std::vector<int> &icoor, std::vector<int> &lcoor) {
     lcoor.resize(_ndimension);
     for (int d = 0; d < _ndimension; d++)
@ -204,9 +203,11 @@ public:
   // Global addressing
   ////////////////////////////////////////////////////////////////
   void GlobalIndexToGlobalCoor(int gidx,std::vector<int> &gcoor){
+    assert(gidx< gSites());
     Lexicographic::CoorFromIndex(gcoor,gidx,_gdimensions);
   }
   void LocalIndexToLocalCoor(int lidx,std::vector<int> &lcoor){
+    assert(lidx<lSites());
     Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions);
   }
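The new asserts guard the lexicographic index-to-coordinate conversion against out-of-range indices. A small stand-alone sketch of the index/coordinate round trip these helpers perform (a stand-in for Grid's Lexicographic utilities, assuming dimension 0 runs fastest):

// Sketch: lexicographic index <-> coordinate maps with the new range checks.
#include <cassert>
#include <vector>

static void CoorFromIndex(std::vector<int> &coor,int index,const std::vector<int> &dims){
  coor.resize(dims.size());
  for(size_t d=0;d<dims.size();d++){ coor[d]=index%dims[d]; index/=dims[d]; }
}
static void IndexFromCoor(const std::vector<int> &coor,int &index,const std::vector<int> &dims){
  index=0;
  for(int d=(int)dims.size()-1;d>=0;d--) index = index*dims[d]+coor[d];
}

int main(){
  std::vector<int> dims={4,4,4,4}, coor;
  int gSites=4*4*4*4;
  for(int gidx=0; gidx<gSites; gidx++){
    assert(gidx<gSites);              // the check the patch adds before converting
    CoorFromIndex(coor,gidx,dims);
    int back; IndexFromCoor(coor,back,dims);
    assert(back==gidx);               // conversion round-trips exactly
  }
  return 0;
}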
@ -236,16 +237,16 @@ public:
     std::vector<int> lcoor;
     GlobalCoorToProcessorCoorLocalCoor(pcoor,lcoor,gcoor);
     rank = RankFromProcessorCoor(pcoor);
+/*
     std::vector<int> cblcoor(lcoor);
     for(int d=0;d<cblcoor.size();d++){
       if( this->CheckerBoarded(d) ) {
         cblcoor[d] = lcoor[d]/2;
       }
     }
+*/
-    i_idx= iIndex(cblcoor);// this does not imply divide by 2 on checker dim
-    o_idx= oIndex(lcoor);  // this implies divide by 2 on checkerdim
+    i_idx= iIndex(lcoor);
+    o_idx= oIndex(lcoor);
   }
 
   void RankIndexToGlobalCoor(int rank, int o_idx, int i_idx , std::vector<int> &gcoor)
@ -25,7 +25,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
 
 namespace Grid {
 
 ///////////////////////////////////////////////////////////////
@ -33,6 +34,7 @@ namespace Grid {
 ///////////////////////////////////////////////////////////////
 void * CartesianCommunicator::ShmCommBuf;
 uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024;
+CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
 
 /////////////////////////////////
 // Alloc, free shmem region
@ -88,7 +90,9 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
 
 #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L)
 
-void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+int CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};
+
+double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                        void *xmit,
                                                        int xmit_to_rank,
                                                        void *recv,
@ -96,6 +100,7 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
                                                        int bytes)
 {
   SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
+  return 2.0*bytes;
 }
 void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
 {
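The signature change from void to double lets callers accumulate the communication volume: each call reports 2.0*bytes, counting both the send and the receive. A purely illustrative sketch of how a caller can turn that return value into the MB/s uni/bidi figures the benchmarks print (the numbers below are made up):

// Sketch: converting the returned send+receive byte count into bandwidth.
#include <cstdio>

int main(){
  double bytes_per_call = 2.0*49152;  // e.g. 48 KB each way, as returned per call
  int    calls          = 1000;
  double elapsed_s      = 0.25;       // measured around the comms loop
  double bidi = bytes_per_call*calls/elapsed_s/1.0e6;
  printf("bidirectional %.1f MB/s, unidirectional %.1f MB/s\n", bidi, bidi/2.0);
  return 0;
}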
@ -116,6 +116,12 @@ class CartesianCommunicator {
   // Implemented in Communicator_base.C
   /////////////////////////////////
   static void * ShmCommBuf;
+
+  // Isend/Irecv/Wait, or Sendrecv blocking
+  enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
+  static CommunicatorPolicy_t CommunicatorPolicy;
+  static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }
+
   size_t heap_top;
   size_t heap_bytes;
 
@ -148,6 +154,7 @@ class CartesianCommunicator {
   const std::vector<int> & ThisProcessorCoor(void) ;
   const std::vector<int> & ProcessorGrid(void) ;
   int                      ProcessorCount(void) ;
+  int                      NodeCount(void) ;
 
   ////////////////////////////////////////////////////////////////////////////////
   // very VERY rarely (Log, serial RNG) we need world without a grid
@ -200,7 +207,7 @@ class CartesianCommunicator {
 
   void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
 
-  void StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+  double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                   void *xmit,
                                   int xmit_to_rank,
                                   void *recv,
@ -25,7 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/ActionCore.h>
 #include <mpi.h>
 
 namespace Grid {
@ -39,9 +41,13 @@ MPI_Comm CartesianCommunicator::communicator_world;
 // Should error check all MPI calls.
 void CartesianCommunicator::Init(int *argc, char ***argv) {
   int flag;
+  int provided;
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
-    MPI_Init(argc,argv);
+    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
+    if ( provided != MPI_THREAD_MULTIPLE ) {
+      QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute;
+    }
+  }
   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
   ShmInitGeneric();
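The initialisation now requests MPI_THREAD_MULTIPLE and degrades gracefully when the MPI library grants less, here by switching the Wilson kernels to a comms-then-compute schedule instead of overlapping comms from threads. A minimal stand-alone sketch of the same init pattern (compile with mpicxx; the fallback action below is a placeholder print, not Grid's):

// Sketch: request full thread support, fall back if the library grants less.
#include <mpi.h>
#include <cstdio>

int main(int argc,char **argv){
  int flag=0, provided=0;
  MPI_Initialized(&flag);                 // coexist with other libraries
  if(!flag){
    MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
    if(provided!=MPI_THREAD_MULTIPLE){
      // cannot drive comms from multiple threads; disable the overlap path
      printf("MPI granted thread level %d; using comms-then-compute\n",provided);
    }
  }
  MPI_Comm world; MPI_Comm_dup(MPI_COMM_WORLD,&world);
  int rank; MPI_Comm_rank(world,&rank);
  if(rank==0) printf("Initialised MPI\n");
  MPI_Finalize();
  return 0;
}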
@ -152,24 +158,34 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
                                                int from,
                                                int bytes)
 {
+  int myrank = _processor;
+  int ierr;
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
   MPI_Request xrq;
   MPI_Request rrq;
-  int rank = _processor;
-  int ierr;
-  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
-  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
 
   assert(ierr==0);
 
   list.push_back(xrq);
   list.push_back(rrq);
+  } else {
+    // Give the CPU to MPI immediately; can use threads to overlap optionally
+    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
+                      recv,bytes,MPI_CHAR,from, from,
+                      communicator,MPI_STATUS_IGNORE);
+    assert(ierr==0);
+  }
 }
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
   int nreq=list.size();
   std::vector<MPI_Status> status(nreq);
   int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
   assert(ierr==0);
+  }
 }
 
 void CartesianCommunicator::Barrier(void)
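The two policies introduced here trade off differently: Concurrent posts nonblocking Isend/Irecv pairs and completes them later with Waitall, allowing overlap; Sequential hands the CPU to MPI immediately via a blocking Sendrecv, which some MPI stacks progress better. A minimal ring-exchange sketch of both paths, assuming an MPI environment (the function and names are illustrative, not Grid's API):

// Sketch: the Concurrent vs Sequential exchange paths side by side.
#include <mpi.h>
#include <vector>
#include <cassert>

enum Policy { Concurrent, Sequential };

void exchange(MPI_Comm comm,Policy policy,char *xmit,char *recv,
              int dest,int from,int bytes,int me){
  if(policy==Concurrent){
    MPI_Request xrq,rrq;
    int ierr = MPI_Irecv(recv,bytes,MPI_CHAR,from,from,comm,&rrq);
    ierr    |= MPI_Isend(xmit,bytes,MPI_CHAR,dest,me,  comm,&xrq);
    assert(ierr==0);
    MPI_Request reqs[2]={xrq,rrq};
    MPI_Waitall(2,reqs,MPI_STATUSES_IGNORE);   // completion deferred in real code
  } else {
    int ierr = MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,me,
                            recv,bytes,MPI_CHAR,from,from,
                            comm,MPI_STATUS_IGNORE); // blocks, MPI progresses now
    assert(ierr==0);
  }
}

int main(int argc,char **argv){
  MPI_Init(&argc,&argv);
  int rank,size;
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);
  std::vector<char> out(64,(char)rank), in(64,0);
  int dest=(rank+1)%size, from=(rank+size-1)%size;
  exchange(MPI_COMM_WORLD,Sequential,out.data(),in.data(),dest,from,64,rank);
  assert(in[0]==(char)from);                   // received the neighbour's payload
  MPI_Finalize();
  return 0;
}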
@ -1,4 +1,4 @@
 /*************************************************************************************
 
 Grid physics library, www.github.com/paboyle/Grid
 
@ -25,9 +25,23 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
 
 #include <mpi.h>
 
+#include <semaphore.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/mman.h>
+//#include <zlib.h>
+#ifndef SHM_HUGETLB
+#define SHM_HUGETLB 04000
+#endif
 
 namespace Grid {
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
@ -50,6 +64,10 @@ std::vector<int> CartesianCommunicator::GroupRanks;
 std::vector<int> CartesianCommunicator::MyGroup;
 std::vector<void *> CartesianCommunicator::ShmCommBufs;
 
+int CartesianCommunicator::NodeCount(void)    { return GroupSize;};
+
+
+#undef FORCE_COMMS
 void *CartesianCommunicator::ShmBufferSelf(void)
 {
   return ShmCommBufs[ShmRank];
@ -57,6 +75,9 @@ void *CartesianCommunicator::ShmBufferSelf(void)
 void *CartesianCommunicator::ShmBuffer(int rank)
 {
   int gpeer = GroupRanks[rank];
+#ifdef FORCE_COMMS
+  return NULL;
+#endif
   if (gpeer == MPI_UNDEFINED){
     return NULL;
   } else {
@ -65,7 +86,13 @@ void *CartesianCommunicator::ShmBuffer(int rank)
 }
 void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
 {
+  static int count =0;
   int gpeer = GroupRanks[rank];
+  assert(gpeer!=ShmRank); // never send to self
+  assert(rank!=WorldRank);// never send to self
+#ifdef FORCE_COMMS
+  return NULL;
+#endif
   if (gpeer == MPI_UNDEFINED){
     return NULL;
   } else {
@ -76,16 +103,27 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
 }
 
 void CartesianCommunicator::Init(int *argc, char ***argv) {
 
   int flag;
+  int provided;
+  //  mtrace();
 
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
-    MPI_Init(argc,argv);
+    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
+    assert (provided == MPI_THREAD_MULTIPLE);
   }
 
+  Grid_quiesce_nodes();
+
   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
   MPI_Comm_rank(communicator_world,&WorldRank);
   MPI_Comm_size(communicator_world,&WorldSize);
 
+  if ( WorldRank == 0 ) {
+    std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"<<WorldSize <<std::endl;
+  }
+
   /////////////////////////////////////////////////////////////////////
   // Split into groups that can share memory
   /////////////////////////////////////////////////////////////////////
@ -131,7 +169,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ///////////////////////////////////////////////////////////////////
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world);
   assert(ierr==0);
-
   ///////////////////////////////////////////////////////////////////
   // find the group leaders world rank
   ///////////////////////////////////////////////////////////////////
@ -141,7 +178,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
       leaders_group[group++] = l;
     }
   }
-
   ///////////////////////////////////////////////////////////////////
   // Identify the rank of the group in which I (and my leader) live
   ///////////////////////////////////////////////////////////////////
@ -152,38 +188,113 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     }
   }
   assert(GroupRank!=-1);
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
   // allocate the shared window for our group
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
+  MPI_Barrier(ShmComm);
+
   ShmCommBuf = 0;
-  ierr = MPI_Win_allocate_shared(MAX_MPI_SHM_BYTES,1,MPI_INFO_NULL,ShmComm,&ShmCommBuf,&ShmWindow);
-  assert(ierr==0);
-  // KNL hack -- force to numa-domain 1 in flat
-#if 0
-  //#include <numaif.h>
-  for(uint64_t page=0;page<MAX_MPI_SHM_BYTES;page+=4096){
-    void *pages = (void *) ( page + ShmCommBuf );
-    int status;
-    int flags=MPOL_MF_MOVE_ALL;
-    int nodes=1; // numa domain == MCDRAM
-    unsigned long count=1;
-    ierr= move_pages(0,count, &pages,&nodes,&status,flags);
-    if (ierr && (page==0)) perror("numa relocate command failed");
-  }
-#endif
-  MPI_Win_lock_all (MPI_MODE_NOCHECK, ShmWindow);
-
-  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-  // Plan: allocate a fixed SHM region. Scratch that is just used via some scheme during stencil comms, with no allocate free.
-  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
   ShmCommBufs.resize(ShmSize);
+
+#if 1
+  char shm_name [NAME_MAX];
+  if ( ShmRank == 0 ) {
   for(int r=0;r<ShmSize;r++){
-    MPI_Aint sz;
-    int dsp_unit;
-    MPI_Win_shared_query (ShmWindow, r, &sz, &dsp_unit, &ShmCommBufs[r]);
+    size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
+    sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
+
+    shm_unlink(shm_name);
+    int fd=shm_open(shm_name,O_RDWR|O_CREAT,0666);
+    if ( fd < 0 ) { perror("failed shm_open"); assert(0); }
+    ftruncate(fd, size);
+
+    void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if ( ptr == MAP_FAILED ) { perror("failed mmap"); assert(0); }
+    assert(((uint64_t)ptr&0x3F)==0);
+    ShmCommBufs[r] =ptr;
   }
+  }
+
+  MPI_Barrier(ShmComm);
+
+  if ( ShmRank != 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES ;
+      sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
+
+      int fd=shm_open(shm_name,O_RDWR,0666);
+      if ( fd<0 ) { perror("failed shm_open"); assert(0); }
+
+      void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+      if ( ptr == MAP_FAILED ) { perror("failed mmap"); assert(0); }
+      assert(((uint64_t)ptr&0x3F)==0);
+      ShmCommBufs[r] =ptr;
+    }
+  }
+
+#else
+  std::vector<int> shmids(ShmSize);
+
+  if ( ShmRank == 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
+      key_t key   = 0x4545 + r;
+      if ((shmids[r]= shmget(key,size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
+        int errsv = errno;
+        printf("Errno %d\n",errsv);
+        perror("shmget");
+        exit(1);
+      }
+      printf("shmid: 0x%x\n", shmids[r]);
+    }
+  }
+  MPI_Barrier(ShmComm);
+  MPI_Bcast(&shmids[0],ShmSize*sizeof(int),MPI_BYTE,0,ShmComm);
+  MPI_Barrier(ShmComm);
+
+  for(int r=0;r<ShmSize;r++){
+    ShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
+    if (ShmCommBufs[r] == (uint64_t *)-1) {
+      perror("Shared memory attach failure");
+      shmctl(shmids[r], IPC_RMID, NULL);
+      exit(2);
+    }
+    printf("shmaddr: %p\n", ShmCommBufs[r]);
+  }
+  MPI_Barrier(ShmComm);
+  // Mark for clean up
+  for(int r=0;r<ShmSize;r++){
+    shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
+  }
+  MPI_Barrier(ShmComm);
+
+#endif
+  ShmCommBuf         = ShmCommBufs[ShmRank];
+
+  MPI_Barrier(ShmComm);
+  if ( ShmRank == 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      uint64_t * check = (uint64_t *) ShmCommBufs[r];
+      check[0] = GroupRank;
+      check[1] = r;
+      check[2] = 0x5A5A5A;
+    }
+  }
+
+  MPI_Barrier(ShmComm);
+  for(int r=0;r<ShmSize;r++){
+    uint64_t * check = (uint64_t *) ShmCommBufs[r];
+
+    assert(check[0]==GroupRank);
+    assert(check[1]==r);
+    assert(check[2]==0x5A5A5A);
+
+  }
+  MPI_Barrier(ShmComm);
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Verbose for now
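This hunk replaces MPI_Win_allocate_shared with explicit POSIX shared memory: rank 0 on each node creates and maps one named region per local rank, the other ranks re-open the same names, and the check[0..2] handshake verifies every rank sees every buffer. A stand-alone single-process sketch of the shm_open/ftruncate/mmap sequence (POSIX only; older Linux may need -lrt; the segment name is illustrative):

// Sketch: create, size, and map a named POSIX shared memory segment.
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main(){
  const char *name="/grid_shm_demo";      // hypothetical name, not Grid's
  size_t size=1<<20;
  shm_unlink(name);                       // stale segments would confuse reopen
  int fd=shm_open(name,O_RDWR|O_CREAT,0666);
  if(fd<0){ perror("shm_open"); return 1; }
  ftruncate(fd,size);                     // size the fresh segment
  void *ptr=mmap(NULL,size,PROT_READ|PROT_WRITE,MAP_SHARED,fd,0);
  if(ptr==MAP_FAILED){ perror("mmap"); return 1; }
  assert(((uint64_t)ptr & 0x3F)==0);      // page alignment implies cache-line alignment
  strcpy((char*)ptr,"hello");             // visible to any process mapping the same name
  printf("%s\n",(char*)ptr);
  munmap(ptr,size);
  shm_unlink(name);
  return 0;
}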
@ -192,7 +303,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     std::cout<<GridLogMessage<< "Grid MPI-3 configuration: detected ";
     std::cout<< WorldSize << " Ranks " ;
     std::cout<< GroupSize << " Nodes " ;
-    std::cout<< ShmSize << " with ranks-per-node "<<std::endl;
+    std::cout<<  " with "<< ShmSize << " ranks-per-node "<<std::endl;
 
     std::cout<<GridLogMessage <<"Grid MPI-3 configuration: allocated shared memory region of size ";
     std::cout<<std::hex << MAX_MPI_SHM_BYTES <<" ShmCommBuf address = "<<ShmCommBuf << std::dec<<std::endl;
@ -207,7 +318,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
       if(g!=ShmSize-1) std::cout<<",";
       else std::cout<<"}"<<std::endl;
     }
-
   }
 
   for(int g=0;g<GroupSize;g++){
@ -216,7 +326,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     if ( (ShmRank == 0) && (GroupRank==g) ) {
       std::cout<<MyGroup[r];
       if(r<ShmSize-1) std::cout<<",";
-      else std::cout<<"}"<<std::endl;
+      else std::cout<<"}"<<std::endl<<std::flush;
     }
     MPI_Barrier(communicator_world);
   }
@ -225,14 +335,12 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   assert(ShmSetup==0);  ShmSetup=1;
 }
 
-
-
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Want to implement some magic ... Group sub-cubes into those on same node
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
-void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
+void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &dest,int &source)
 {
-  std::vector<int> coor = _processor_coor;
+  std::vector<int> coor = _processor_coor;  // my coord
   assert(std::abs(shift) <_processors[dim]);
 
   coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim];
@ -242,26 +350,30 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
   coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim];
   Lexicographic::IndexFromCoor(coor,dest,_processors);
   dest = LexicographicToWorldRank[dest];
-}
+}// rank is world rank.
 
 int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
 {
   int rank;
   Lexicographic::IndexFromCoor(coor,rank,_processors);
   rank = LexicographicToWorldRank[rank];
   return rank;
-}
+}// rank is world rank
 
 void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
 {
-  Lexicographic::CoorFromIndex(coor,rank,_processors);
-  rank = LexicographicToWorldRank[rank];
+  int lr=-1;
+  for(int r=0;r<WorldSize;r++){// map world Rank to lexico and then to coor
+    if( LexicographicToWorldRank[r]==rank) lr = r;
+  }
+  assert(lr!=-1);
+  Lexicographic::CoorFromIndex(coor,lr,_processors);
 }
 
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   int ierr;
   communicator=communicator_world;
 
   _ndimension = processors.size();
 
   ////////////////////////////////////////////////////////////////
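The ProcessorCoorFromRank fix matters because MPI world ranks are no longer assumed to be in lexicographic order once ranks are grouped by node: the coordinate lookup must first invert the LexicographicToWorldRank table. A small stand-alone illustration of the inversion, with a deliberately shuffled (hypothetical) map:

// Sketch: world rank -> lexico index -> processor coordinate.
#include <cassert>
#include <vector>

void CoorFromIndex(std::vector<int> &coor,int index,const std::vector<int> &dims){
  coor.resize(dims.size());
  for(size_t d=0;d<dims.size();d++){ coor[d]=index%dims[d]; index/=dims[d]; }
}

std::vector<int> ProcessorCoorFromRank(int rank,const std::vector<int> &procs,
                                       const std::vector<int> &LexicographicToWorldRank){
  int lr=-1;
  for(size_t r=0;r<LexicographicToWorldRank.size();r++)
    if(LexicographicToWorldRank[r]==rank) lr=(int)r;   // invert the map
  assert(lr!=-1);
  std::vector<int> coor;
  CoorFromIndex(coor,lr,procs);
  return coor;
}

int main(){
  std::vector<int> procs={2,2};
  std::vector<int> lex2world={3,1,0,2};    // hypothetical node-grouped layout
  std::vector<int> c=ProcessorCoorFromRank(0,procs,lex2world); // world rank 0 is lexico 2
  assert(c[0]==0 && c[1]==1);
  return 0;
}

The old code treated the world rank itself as a lexicographic index, which is exactly the assumption node grouping breaks.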
@ -280,19 +392,17 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
   // Identify subblock of ranks on node spreading across dims
   // in a maximally symmetrical way
   ////////////////////////////////////////////////////////////////
-  int dim = 0;
-
   std::vector<int> WorldDims = processors;
 
-  ShmDims.resize(_ndimension,1);
+  ShmDims.resize  (_ndimension,1);
   GroupDims.resize(_ndimension);
-  ShmCoor.resize(_ndimension);
+  ShmCoor.resize  (_ndimension);
   GroupCoor.resize(_ndimension);
   WorldCoor.resize(_ndimension);
 
+  int dim = 0;
   for(int l2=0;l2<log2size;l2++){
-    while ( WorldDims[dim] / ShmDims[dim] <= 1 ) dim=(dim+1)%_ndimension;
+    while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%_ndimension;
     ShmDims[dim]*=2;
     dim=(dim+1)%_ndimension;
  }
@ -304,6 +414,29 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
     GroupDims[d] = WorldDims[d]/ShmDims[d];
   }
 
+  ////////////////////////////////////////////////////////////////
+  // Verbose
+  ////////////////////////////////////////////////////////////////
+#if 0
+  std::cout<< GridLogMessage << "MPI-3 usage "<<std::endl;
+  std::cout<< GridLogMessage << "SHM   ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< ShmDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+
+  std::cout<< GridLogMessage << "Group ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< GroupDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+
+  std::cout<< GridLogMessage<<"World ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< WorldDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+#endif
   ////////////////////////////////////////////////////////////////
   // Check processor counts match
   ////////////////////////////////////////////////////////////////
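The splitting loop distributes the power-of-two ranks-per-node factor across lattice dimensions in round robin, skipping any dimension that can no longer be halved, so the on-node sub-block stays as cubic as possible. A runnable trace of the same loop on a concrete (made-up) layout:

// Sketch: round-robin factor-of-2 split of WorldDims into GroupDims x ShmDims.
#include <cstdio>
#include <vector>

int main(){
  std::vector<int> WorldDims={4,4,8,8};
  int log2size=3;                              // 8 ranks per node
  int nd=(int)WorldDims.size();
  std::vector<int> ShmDims(nd,1), GroupDims(nd);
  int dim=0;
  for(int l2=0;l2<log2size;l2++){
    while((WorldDims[dim]/ShmDims[dim])<=1) dim=(dim+1)%nd;  // skip exhausted dims
    ShmDims[dim]*=2;                           // give this dimension one factor of 2
    dim=(dim+1)%nd;
  }
  for(int d=0;d<nd;d++) GroupDims[d]=WorldDims[d]/ShmDims[d];
  for(int d=0;d<nd;d++)
    printf("dim %d: world %d = group %d x shm %d\n",
           d,WorldDims[d],GroupDims[d],ShmDims[d]);
  // Yields ShmDims {2,2,2,1}: the node block spreads over three dimensions.
  return 0;
}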
@ -317,29 +450,57 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 
   ////////////////////////////////////////////////////////////////
   // Establish mapping between lexico physics coord and WorldRank
-  //
   ////////////////////////////////////////////////////////////////
-  LexicographicToWorldRank.resize(WorldSize,0);
   Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims);
   Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims);
   for(int d=0;d<_ndimension;d++){
     WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d];
   }
   _processor_coor = WorldCoor;
-
-  int lexico;
-  Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
-  LexicographicToWorldRank[lexico]=WorldRank;
-  _processor = lexico;
+  _processor      = WorldRank;
 
   ///////////////////////////////////////////////////////////////////
   // global sum Lexico to World mapping
   ///////////////////////////////////////////////////////////////////
+  int lexico;
+  LexicographicToWorldRank.resize(WorldSize,0);
+  Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
+  LexicographicToWorldRank[lexico] = WorldRank;
   ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator);
   assert(ierr==0);
 
-};
+  for(int i=0;i<WorldSize;i++){
+
+    int wr = LexicographicToWorldRank[i];
+    //    int wr = i;
+
+    std::vector<int> coor(_ndimension);
+    ProcessorCoorFromRank(wr,coor); // from world rank
+    int ck = RankFromProcessorCoor(coor);
+    assert(ck==wr);
+
+    if ( wr == WorldRank ) {
+      for(int j=0;j<coor.size();j++) {
+        assert(coor[j] == _processor_coor[j]);
+      }
+    }
+    /*
+    std::cout << GridLogMessage<< " Lexicographic "<<i;
+    std::cout << " MPI rank      "<<wr;
+    std::cout << " Coor          ";
+    for(int j=0;j<coor.size();j++) std::cout << coor[j];
+    std::cout<< std::endl;
+    */
+    /////////////////////////////////////////////////////
+    // Check everyone agrees on everyone elses coords
+    /////////////////////////////////////////////////////
+    std::vector<int> mcoor = coor;
+    this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int));
+    for(int d = 0 ; d< _ndimension; d++) {
+      assert(coor[d] == mcoor[d]);
+    }
+  }
+};
 void CartesianCommunicator::GlobalSum(uint32_t &u){
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
   assert(ierr==0);
@ -367,8 +528,6 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
|
|||||||
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
|
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
|
||||||
assert(ierr==0);
|
assert(ierr==0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
 // Basic Halo comms primitive
 void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                            int dest,
@@ -377,10 +536,14 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                            int bytes)
 {
   std::vector<CommsRequest_t> reqs(0);
+  // unsigned long xcrc = crc32(0L, Z_NULL, 0);
+  // unsigned long rcrc = crc32(0L, Z_NULL, 0);
+  // xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
   SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
   SendToRecvFromComplete(reqs);
+  // rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
+  // printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
 }

 void CartesianCommunicator::SendRecvPacket(void *xmit,
                                            void *recv,
                                            int sender,
@@ -397,7 +560,6 @@ void CartesianCommunicator::SendRecvPacket(void *xmit,
     MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
   }
 }

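The commented-out crc32 calls above sketch a useful debugging pattern: checksum the payload on both ends of the exchange and compare across ranks. A minimal helper along those lines, assuming zlib is linked (crc32 is zlib's standard incremental interface):

#include <zlib.h>

// Accumulate a CRC over a message buffer; call on xmit before the send and
// on recv after the wait, then print or compare the two values.
static unsigned long buffer_crc(const void *buf, int bytes) {
  unsigned long crc = crc32(0L, Z_NULL, 0);  // zlib's canonical seed
  return crc32(crc, (const unsigned char *)buf, (unsigned int)bytes);
}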
 // Basic Halo comms primitive
 void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                 void *xmit,
@@ -406,95 +568,29 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
                                                 int from,
                                                 int bytes)
 {
-#if 0
-  this->StencilBarrier();
+  int myrank = _processor;
+  int ierr;
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
     MPI_Request xrq;
     MPI_Request rrq;
-  static int sequence;
-  int ierr;
-  int tag;
-  int check;
-
-  assert(dest != _processor);
-  assert(from != _processor);
-
-  int gdest = GroupRanks[dest];
-  int gfrom = GroupRanks[from];
-  int gme   = GroupRanks[_processor];
-
-  sequence++;
-
-  char *from_ptr = (char *)ShmCommBufs[ShmRank];
-
-  int small = (bytes<MAX_MPI_SHM_BYTES);
-
-  typedef uint64_t T;
-  int words = bytes/sizeof(T);
-
-  assert(((size_t)bytes &(sizeof(T)-1))==0);
-  assert(gme == ShmRank);
-
-  if ( small && (gdest !=MPI_UNDEFINED) ) {
-
-    char *to_ptr = (char *)ShmCommBufs[gdest];
-
-    assert(gme != gdest);
-
-    T *ip = (T *)xmit;
-    T *op = (T *)to_ptr;
-PARALLEL_FOR_LOOP
-    for(int w=0;w<words;w++) {
-      op[w]=ip[w];
-    }
-
-    bcopy(&_processor,&to_ptr[bytes],sizeof(_processor));
-    bcopy(& sequence,&to_ptr[bytes+4],sizeof(sequence));
-  } else {
-    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
+    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
     assert(ierr==0);
     list.push_back(xrq);
-  }
-
-  this->StencilBarrier();
-
-  if (small && (gfrom !=MPI_UNDEFINED) ) {
-    T *ip = (T *)from_ptr;
-    T *op = (T *)recv;
-PARALLEL_FOR_LOOP
-    for(int w=0;w<words;w++) {
-      op[w]=ip[w];
-    }
-    bcopy(&from_ptr[bytes] ,&tag  ,sizeof(tag));
-    bcopy(&from_ptr[bytes+4],&check,sizeof(check));
-    assert(check==sequence);
-    assert(tag==from);
+    list.push_back(rrq);
   } else {
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-    assert(ierr==0);
-    list.push_back(rrq);
+    // Give the CPU to MPI immediately; can use threads to overlap optionally
+    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
+                      recv,bytes,MPI_CHAR,from, from,
+                      communicator,MPI_STATUS_IGNORE);
+    assert(ierr==0);
   }
-
-  this->StencilBarrier();
-
-#else
-  MPI_Request xrq;
-  MPI_Request rrq;
-  int rank = _processor;
-  int ierr;
-  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
-  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-
-  assert(ierr==0);
-
-  list.push_back(xrq);
-  list.push_back(rrq);
-#endif
 }

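The two branches above are the standard trade-off for halo exchange: post nonblocking pairs and wait later (overlap-friendly), or hand the CPU to MPI at once with a single blocking Sendrecv (often faster when the library drives the wire itself). A self-contained ring-exchange sketch of both paths; the "concurrent" flag is an illustrative stand-in for Grid's CommunicatorPolicy:

#include <mpi.h>
#include <vector>
#include <cassert>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  int dest = (rank + 1) % size;          // downstream neighbour
  int from = (rank + size - 1) % size;   // upstream neighbour
  std::vector<char> xmit(1024, (char)rank), recv(1024, 0);
  bool concurrent = true;                // flip to exercise the sequential path
  if (concurrent) {
    MPI_Request reqs[2];
    MPI_Irecv(&recv[0], (int)recv.size(), MPI_CHAR, from, from, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&xmit[0], (int)xmit.size(), MPI_CHAR, dest, rank, MPI_COMM_WORLD, &reqs[1]);
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);  // overlap window lives between post and wait
  } else {
    MPI_Sendrecv(&xmit[0], (int)xmit.size(), MPI_CHAR, dest, rank,
                 &recv[0], (int)recv.size(), MPI_CHAR, from, from,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }
  assert(recv[0] == (char)from);  // payload came from the upstream neighbour
  MPI_Finalize();
  return 0;
}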
-void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                        void *xmit,
                                                        int dest,
                                                        void *recv,
@@ -505,57 +601,63 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
   MPI_Request rrq;

   int ierr;

-  assert(dest != _processor);
-  assert(from != _processor);

   int gdest = GroupRanks[dest];
   int gfrom = GroupRanks[from];
   int gme   = GroupRanks[_processor];

+  assert(dest != _processor);
+  assert(from != _processor);
   assert(gme == ShmRank);
+  double off_node_bytes=0.0;

+#ifdef FORCE_COMMS
+  gdest = MPI_UNDEFINED;
+  gfrom = MPI_UNDEFINED;
+#endif
+  if ( gfrom ==MPI_UNDEFINED) {
+    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    assert(ierr==0);
+    list.push_back(rrq);
+    off_node_bytes+=bytes;
+  }

   if ( gdest == MPI_UNDEFINED ) {
     ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
     assert(ierr==0);
     list.push_back(xrq);
+    off_node_bytes+=bytes;
   }

-  if ( gfrom ==MPI_UNDEFINED) {
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-    assert(ierr==0);
-    list.push_back(rrq);
+  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
+    this->StencilSendToRecvFromComplete(list);
   }

+  return off_node_bytes;
 }

-void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
+void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
 {
-  SendToRecvFromComplete(list);
+  SendToRecvFromComplete(waitall);
 }

 void CartesianCommunicator::StencilBarrier(void)
 {
-  MPI_Win_sync (ShmWindow);
   MPI_Barrier  (ShmComm);
-  MPI_Win_sync (ShmWindow);
 }

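Returning off_node_bytes lets callers separate wire traffic from intra-node shared-memory traffic when reporting bandwidth. A hedged sketch of the accounting a benchmark might do with that return value; the callable parameters are placeholders for the Begin/Complete pair above, not Grid API:

#include <chrono>
#include <cstdio>

// begin() posts the exchange and returns the bytes that really left the node,
// mirroring the new double return of StencilSendToRecvFromBegin; complete()
// waits on the request list.
template <class Begin, class Complete>
double timed_exchange_MBps(Begin begin, Complete complete) {
  auto t0 = std::chrono::high_resolution_clock::now();
  double off_node_bytes = begin();
  complete();
  auto t1 = std::chrono::high_resolution_clock::now();
  double usec = std::chrono::duration<double, std::micro>(t1 - t0).count();
  return off_node_bytes / usec;  // bytes per microsecond == MB/s
}

int main(void) {
  // Dummy stand-ins; a real caller would wrap the Stencil begin/complete pair.
  double MBps = timed_exchange_MBps([]() { return 1.0e6; }, []() {});
  printf("%g MB/s (dummy timing)\n", MBps);
  return 0;
}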
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
   int nreq=list.size();

+  if (nreq==0) return;

   std::vector<MPI_Status> status(nreq);
   int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
   assert(ierr==0);
+  list.resize(0);
 }

 void CartesianCommunicator::Barrier(void)
 {
   int ierr = MPI_Barrier(communicator);
   assert(ierr==0);
 }

 void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
 {
   int ierr=MPI_Bcast(data,
@@ -565,7 +667,11 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
                      communicator);
   assert(ierr==0);
 }
+int CartesianCommunicator::RankWorld(void){
+  int r;
+  MPI_Comm_rank(communicator_world,&r);
+  return r;
+}
 void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
 {
   int ierr= MPI_Bcast(data,

@@ -27,6 +27,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 #include "Grid.h"
 #include <mpi.h>
+//#include <numaif.h>

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 /// Workarounds:
@@ -42,19 +43,27 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <fcntl.h>
 #include <unistd.h>
 #include <limits.h>

 typedef sem_t *Grid_semaphore;

+#error /*This is deprecated*/
+
+#if 0
 #define SEM_INIT(S)      S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED );
 #define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED );
 #define SEM_POST(S) assert ( sem_post(S) == 0 );
 #define SEM_WAIT(S) assert ( sem_wait(S) == 0 );
+#else
+#define SEM_INIT(S) ;
+#define SEM_INIT_EXCL(S) ;
+#define SEM_POST(S) ;
+#define SEM_WAIT(S) ;
+#endif

 #include <sys/mman.h>

 namespace Grid {

-enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL };
+enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV };

 struct Descriptor {
   uint64_t buf;
@@ -62,6 +71,12 @@ struct Descriptor {
   int rank;
   int tag;
   int command;
+  uint64_t xbuf;
+  uint64_t rbuf;
+  int xtag;
+  int rtag;
+  int src;
+  int dest;
   MPI_Request request;
 };

@@ -94,18 +109,14 @@ public:

   void SemInit(void) {
     sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
-    // printf("SEM_NAME: %s \n",sem_name);
     SEM_INIT(sem_head);
     sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
-    // printf("SEM_NAME: %s \n",sem_name);
     SEM_INIT(sem_tail);
   }
   void SemInitExcl(void) {
     sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
-    // printf("SEM_INIT_EXCL: %s \n",sem_name);
     SEM_INIT_EXCL(sem_head);
     sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
-    // printf("SEM_INIT_EXCL: %s \n",sem_name);
     SEM_INIT_EXCL(sem_tail);
   }
   void WakeUpDMA(void) {
@@ -125,6 +136,13 @@ public:
     while(1){
       WaitForCommand();
       // std::cout << "Getting command "<<std::endl;
+#if 0
+      _mm_monitor((void *)&state->head,0,0);
+      int s=state->start;
+      if ( s != state->head ) {
+        _mm_mwait(0,0);
+      }
+#endif
       Event();
     }
   }

@@ -132,6 +150,7 @@ public:
   int Event (void) ;

   uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ;
+  void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ;

   void WaitAll() {
     // std::cout << "Queueing WAIT command  "<<std::endl;
@@ -141,7 +160,7 @@ public:
     // std::cout << "Waiting from semaphore "<<std::endl;
     WaitForComplete();
     // std::cout << "Checking FIFO is empty "<<std::endl;
-    assert ( state->tail == state->head );
+    while ( state->tail != state->head );
   }
 };

@@ -196,6 +215,12 @@ public:
     // std::cout << "Waking up DMA "<< slave<<std::endl;
   };

+  static void QueueSendRecv(int slave,void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src)
+  {
+    Slaves[slave].QueueSendRecv(xbuf,rbuf,bytes,xtag,rtag,comm,dest,src);
+    Slaves[slave].WakeUpDMA();
+  }
+
   static void QueueRecv(int slave, void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
     // std::cout<< " Queueing recv "<< bytes<< " slave "<< slave << " from comm "<<rank <<std::endl;
     Slaves[slave].QueueCommand(COMMAND_IRECV,buf,bytes,tag,comm,rank);
@@ -226,6 +251,28 @@ public:
     return;
   };

+  static void QueueRoundRobinSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
+    uint8_t * cxbuf = (uint8_t *) xbuf;
+    uint8_t * crbuf = (uint8_t *) rbuf;
+    static int rrp=0;
+    int procs = VerticalSize-1;
+    int myoff=0;
+    int mywork=bytes;
+    QueueSendRecv(rrp+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
+    rrp = rrp+1;
+    if ( rrp == (VerticalSize-1) ) rrp = 0;
+  }
+
+  static void QueueMultiplexedSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
+    uint8_t * cxbuf = (uint8_t *) xbuf;
+    uint8_t * crbuf = (uint8_t *) rbuf;
+    int mywork, myoff, procs;
+    procs = VerticalSize-1;
+    for(int s=0;s<procs;s++) {
+      GetWork(bytes,s,mywork,myoff,procs);
+      QueueSendRecv(s+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
+    }
+  };
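GetWork itself lies outside this hunk; the call sites above only require that it tile "bytes" into "units" contiguous, near-equal chunks and return worker "me"'s length and offset. One sketch satisfying that contract (Grid's own split may differ in detail):

#include <cassert>

// Split nwork items into units contiguous chunks; the first rem workers
// carry one extra item so the chunks tile the buffer exactly.
static void GetWork(int nwork, int me, int &mywork, int &myoff, int units) {
  int base = nwork / units;
  int rem  = nwork % units;
  mywork = base + (me < rem ? 1 : 0);
  myoff  = base * me + (me < rem ? me : rem);
}

int main(void) {
  int nwork = 1000, units = 3, covered = 0;
  for (int me = 0; me < units; me++) {
    int mywork, myoff;
    GetWork(nwork, me, mywork, myoff, units);
    assert(myoff == covered);  // chunks are contiguous, no gaps or overlap
    covered += mywork;
  }
  assert(covered == nwork);    // every byte is assigned to some slave
  return 0;
}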
   static void QueueMultiplexedSend(void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
     uint8_t * cbuf = (uint8_t *) buf;
     int mywork, myoff, procs;
@@ -275,6 +322,7 @@ std::vector<void *> MPIoffloadEngine::VerticalShmBufs;
 std::vector<std::vector<int> > MPIoffloadEngine::UniverseRanks;
 std::vector<int>               MPIoffloadEngine::UserCommunicatorToWorldRanks;
+int CartesianCommunicator::NodeCount(void) { return HorizontalSize;};
 int MPIoffloadEngine::ShmSetup = 0;

 void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
@@ -370,12 +418,22 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
       ftruncate(fd, size);

       VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

       if ( VerticalShmBufs[r] == MAP_FAILED ) {
         perror("failed mmap");
         assert(0);
       }

+      /*
+      for(uint64_t page=0;page<size;page+=4096){
+        void *pages = (void *) ( page + (uint64_t)VerticalShmBufs[r] );
+        int status;
+        int flags=MPOL_MF_MOVE_ALL;
+        int nodes=1; // numa domain == MCDRAM
+        unsigned long count=1;
+        ierr= move_pages(0,count, &pages,&nodes,&status,flags);
+        if (ierr && (page==0)) perror("numa relocate command failed");
+      }
+      */
       uint64_t * check = (uint64_t *) VerticalShmBufs[r];
       check[0] = WorldRank;
       check[1] = r;
@@ -404,7 +462,7 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
       uint64_t * check = (uint64_t *) VerticalShmBufs[r];
       assert(check[0]== WorldRank);
       assert(check[1]== r);
-      std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
+      // std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
     }
   }
 #endif

@@ -542,6 +600,8 @@ int Slave::Event (void) {
   static int head_last;
   static int start_last;
   int ierr;
+  MPI_Status stat;
+  static int i=0;

   ////////////////////////////////////////////////////
   // Try to advance the start pointers
@@ -550,11 +610,6 @@ int Slave::Event (void) {
   if ( s != state->head ) {
     switch ( state->Descrs[s].command ) {
     case COMMAND_ISEND:
-      /*
-      std::cout<< " Send "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
-               << " to " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
-               << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <<universe_rank<< std::endl;
-      */
       ierr = MPI_Isend((void *)(state->Descrs[s].buf+base),
                        state->Descrs[s].bytes,
                        MPI_CHAR,
@@ -568,11 +623,6 @@ int Slave::Event (void) {
       break;

     case COMMAND_IRECV:
-      /*
-      std::cout<< " Recv "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
-               << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
-               << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl;
-      */
       ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base),
                      state->Descrs[s].bytes,
                      MPI_CHAR,
@@ -588,10 +638,32 @@ int Slave::Event (void) {
       return 1;
       break;

+    case COMMAND_SENDRECV:
+      // fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10);
+      ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10,
+                        (void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10,
+                        MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE);
+      assert(ierr==0);
+      // fprintf(stderr,"Sendrecv done %d %d\n",ierr,i);
+      // MPI_Barrier(MPIoffloadEngine::HorizontalComm);
+      // fprintf(stderr,"Barrier\n");
+      i++;
+      state->start = PERI_PLUS(s);
+      return 1;
+      break;
+
     case COMMAND_WAITALL:
       for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){
+        if ( state->Descrs[t].command != COMMAND_SENDRECV ) {
           MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
+        }
       };
       s=PERI_PLUS(s);
       state->start = s;
@@ -613,6 +685,45 @@ int Slave::Event (void) {
 //////////////////////////////////////////////////////////////////////////////
 // External interaction with the queue
 //////////////////////////////////////////////////////////////////////////////

+void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src)
+{
+  int head =state->head;
+  int next = PERI_PLUS(head);
+
+  // Set up descriptor
+  int worldrank;
+  int hashtag;
+  MPI_Comm    communicator;
+  MPI_Request request;
+  uint64_t    relative;
+
+  relative = (uint64_t)xbuf - base;
+  state->Descrs[head].xbuf = relative;
+
+  relative= (uint64_t)rbuf - base;
+  state->Descrs[head].rbuf = relative;
+
+  state->Descrs[head].bytes = bytes;
+
+  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest);
+  state->Descrs[head].dest = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
+  state->Descrs[head].xtag = hashtag;
+
+  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src);
+  state->Descrs[head].src  = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
+  state->Descrs[head].rtag = hashtag;
+
+  state->Descrs[head].command= COMMAND_SENDRECV;
+
+  // Block until FIFO has space
+  while( state->tail==next );
+
+  // Msync on weak order architectures
+
+  // Advance pointer
+  state->head = next;
+};
+
 uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank)
 {
   /////////////////////////////////////////
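The head/tail discipline in QueueSendRecv is a single-producer/single-consumer ring: the caller owns head, the slave owns tail, and one slot is sacrificed so that full and empty states are distinguishable. A minimal generic sketch of the same protocol, with explicit fences standing in for the "Msync on weak order architectures" comment:

#include <atomic>

const int Ring = 8;                       // ring size; one slot is kept empty
#define PERI_PLUS(A) (((A) + 1) % Ring)   // wrap-around increment

struct Fifo {
  std::atomic<int> head{0};               // next slot the producer fills
  std::atomic<int> tail{0};               // next slot the consumer drains
  int slots[Ring];
};

void enqueue(Fifo &f, int v) {            // producer side, like QueueSendRecv
  int h = f.head.load(std::memory_order_relaxed);
  int next = PERI_PLUS(h);
  while (f.tail.load(std::memory_order_acquire) == next) {} // block until space
  f.slots[h] = v;
  f.head.store(next, std::memory_order_release);            // publish the descriptor
}

bool try_dequeue(Fifo &f, int &v) {       // consumer side, like Slave::Event
  int t = f.tail.load(std::memory_order_relaxed);
  if (t == f.head.load(std::memory_order_acquire)) return false; // ring empty
  v = f.slots[t];
  f.tail.store(PERI_PLUS(t), std::memory_order_release);
  return true;
}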
@@ -812,19 +923,22 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
   assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) );
   assert(from!=_processor);
   assert(dest!=_processor);
-  MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
-  MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
-}
+
+  MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);
+
+  //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);
+
+  //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
+  //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
+}

 void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
   MPIoffloadEngine::WaitAll();
+  //this->Barrier();
 }

-void CartesianCommunicator::StencilBarrier(void)
-{
-}
+void CartesianCommunicator::StencilBarrier(void) { }

 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {

|
|||||||
See the full license in the file "LICENSE" in the top level distribution directory
|
See the full license in the file "LICENSE" in the top level distribution directory
|
||||||
*************************************************************************************/
|
*************************************************************************************/
|
||||||
/* END LEGAL */
|
/* END LEGAL */
|
||||||
#include <Grid/Grid.h>
|
#include <Grid/GridCore.h>
|
||||||
|
|
||||||
namespace Grid {
|
namespace Grid {
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
@ -87,6 +88,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
|
|||||||
{
|
{
|
||||||
assert(0);
|
assert(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
|
||||||
{
|
{
|
||||||
assert(0);
|
assert(0);
|
||||||
@ -97,7 +99,7 @@ void CartesianCommunicator::Barrier(void){}
|
|||||||
void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
|
void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
|
||||||
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
|
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
|
||||||
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) { return 0;}
|
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) { return 0;}
|
||||||
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){ coor = _processor_coor ;}
|
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){ coor = _processor_coor; }
|
||||||
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
|
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
|
||||||
{
|
{
|
||||||
source =0;
|
source =0;
|
||||||
|
@@ -27,6 +27,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 #include <Grid/Grid.h>
 #include <mpp/shmem.h>
+#include <array>

 namespace Grid {

@@ -51,7 +52,7 @@ typedef struct HandShake_t {
 } HandShake;

 std::array<long,_SHMEM_REDUCE_SYNC_SIZE> make_psync_init(void) {
-  array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
+  std::array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
   ret.fill(SHMEM_SYNC_VALUE);
   return ret;
 }
@@ -109,7 +110,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){
   source = u;
   dest   = 0;
-  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
   shmem_barrier_all(); // necessary?
   u = dest;
 }
@@ -125,7 +126,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){
   source = u;
   dest   = 0;
-  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
   shmem_barrier_all(); // necessary?
   u = dest;
 }
@@ -137,7 +138,8 @@ void CartesianCommunicator::GlobalSum(float &f){
   source = f;
   dest   =0.0;
-  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+  shmem_barrier_all();
   f = dest;
 }
 void CartesianCommunicator::GlobalSumVector(float *f,int N)
@@ -148,14 +150,16 @@ void CartesianCommunicator::GlobalSumVector(float *f,int N)
   static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync = psync_init;

   if ( shmem_addr_accessible(f,_processor) ){
-    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync);
+    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
     return;
   }

   for(int i=0;i<N;i++){
     dest   =0.0;
     source = f[i];
-    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
     f[i] = dest;
   }
 }
@@ -168,7 +172,8 @@ void CartesianCommunicator::GlobalSum(double &d)
   source = d;
   dest   = 0;
-  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+  shmem_barrier_all();
   d = dest;
 }
 void CartesianCommunicator::GlobalSumVector(double *d,int N)
@@ -180,14 +185,16 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)

   if ( shmem_addr_accessible(d,_processor) ){
-    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync);
+    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
     return;
   }

   for(int i=0;i<N;i++){
     source = d[i];
     dest   =0.0;
-    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
     d[i] = dest;
   }
 }
@@ -282,11 +289,13 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
   SHMEM_VET(recv);
   // shmem_putmem_nb(recv,xmit,bytes,dest,NULL);
   shmem_putmem(recv,xmit,bytes,dest);
+
+  if ( CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all();
 }
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
   // shmem_quiet();      // I'm done
-  shmem_barrier_all();// He's done too
+  if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too
 }
 void CartesianCommunicator::Barrier(void)
 {
@@ -301,13 +310,13 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
   int words = bytes/4;

   if ( shmem_addr_accessible(data,_processor) ){
-    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data());
     return;
   }

   for(int w=0;w<words;w++){
     word = array[w];
-    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
     if ( shmem_my_pe() != root ) {
       array[w] = word;
     }
@@ -325,7 +334,7 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)

   for(int w=0;w<words;w++){
     word = array[w];
-    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
     if ( shmem_my_pe() != root ) {
       array[w]= word;
     }
@@ -333,5 +342,9 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
   }
 }

+int CartesianCommunicator::RankWorld(void){
+  return shmem_my_pe();
+}
+
 }
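The repeated psync to psync.data() edits above all follow from one C++ fact: std::array does not decay to a pointer the way a C array does, so the raw long* the SHMEM collectives expect must be obtained explicitly. An isolated illustration (the sizes and the consuming C API are stand-ins):

#include <array>

const int SYNC_SIZE = 16;              // stand-in for _SHMEM_REDUCE_SYNC_SIZE

std::array<long, SYNC_SIZE> make_psync_init(void) {
  std::array<long, SYNC_SIZE> ret;
  ret.fill(0L);                        // stand-in for SHMEM_SYNC_VALUE
  return ret;
}

void consume(long *p) { (void)p; }     // a C API taking long*, like shmem_*_to_all

void example(void) {
  static std::array<long, SYNC_SIZE> psync = make_psync_init();
  consume(psync.data());               // passing psync alone would not convert to long*
}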
@@ -1,5 +1,4 @@
-/*************************************************************************************
 /*************************************************************************************

     Grid physics library, www.github.com/paboyle/Grid

@@ -53,15 +52,13 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<cobj> &buffer,int dimen
     cbmask = 0x3;
   }

-  int so  = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane
+  int so=plane*rhs._grid->_ostride[dimension]; // base offset for start of plane

   int e1=rhs._grid->_slice_nblock[dimension];
   int e2=rhs._grid->_slice_block[dimension];

   int stride=rhs._grid->_slice_stride[dimension];
   if ( cbmask == 0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o  = n*stride;
         int bo = n*e2;
@@ -74,14 +71,13 @@ PARALLEL_NESTED_LOOP2
     for(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o  = n*stride;
-        int ocb=1<<rhs._grid->CheckerBoardFromOindexTable(o+b);
+        int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
         if ( ocb &cbmask ) {
           table.push_back(std::pair<int,int> (bo++,o+b));
         }
       }
     }
-PARALLEL_FOR_LOOP
-    for(int i=0;i<table.size();i++){
+    parallel_for(int i=0;i<table.size();i++){
       buffer[off+table[i].first]=compress(rhs._odata[so+table[i].second]);
     }
   }
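The PARALLEL_NESTED_LOOP2 / PARALLEL_FOR_LOOP rewrites above fold the threading pragma into a loop keyword, so the parallel annotation can no longer drift away from the loop it governs. One plausible way such macros are built over OpenMP; Grid's real definitions live in its threading header and may differ:

#include <cstdio>
#ifdef _OPENMP
#define parallel_for       _Pragma("omp parallel for") for
#define parallel_for_nest2 _Pragma("omp parallel for collapse(2)") for
#else
#define parallel_for       for
#define parallel_for_nest2 for
#endif

int main(void) {
  double sum[16] = {0};
  parallel_for(int i = 0; i < 16; i++) {  // reads like a keyword at the call site
    sum[i] = 2.0 * i;
  }
  printf("%f\n", sum[7]);
  return 0;
}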
@@ -105,29 +101,30 @@ Gather_plane_extract(const Lattice<vobj> &rhs,std::vector<typename cobj::scalar_
   int e1=rhs._grid->_slice_nblock[dimension];
   int e2=rhs._grid->_slice_block[dimension];
   int n1=rhs._grid->_slice_stride[dimension];
-  int n2=rhs._grid->_slice_block[dimension];

   if ( cbmask ==0x3){
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){

         int o      = n*n1;
-        int offset = b+n*n2;
-        cobj temp =compress(rhs._odata[so+o+b]);
+        int offset = b+n*e2;
+
+        cobj temp =compress(rhs._odata[so+o+b]);
         extract<cobj>(temp,pointers,offset);

       }
     }
   } else {

-    assert(0); //Fixme think this is buggy
-    for(int n=0;n<e1;n++){
+    // Case of SIMD split AND checker dim cannot currently be hit, except in
+    // Test_cshift_red_black code.
+    std::cout << " Dense packed buffer WARNING " <<std::endl;
+    parallel_for_nest2(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
-        int o=n*rhs._grid->_slice_stride[dimension];
+        int o=n*n1;
         int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
-        int offset = b+n*rhs._grid->_slice_block[dimension];
+        int offset = b+n*e2;

         if ( ocb & cbmask ) {
           cobj temp =compress(rhs._odata[so+o+b]);
@@ -171,10 +168,10 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo

   int e1=rhs._grid->_slice_nblock[dimension];
   int e2=rhs._grid->_slice_block[dimension];
+  int stride=rhs._grid->_slice_stride[dimension];

   if ( cbmask ==0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o   =n*rhs._grid->_slice_stride[dimension];
         int bo  =n*rhs._grid->_slice_block[dimension];
@@ -182,17 +179,21 @@ PARALLEL_NESTED_LOOP2
       }
     }
   } else {
+    std::vector<std::pair<int,int> > table;
     int bo=0;
     for(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o   =n*rhs._grid->_slice_stride[dimension];
-        int bo  =n*rhs._grid->_slice_block[dimension];
         int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
         if ( ocb & cbmask ) {
-          rhs._odata[so+o+b]=buffer[bo++];
+          table.push_back(std::pair<int,int> (so+o+b,bo++));
         }
       }
     }
+    parallel_for(int i=0;i<table.size();i++){
+      // std::cout << "Rcv"<< table[i].first << " " << table[i].second << " " <<buffer[table[i].second]<<std::endl;
+      rhs._odata[table[i].first]=buffer[table[i].second];
+    }
   }
 }

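The rewrite above is a general trick: when a per-site predicate makes the loop body irregular (the checkerboard mask here), build the index table in a cheap serial pass, then apply it with a flat, evenly balanced parallel loop. A generic sketch of the two-pass pattern, assuming OpenMP:

#include <vector>
#include <utility>

// Serial pass: record (destination, source) pairs that pass the predicate.
template <class Pred>
std::vector<std::pair<int,int> > build_table(int n, Pred keep) {
  std::vector<std::pair<int,int> > table;
  int bo = 0;
  for (int o = 0; o < n; o++)
    if (keep(o)) table.push_back(std::make_pair(o, bo++));
  return table;
}

// Parallel pass: every entry is independent, so a flat loop distributes evenly.
void apply_table(double *dst, const double *buf,
                 const std::vector<std::pair<int,int> > &table) {
#pragma omp parallel for
  for (int i = 0; i < (int)table.size(); i++)
    dst[table[i].first] = buf[table[i].second];
}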
@@ -213,8 +214,7 @@ PARALLEL_NESTED_LOOP2
   int e2=rhs._grid->_slice_block[dimension];

   if(cbmask ==0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o      = n*rhs._grid->_slice_stride[dimension];
         int offset = b+n*rhs._grid->_slice_block[dimension];
@@ -222,7 +222,11 @@ PARALLEL_NESTED_LOOP2
       }
     }
   } else {
-    assert(0); // think this is buggy FIXME
+
+    // Case of SIMD split AND checker dim cannot currently be hit, except in
+    // Test_cshift_red_black code.
+    // std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;
+    std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
     for(int n=0;n<e1;n++){
       for(int b=0;b<e2;b++){
         int o      = n*rhs._grid->_slice_stride[dimension];

|
|||||||
int e2=rhs._grid->_slice_block[dimension];
|
int e2=rhs._grid->_slice_block[dimension];
|
||||||
int stride = rhs._grid->_slice_stride[dimension];
|
int stride = rhs._grid->_slice_stride[dimension];
|
||||||
if(cbmask == 0x3 ){
|
if(cbmask == 0x3 ){
|
||||||
PARALLEL_NESTED_LOOP2
|
parallel_for_nest2(int n=0;n<e1;n++){
|
||||||
for(int n=0;n<e1;n++){
|
|
||||||
for(int b=0;b<e2;b++){
|
for(int b=0;b<e2;b++){
|
||||||
|
|
||||||
int o =n*stride+b;
|
int o =n*stride+b;
|
||||||
@ -264,8 +267,7 @@ PARALLEL_NESTED_LOOP2
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
PARALLEL_NESTED_LOOP2
|
parallel_for_nest2(int n=0;n<e1;n++){
|
||||||
for(int n=0;n<e1;n++){
|
|
||||||
for(int b=0;b<e2;b++){
|
for(int b=0;b<e2;b++){
|
||||||
|
|
||||||
int o =n*stride+b;
|
int o =n*stride+b;
|
||||||
@ -295,8 +297,8 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
|
|||||||
int e1=rhs._grid->_slice_nblock[dimension];
|
int e1=rhs._grid->_slice_nblock[dimension];
|
||||||
int e2=rhs._grid->_slice_block [dimension];
|
int e2=rhs._grid->_slice_block [dimension];
|
||||||
int stride = rhs._grid->_slice_stride[dimension];
|
int stride = rhs._grid->_slice_stride[dimension];
|
||||||
PARALLEL_NESTED_LOOP2
|
|
||||||
for(int n=0;n<e1;n++){
|
parallel_for_nest2(int n=0;n<e1;n++){
|
||||||
for(int b=0;b<e2;b++){
|
for(int b=0;b<e2;b++){
|
||||||
|
|
||||||
int o =n*stride;
|
int o =n*stride;
|
||||||
@ -338,8 +340,8 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
|
|||||||
// Map to always positive shift modulo global full dimension.
|
// Map to always positive shift modulo global full dimension.
|
||||||
shift = (shift+fd)%fd;
|
shift = (shift+fd)%fd;
|
||||||
|
|
||||||
ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
|
|
||||||
// the permute type
|
// the permute type
|
||||||
|
ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
|
||||||
int permute_dim =grid->PermuteDim(dimension);
|
int permute_dim =grid->PermuteDim(dimension);
|
||||||
int permute_type=grid->PermuteType(dimension);
|
int permute_type=grid->PermuteType(dimension);
|
||||||
int permute_type_dist;
|
int permute_type_dist;
|
||||||
@ -348,7 +350,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
|
|||||||
|
|
||||||
int o = 0;
|
int o = 0;
|
||||||
int bo = x * grid->_ostride[dimension];
|
int bo = x * grid->_ostride[dimension];
|
||||||
|
|
||||||
int cb= (cbmask==0x2)? Odd : Even;
|
int cb= (cbmask==0x2)? Odd : Even;
|
||||||
|
|
||||||
int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
|
int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
|
||||||
@ -361,9 +362,23 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
|
|||||||
// wrap is whether sshift > rd.
|
// wrap is whether sshift > rd.
|
||||||
// num is sshift mod rd.
|
// num is sshift mod rd.
|
||||||
//
|
//
|
||||||
|
// shift 7
|
||||||
|
//
|
||||||
|
// XoXo YcYc
|
||||||
|
// oXoX cYcY
|
||||||
|
// XoXo YcYc
|
||||||
|
// oXoX cYcY
|
||||||
|
//
|
||||||
|
// sshift --
|
||||||
|
//
|
||||||
|
// XX YY ; 3
|
||||||
|
// XX YY ; 0
|
||||||
|
// XX YY ; 3
|
||||||
|
// XX YY ; 0
|
||||||
|
//
|
||||||
int permute_slice=0;
|
int permute_slice=0;
|
||||||
if(permute_dim){
|
if(permute_dim){
|
||||||
int wrap = sshift/rd;
|
int wrap = sshift/rd; wrap=wrap % ly;
|
||||||
int num = sshift%rd;
|
int num = sshift%rd;
|
||||||
|
|
||||||
if ( x< rd-num ) permute_slice=wrap;
|
if ( x< rd-num ) permute_slice=wrap;
|
||||||
@ -375,7 +390,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
|
|||||||
} else {
|
} else {
|
||||||
permute_type_dist = permute_type;
|
permute_type_dist = permute_type;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
|
if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
|
||||||
|
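A worked instance of the wrap/num logic documented above, with illustrative values rd=4, ly=2, sshift=7. The complementary branch flipping to (wrap+1)%ly is an assumption consistent with the surrounding comment, since that line falls outside this hunk:

#include <cstdio>

int main(void) {
  int rd = 4, ly = 2, sshift = 7;
  int wrap = sshift / rd; wrap = wrap % ly;  // 7/4 = 1, then 1 % 2 = 1
  int num  = sshift % rd;                    // 7 % 4 = 3
  for (int x = 0; x < rd; x++) {
    // slices below rd-num keep wrap; the rest flip to the next lane mod ly
    int permute_slice = (x < rd - num) ? wrap : (wrap + 1) % ly;
    printf("x=%d permute_slice=%d\n", x, permute_slice);
  }
  return 0;
}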
@@ -74,7 +74,6 @@ template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &r
   sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd);

   //  std::cout << "Cshift_comms dim "<<dimension<<"cb "<<rhs.checkerboard<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;
-
   if ( sshift[0] == sshift[1] ) {
     //    std::cout << "Single pass Cshift_comms" <<std::endl;
     Cshift_comms(ret,rhs,dimension,shift,0x3);
@@ -154,10 +153,14 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
                          (void *)&recv_buf[0],
                          recv_from_rank,
                          bytes);
-      //      for(int i=0;i<words;i++){
-      //        std::cout << "SendRecv ["<<i<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
-      //      }
+      grid->Barrier();
+      /*
+      for(int i=0;i<send_buf.size();i++){
+        assert(recv_buf.size()==buffer_size);
+        assert(send_buf.size()==buffer_size);
+        std::cout << "SendRecv_Cshift_comms ["<<i<<" "<< dimension<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
+      }
+      */
       Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
     }
   }
@@ -243,7 +246,14 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
                            (void *)&recv_buf_extract[i][0],
                            recv_from_rank,
                            bytes);
+        /*
+        for(int w=0;w<recv_buf_extract[i].size();w++){
+          assert(recv_buf_extract[i].size()==buffer_size);
+          assert(send_buf_extract[i].size()==buffer_size);
+          std::cout << "SendRecv_Cshift_comms ["<<w<<" "<< dimension<<"] recv "<<recv_buf_extract[i][w]<<" send " << send_buf_extract[nbr_lane][w] << cbmask<<std::endl;
+        }
+        */
+        grid->Barrier();
         rpointers[i] = &recv_buf_extract[i][0];
       } else {
         rpointers[i] = &send_buf_extract[nbr_lane][0];

@ -39,8 +39,7 @@ namespace Grid {
|
|||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
conformable(lhs,rhs);
|
conformable(lhs,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
||||||
@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
|
|||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
conformable(lhs,rhs);
|
conformable(lhs,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
||||||
@ -73,8 +71,7 @@ PARALLEL_FOR_LOOP
|
|||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
conformable(lhs,rhs);
|
conformable(lhs,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
||||||
@ -89,8 +86,7 @@ PARALLEL_FOR_LOOP
|
|||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
conformable(lhs,rhs);
|
conformable(lhs,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
add(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
add(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
|
||||||
@ -108,8 +104,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(lhs,ret);
|
conformable(lhs,ret);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mult(&tmp,&lhs._odata[ss],&rhs);
|
mult(&tmp,&lhs._odata[ss],&rhs);
|
||||||
vstream(ret._odata[ss],tmp);
|
vstream(ret._odata[ss],tmp);
|
||||||
@ -120,8 +115,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
||||||
conformable(ret,lhs);
|
conformable(ret,lhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<lhs._grid->oSites();ss++){
|
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mac(&tmp,&lhs._odata[ss],&rhs);
|
mac(&tmp,&lhs._odata[ss],&rhs);
|
||||||
vstream(ret._odata[ss],tmp);
|
vstream(ret._odata[ss],tmp);
|
||||||
@ -132,8 +126,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
|
||||||
ret.checkerboard = lhs.checkerboard;
|
ret.checkerboard = lhs.checkerboard;
|
 conformable(ret,lhs);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 sub(&tmp,&lhs._odata[ss],&rhs);
@ -147,8 +140,7 @@ PARALLEL_FOR_LOOP
 void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
 ret.checkerboard = lhs.checkerboard;
 conformable(lhs,ret);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 add(&tmp,&lhs._odata[ss],&rhs);
@ -166,8 +158,7 @@ PARALLEL_FOR_LOOP
 void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 ret.checkerboard = rhs.checkerboard;
 conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 mult(&tmp,&lhs,&rhs._odata[ss]);
@ -182,8 +173,7 @@ PARALLEL_FOR_LOOP
 void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 ret.checkerboard = rhs.checkerboard;
 conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 mac(&tmp,&lhs,&rhs._odata[ss]);
@ -198,8 +188,7 @@ PARALLEL_FOR_LOOP
 void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 ret.checkerboard = rhs.checkerboard;
 conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 sub(&tmp,&lhs,&rhs._odata[ss]);
@ -213,8 +202,7 @@ PARALLEL_FOR_LOOP
 void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
 ret.checkerboard = rhs.checkerboard;
 conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 obj1 tmp;
 add(&tmp,&lhs,&rhs._odata[ss]);
@ -230,8 +218,7 @@ PARALLEL_FOR_LOOP
 ret.checkerboard = x.checkerboard;
 conformable(ret,x);
 conformable(x,y);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<x._grid->oSites();ss++){
+parallel_for(int ss=0;ss<x._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = a*x._odata[ss]+y._odata[ss];
 vstream(ret._odata[ss],tmp);
@ -245,8 +232,7 @@ PARALLEL_FOR_LOOP
 ret.checkerboard = x.checkerboard;
 conformable(ret,x);
 conformable(x,y);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<x._grid->oSites();ss++){
+parallel_for(int ss=0;ss<x._grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = a*x._odata[ss]+b*y._odata[ss];
 vstream(ret._odata[ss],tmp);
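
Note on the recurring change above: the old two-line idiom, a PARALLEL_FOR_LOOP macro followed by a plain for statement, is collapsed into a single parallel_for statement. A minimal sketch of how such a macro can be built (an assumption for illustration only; Grid's actual definition lives in its threading header and may differ):

#ifdef _OPENMP
// Fuse the OpenMP pragma with the for keyword, so call sites read
// parallel_for(int ss=0;ss<N;ss++){ ... } and remain valid serial C++
// when OpenMP is disabled.
#define parallel_for _Pragma("omp parallel for") for
#else
#define parallel_for for
#endif

With a definition of this shape, every loop in the hunks above compiles either threaded or serial with no further change at the call site.
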
@ -121,8 +121,7 @@ public:
 assert( (cb==Odd) || (cb==Even));
 checkerboard=cb;
 
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = eval(ss,expr);
 vstream(_odata[ss] ,tmp);
@ -144,8 +143,7 @@ PARALLEL_FOR_LOOP
 assert( (cb==Odd) || (cb==Even));
 checkerboard=cb;
 
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = eval(ss,expr);
 vstream(_odata[ss] ,tmp);
@ -167,8 +165,7 @@ PARALLEL_FOR_LOOP
 assert( (cb==Odd) || (cb==Even));
 checkerboard=cb;
 
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
 //vobj tmp = eval(ss,expr);
 vstream(_odata[ss] ,eval(ss,expr));
@ -191,8 +188,7 @@ PARALLEL_FOR_LOOP
 checkerboard=cb;
 
 _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = eval(ss,expr);
 vstream(_odata[ss] ,tmp);
@ -213,8 +209,7 @@ PARALLEL_FOR_LOOP
 checkerboard=cb;
 
 _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
 vobj tmp = eval(ss,expr);
 vstream(_odata[ss] ,tmp);
@ -235,8 +230,7 @@ PARALLEL_FOR_LOOP
 checkerboard=cb;
 
 _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 vstream(_odata[ss] ,eval(ss,expr));
 }
 };
@ -258,8 +252,7 @@ PARALLEL_FOR_LOOP
 _grid = r._grid;
 checkerboard = r.checkerboard;
 _odata.resize(_grid->oSites());// essential
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 _odata[ss]=r._odata[ss];
 }
 }
@ -278,18 +271,17 @@ PARALLEL_FOR_LOOP
 
 
 template<class sobj> strong_inline Lattice<vobj> & operator = (const sobj & r){
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 this->_odata[ss]=r;
 }
 return *this;
 }
 
 template<class robj> strong_inline Lattice<vobj> & operator = (const Lattice<robj> & r){
 this->checkerboard = r.checkerboard;
 conformable(*this,r);
 
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<_grid->oSites();ss++){
+parallel_for(int ss=0;ss<_grid->oSites();ss++){
 this->_odata[ss]=r._odata[ss];
 }
 return *this;
@ -309,7 +301,7 @@ PARALLEL_FOR_LOOP
 *this = (*this)+r;
 return *this;
 }
 }; // class Lattice
 
 template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){
 std::vector<int> gcoor;
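
Several of these kernels compute into a temporary and then call vstream behind the STREAMING_STORES guard: the destination is write-only, so a streaming (non-temporal) store can bypass the cache. A portable sketch of the fallback semantics (an assumption; Grid's real vstream may map to SIMD non-temporal store intrinsics on supported targets):

// Sketch only: the portable meaning of vstream(out,in) is a plain store.
// Optimised builds may substitute a cache-bypassing streaming store
// (for example _mm256_stream_ps) when vobj wraps a SIMD register type.
template<class vobj> inline void vstream_sketch(vobj &out, const vobj &in) {
  out = in;
}
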
@ -47,8 +47,7 @@ namespace Grid {
 inline Lattice<vInteger> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
 {
 Lattice<vInteger> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]);
 }
 return ret;
@ -60,8 +59,7 @@ PARALLEL_FOR_LOOP
 inline Lattice<vInteger> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
 {
 Lattice<vInteger> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
 ret._odata[ss]=op(lhs._odata[ss],rhs);
 }
 return ret;
@ -73,8 +71,7 @@ PARALLEL_FOR_LOOP
 inline Lattice<vInteger> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
 {
 Lattice<vInteger> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 ret._odata[ss]=op(lhs._odata[ss],rhs);
 }
 return ret;
@ -168,6 +165,5 @@ PARALLEL_FOR_LOOP
 inline Lattice<vInteger> operator != (const lobj & lhs, const Lattice<robj> & rhs) {
 return SLComparison(vne<lobj,robj>(),lhs,rhs);
 }
 
 }
 #endif
@ -43,8 +43,7 @@ namespace Grid {
 inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
 {
 Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]);
 }
 return ret;
@ -55,8 +54,7 @@ PARALLEL_FOR_LOOP
 inline auto localInnerProduct (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs) -> Lattice<typename vobj::tensor_reduced>
 {
 Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 ret._odata[ss]=innerProduct(lhs._odata[ss],rhs._odata[ss]);
 }
 return ret;
@ -68,13 +66,10 @@ PARALLEL_FOR_LOOP
 inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))>
 {
 Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]);
 }
 return ret;
 }
 
 }
 
 #endif
@ -37,8 +37,7 @@ namespace Grid {
 inline Lattice<vobj> operator -(const Lattice<vobj> &r)
 {
 Lattice<vobj> ret(r._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<r._grid->oSites();ss++){
+parallel_for(int ss=0;ss<r._grid->oSites();ss++){
 vstream(ret._odata[ss], -r._odata[ss]);
 }
 return ret;
@ -74,8 +73,7 @@ PARALLEL_FOR_LOOP
 inline auto operator * (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
 {
 Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 decltype(lhs*rhs._odata[0]) tmp=lhs*rhs._odata[ss];
 vstream(ret._odata[ss],tmp);
 // ret._odata[ss]=lhs*rhs._odata[ss];
@ -86,8 +84,7 @@ PARALLEL_FOR_LOOP
 inline auto operator + (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs+rhs._odata[0])>
 {
 Lattice<decltype(lhs+rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 decltype(lhs+rhs._odata[0]) tmp =lhs-rhs._odata[ss];
 vstream(ret._odata[ss],tmp);
 // ret._odata[ss]=lhs+rhs._odata[ss];
@ -98,11 +95,9 @@ PARALLEL_FOR_LOOP
 inline auto operator - (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs-rhs._odata[0])>
 {
 Lattice<decltype(lhs-rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 decltype(lhs-rhs._odata[0]) tmp=lhs-rhs._odata[ss];
 vstream(ret._odata[ss],tmp);
-// ret._odata[ss]=lhs-rhs._odata[ss];
 }
 return ret;
 }
@ -110,8 +105,7 @@ PARALLEL_FOR_LOOP
 inline auto operator * (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
 {
 Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
 decltype(lhs._odata[0]*rhs) tmp =lhs._odata[ss]*rhs;
 vstream(ret._odata[ss],tmp);
 // ret._odata[ss]=lhs._odata[ss]*rhs;
@ -122,8 +116,7 @@ PARALLEL_FOR_LOOP
 inline auto operator + (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]+rhs)>
 {
 Lattice<decltype(lhs._odata[0]+rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 decltype(lhs._odata[0]+rhs) tmp=lhs._odata[ss]+rhs;
 vstream(ret._odata[ss],tmp);
 // ret._odata[ss]=lhs._odata[ss]+rhs;
@ -134,15 +127,12 @@ PARALLEL_FOR_LOOP
 inline auto operator - (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]-rhs)>
 {
 Lattice<decltype(lhs._odata[0]-rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<rhs._grid->oSites(); ss++){
+parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
 decltype(lhs._odata[0]-rhs) tmp=lhs._odata[ss]-rhs;
 vstream(ret._odata[ss],tmp);
 // ret._odata[ss]=lhs._odata[ss]-rhs;
 }
 return ret;
 }
 
 
 }
 #endif
@ -44,8 +44,7 @@ namespace Grid {
 {
 Lattice<decltype(peekIndex<Index>(lhs._odata[0],i))> ret(lhs._grid);
 ret.checkerboard=lhs.checkerboard;
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i);
 }
 return ret;
@ -55,8 +54,7 @@ PARALLEL_FOR_LOOP
 {
 Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))> ret(lhs._grid);
 ret.checkerboard=lhs.checkerboard;
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i,j);
 }
 return ret;
@ -68,16 +66,14 @@ PARALLEL_FOR_LOOP
 template<int Index,class vobj>
 void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0))> & rhs,int i)
 {
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i);
 }
 }
 template<int Index,class vobj>
 void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0,0))> & rhs,int i,int j)
 {
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i,j);
 }
 }
@ -131,9 +127,6 @@ PARALLEL_FOR_LOOP
 
 assert( l.checkerboard == l._grid->CheckerBoard(site));
 
-// FIXME
-// assert( sizeof(sobj)*Nsimd == sizeof(vobj));
-
 int rank,odx,idx;
 grid->GlobalCoorToRankIndex(rank,odx,idx,site);
 
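
PeekIndex and PokeIndex above give whole-lattice access to a single internal tensor index, with each site handled inside the parallel loop. A hedged usage sketch (the field name U, the index name LorentzIndex, and the component mu are illustrative assumptions, not taken from this diff):

// Sketch only: read one internal-index slice of a lattice field, operate
// on it site by site, then scatter it back.
// auto Umu = PeekIndex<LorentzIndex>(U, mu); // Lattice of component mu
// Umu = Umu * 2.0;                           // any site-local operation
// PokeIndex<LorentzIndex>(U, Umu, mu);       // write the slice back
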
@ -40,8 +40,7 @@ namespace Grid {
 
 template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
 Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = adj(lhs._odata[ss]);
 }
 return ret;
@ -49,13 +48,10 @@ PARALLEL_FOR_LOOP
 
 template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
 Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = conjugate(lhs._odata[ss]);
 }
 return ret;
 };
 
 
 }
 #endif
@ -38,14 +38,14 @@ namespace Grid {
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Deterministic Reduction operations
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 template <class vobj>
 inline RealD norm2(const Lattice<vobj> &arg) {
 ComplexD nrm = innerProduct(arg, arg);
 return std::real(nrm);
 }
 
 template <class vobj>
 inline ComplexD innerProduct(const Lattice<vobj> &left,
 const Lattice<vobj> &right) {
 typedef typename vobj::scalar_type scalar_type;
 typedef typename vobj::vector_type vector_type;
@ -53,40 +53,35 @@ inline ComplexD innerProduct(const Lattice<vobj> &left,
 
 GridBase *grid = left._grid;
 
-std::vector<vector_type, alignedAllocator<vector_type> > sumarray(
-grid->SumArraySize());
+std::vector<vector_type, alignedAllocator<vector_type> > sumarray(grid->SumArraySize());
 for (int i = 0; i < grid->SumArraySize(); i++) {
 sumarray[i] = zero;
 }
 
-PARALLEL_FOR_LOOP
-for (int thr = 0; thr < grid->SumArraySize(); thr++) {
+parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
 int nwork, mywork, myoff;
 GridThread::GetWork(left._grid->oSites(), thr, mywork, myoff);
 
-decltype(innerProduct(left._odata[0], right._odata[0])) vnrm =
-zero; // private to thread; sub summation
-for (int ss = myoff; ss < mywork + myoff; ss++) {
-vnrm = vnrm + innerProduct(left._odata[ss], right._odata[ss]);
+decltype(innerProduct(left._odata[0], right._odata[0])) vnrm=zero; // private to thread; sub summation
+for(int ss = myoff; ss<mywork + myoff; ss++){
+vnrm = vnrm + innerProduct(left._odata[ss],right._odata[ss]);
 }
-sumarray[thr] = TensorRemove(vnrm);
+sumarray[thr]=TensorRemove(vnrm) ;
 }
 
 
 vector_type vvnrm;
-vvnrm = zero; // sum across threads
-for (int i = 0; i < grid->SumArraySize(); i++) {
+vvnrm=zero; // sum across threads
+for(int i=0; i < grid->SumArraySize(); i++){
 vvnrm = vvnrm + sumarray[i];
 }
-nrm = Reduce(vvnrm); // sum across simd
+nrm = Reduce(vvnrm);// sum across simd
 right._grid->GlobalSum(nrm);
 return nrm;
 }
 
 template <class Op, class T1>
 inline auto sum(const LatticeUnaryExpression<Op, T1> &expr) ->
-typename decltype(
-expr.first.func(eval(0, std::get<0>(expr.second))))::scalar_object {
+typename decltype(expr.first.func(eval(0, std::get<0>(expr.second))))::scalar_object {
 return sum(closure(expr));
 }
 
@ -119,8 +114,7 @@ inline auto sum(const LatticeUnaryExpression<Op, T1> &expr) ->
 sumarray[i]=zero;
 }
 
-PARALLEL_FOR_LOOP
-for(int thr=0;thr<grid->SumArraySize();thr++){
+parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
 int nwork, mywork, myoff;
 GridThread::GetWork(grid->oSites(),thr,mywork,myoff);
 
@ -150,8 +144,8 @@ PARALLEL_FOR_LOOP
 
 
 
 template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
 {
 typedef typename vobj::scalar_object sobj;
 GridBase *grid = Data._grid;
 assert(grid!=NULL);
@ -222,7 +216,7 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
 result[t]=gsum;
 }
 
 }
 
 
 }
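
The deterministic reduction above follows a fixed pattern: each thread accumulates a private partial sum over its assigned slice of sites, the partials are combined serially in thread order, the SIMD lanes are reduced, and a global sum then runs across ranks. A self-contained sketch of the same pattern, with plain OpenMP standing in for Grid's thread and communicator machinery (an assumption):

// Sketch only: deterministic threaded sum. Each thread writes exactly one
// slot of sumarray; the serial combine at the end is independent of thread
// scheduling, which is what makes the result reproducible run to run.
#include <omp.h>
#include <algorithm>
#include <vector>

double deterministic_sum(const std::vector<double> &data) {
  int nthread = omp_get_max_threads();
  std::vector<double> sumarray(nthread, 0.0);

  #pragma omp parallel for
  for (int thr = 0; thr < nthread; thr++) {
    // carve [myoff, myend) out of the data for this thread
    long chunk = ((long)data.size() + nthread - 1) / nthread;
    long myoff = (long)thr * chunk;
    long myend = std::min<long>(myoff + chunk, (long)data.size());
    double partial = 0.0;                  // private to thread; sub summation
    for (long ss = myoff; ss < myend; ss++) partial += data[ss];
    sumarray[thr] = partial;
  }

  double total = 0.0;                      // serial combine, fixed order
  for (int i = 0; i < nthread; i++) total += sumarray[i];
  return total;                            // a GlobalSum would follow on MPI
}
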
@ -30,11 +30,19 @@
 #define GRID_LATTICE_RNG_H
 
 #include <random>
 
+#ifdef RNG_SITMO
 #include <Grid/sitmo_rng/sitmo_prng_engine.hpp>
+#endif
 
+#if defined(RNG_SITMO)
+#define RNG_FAST_DISCARD
+#else
+#undef RNG_FAST_DISCARD
+#endif
+
 namespace Grid {
 
 //////////////////////////////////////////////////////////////
 // Allow the RNG state to be less dense than the fine grid
 //////////////////////////////////////////////////////////////
@ -64,12 +72,15 @@ namespace Grid {
 
 multiplicity = multiplicity *fine->_rdimensions[fd] / coarse->_rdimensions[d];
 }
 
 return multiplicity;
 }
 
 
+// merge of April 11 2017
+//<<<<<<< HEAD
+
+// this function is necessary for the LS vectorised field
 inline int RNGfillable_general(GridBase *coarse,GridBase *fine)
 {
 int rngdims = coarse->_ndimension;
@ -92,6 +103,7 @@ namespace Grid {
 return fine->lSites() / coarse->lSites();
 }
 
+/*
 // Wrap seed_seq to give common interface with random_device
 class fixedSeed {
 public:
@ -108,89 +120,140 @@ namespace Grid {
 
 };
 
+=======
+>>>>>>> develop
+*/
 
 // real scalars are one component
-template<class scalar,class distribution,class generator> void fillScalar(scalar &s,distribution &dist,generator & gen)
+template<class scalar,class distribution,class generator>
+void fillScalar(scalar &s,distribution &dist,generator & gen)
 {
 s=dist(gen);
 }
-template<class distribution,class generator> void fillScalar(ComplexF &s,distribution &dist, generator &gen)
+template<class distribution,class generator>
+void fillScalar(ComplexF &s,distribution &dist, generator &gen)
 {
 s=ComplexF(dist(gen),dist(gen));
 }
-template<class distribution,class generator> void fillScalar(ComplexD &s,distribution &dist,generator &gen)
+template<class distribution,class generator>
+void fillScalar(ComplexD &s,distribution &dist,generator &gen)
 {
 s=ComplexD(dist(gen),dist(gen));
 }
 
 class GridRNGbase {
 
 public:
 
-int _seeded;
 // One generator per site.
 // Uniform and Gaussian distributions from these generators.
 #ifdef RNG_RANLUX
-typedef uint64_t RngStateType;
 typedef std::ranlux48 RngEngine;
+typedef uint64_t RngStateType;
 static const int RngStateCount = 15;
-#elif RNG_MT19937
+#endif
+#ifdef RNG_MT19937
 typedef std::mt19937 RngEngine;
 typedef uint32_t RngStateType;
 static const int RngStateCount = std::mt19937::state_size;
-#elif RNG_SITMO
+#endif
+#ifdef RNG_SITMO
 typedef sitmo::prng_engine RngEngine;
 typedef uint64_t RngStateType;
 static const int RngStateCount = 4;
 #endif
-std::vector<RngEngine> _generators;
-std::vector<std::uniform_real_distribution<RealD>> _uniform;
-std::vector<std::normal_distribution<RealD>> _gaussian;
-std::vector<std::discrete_distribution<int32_t>> _bernoulli;
 
-void GetState(std::vector<RngStateType> & saved,int gen) {
+std::vector<RngEngine> _generators;
+std::vector<std::uniform_real_distribution<RealD> > _uniform;
+std::vector<std::normal_distribution<RealD> > _gaussian;
+std::vector<std::discrete_distribution<int32_t> > _bernoulli;
+std::vector<std::uniform_int_distribution<uint32_t> > _uid;
+
+///////////////////////
+// support for parallel init
+///////////////////////
+#ifdef RNG_FAST_DISCARD
+static void Skip(RngEngine &eng)
+{
+/////////////////////////////////////////////////////////////////////////////////////
+// Skip by 2^40 elements between successive lattice sites
+// This goes by 10^12.
+// Consider quenched updating; likely never exceeding rate of 1000 sweeps
+// per second on any machine. This gives us of order 10^9 seconds, or 100 years
+// skip ahead.
+// For HMC unlikely to go at faster than a solve per second, and
+// tens of seconds per trajectory so this is clean in all reasonable cases,
+// and margin of safety is orders of magnitude.
+// We could hack Sitmo to skip in the higher order words of state if necessary
+/////////////////////////////////////////////////////////////////////////////////////
+uint64_t skip = 0x1; skip = skip<<40;
+eng.discard(skip);
+}
+#endif
+static RngEngine Reseed(RngEngine &eng)
+{
+std::vector<uint32_t> newseed;
+std::uniform_int_distribution<uint32_t> uid;
+return Reseed(eng,newseed,uid);
+}
+static RngEngine Reseed(RngEngine &eng,std::vector<uint32_t> & newseed,
+std::uniform_int_distribution<uint32_t> &uid)
+{
+const int reseeds=4;
+
+newseed.resize(reseeds);
+for(int i=0;i<reseeds;i++){
+newseed[i] = uid(eng);
+}
+std::seed_seq sseq(newseed.begin(),newseed.end());
+return RngEngine(sseq);
+}
+
+void GetState(std::vector<RngStateType> & saved,RngEngine &eng) {
 saved.resize(RngStateCount);
 std::stringstream ss;
-ss<<_generators[gen];
+ss<<eng;
 ss.seekg(0,ss.beg);
 for(int i=0;i<RngStateCount;i++){
 ss>>saved[i];
 }
 }
-void SetState(std::vector<RngStateType> & saved,int gen){
+void GetState(std::vector<RngStateType> & saved,int gen) {
+GetState(saved,_generators[gen]);
+}
+void SetState(std::vector<RngStateType> & saved,RngEngine &eng){
 assert(saved.size()==RngStateCount);
 std::stringstream ss;
 for(int i=0;i<RngStateCount;i++){
 ss<< saved[i]<<" ";
 }
 ss.seekg(0,ss.beg);
-ss>>_generators[gen];
+ss>>eng;
+}
+void SetState(std::vector<RngStateType> & saved,int gen){
+SetState(saved,_generators[gen]);
+}
+void SetEngine(RngEngine &Eng, int gen){
+_generators[gen]=Eng;
+}
+void GetEngine(RngEngine &Eng, int gen){
+Eng=_generators[gen];
+}
+template<class source> void Seed(source &src, int gen)
+{
+_generators[gen] = RngEngine(src);
 }
 };
 
 class GridSerialRNG : public GridRNGbase {
 public:
 
-// FIXME ... do we require lockstep draws of randoms
-// from all nodes keeping seeds consistent.
-// place a barrier/broadcast in the fill routine
-template<class source> void Seed(source &src)
-{
-typename source::result_type init = src();
-CartesianCommunicator::BroadcastWorld(0,(void *)&init,sizeof(init));
-_generators[0] = RngEngine(init);
-_seeded=1;
-}
 
 GridSerialRNG() : GridRNGbase() {
 _generators.resize(1);
 _uniform.resize(1,std::uniform_real_distribution<RealD>{0,1});
 _gaussian.resize(1,std::normal_distribution<RealD>(0.0,1.0) );
 _bernoulli.resize(1,std::discrete_distribution<int32_t>{1,1});
-_seeded=0;
+_uid.resize(1,std::uniform_int_distribution<uint32_t>() );
 }
 
 
 
 template <class sobj,class distribution> inline void fill(sobj &l,std::vector<distribution> &dist){
 
 typedef typename sobj::scalar_type scalar_type;
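
The new Reseed helper spawns a child engine by drawing a few 32-bit words from a parent engine and mixing them through std::seed_seq. A standalone sketch of the same spawning idea, in standard C++ only (the choice of std::mt19937 here is an illustrative assumption, not Grid's configured engine):

// Sketch only: derive an independent child engine from a parent by drawing
// fresh seed words and mixing them with std::seed_seq, as Reseed does above.
#include <random>
#include <vector>
#include <cstdint>

std::mt19937 reseed_child(std::mt19937 &parent) {
  const int reseeds = 4;
  std::uniform_int_distribution<uint32_t> uid;
  std::vector<uint32_t> newseed(reseeds);
  for (int i = 0; i < reseeds; i++) newseed[i] = uid(parent);
  std::seed_seq sseq(newseed.begin(), newseed.end());
  return std::mt19937(sseq);  // child stream, decorrelated from the parent
}
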
@ -262,16 +325,11 @@ namespace Grid {
 CartesianCommunicator::BroadcastWorld(0,(void *)&l,sizeof(l));
 }
 
-void SeedRandomDevice(void){
-std::random_device rd;
-Seed(rd);
-}
 void SeedFixedIntegers(const std::vector<int> &seeds){
-fixedSeed src(seeds);
-Seed(src);
+CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
+std::seed_seq src(seeds.begin(),seeds.end());
+Seed(src,0);
 }
 
 };
 
 class GridParallelRNG : public GridRNGbase {
@ -279,7 +337,6 @@ namespace Grid {
 double _time_counter;
 
 public:
-
 GridBase *_grid;
 unsigned int _vol;
 
@ -295,61 +352,11 @@ namespace Grid {
 _uniform.resize(_vol,std::uniform_real_distribution<RealD>{0,1});
 _gaussian.resize(_vol,std::normal_distribution<RealD>(0.0,1.0) );
 _bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1});
-_seeded = 0;
+_uid.resize(_vol,std::uniform_int_distribution<uint32_t>() );
 
-_time_counter = 0.0;
-
 }
 
-// This loop could be made faster to avoid the Ahmdahl by
-// i) seed generators on each timeslice, for x=y=z=0;
-// ii) seed generators on each z for x=y=0
-// iii)seed generators on each y,z for x=0
-// iv) seed generators on each y,z,x
-// made possible by physical indexing.
-template<class source> void Seed(source &src)
-{
-std::vector<int> gcoor;
-
-int gsites = _grid->_gsites;
-
-typename source::result_type init = src();
-RngEngine pseeder(init);
-std::uniform_int_distribution<uint64_t> ui;
-
-for(int gidx=0;gidx<gsites;gidx++){
-
-int rank,o_idx,i_idx;
-_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
-_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
-
-int l_idx=generator_idx(o_idx,i_idx);
-
-const int num_rand_seed=16;
-std::vector<int> site_seeds(num_rand_seed);
-for(int i=0;i<site_seeds.size();i++){
-site_seeds[i]= ui(pseeder);
-}
-
-_grid->Broadcast(0,(void *)&site_seeds[0],sizeof(int)*site_seeds.size());
-
-if( rank == _grid->ThisRank() ){
-fixedSeed ssrc(site_seeds);
-typename source::result_type sinit = ssrc();
-_generators[l_idx] = RngEngine(sinit);
-}
-}
-_seeded=1;
-}
-
-//FIXME implement generic IO and create state save/restore
-//void SaveState(const std::string<char> &file);
-//void LoadState(const std::string<char> &file);
-
-template <class vobj, class distribution>
-inline void fill(Lattice<vobj> &l, std::vector<distribution> &dist) {
+template <class vobj,class distribution> inline void fill(Lattice<vobj> &l,std::vector<distribution> &dist){
 typedef typename vobj::scalar_object scalar_object;
 typedef typename vobj::scalar_type scalar_type;
 typedef typename vobj::vector_type vector_type;
@ -357,14 +364,11 @@ namespace Grid {
 double inner_time_counter = usecond();
 
 int multiplicity = RNGfillable_general(_grid, l._grid); // l has finer or same grid
-int Nsimd = _grid->Nsimd(); // guaranteed to be the same for l._grid too
-int osites = _grid->oSites(); // guaranteed to be <= l._grid->oSites() by a factor multiplicity
-
+int Nsimd = _grid->Nsimd();// guaranteed to be the same for l._grid too
+int osites = _grid->oSites();// guaranteed to be <= l._grid->oSites() by a factor multiplicity
 int words = sizeof(scalar_object) / sizeof(scalar_type);
 
-PARALLEL_FOR_LOOP
-for (int ss = 0; ss < osites; ss++) {
+parallel_for(int ss=0;ss<osites;ss++){
 
 std::vector<scalar_object> buf(Nsimd);
 for (int m = 0; m < multiplicity; m++) { // Draw from same generator multiplicity times
 
@ -386,13 +390,79 @@ namespace Grid {
 _time_counter += usecond()- inner_time_counter;
 };
 
-void SeedRandomDevice(void) {
-std::random_device rd;
-Seed(rd);
-}
-void SeedFixedIntegers(const std::vector<int> &seeds) {
-fixedSeed src(seeds);
-Seed(src);
+void SeedFixedIntegers(const std::vector<int> &seeds){
+
+// Everyone generates the same seed_seq based on input seeds
+CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
+
+std::seed_seq source(seeds.begin(),seeds.end());
+
+RngEngine master_engine(source);
+
+#ifdef RNG_FAST_DISCARD
+////////////////////////////////////////////////
+// Skip ahead through a single stream.
+// Applicable to SITMO and other has based/crypto RNGs
+// Should be applicable to Mersenne Twister, but the C++11
+// MT implementation does not implement fast discard even though
+// in principle this is possible
+////////////////////////////////////////////////
+std::vector<int> gcoor;
+int rank,o_idx,i_idx;
+
+// Everybody loops over global volume.
+for(int gidx=0;gidx<_grid->_gsites;gidx++){
+
+Skip(master_engine); // Skip to next RNG sequence
+
+// Where is it?
+_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
+_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
+
+// If this is one of mine we take it
+if( rank == _grid->ThisRank() ){
+int l_idx=generator_idx(o_idx,i_idx);
+_generators[l_idx] = master_engine;
+}
+
+}
+#else
+////////////////////////////////////////////////////////////////
+// Machine and thread decomposition dependent seeding is efficient
+// and maximally parallel; but NOT reproducible from machine to machine.
+// Not ideal, but fastest way to reseed all nodes.
+////////////////////////////////////////////////////////////////
+{
+// Obtain one Reseed per processor
+int Nproc = _grid->ProcessorCount();
+std::vector<RngEngine> seeders(Nproc);
+int me= _grid->ThisRank();
+for(int p=0;p<Nproc;p++){
+seeders[p] = Reseed(master_engine);
+}
+master_engine = seeders[me];
+}
+
+{
+// Obtain one reseeded generator per thread
+int Nthread = GridThread::GetThreads();
+std::vector<RngEngine> seeders(Nthread);
+for(int t=0;t<Nthread;t++){
+seeders[t] = Reseed(master_engine);
+}
+
+parallel_for(int t=0;t<Nthread;t++) {
+// set up one per local site in threaded fashion
+std::vector<uint32_t> newseeds;
+std::uniform_int_distribution<uint32_t> uid;
+for(int l=0;l<_grid->lSites();l++) {
+if ( (l%Nthread)==t ) {
+_generators[l] = Reseed(seeders[t],newseeds,uid);
+}
+}
+}
+}
+#endif
 }
 
 void Report(){
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////
|
||||||
|
// Support for rigorous test of RNG's
|
||||||
|
// Return uniform random uint32_t from requested site generator
|
||||||
|
////////////////////////////////////////////////////////////////////////
|
||||||
|
uint32_t GlobalU01(int gsite){
|
||||||
|
|
||||||
|
uint32_t the_number;
|
||||||
|
// who
|
||||||
|
std::vector<int> gcoor;
|
||||||
|
int rank,o_idx,i_idx;
|
||||||
|
_grid->GlobalIndexToGlobalCoor(gsite,gcoor);
|
||||||
|
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
|
||||||
|
|
||||||
|
// draw
|
||||||
|
int l_idx=generator_idx(o_idx,i_idx);
|
||||||
|
if( rank == _grid->ThisRank() ){
|
||||||
|
the_number = _uid[l_idx](_generators[l_idx]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// share & return
|
||||||
|
_grid->Broadcast(rank,(void *)&the_number,sizeof(the_number));
|
||||||
|
return the_number;
|
||||||
|
}
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
template <class vobj> inline void random(GridParallelRNG &rng,Lattice<vobj> &l){
|
template <class vobj> inline void random(GridParallelRNG &rng,Lattice<vobj> &l) { rng.fill(l,rng._uniform); }
|
||||||
rng.fill(l,rng._uniform);
|
template <class vobj> inline void gaussian(GridParallelRNG &rng,Lattice<vobj> &l) { rng.fill(l,rng._gaussian); }
|
||||||
}
|
template <class vobj> inline void bernoulli(GridParallelRNG &rng,Lattice<vobj> &l){ rng.fill(l,rng._bernoulli);}
|
||||||
|
|
||||||
template <class vobj> inline void gaussian(GridParallelRNG &rng,Lattice<vobj> &l){
|
template <class sobj> inline void random(GridSerialRNG &rng,sobj &l) { rng.fill(l,rng._uniform ); }
|
||||||
rng.fill(l,rng._gaussian);
|
template <class sobj> inline void gaussian(GridSerialRNG &rng,sobj &l) { rng.fill(l,rng._gaussian ); }
|
||||||
}
|
template <class sobj> inline void bernoulli(GridSerialRNG &rng,sobj &l){ rng.fill(l,rng._bernoulli); }
|
||||||
|
|
||||||
template <class vobj> inline void bernoulli(GridParallelRNG &rng,Lattice<vobj> &l){
|
|
||||||
rng.fill(l,rng._bernoulli);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class sobj> inline void random(GridSerialRNG &rng,sobj &l){
|
|
||||||
rng.fill(l,rng._uniform);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class sobj> inline void gaussian(GridSerialRNG &rng,sobj &l){
|
|
||||||
rng.fill(l,rng._gaussian);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class sobj> inline void bernoulli(GridSerialRNG &rng,sobj &l){
|
|
||||||
rng.fill(l,rng._bernoulli);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
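
GlobalU01 above follows an owner-computes pattern: the rank that holds the requested site draws the number, then broadcasts it so every rank returns the same value. A sketch of the same pattern with plain MPI standing in for Grid's communicator (an assumption; MPI_UINT32_T requires MPI 2.2 or later):

// Sketch only: owner rank supplies the value, everyone receives it.
#include <mpi.h>
#include <cstdint>

uint32_t global_draw(int owner_rank, uint32_t local_value) {
  uint32_t the_number = local_value;  // meaningful only on owner_rank
  MPI_Bcast(&the_number, 1, MPI_UINT32_T, owner_rank, MPI_COMM_WORLD);
  return the_number;                  // now identical on every rank
}
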
@ -42,8 +42,7 @@ namespace Grid {
 -> Lattice<decltype(trace(lhs._odata[0]))>
 {
 Lattice<decltype(trace(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = trace(lhs._odata[ss]);
 }
 return ret;
@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
 inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<Index>(lhs._odata[0]))>
 {
 Lattice<decltype(traceIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-for(int ss=0;ss<lhs._grid->oSites();ss++){
+parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 ret._odata[ss] = traceIndex<Index>(lhs._odata[ss]);
 }
 return ret;
@ -51,7 +51,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
|
|||||||
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
|
template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
|
||||||
half.checkerboard = cb;
|
half.checkerboard = cb;
|
||||||
int ssh=0;
|
int ssh=0;
|
||||||
//PARALLEL_FOR_LOOP
|
//parallel_for
|
||||||
for(int ss=0;ss<full._grid->oSites();ss++){
|
for(int ss=0;ss<full._grid->oSites();ss++){
|
||||||
std::vector<int> coor;
|
std::vector<int> coor;
|
||||||
int cbos;
|
int cbos;
|
||||||
@ -68,7 +68,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
|
|||||||
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
|
template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
|
||||||
int cb = half.checkerboard;
|
int cb = half.checkerboard;
|
||||||
int ssh=0;
|
int ssh=0;
|
||||||
//PARALLEL_FOR_LOOP
|
//parallel_for
|
||||||
for(int ss=0;ss<full._grid->oSites();ss++){
|
for(int ss=0;ss<full._grid->oSites();ss++){
|
||||||
std::vector<int> coor;
|
std::vector<int> coor;
|
||||||
int cbos;
|
int cbos;
|
||||||
@ -153,8 +153,7 @@ inline void blockZAXPY(Lattice<vobj> &fineZ,
|
|||||||
assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]);
|
assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]);
|
||||||
}
|
}
|
||||||
|
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int sf=0;sf<fine->oSites();sf++){
|
||||||
for(int sf=0;sf<fine->oSites();sf++){
|
|
||||||
|
|
||||||
int sc;
|
int sc;
|
||||||
std::vector<int> coor_c(_ndimension);
|
std::vector<int> coor_c(_ndimension);
|
||||||
@ -186,8 +185,7 @@ template<class vobj,class CComplex>
|
|||||||
|
|
||||||
fine_inner = localInnerProduct(fineX,fineY);
|
fine_inner = localInnerProduct(fineX,fineY);
|
||||||
blockSum(coarse_inner,fine_inner);
|
blockSum(coarse_inner,fine_inner);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<coarse->oSites();ss++){
|
||||||
for(int ss=0;ss<coarse->oSites();ss++){
|
|
||||||
CoarseInner._odata[ss] = coarse_inner._odata[ss];
|
CoarseInner._odata[ss] = coarse_inner._odata[ss];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -333,9 +331,6 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
|
|||||||
typedef typename vobj::scalar_object sobj;
|
typedef typename vobj::scalar_object sobj;
|
||||||
typedef typename vvobj::scalar_object ssobj;
|
typedef typename vvobj::scalar_object ssobj;
|
||||||
|
|
||||||
sobj s;
|
|
||||||
ssobj ss;
|
|
||||||
|
|
||||||
GridBase *ig = in._grid;
|
GridBase *ig = in._grid;
|
||||||
GridBase *og = out._grid;
|
GridBase *og = out._grid;
|
||||||
|
|
||||||
@ -347,10 +342,13 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
|
|||||||
for(int d=0;d<no;d++){
|
for(int d=0;d<no;d++){
|
||||||
assert(ig->_processors[d] == og->_processors[d]);
|
assert(ig->_processors[d] == og->_processors[d]);
|
||||||
assert(ig->_ldimensions[d] == og->_ldimensions[d]);
|
assert(ig->_ldimensions[d] == og->_ldimensions[d]);
|
||||||
|
assert(ig->lSites() == og->lSites());
|
||||||
}
|
}
|
||||||
|
|
||||||
//PARALLEL_FOR_LOOP
|
parallel_for(int idx=0;idx<ig->lSites();idx++){
|
||||||
for(int idx=0;idx<ig->lSites();idx++){
|
sobj s;
|
||||||
|
ssobj ss;
|
||||||
|
|
||||||
std::vector<int> lcoor(ni);
|
std::vector<int> lcoor(ni);
|
||||||
ig->LocalIndexToLocalCoor(idx,lcoor);
|
ig->LocalIndexToLocalCoor(idx,lcoor);
|
||||||
peekLocalSite(s,in,lcoor);
|
peekLocalSite(s,in,lcoor);
|
||||||
@ -364,7 +362,6 @@ template<class vobj>
|
|||||||
void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog)
|
void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog)
|
||||||
{
|
{
|
||||||
typedef typename vobj::scalar_object sobj;
|
typedef typename vobj::scalar_object sobj;
|
||||||
sobj s;
|
|
||||||
|
|
||||||
GridBase *lg = lowDim._grid;
|
GridBase *lg = lowDim._grid;
|
||||||
GridBase *hg = higherDim._grid;
|
GridBase *hg = higherDim._grid;
|
||||||
@ -386,17 +383,16 @@ void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int
|
|||||||
}
|
}
|
||||||
|
|
||||||
// the above should guarantee that the operations are local
|
// the above should guarantee that the operations are local
|
||||||
// Guido: check the threading here
|
parallel_for(int idx=0;idx<lg->lSites();idx++){
|
||||||
//PARALLEL_FOR_LOOP
|
sobj s;
|
||||||
for(int idx=0;idx<lg->lSites();idx++){
|
|
||||||
std::vector<int> lcoor(nl);
|
std::vector<int> lcoor(nl);
|
||||||
std::vector<int> hcoor(nh);
|
std::vector<int> hcoor(nh);
|
||||||
lg->LocalIndexToLocalCoor(idx,lcoor);
|
lg->LocalIndexToLocalCoor(idx,lcoor);
|
||||||
dl=0;
|
int ddl=0;
|
||||||
hcoor[orthog] = slice;
|
hcoor[orthog] = slice;
|
||||||
for(int d=0;d<nh;d++){
|
for(int d=0;d<nh;d++){
|
||||||
if ( d!=orthog ) {
|
if ( d!=orthog ) {
|
||||||
hcoor[d]=lcoor[dl++];
|
hcoor[d]=lcoor[ddl++];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
peekLocalSite(s,lowDim,lcoor);
|
peekLocalSite(s,lowDim,lcoor);
|
||||||
@ -408,7 +404,6 @@ template<class vobj>
|
|||||||
void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, int orthog)
|
void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, int orthog)
|
||||||
{
|
{
|
||||||
typedef typename vobj::scalar_object sobj;
|
typedef typename vobj::scalar_object sobj;
|
||||||
sobj s;
|
|
||||||
|
|
||||||
GridBase *lg = lowDim._grid;
|
GridBase *lg = lowDim._grid;
|
||||||
GridBase *hg = higherDim._grid;
|
GridBase *hg = higherDim._grid;
|
||||||
@ -429,16 +424,16 @@ void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, in
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// the above should guarantee that the operations are local
|
// the above should guarantee that the operations are local
|
||||||
//PARALLEL_FOR_LOOP
|
parallel_for(int idx=0;idx<lg->lSites();idx++){
|
||||||
for(int idx=0;idx<lg->lSites();idx++){
|
sobj s;
|
||||||
std::vector<int> lcoor(nl);
|
std::vector<int> lcoor(nl);
|
||||||
std::vector<int> hcoor(nh);
|
std::vector<int> hcoor(nh);
|
||||||
lg->LocalIndexToLocalCoor(idx,lcoor);
|
lg->LocalIndexToLocalCoor(idx,lcoor);
|
||||||
dl=0;
|
int ddl=0;
|
||||||
hcoor[orthog] = slice;
|
hcoor[orthog] = slice;
|
||||||
for(int d=0;d<nh;d++){
|
for(int d=0;d<nh;d++){
|
||||||
if ( d!=orthog ) {
|
if ( d!=orthog ) {
|
||||||
hcoor[d]=lcoor[dl++];
|
hcoor[d]=lcoor[ddl++];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
peekLocalSite(s,higherDim,hcoor);
|
peekLocalSite(s,higherDim,hcoor);
|
||||||
@ -452,7 +447,6 @@ template<class vobj>
|
|||||||
void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
|
void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
|
||||||
{
|
{
|
||||||
typedef typename vobj::scalar_object sobj;
|
typedef typename vobj::scalar_object sobj;
|
||||||
sobj s;
|
|
||||||
|
|
||||||
GridBase *lg = lowDim._grid;
|
GridBase *lg = lowDim._grid;
|
||||||
GridBase *hg = higherDim._grid;
|
GridBase *hg = higherDim._grid;
|
||||||
@ -469,8 +463,8 @@ void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice
|
|||||||
}
|
}
|
||||||
|
|
||||||
// the above should guarantee that the operations are local
|
// the above should guarantee that the operations are local
|
||||||
//PARALLEL_FOR_LOOP
|
parallel_for(int idx=0;idx<lg->lSites();idx++){
|
||||||
for(int idx=0;idx<lg->lSites();idx++){
|
sobj s;
|
||||||
std::vector<int> lcoor(nl);
|
std::vector<int> lcoor(nl);
|
||||||
std::vector<int> hcoor(nh);
|
std::vector<int> hcoor(nh);
|
||||||
lg->LocalIndexToLocalCoor(idx,lcoor);
|
lg->LocalIndexToLocalCoor(idx,lcoor);
|
||||||
@ -488,7 +482,6 @@ template<class vobj>
 void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
 {
   typedef typename vobj::scalar_object sobj;
-  sobj s;

   GridBase *lg = lowDim._grid;
   GridBase *hg = higherDim._grid;
@ -505,8 +498,8 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slic
   }

   // the above should guarantee that the operations are local
-  //PARALLEL_FOR_LOOP
-  for(int idx=0;idx<lg->lSites();idx++){
+  parallel_for(int idx=0;idx<lg->lSites();idx++){
+    sobj s;
     std::vector<int> lcoor(nl);
     std::vector<int> hcoor(nh);
     lg->LocalIndexToLocalCoor(idx,lcoor);
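All three slice helpers above now share the same shape. As a reading aid, a hypothetical round trip using the signatures shown in the diff (grid and field construction omitted; the names are placeholders, not code from the repository):

// GaugeField U5(FGrid);          // 5d field, placeholder
// GaugeField U4(UGrid);          // 4d field, placeholder
// ExtractSlice(U4, U5, 0, 0);    // pull slice 0 out along orthog direction 0
// ...operate on U4...
// InsertSlice (U4, U5, 0, 0);    // and push it back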
@ -574,8 +567,7 @@ typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>
     in_grid->iCoorFromIindex(in_icoor[lane], lane);
   }

-PARALLEL_FOR_LOOP
-  for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
+  parallel_for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
     //Assemble vector of pointers to output elements
     std::vector<sobj*> out_ptrs(in_nsimd);

@ -623,8 +615,7 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
   std::vector<SobjOut> in_slex_conv(in_grid->lSites());
   unvectorizeToLexOrdArray(in_slex_conv, in);

-PARALLEL_FOR_LOOP
-  for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
+  parallel_for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
     std::vector<int> out_ocoor(ndim);
     out_grid->oCoorFromOindex(out_ocoor, out_oidx);

@ -643,9 +634,5 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
     }
   }
-
-
-
-
 }
 #endif
@ -41,8 +41,7 @@ namespace Grid {
   template<class vobj>
   inline Lattice<vobj> transpose(const Lattice<vobj> &lhs){
     Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = transpose(lhs._odata[ss]);
     }
     return ret;
@ -55,12 +54,10 @@ PARALLEL_FOR_LOOP
   inline auto TransposeIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))>
   {
     Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = transposeIndex<Index>(lhs._odata[ss]);
     }
     return ret;
   };

 }
 #endif
@ -37,8 +37,7 @@ namespace Grid {
   Lattice<obj> ret(rhs._grid);
   ret.checkerboard = rhs.checkerboard;
   conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<rhs._grid->oSites();ss++){
+  parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
     ret._odata[ss]=pow(rhs._odata[ss],y);
   }
   return ret;
@ -47,8 +46,7 @@ PARALLEL_FOR_LOOP
   Lattice<obj> ret(rhs._grid);
   ret.checkerboard = rhs.checkerboard;
   conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<rhs._grid->oSites();ss++){
+  parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
     ret._odata[ss]=mod(rhs._odata[ss],y);
   }
   return ret;
@ -58,8 +56,7 @@ PARALLEL_FOR_LOOP
   Lattice<obj> ret(rhs._grid);
   ret.checkerboard = rhs.checkerboard;
   conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<rhs._grid->oSites();ss++){
+  parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
     ret._odata[ss]=div(rhs._odata[ss],y);
   }
   return ret;
@ -69,8 +66,7 @@ PARALLEL_FOR_LOOP
   Lattice<obj> ret(rhs._grid);
   ret.checkerboard = rhs.checkerboard;
   conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<rhs._grid->oSites();ss++){
+  parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
     ret._odata[ss]=Exponentiate(rhs._odata[ss],alpha, Nexp);
   }
   return ret;
@ -56,8 +56,7 @@ inline void whereWolf(Lattice<vobj> &ret,const Lattice<iobj> &predicate,Lattice<
   std::vector<scalar_object> truevals (Nsimd);
   std::vector<scalar_object> falsevals(Nsimd);

-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<iftrue._grid->oSites(); ss++){
+  parallel_for(int ss=0;ss<iftrue._grid->oSites(); ss++){

     extract(iftrue._odata[ss]  ,truevals);
     extract(iffalse._odata[ss] ,falsevals);
@ -29,9 +29,10 @@ See the full license in the file "LICENSE" in the top level distribution
 directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>

 #include <cxxabi.h>
+#include <memory>

 namespace Grid {
@ -30,42 +30,34 @@
 #define GRID_BINARY_IO_H

+#include "IldgIOtypes.h"

 #ifdef HAVE_ENDIAN_H
 #include <endian.h>
 #endif
 #include <arpa/inet.h>
 #include <algorithm>
-// 64bit endian swap is a portability pain
-#ifndef __has_builtin         // Optional of course.
-#define __has_builtin(x) 0    // Compatibility with non-clang compilers.
-#endif
-
-#if HAVE_DECL_BE64TOH
-#undef  Grid_ntohll
-#define Grid_ntohll be64toh
-#endif
-
-#if HAVE_DECL_NTOHLL
-#undef  Grid_ntohll
-#define Grid_ntohll ntohll
-#endif
-
-#ifndef Grid_ntohll
+inline uint32_t byte_reverse32(uint32_t f) {
+  f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  return f;
+}
+inline uint64_t byte_reverse64(uint64_t f) {
+  uint64_t g;
+  g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  g = g << 32;
+  f = f >> 32;
+  g|= ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  return g;
+}

 #if BYTE_ORDER == BIG_ENDIAN
-#define Grid_ntohll(A) (A)
+inline uint64_t Grid_ntohll(uint64_t A) { return A; }
 #else
-#if __has_builtin(__builtin_bswap64)
-#define Grid_ntohll(A) __builtin_bswap64(A)
-#else
-#error
-#endif
+inline uint64_t Grid_ntohll(uint64_t A) {
+  return byte_reverse64(A);
+}
-#endif
 #endif

 namespace Grid {
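Since the macro soup is gone, the swap logic is now plain code that can be exercised in isolation. A stand-alone check of the byte_reverse64 body introduced above (the function is copied from the diff; the test harness is ours):

#include <cassert>
#include <cstdint>

inline uint64_t byte_reverse64(uint64_t f) {
  uint64_t g;
  g  = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
  g  = g << 32;
  f  = f >> 32;
  g |= ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
  return g;
}

int main() {
  uint64_t x = 0x0102030405060708ULL;
  assert(byte_reverse64(x) == 0x0807060504030201ULL); // full byte reversal
  assert(byte_reverse64(byte_reverse64(x)) == x);     // involution
  return 0;
}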
@ -236,7 +228,7 @@ class BinaryIO {
     std::vector<int> site({x,y,z,t});

     if (grid->IsBoss()) {
-      fin.read((char *)&file_object, sizeof(file_object));
+      fin.read((char *)&file_object, sizeof(file_object));assert( fin.fail()==0);
       bytes += sizeof(file_object);
       if (ieee32big) be32toh_v((void *)&file_object, sizeof(file_object));
       if (ieee32)    le32toh_v((void *)&file_object, sizeof(file_object));
@ -252,11 +244,13 @@ class BinaryIO {
     std::cout<<GridLogPerformance<<"readObjectSerial: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
              << (double)bytes/ (double)timer.useconds() <<" MB/s "  <<std::endl;

+    grid->Broadcast(0,(void *)&csum,sizeof(csum));
     return csum;
   }

   template<class vobj,class fobj,class munger>
-  static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
+  static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
+                                           const std::string & format)
   {
     typedef typename vobj::scalar_object sobj;

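The grid->Broadcast additions in this hunk and the matching write hunk below close a real gap: the serial paths touch the file on the boss rank only, so csum was previously defined on rank 0 alone and any later assert on it could diverge across ranks. The idiom in isolation (a sketch using the calls as they appear in the diff):

// uint32_t csum = 0;
// if (grid->IsBoss()) {
//   ...rank 0 reads the payload and folds it into csum...
// }
// grid->Broadcast(0, (void *)&csum, sizeof(csum)); // now all ranks agree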
@ -295,14 +289,13 @@ class BinaryIO {


     if ( grid->IsBoss() ) {

       if(ieee32big) htobe32_v((void *)&file_object,sizeof(file_object));
       if(ieee32)    htole32_v((void *)&file_object,sizeof(file_object));
       if(ieee64big) htobe64_v((void *)&file_object,sizeof(file_object));
       if(ieee64)    htole64_v((void *)&file_object,sizeof(file_object));

       // NB could gather an xstrip as an optimisation.
-      fout.write((char *)&file_object,sizeof(file_object));
+      fout.write((char *)&file_object,sizeof(file_object));assert( fout.fail()==0);
       bytes+=sizeof(file_object);
     }
   }}}}
@ -310,10 +303,12 @@ class BinaryIO {
     std::cout<<GridLogPerformance<<"writeObjectSerial: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
              << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;

+    grid->Broadcast(0,(void *)&csum,sizeof(csum));
     return csum;
   }

-  static inline uint32_t writeRNGSerial(GridSerialRNG &serial, GridParallelRNG &parallel, std::string file, int offset) {
+  static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
+  {
     typedef typename GridSerialRNG::RngStateType RngStateType;
     const int RngStateCount = GridSerialRNG::RngStateCount;

@ -342,32 +337,30 @@ class BinaryIO {
     std::cout << GridLogDebug << "Type has " << bytes << " bytes" << std::endl;
     std::vector<int> gcoor;

-    std::cout << GridLogDebug << "gsites: " << gsites << " loop" << std::endl;
-    for (int gidx = 0; gidx < gsites; gidx++) {
-      int rank, o_idx, i_idx;
-      grid->GlobalIndexToGlobalCoor(gidx, gcoor);
-      grid->GlobalCoorToRankIndex(rank, o_idx, i_idx, gcoor);
-      int l_idx = parallel.generator_idx(o_idx, i_idx);
-      //std::cout << GridLogDebug << "l_idx " << l_idx << " o_idx " << o_idx
-      //          << " i_idx " << i_idx << " rank " << rank << std::endl;
-      if (rank == grid->ThisRank()) {
-        parallel.GetState(saved, l_idx);
+    for(int gidx=0;gidx<gsites;gidx++){
+      int rank,o_idx,i_idx;
+      grid->GlobalIndexToGlobalCoor(gidx,gcoor);
+      grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
+      int l_idx=parallel.generator_idx(o_idx,i_idx);
+      if( rank == grid->ThisRank() ){
+        // std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
+        parallel.GetState(saved,l_idx);
       }
       grid->Broadcast(rank, (void *)&saved[0], bytes);

-      //grid->Barrier(); // necessary?
-      if (grid->IsBoss()) {
-        Uint32Checksum((uint32_t *)&saved[0], bytes, csum);
-        fout.write((char *)&saved[0], bytes);
+      if ( grid->IsBoss() ) {
+        Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
+        fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
       }
-      grid->Barrier(); // this can be necessary
     }

-    if (grid->IsBoss()) {
-      serial.GetState(saved, 0);
-      Uint32Checksum((uint32_t *)&saved[0], bytes, csum);
-      fout.write((char *)&saved[0], bytes);
+    if ( grid->IsBoss() ) {
+      serial.GetState(saved,0);
+      Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
+      fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
     }

     grid->Broadcast(0, (void *)&csum, sizeof(csum));
@ -425,7 +418,7 @@ class BinaryIO {
     //  << " i_idx " << i_idx << " rank " << rank << std::endl;

     if ( grid->IsBoss() ) {
-      fin.read((char *)&saved[0],bytes);
+      fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
       Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
     }

@ -437,7 +430,7 @@ class BinaryIO {
     }

     if ( grid->IsBoss() ) {
-      fin.read((char *)&saved[0],bytes);
+      fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
       serial.SetState(saved,0);
       Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
     }
@ -449,6 +442,7 @@ class BinaryIO {
     return csum;
   }

+
   template <class vobj, class fobj, class munger>
   static inline uint32_t readObjectParallel(Lattice<vobj> &Umu,
                                             std::string file,
@ -547,9 +541,7 @@ class BinaryIO {

       Lexicographic::CoorFromIndex(tsite,tlex,range);

-      for(int d=0; d<nd; d++)
-      {
+      for(int d=0;d<nd;d++){
         lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
         gsite[d] = tsite[d]+start[d];               // global site
       }
@ -567,6 +559,7 @@ class BinaryIO {
       ////////////////////////////////
       if (myrank == iorank) {

+
         if (ILDG.is_ILDG){
           // use C-LIME to populate the record
 #ifdef HAVE_LIME
@ -607,9 +600,8 @@ class BinaryIO {
     grid->Barrier();

     timer.Stop();
-    std::cout <<GridLogPerformance<<"readObjectParallel: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
+    std::cout<<GridLogPerformance<<"readObjectParallel: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
              << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;

     return csum;
   }

@ -658,14 +650,14 @@ class BinaryIO {

       if (parallel[d]) {
         range[d] = grid->_ldimensions[d];
-        start[d] = grid->_processor_coor[d] * range[d];
-        ioproc[d] = grid->_processor_coor[d];
+        start[d] = grid->_processor_coor[d]*range[d];
+        ioproc[d]= grid->_processor_coor[d];
       } else {
         range[d] = grid->_gdimensions[d];
         start[d] = 0;
-        ioproc[d] = 0;
+        ioproc[d]= 0;

-        if (grid->_processor_coor[d] != 0) IOnode = 0;
+        if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
       }

       slice_vol = slice_vol * range[d];
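The effect of the loop above is a simple predicate: a rank performs I/O only if it sits at coordinate zero in every direction that is not decomposed in parallel. Pulled out as a free function for clarity (our sketch; Grid keeps this inline in the method):

#include <vector>

static int isIOnode(const std::vector<int> &parallel,
                    const std::vector<int> &proc_coor) {
  int IOnode = 1;
  for (int d = 0; d < (int)parallel.size(); d++) {
    // serialized direction: only the origin plane participates in I/O
    if (!parallel[d] && proc_coor[d] != 0) IOnode = 0;
  }
  return IOnode;
}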
@ -674,18 +666,19 @@ class BinaryIO {
     {
       uint32_t tmp = IOnode;
       grid->GlobalSum(tmp);
-      std::cout << GridLogMessage << "Parallel write I/O from " << file
-                << " with " << tmp << " IOnodes for subslice ";
-      for (int d = 0; d < grid->_ndimension; d++) {
-        std::cout << range[d];
-        if (d < grid->_ndimension - 1) std::cout << " x ";
+      std::cout<< GridLogMessage<< "Parallel write I/O from "<< file
+               << " with " <<tmp<< " IOnodes for subslice ";
+      for(int d=0;d<grid->_ndimension;d++){
+        std::cout<< range[d];
+        if( d< grid->_ndimension-1 )
+          std::cout<< " x ";
       }
       std::cout << std::endl;
     }

     GridStopWatch timer;
     timer.Start();
-    uint64_t bytes = 0;
+    uint64_t bytes=0;

     int myrank = grid->ThisRank();
     int iorank = grid->RankFromProcessorCoor(ioproc);
@ -729,7 +722,7 @@ class BinaryIO {

       Lexicographic::CoorFromIndex(tsite, tlex, range);

-      for (int d = 0; d < nd; d++) {
+      for(int d = 0;d < nd; d++){
         lsite[d] = tsite[d] % grid->_ldimensions[d];  // local site
         gsite[d] = tsite[d] + start[d];               // global site
       }
@ -749,11 +742,10 @@ class BinaryIO {
       peekLocalSite(siteObj, Umu, lsite);

       // Pair of nodes may need to do pt2pt send
-      if (rank != iorank) {  // comms is necessary
-        if ((myrank == rank) || (myrank == iorank)) {  // and we have to do it
+      if ( rank != iorank ) {  // comms is necessary
+        if ( (myrank == rank) || (myrank==iorank) ) {  // and we have to do it
           // Send to IOrank
-          grid->SendRecvPacket((void *)&siteObj, (void *)&siteObj, rank, iorank,
-                               sizeof(siteObj));
+          grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
         }
       }

@ -778,7 +770,7 @@ class BinaryIO {

       else {
         fout.seekp(offset + g_idx * sizeof(fileObj));
-        fout.write((char *)&fileObj, sizeof(fileObj));
+        fout.write((char *)&fileObj, sizeof(fileObj));assert( fout.fail()==0);
       }
       bytes += sizeof(fileObj);
     }
@ -798,6 +790,7 @@ class BinaryIO {
     if (IOnode)
       fout.close();

+
     return csum;
   }
 };
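With asserts now attached to every fstream read and write in this class, the recurring idiom could be wrapped once. A checked-write helper expressing the same pattern (our sketch, not an API in the repository):

#include <cassert>
#include <fstream>

template <class T>
void checked_write(std::ofstream &fout, const T &obj) {
  fout.write((const char *)&obj, sizeof(obj));
  assert(fout.fail() == 0); // same condition the diff asserts inline
}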
@ -1,4 +1,4 @@
 /*************************************************************************************

     Grid physics library, www.github.com/paboyle/Grid

@ -6,9 +6,9 @@

     Copyright (C) 2015

 Author: Matt Spraggs <matthew.spraggs@gmail.com>
 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 Author: paboyle <paboyle@ph.ed.ac.uk>

     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@ -25,8 +25,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
     51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

     See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
 #ifndef GRID_NERSC_IO_H
 #define GRID_NERSC_IO_H

@ -41,23 +41,23 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #include <pwd.h>

 namespace Grid {
 namespace QCD {

 using namespace Grid;

 ////////////////////////////////////////////////////////////////////////////////
 // Some data types for intermediate storage
 ////////////////////////////////////////////////////////////////////////////////
 template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, 2>, 4 >;

 typedef iLorentzColour2x3<Complex>  LorentzColour2x3;
 typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F;
 typedef iLorentzColour2x3<ComplexD> LorentzColour2x3D;

 ////////////////////////////////////////////////////////////////////////////////
 // header specification/interpretation
 ////////////////////////////////////////////////////////////////////////////////
 class NerscField {
  public:
   // header strings (not in order)
   int dimension[4];
@ -78,14 +78,14 @@ class NerscField {
   std::string creation_date ;
   std::string archive_date ;
   std::string floating_point;
 };

 //////////////////////////////////////////////////////////////////////
 // Bit and Physical Checksumming and QA of data
 //////////////////////////////////////////////////////////////////////

 inline void NerscGrid(GridBase *grid,NerscField &header)
 {
   assert(grid->_ndimension==4);
   for(int d=0;d<4;d++) {
     header.dimension[d] = grid->_fdimensions[d];
@ -93,17 +93,17 @@ inline void NerscGrid(GridBase *grid,NerscField &header)
   for(int d=0;d<4;d++) {
     header.boundary[d] = std::string("PERIODIC");
   }
 }
 template<class GaugeField>
 inline void NerscStatistics(GaugeField & data,NerscField &header)
 {
   // How to convert data precision etc...
   header.link_trace=Grid::QCD::WilsonLoops<PeriodicGimplR>::linkTrace(data);
   header.plaquette =Grid::QCD::WilsonLoops<PeriodicGimplR>::avgPlaquette(data);
 }

 inline void NerscMachineCharacteristics(NerscField &header)
 {
   // Who
   struct passwd *pw = getpwuid (getuid());
   if (pw) header.creator = std::string(pw->pw_name);
@ -123,10 +123,10 @@ inline void NerscMachineCharacteristics(NerscField &header)
   header.creator_hardware+= std::string(name.sysname)+"-";
   header.creator_hardware+= std::string(name.release);

 }
 //////////////////////////////////////////////////////////////////////
 // Utilities ; these are QCD aware
 //////////////////////////////////////////////////////////////////////
 inline void NerscChecksum(uint32_t *buf,uint32_t buf_size_bytes,uint32_t &csum)
 {
   BinaryIO::Uint32Checksum(buf,buf_size_bytes,csum);
@ -206,54 +206,53 @@ inline void NerscMachineCharacteristics(NerscField &header)
 };


 ////////////////////////////////////////////////////////////////////////////////
 // Write and read from fstream; comput header offset for payload
 ////////////////////////////////////////////////////////////////////////////////
 class NerscIO : public BinaryIO {
  public:

   static inline void truncate(std::string file){
     std::ofstream fout(file,std::ios::out);
   }

-#define dump_nersc_header(field, s)\
-  s << "BEGIN_HEADER" << std::endl;\
-  s << "HDR_VERSION = " << field.hdr_version << std::endl;\
-  s << "DATATYPE = " << field.data_type << std::endl;\
-  s << "STORAGE_FORMAT = " << field.storage_format << std::endl;\
-  for(int i=0;i<4;i++){\
-    s << "DIMENSION_" << i+1 << " = " << field.dimension[i] << std::endl ;\
-  }\
-  s << "LINK_TRACE = " << std::setprecision(10) << field.link_trace << std::endl;\
-  s << "PLAQUETTE = " << std::setprecision(10) << field.plaquette << std::endl;\
-  for(int i=0;i<4;i++){\
-    s << "BOUNDARY_"<<i+1<<" = " << field.boundary[i] << std::endl;\
-  }\
+#define dump_nersc_header(field, s) \
+  s << "BEGIN_HEADER" << std::endl; \
+  s << "HDR_VERSION = " << field.hdr_version << std::endl; \
+  s << "DATATYPE = " << field.data_type << std::endl; \
+  s << "STORAGE_FORMAT = " << field.storage_format << std::endl; \
+  for(int i=0;i<4;i++){ \
+    s << "DIMENSION_" << i+1 << " = " << field.dimension[i] << std::endl ; \
+  } \
+  s << "LINK_TRACE = " << std::setprecision(10) << field.link_trace << std::endl; \
+  s << "PLAQUETTE = " << std::setprecision(10) << field.plaquette << std::endl; \
+  for(int i=0;i<4;i++){ \
+    s << "BOUNDARY_"<<i+1<<" = " << field.boundary[i] << std::endl; \
+  } \
   \
-  s << "CHECKSUM = "<< std::hex << std::setw(10) << field.checksum << std::dec<<std::endl;\
-  s << "ENSEMBLE_ID = " << field.ensemble_id << std::endl;\
-  s << "ENSEMBLE_LABEL = " << field.ensemble_label << std::endl;\
-  s << "SEQUENCE_NUMBER = " << field.sequence_number << std::endl;\
-  s << "CREATOR = " << field.creator << std::endl;\
-  s << "CREATOR_HARDWARE = "<< field.creator_hardware << std::endl;\
-  s << "CREATION_DATE = " << field.creation_date << std::endl;\
-  s << "ARCHIVE_DATE = " << field.archive_date << std::endl;\
-  s << "FLOATING_POINT = " << field.floating_point << std::endl;\
+  s << "CHECKSUM = "<< std::hex << std::setw(10) << field.checksum << std::dec<<std::endl; \
+  s << "ENSEMBLE_ID = " << field.ensemble_id << std::endl; \
+  s << "ENSEMBLE_LABEL = " << field.ensemble_label << std::endl; \
+  s << "SEQUENCE_NUMBER = " << field.sequence_number << std::endl; \
+  s << "CREATOR = " << field.creator << std::endl; \
+  s << "CREATOR_HARDWARE = "<< field.creator_hardware << std::endl; \
+  s << "CREATION_DATE = " << field.creation_date << std::endl; \
+  s << "ARCHIVE_DATE = " << field.archive_date << std::endl; \
+  s << "FLOATING_POINT = " << field.floating_point << std::endl; \
   s << "END_HEADER" << std::endl;
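For orientation, the macro emits a flat key = value block bracketed by BEGIN_HEADER/END_HEADER. An illustrative dump with placeholder values (the field names come from the macro; the numbers and elisions are not output from a real configuration):

BEGIN_HEADER
HDR_VERSION = 1.0
DATATYPE = 4D_SU3_GAUGE_3x3
STORAGE_FORMAT = ...
DIMENSION_1 = 16
DIMENSION_2 = 16
DIMENSION_3 = 16
DIMENSION_4 = 32
LINK_TRACE = ...
PLAQUETTE = ...
BOUNDARY_1 = PERIODIC
BOUNDARY_2 = PERIODIC
BOUNDARY_3 = PERIODIC
BOUNDARY_4 = PERIODIC
CHECKSUM = ...
FLOATING_POINT = IEEE64BIG
END_HEADER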
   static inline unsigned int writeHeader(NerscField &field,std::string file)
   {
     std::ofstream fout(file,std::ios::out|std::ios::in);

     fout.seekp(0,std::ios::beg);
     dump_nersc_header(field, fout);
     field.data_start = fout.tellp();
     return field.data_start;
   }

   // for the header-reader
   static inline int readHeader(std::string file,GridBase *grid,  NerscField &field)
   {
     int offset=0;
     std::map<std::string,std::string> header;
     std::string line;
@ -266,10 +265,13 @@ static inline int readHeader(std::string file,GridBase *grid, NerscField &field
     getline(fin,line); // read one line and insist is

     removeWhitespace(line);
+    std::cout << GridLogMessage << "* " << line << std::endl;

     assert(line==std::string("BEGIN_HEADER"));

     do {
       getline(fin,line); // read one line
+      std::cout << GridLogMessage << "* "<<line<< std::endl;
       int eq = line.find("=");
       if(eq >0) {
         std::string key=line.substr(0,eq);
@ -319,15 +321,17 @@ static inline int readHeader(std::string file,GridBase *grid, NerscField &field
     field.floating_point = header["FLOATING_POINT"];

     return field.data_start;
 }

 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Now the meat: the object readers
 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#define PARALLEL_READ
+#define PARALLEL_WRITE

 template<class vsimd>
 static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,NerscField& header,std::string file)
 {
   typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

   GridBase *grid = Umu._grid;
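These two defines are blunt but effective compile-time switches: every read and write site below is wrapped in #ifdef PARALLEL_READ / #ifdef PARALLEL_WRITE, and commenting a define out drops the whole file back to the serial path. The shape of the pattern in isolation (stand-in function names, not Grid API):

#include <cstdint>

static uint32_t read_parallel() { return 0; } // stand-in for readObjectParallel
static uint32_t read_serial()   { return 0; } // stand-in for readObjectSerial

#define PARALLEL_READ

static uint32_t readConfig() {
#ifdef PARALLEL_READ
  return read_parallel();
#else
  return read_serial();
#endif
}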
@ -347,25 +351,41 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
   // munger is a function of <floating point, Real, data_type>
   if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
     if ( ieee32 || ieee32big ) {
-      // csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
+#ifdef PARALLEL_READ
       csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
         (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
+#else
+      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
+        (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
+#endif
     }
     if ( ieee64 || ieee64big ) {
-      //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
+#ifdef PARALLEL_READ
       csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
         (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
+#else
+      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
+        (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
+#endif
     }
   } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
     if ( ieee32 || ieee32big ) {
-      //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
+#ifdef PARALLEL_READ
       csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
         (Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
+#else
+      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
+        (Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
+#endif
     }
     if ( ieee64 || ieee64big ) {
-      // csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
+#ifdef PARALLEL_READ
       csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
         (Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
+#else
+      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
+        (Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
+#endif
     }
   } else {
     assert(0);
@ -373,17 +393,22 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,

   NerscStatistics<GaugeField>(Umu,clone);

+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<< csum<< std::dec
+           <<" header "<<std::hex<<header.checksum<<std::dec <<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" plaquette "<<clone.plaquette
+           <<" header "<<header.plaquette<<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" link_trace "<<clone.link_trace
+           <<" header "<<header.link_trace<<std::endl;
   assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
   assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );

   assert(csum == header.checksum );

-  std::cout<<GridLogMessage <<"Read NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
 }

 template<class vsimd>
 static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,std::string file, int two_row,int bits32)
 {
   typedef Lattice<iLorentzColourMatrix<vsimd> > GaugeField;

   typedef iLorentzColourMatrix<vsimd> vobj;
@ -416,27 +441,22 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
     Nersc3x2unmunger<fobj2D,sobj> munge;
     BinaryIO::Uint32Checksum<vobj,fobj2D>(Umu, munge,header.checksum);
     offset = writeHeader(header,file);
+#ifdef PARALLEL_WRITE
+    csum=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
+#else
     csum=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
+#endif
-    std::string file1 = file+"para";
-    int offset1 = writeHeader(header,file1);
-    int csum1=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);
-    //int csum1=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);

-    //std::cout << GridLogMessage << " TESTING PARALLEL WRITE offsets " << offset1 << " "<< offset << std::endl;
-    //std::cout << GridLogMessage << " TESTING PARALLEL WRITE csums " << csum1 << " "<<std::hex<< csum << std::dec<< std::endl;

-    assert(offset1==offset);
-    assert(csum1==csum);

   } else {
     header.floating_point = std::string("IEEE64BIG");
     header.data_type      = std::string("4D_SU3_GAUGE_3x3");
     NerscSimpleUnmunger<fobj3D,sobj> munge;
     BinaryIO::Uint32Checksum<vobj,fobj3D>(Umu, munge,header.checksum);
     offset = writeHeader(header,file);
+#ifdef PARALLEL_WRITE
     csum=BinaryIO::writeObjectParallel<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
+#else
+    csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
+#endif
   }

   std::cout<<GridLogMessage <<"Written NERSC Configuration on "<< file << " checksum "<<std::hex<<csum<< std::dec<<" plaq "<< header.plaquette <<std::endl;
@ -447,8 +467,8 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
 ///////////////////////////////
 // RNG state
 ///////////////////////////////
 static inline void writeRNGState(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file)
 {
   typedef typename GridParallelRNG::RngStateType RngStateType;

   // Following should become arguments
@ -470,10 +490,15 @@ static inline void writeRNGState(GridSerialRNG &serial,GridParallelRNG &parallel
 #ifdef RNG_RANLUX
   header.floating_point = std::string("UINT64");
   header.data_type      = std::string("RANLUX48");
-#else
+#endif
+#ifdef RNG_MT19937
   header.floating_point = std::string("UINT32");
   header.data_type      = std::string("MT19937");
 #endif
+#ifdef RNG_SITMO
+  header.floating_point = std::string("UINT64");
+  header.data_type      = std::string("SITMO");
+#endif

   truncate(file);
   offset = writeHeader(header,file);
@ -485,8 +510,8 @@ static inline void writeRNGState(GridSerialRNG &serial,GridParallelRNG &parallel

 }

 static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel,NerscField& header,std::string file)
 {
   typedef typename GridParallelRNG::RngStateType RngStateType;

   GridBase *grid = parallel._grid;
@ -501,10 +526,15 @@ static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel
 #ifdef RNG_RANLUX
   assert(format == std::string("UINT64"));
   assert(data_type == std::string("RANLUX48"));
-#else
+#endif
+#ifdef RNG_MT19937
   assert(format == std::string("UINT32"));
   assert(data_type == std::string("MT19937"));
 #endif
+#ifdef RNG_SITMO
+  assert(format == std::string("UINT64"));
+  assert(data_type == std::string("SITMO"));
+#endif

   // depending on datatype, set up munger;
   // munger is a function of <floating point, Real, data_type>
@ -513,10 +543,10 @@ static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel
   assert(csum == header.checksum );

   std::cout<<GridLogMessage <<"Read NERSC RNG file "<<file<< " format "<< data_type <<std::endl;
 }

 };


 }}
 #endif
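Restructuring the RNG blocks from if/else to one #ifdef per engine means the writer and reader enforce the same engine-to-format contract, and a newly added engine can no longer silently inherit another engine's strings. The mapping the blocks encode, collected in one place (our table; values taken from the diff):

#include <cassert>
#include <string>

static std::string rng_format(const std::string &engine) {
  if (engine == "RANLUX48") return "UINT64";
  if (engine == "MT19937")  return "UINT32";
  if (engine == "SITMO")    return "UINT64";
  assert(0 && "unknown RNG engine"); // mirror the header's hard failure
  return "";
}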
@ -26,8 +26,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 *************************************************************************************/
 /* END LEGAL */

-#include <Grid/Grid.h>
-#include <Grid/PerfCount.h>
+#include <Grid/GridCore.h>
+#include <Grid/perfmon/PerfCount.h>

 namespace Grid {

@ -172,7 +172,7 @@ public:
     const char * name = PerformanceCounterConfigs[PCT].name;
     fd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
     if (fd == -1) {
-      fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
+      fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
       perror("Error is");
     }
     int norm = PerformanceCounterConfigs[PCT].normalisation;
@ -181,7 +181,7 @@ public:
     name = PerformanceCounterConfigs[norm].name;
     cyclefd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
     if (cyclefd == -1) {
-      fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
+      fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
       perror("Error is");
     }
 #endif
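The fprintf change fixes a genuine format-string mismatch: pe.config is a fixed-width 64-bit field (__u64), which is unsigned long rather than unsigned long long on some 64-bit ABIs, so %llx strictly requires a cast; the diff supplies (long long). A standalone illustration:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t config = 0x10; // stands in for pe.config (__u64)
  // The cast guarantees the vararg really has the width %llx expects.
  std::printf("Error opening leader %llx for event %s\n",
              (long long)config, "<event>");
  return 0;
}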
@ -1,11 +1,9 @@
-#include <Grid/Grid.h>
-#include <Grid/PerfCount.h>
-#include <Grid/Stat.h>
+#include <Grid/GridCore.h>
+#include <Grid/perfmon/PerfCount.h>
+#include <Grid/perfmon/Stat.h>


 namespace Grid {


 bool PmuStat::pmu_initialized=false;

@ -29,11 +29,15 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#ifndef GRID_QCD_H
-#define GRID_QCD_H
+#ifndef GRID_QCD_BASE_H
+#define GRID_QCD_BASE_H
 namespace Grid{
 namespace QCD {

+    static const int Xdir = 0;
+    static const int Ydir = 1;
+    static const int Zdir = 2;
+    static const int Tdir = 3;

     static const int Xp = 0;
     static const int Yp = 1;
@ -61,7 +65,6 @@ namespace QCD {
 #define SpinIndex 1
 #define LorentzIndex 0

-
     // Also should make these a named enum type
     static const int DaggerNo=0;
     static const int DaggerYes=1;
@ -492,6 +495,8 @@ namespace QCD {
 } //namespace QCD
 } // Grid

+/*
+<<<<<<< HEAD
 #include <Grid/qcd/utils/SpaceTimeGrid.h>
 #include <Grid/qcd/spin/Dirac.h>
 #include <Grid/qcd/spin/TwoSpinor.h>
@ -513,9 +518,15 @@ namespace QCD {

 #include <Grid/qcd/hmc/integrators/Integrator.h>
 #include <Grid/qcd/hmc/integrators/Integrator_algorithm.h>
+#include <Grid/qcd/observables/hmc_observable.h>
 #include <Grid/qcd/hmc/HMC.h>


 //#include <Grid/qcd/modules/mods.h>
+=======

+>>>>>>> develop
+*/


 #endif
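The new Xdir..Tdir constants name the four lattice directions as plain indices, alongside the pre-existing signed aliases Xp, Yp, and so on. A hypothetical use with Grid's circular shift (field construction omitted; treat the exact types as placeholders):

// LatticeColourMatrix U(grid);                  // placeholder field
// LatticeColourMatrix Ut = Cshift(U, Tdir, 1);  // shift one site in +t
// Writing Tdir instead of the literal 3 keeps the direction explicit.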
50
lib/qcd/action/Action.h
Normal file
@ -0,0 +1,50 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/Actions.h
+
+    Copyright (C) 2015
+
+Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
+Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
+Author: neo <cossu@post.kek.jp>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef GRID_QCD_ACTION_H
+#define GRID_QCD_ACTION_H
+
+////////////////////////////////////////////
+// Abstract base interface
+////////////////////////////////////////////
+#include <Grid/qcd/action/ActionCore.h>
+////////////////////////////////////////////////////////////////////////
+// Fermion actions; prevent coupling fermion.cc files to other headers
+////////////////////////////////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/Fermion.h>
+////////////////////////////////////////
+// Pseudo fermion combinations for HMC
+////////////////////////////////////////
+#include <Grid/qcd/action/pseudofermion/PseudoFermion.h>
+
+#endif
61
lib/qcd/action/ActionCore.h
Normal file
@ -0,0 +1,61 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/ActionCore.h
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: neo <cossu@post.kek.jp>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution
+    directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef QCD_ACTION_CORE
+#define QCD_ACTION_CORE
+
+#include <Grid/qcd/action/ActionBase.h>
+#include <Grid/qcd/action/ActionSet.h>
+#include <Grid/qcd/action/ActionParams.h>
+
+////////////////////////////////////////////
+// Gauge Actions
+////////////////////////////////////////////
+#include <Grid/qcd/action/gauge/Gauge.h>
+
+////////////////////////////////////////////
+// Fermion prereqs
+////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/FermionCore.h>
+
+////////////////////////////////////////////
+// Scalar Actions
+////////////////////////////////////////////
+#include <Grid/qcd/action/scalar/Scalar.h>
+
+////////////////////////////////////////////
+// Utility functions
+////////////////////////////////////////////
+#include <Grid/qcd/utils/Metric.h>
+#include <Grid/qcd/utils/CovariantLaplacian.h>
+
+
+
+
+#endif
@ -35,19 +35,23 @@ directory
|
|||||||
namespace Grid {
|
namespace Grid {
|
||||||
namespace QCD {
|
namespace QCD {
|
||||||
|
|
||||||
// These can move into a params header and be given MacroMagic serialisation
|
// These can move into a params header and be given MacroMagic serialisation
|
||||||
struct GparityWilsonImplParams {
|
struct GparityWilsonImplParams {
|
||||||
bool overlapCommsCompute;
|
bool overlapCommsCompute;
|
||||||
std::vector<int> twists;
|
std::vector<int> twists;
|
||||||
GparityWilsonImplParams() : twists(Nd, 0), overlapCommsCompute(false){};
|
GparityWilsonImplParams() : twists(Nd, 0), overlapCommsCompute(false){};
|
||||||
};
|
};
|
||||||
|
|
||||||
struct WilsonImplParams {
|
struct WilsonImplParams {
|
||||||
bool overlapCommsCompute;
|
bool overlapCommsCompute;
|
||||||
WilsonImplParams() : overlapCommsCompute(false){};
|
WilsonImplParams() : overlapCommsCompute(false){};
|
||||||
};
|
};
|
||||||
|
|
||||||
struct OneFlavourRationalParams : Serializable {
|
struct StaggeredImplParams {
|
||||||
|
StaggeredImplParams() {};
|
||||||
|
};
|
||||||
|
|
||||||
|
struct OneFlavourRationalParams : Serializable {
|
||||||
GRID_SERIALIZABLE_CLASS_MEMBERS(OneFlavourRationalParams,
|
GRID_SERIALIZABLE_CLASS_MEMBERS(OneFlavourRationalParams,
|
||||||
RealD, lo,
|
RealD, lo,
|
||||||
RealD, hi,
|
RealD, hi,
|
||||||
@ -71,10 +75,13 @@ struct OneFlavourRationalParams : Serializable {
|
|||||||
tolerance(tol),
|
tolerance(tol),
|
||||||
degree(_degree),
|
degree(_degree),
|
||||||
precision(_precision){};
|
precision(_precision){};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@@ -30,8 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
/* END LEGAL */

#include <Grid/Eigen/Dense>
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
namespace QCD {
@@ -57,10 +57,23 @@ void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
{
  int Ls=this->Ls;

-  this->DW(psi,this->tmp(),DaggerNo);
+  FermionField tmp_f(this->FermionGrid());
+  this->DW(psi,tmp_f,DaggerNo);

  for(int s=0;s<Ls;s++){
-    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
+    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
+  }
+}
+template<class Impl>
+void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
+{
+  int Ls=this->Ls;
+
+  FermionField tmp_f(this->FermionGrid());
+  this->DW(psi,tmp_f,DaggerYes);
+
+  for(int s=0;s<Ls;s++){
+    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
  }
}
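For reference, both `Dminus` and the new `DminusDag` apply the same slice-diagonal combination, sketched here in the notation of the inline comment (with s the fifth-dimension index and c_s the Cayley coefficients):

\[ \chi_s = \psi_s - c_s\,(D_W\,\psi)_s, \qquad \chi_s = \psi_s - c_s\,(D_W^\dagger\,\psi)_s, \]

the only difference being the DaggerNo/DaggerYes flag passed to `DW`. The locally constructed `tmp_f` replaces the shared `this->tmp()` scratch field, which avoids reusing one hidden buffer across calls.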
@@ -107,17 +120,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
}

-template<class Impl>
-void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
-{
-  int Ls=this->Ls;
-
-  this->DW(psi,this->tmp(),DaggerYes);
-
-  for(int s=0;s<Ls;s++){
-    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
-  }
-}
template<class Impl>
void CayleyFermion5D<Impl>::M5D (const FermionField &psi, FermionField &chi)
{
@@ -168,7 +170,6 @@ void CayleyFermion5D<Impl>::Mooee (const FermionField &psi, FermionField &
  lower[0]   =-mass*lower[0];
  M5D(psi,psi,chi,lower,diag,upper);
}

template<class Impl>
void CayleyFermion5D<Impl>::MooeeDag (const FermionField &psi, FermionField &chi)
{
@@ -190,7 +191,12 @@ void CayleyFermion5D<Impl>::MooeeDag (const FermionField &psi, FermionField &
      lower[s]=-cee[s-1];
    }
  }
+  // Conjugate the terms
+  for (int s=0;s<Ls;s++){
+    diag[s] =conjugate(diag[s]);
+    upper[s]=conjugate(upper[s]);
+    lower[s]=conjugate(lower[s]);
+  }
  M5Ddag(psi,psi,chi,lower,diag,upper);
}
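The added conjugation loop completes the adjoint: `M5Ddag` transposes the band structure of the tridiagonal-in-s operator, but for a complex coefficient type the entries must also be conjugated, since for any banded matrix

\[ (M^\dagger)_{st} = \overline{M_{ts}} . \]

For real Mobius coefficients the old code happened to give the same answer; with complex coefficients (the Z-variants instantiated elsewhere in this diff) the conjugation is required.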
@@ -213,8 +219,22 @@ void CayleyFermion5D<Impl>::MeooeDag5D (const FermionField &psi, FermionField
  std::vector<Coeff_t> diag =bs;
  std::vector<Coeff_t> upper=cs;
  std::vector<Coeff_t> lower=cs;
-  upper[Ls-1]=-mass*upper[Ls-1];
-  lower[0]   =-mass*lower[0];
+  for (int s=0;s<Ls;s++){
+    if ( s== 0 ) {
+      upper[s] = cs[s+1];
+      lower[s] =-mass*cs[Ls-1];
+    } else if ( s==(Ls-1) ) {
+      upper[s] =-mass*cs[0];
+      lower[s] = cs[s-1];
+    } else {
+      upper[s] = cs[s+1];
+      lower[s] = cs[s-1];
+    }
+    upper[s] = conjugate(upper[s]);
+    lower[s] = conjugate(lower[s]);
+    diag[s]  = conjugate(diag[s]);
+  }
  M5Ddag(psi,psi,Din,lower,diag,upper);
}
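The rewritten setup reflects that daggering the s-circulant hopping term does more than flip a sign: row s of the adjoint picks up the conjugated coefficients of the neighbouring rows,

\[ \mathrm{upper}_s = \overline{c_{s+1}}, \qquad \mathrm{lower}_s = \overline{c_{s-1}}, \]

with the mass insertions moving to the opposite corners, \( \mathrm{lower}_0 = -m\,\overline{c_{L_s-1}} \) and \( \mathrm{upper}_{L_s-1} = -m\,\overline{c_0} \). The old two-line version kept the undaggered coefficient assignment and skipped the conjugation.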
@@ -300,7 +320,7 @@ void CayleyFermion5D<Impl>::MDeriv (GaugeField &mat,const FermionField &U,const
    this->DhopDeriv(mat,U,Din,dag);
  } else {
    // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
    this->DhopDeriv(mat,Din,V,dag);
  }
};
@@ -315,7 +335,7 @@ void CayleyFermion5D<Impl>::MoeDeriv(GaugeField &mat,const FermionField &U,const
    this->DhopDerivOE(mat,U,Din,dag);
  } else {
    // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
    this->DhopDerivOE(mat,Din,V,dag);
  }
};
@@ -330,7 +350,7 @@ void CayleyFermion5D<Impl>::MeoDeriv(GaugeField &mat,const FermionField &U,const
    this->DhopDerivEO(mat,U,Din,dag);
  } else {
    // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
    this->DhopDerivEO(mat,Din,V,dag);
  }
};
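The one-line fix repeated across the three derivative routines follows from the inline comment: taking the adjoint of the factorised operator gives

\[ (D_W D_5)^\dagger = D_5^\dagger\, D_W^\dagger , \]

so in the daggered branch the five-dimensional factor applied before the Wilson-derivative call must be the daggered one, `MeooeDag5D`, not `Meooe5D`. This is sketched from the comment alone; the even/odd bookkeeping is unchanged.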
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_CAYLEY_FERMION_H
#define  GRID_QCD_CAYLEY_FERMION_H

+#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
+
namespace Grid {

namespace QCD {
@@ -192,7 +194,9 @@ template void CayleyFermion5D< A >::M5Ddag(const FermionField &psi,const Fermion
template void CayleyFermion5D< A >::MooeeInv    (const FermionField &psi, FermionField &chi); \
template void CayleyFermion5D< A >::MooeeInvDag (const FermionField &psi, FermionField &chi);

+#undef  CAYLEY_DPERP_DENSE
#define CAYLEY_DPERP_CACHE
#undef  CAYLEY_DPERP_LINALG
+#define CAYLEY_DPERP_VEC

#endif
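These switches pick which of the four s-direction ("Dperp") kernel variants get compiled: dense (Eigen), cache-friendly scalar, plain linear-algebra, and Ls-vectorised. Based on the #ifdef CAYLEY_DPERP_DENSE and #ifdef CAYLEY_DPERP_LINALG blocks visible later in this diff, the assumed consumption pattern in each variant's .cc file is:

    // Sketch of the gating pattern (mirrors the blocks shown in the
    // dense and linalg translation units below; not a verbatim quote).
    #ifdef CAYLEY_DPERP_CACHE
    INSTANTIATE_DPERP(WilsonImplF);
    INSTANTIATE_DPERP(WilsonImplD);
    #endif

so flipping a #define/#undef here selects the implementation without touching the call sites.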
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
@@ -54,8 +55,8 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
  // Flops = 6.0*(Nc*Ns) *Ls*vol
  M5Dcalls++;
  M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
    for(int s=0;s<Ls;s++){
      auto tmp = psi._odata[0];
      if ( s==0 ) {
@@ -98,8 +99,8 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
  // Flops = 6.0*(Nc*Ns) *Ls*vol
  M5Dcalls++;
  M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
    auto tmp = psi._odata[0];
    for(int s=0;s<Ls;s++){
      if ( s==0 ) {
@@ -137,8 +138,7 @@ void CayleyFermion5D<Impl>::MooeeInv (const FermionField &psi, FermionField &
  MooeeInvCalls++;
  MooeeInvTime-=usecond();

-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
    auto tmp = psi._odata[0];

    // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls = 12*Ls * (9) = 108*Ls flops
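The recurring PARALLEL_FOR_LOOP + for -> parallel_for rewrites in these hunks fold the threading pragma and the loop header into a single construct. A minimal sketch of how such a macro can be defined, assuming an OpenMP backend (the real definition lives in Grid's threading header and may differ):

    // Hypothetical definition, for illustration only.
    #ifdef GRID_OMP
    #define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)")
    #define parallel_for      PARALLEL_FOR_LOOP for
    #else
    #define PARALLEL_FOR_LOOP
    #define parallel_for      for
    #endif

With this definition parallel_for(int ss=0;ss<N;ss+=Ls){...} expands to the pragma followed by an ordinary for, which is why the loop bodies at the call sites are untouched.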
@@ -181,11 +181,22 @@ void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &
  assert(psi.checkerboard == psi.checkerboard);
  chi.checkerboard=psi.checkerboard;

+  std::vector<Coeff_t> ueec(Ls);
+  std::vector<Coeff_t> deec(Ls);
+  std::vector<Coeff_t> leec(Ls);
+  std::vector<Coeff_t> ueemc(Ls);
+  std::vector<Coeff_t> leemc(Ls);
+  for(int s=0;s<ueec.size();s++){
+    ueec[s] = conjugate(uee[s]);
+    deec[s] = conjugate(dee[s]);
+    leec[s] = conjugate(lee[s]);
+    ueemc[s]= conjugate(ueem[s]);
+    leemc[s]= conjugate(leem[s]);
+  }
  MooeeInvCalls++;
  MooeeInvTime-=usecond();

-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls

    auto tmp = psi._odata[0];

@@ -193,25 +204,25 @@ PARALLEL_FOR_LOOP
    chi[ss]=psi[ss];
    for (int s=1;s<Ls;s++){
      spProj5m(tmp,chi[ss+s-1]);
-      chi[ss+s] = psi[ss+s]-uee[s-1]*tmp;
+      chi[ss+s] = psi[ss+s]-ueec[s-1]*tmp;
    }
    // U_m^{-\dagger}
    for (int s=0;s<Ls-1;s++){
      spProj5p(tmp,chi[ss+s]);
-      chi[ss+Ls-1] = chi[ss+Ls-1] - ueem[s]*tmp;
+      chi[ss+Ls-1] = chi[ss+Ls-1] - ueemc[s]*tmp;
    }

    // L_m^{-\dagger} D^{-dagger}
    for (int s=0;s<Ls-1;s++){
      spProj5m(tmp,chi[ss+Ls-1]);
-      chi[ss+s] = (1.0/dee[s])*chi[ss+s]-(leem[s]/dee[Ls-1])*tmp;
+      chi[ss+s] = (1.0/deec[s])*chi[ss+s]-(leemc[s]/deec[Ls-1])*tmp;
    }
-    chi[ss+Ls-1]= (1.0/dee[Ls-1])*chi[ss+Ls-1];
+    chi[ss+Ls-1]= (1.0/deec[Ls-1])*chi[ss+Ls-1];

    // Apply L^{-dagger}
    for (int s=Ls-2;s>=0;s--){
      spProj5p(tmp,chi[ss+s+1]);
-      chi[ss+s] = chi[ss+s] - lee[s]*tmp;
+      chi[ss+s] = chi[ss+s] - leec[s]*tmp;
    }
  }
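The conjugated copies are built once per call rather than inside the site loop. The underlying identity, as a sketch: with the even-even block factorised as M_ee = L D U (bidiagonal L and U plus the leem/ueem corner terms, diagonal D),

\[ (M_{ee}^\dagger)^{-1} = (L^\dagger)^{-1}\,(D^\dagger)^{-1}\,(U^\dagger)^{-1}, \]

and applying the right-most factor first matches the loop order above: a forward sweep with \(\bar u\), the corner accumulation with \(\bar u_m\), the diagonal solve with \(\bar d^{-1}\) and corner \(\bar l_m\), then the backward sweep with \(\bar l\). Each factor is (bi)diagonal in s, so the whole inverse costs a few sweeps per site.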
@@ -30,7 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
/* END LEGAL */

#include <Grid/Eigen/Dense>
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
@@ -38,20 +39,17 @@ namespace QCD {
/*
 * Dense matrix versions of routines
 */
-/*
template<class Impl>
void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
{
  this->MooeeInternal(psi,chi,DaggerYes,InverseYes);
}

template<class Impl>
void CayleyFermion5D<Impl>::MooeeInv(const FermionField &psi, FermionField &chi)
{
  this->MooeeInternal(psi,chi,DaggerNo,InverseYes);
}
-*/
template<class Impl>
void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv)
{
@@ -125,9 +123,20 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
  }
}

+#ifdef CAYLEY_DPERP_DENSE
+INSTANTIATE_DPERP(GparityWilsonImplF);
+INSTANTIATE_DPERP(GparityWilsonImplD);
+INSTANTIATE_DPERP(WilsonImplF);
+INSTANTIATE_DPERP(WilsonImplD);
+INSTANTIATE_DPERP(ZWilsonImplF);
+INSTANTIATE_DPERP(ZWilsonImplD);
+
template void CayleyFermion5D<GparityWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
template void CayleyFermion5D<GparityWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
template void CayleyFermion5D<WilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
template void CayleyFermion5D<WilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+template void CayleyFermion5D<ZWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+template void CayleyFermion5D<ZWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+#endif

}}
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
@@ -47,17 +48,18 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
                                std::vector<Coeff_t> &diag,
                                std::vector<Coeff_t> &upper)
{
+  Coeff_t one(1.0);
  int Ls=this->Ls;
  for(int s=0;s<Ls;s++){
    if ( s==0 ) {
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,Ls-1);
+      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,Ls-1);
    } else if ( s==(Ls-1)) {
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,0);
-      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,s-1);
    } else {
      axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pplus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pplus(chi,one,chi,lower[s],psi,s,s-1);
    }
  }
}
@@ -69,17 +71,18 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
                                   std::vector<Coeff_t> &diag,
                                   std::vector<Coeff_t> &upper)
{
+  Coeff_t one(1.0);
  int Ls=this->Ls;
  for(int s=0;s<Ls;s++){
    if ( s==0 ) {
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,Ls-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,Ls-1);
    } else if ( s==(Ls-1)) {
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,0);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
    } else {
      axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
    }
  }
}
@@ -87,62 +90,68 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
template<class Impl>
void CayleyFermion5D<Impl>::MooeeInv (const FermionField &psi, FermionField &chi)
{
+  Coeff_t one(1.0);
+  Coeff_t czero(0.0);
  chi.checkerboard=psi.checkerboard;
  int Ls=this->Ls;
  // Apply (L^{\prime})^{-1}
-  axpby_ssp (chi,1.0,psi, 0.0,psi,0,0); // chi[0]=psi[0]
+  axpby_ssp (chi,one,psi, czero,psi,0,0); // chi[0]=psi[0]
  for (int s=1;s<Ls;s++){
-    axpby_ssp_pplus(chi,1.0,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
+    axpby_ssp_pplus(chi,one,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
  }
  // L_m^{-1}
  for (int s=0;s<Ls-1;s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
-    axpby_ssp_pminus(chi,1.0,chi,-leem[s],chi,Ls-1,s);
+    axpby_ssp_pminus(chi,one,chi,-leem[s],chi,Ls-1,s);
  }
  // U_m^{-1} D^{-1}
  for (int s=0;s<Ls-1;s++){
    // Chi[s] + 1/d chi[s]
-    axpby_ssp_pplus(chi,1.0/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
+    axpby_ssp_pplus(chi,one/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
  }
-  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable
+  axpby_ssp(chi,one/dee[Ls-1],chi,czero,chi,Ls-1,Ls-1); // Modest avoidable

  // Apply U^{-1}
  for (int s=Ls-2;s>=0;s--){
-    axpby_ssp_pminus (chi,1.0,chi,-uee[s],chi,s,s+1); // chi[Ls]
+    axpby_ssp_pminus (chi,one,chi,-uee[s],chi,s,s+1); // chi[Ls]
  }
}

template<class Impl>
void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
{
+  Coeff_t one(1.0);
+  Coeff_t czero(0.0);
  chi.checkerboard=psi.checkerboard;
  int Ls=this->Ls;
  // Apply (U^{\prime})^{-dagger}
-  axpby_ssp (chi,1.0,psi, 0.0,psi,0,0); // chi[0]=psi[0]
+  axpby_ssp (chi,one,psi, czero,psi,0,0); // chi[0]=psi[0]
  for (int s=1;s<Ls;s++){
-    axpby_ssp_pminus(chi,1.0,psi,-uee[s-1],chi,s,s-1);
+    axpby_ssp_pminus(chi,one,psi,-conjugate(uee[s-1]),chi,s,s-1);
  }
  // U_m^{-\dagger}
  for (int s=0;s<Ls-1;s++){
-    axpby_ssp_pplus(chi,1.0,chi,-ueem[s],chi,Ls-1,s);
+    axpby_ssp_pplus(chi,one,chi,-conjugate(ueem[s]),chi,Ls-1,s);
  }
  // L_m^{-\dagger} D^{-dagger}
  for (int s=0;s<Ls-1;s++){
-    axpby_ssp_pminus(chi,1.0/dee[s],chi,-leem[s]/dee[Ls-1],chi,s,Ls-1);
+    axpby_ssp_pminus(chi,one/conjugate(dee[s]),chi,-conjugate(leem[s]/dee[Ls-1]),chi,s,Ls-1);
  }
-  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable
+  axpby_ssp(chi,one/conjugate(dee[Ls-1]),chi,czero,chi,Ls-1,Ls-1); // Modest avoidable

  // Apply L^{-dagger}
  for (int s=Ls-2;s>=0;s--){
-    axpby_ssp_pplus (chi,1.0,chi,-lee[s],chi,s,s+1); // chi[Ls]
+    axpby_ssp_pplus (chi,one,chi,-conjugate(lee[s]),chi,s,s+1); // chi[Ls]
  }
}

#ifdef CAYLEY_DPERP_LINALG
-  INSTANTIATE(WilsonImplF);
-  INSTANTIATE(WilsonImplD);
-  INSTANTIATE(GparityWilsonImplF);
-  INSTANTIATE(GparityWilsonImplD);
+  INSTANTIATE_DPERP(WilsonImplF);
+  INSTANTIATE_DPERP(WilsonImplD);
+  INSTANTIATE_DPERP(GparityWilsonImplF);
+  INSTANTIATE_DPERP(GparityWilsonImplD);
+  INSTANTIATE_DPERP(ZWilsonImplF);
+  INSTANTIATE_DPERP(ZWilsonImplD);
#endif

}
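A detail worth noting in the hunks above: the literal 1.0/0.0 scale factors are replaced by named Coeff_t constants. A plausible reading (an assumption, consistent with the ZWilsonImpl instantiations added in this diff) is that once Coeff_t may be complex, the scalar arguments must already be of type Coeff_t so the templated axpby_ssp* calls resolve uniformly:

    // Sketch: explicit Coeff_t constants avoid mixing double literals
    // with a (possibly complex) coefficient type.
    Coeff_t one(1.0);
    Coeff_t czero(0.0);
    // axpby_ssp(chi, one, psi, czero, psi, 0, 0);  // chi[0] = psi[0]

Together with conjugate(...) on the coefficients in the daggered solver, this makes the linalg variant correct for complex (Z-Mobius-type) coefficients.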
@@ -30,11 +30,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
/* END LEGAL */

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
-namespace QCD { /*
+namespace QCD {
+/*
 * Dense matrix versions of routines
 */
template<class Impl>
@@ -91,8 +93,7 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,

  assert(Nc==3);

-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
+  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
#if 0
    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
@@ -232,8 +233,7 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,

  M5Dcalls++;
  M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
+  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
#if 0
    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
@@ -792,13 +792,11 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
  MooeeInvTime-=usecond();

  if ( switcheroo<Coeff_t>::iscomplex() ) {
-PARALLEL_FOR_LOOP
-    for(auto site=0;site<vol;site++){
+    parallel_for(auto site=0;site<vol;site++){
      MooeeInternalZAsm(psi,chi,LLs,site,*_Matp,*_Matm);
    }
  } else {
-PARALLEL_FOR_LOOP
-    for(auto site=0;site<vol;site++){
+    parallel_for(auto site=0;site<vol;site++){
      MooeeInternalAsm(psi,chi,LLs,site,*_Matp,*_Matm);
    }
  }
@@ -26,7 +26,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>

namespace Grid {
namespace QCD {
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_CONTINUED_FRACTION_H
#define  GRID_QCD_CONTINUED_FRACTION_H

+#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
+
namespace Grid {

namespace QCD {
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_DOMAIN_WALL_FERMION_H
#define  GRID_QCD_DOMAIN_WALL_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

namespace Grid {
@@ -2,16 +2,11 @@

 Grid physics library, www.github.com/paboyle/Grid

-Source file: ./lib/qcd/action/Actions.h
+Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h

 Copyright (C) 2015

-Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: neo <cossu@post.kek.jp>
-Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -29,89 +24,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
-#ifndef GRID_QCD_ACTIONS_H
-#define GRID_QCD_ACTIONS_H
+#ifndef GRID_QCD_FERMION_H
+#define GRID_QCD_FERMION_H
-
-// * Linear operators (Hermitian and non-hermitian) .. my LinearOperator
-// * System solvers (Hermitian and non-hermitian) .. my OperatorFunction
-// * MultiShift System solvers (Hermitian and non-hermitian) .. my OperatorFunction
-
-////////////////////////////////////////////
-// Abstract base interface
-////////////////////////////////////////////
-#include <Grid/qcd/action/ActionBase.h>
-#include <Grid/qcd/action/ActionSet.h>
-#include <Grid/qcd/action/ActionParams.h>
-
-////////////////////////////////////////////
-// Utility functions
-////////////////////////////////////////////
-#include <Grid/qcd/action/gauge/GaugeImplementations.h>
-#include <Grid/qcd/utils/WilsonLoops.h>
-#include <Grid/qcd/utils/Metric.h>
-#include <Grid/qcd/utils/CovariantLaplacian.h>
-
-#include <Grid/qcd/action/fermion/WilsonCompressor.h>     //used by all wilson type fermions
-#include <Grid/qcd/action/fermion/FermionOperatorImpl.h>
-#include <Grid/qcd/action/fermion/FermionOperator.h>
-#include <Grid/qcd/action/fermion/WilsonKernels.h>        //used by all wilson type fermions
-
-////////////////////////////////////////////
-// Gauge Actions
-////////////////////////////////////////////
-#include <Grid/qcd/action/gauge/WilsonGaugeAction.h>
-#include <Grid/qcd/action/gauge/PlaqPlusRectangleAction.h>
-
-////////////////////////////////////////////
-// Scalar Actions
-////////////////////////////////////////////
-#include <Grid/qcd/action/scalar/ScalarImpl.h>
-#include <Grid/qcd/action/scalar/ScalarAction.h>
-#include <Grid/qcd/action/scalar/ScalarInteractionAction.h>
-
-namespace Grid {
-namespace QCD {
-
-typedef WilsonGaugeAction<PeriodicGimplR>          WilsonGaugeActionR;
-typedef WilsonGaugeAction<PeriodicGimplF>          WilsonGaugeActionF;
-typedef WilsonGaugeAction<PeriodicGimplD>          WilsonGaugeActionD;
-
-typedef PlaqPlusRectangleAction<PeriodicGimplR>    PlaqPlusRectangleActionR;
-typedef PlaqPlusRectangleAction<PeriodicGimplF>    PlaqPlusRectangleActionF;
-typedef PlaqPlusRectangleAction<PeriodicGimplD>    PlaqPlusRectangleActionD;
-typedef IwasakiGaugeAction<PeriodicGimplR>         IwasakiGaugeActionR;
-typedef IwasakiGaugeAction<PeriodicGimplF>         IwasakiGaugeActionF;
-typedef IwasakiGaugeAction<PeriodicGimplD>         IwasakiGaugeActionD;
-typedef SymanzikGaugeAction<PeriodicGimplR>        SymanzikGaugeActionR;
-typedef SymanzikGaugeAction<PeriodicGimplF>        SymanzikGaugeActionF;
-typedef SymanzikGaugeAction<PeriodicGimplD>        SymanzikGaugeActionD;
-
-typedef WilsonGaugeAction<ConjugateGimplR>         ConjugateWilsonGaugeActionR;
-typedef WilsonGaugeAction<ConjugateGimplF>         ConjugateWilsonGaugeActionF;
-typedef WilsonGaugeAction<ConjugateGimplD>         ConjugateWilsonGaugeActionD;
-typedef PlaqPlusRectangleAction<ConjugateGimplR>   ConjugatePlaqPlusRectangleActionR;
-typedef PlaqPlusRectangleAction<ConjugateGimplF>   ConjugatePlaqPlusRectangleActionF;
-typedef PlaqPlusRectangleAction<ConjugateGimplD>   ConjugatePlaqPlusRectangleActionD;
-typedef IwasakiGaugeAction<ConjugateGimplR>        ConjugateIwasakiGaugeActionR;
-typedef IwasakiGaugeAction<ConjugateGimplF>        ConjugateIwasakiGaugeActionF;
-typedef IwasakiGaugeAction<ConjugateGimplD>        ConjugateIwasakiGaugeActionD;
-typedef SymanzikGaugeAction<ConjugateGimplR>       ConjugateSymanzikGaugeActionR;
-typedef SymanzikGaugeAction<ConjugateGimplF>       ConjugateSymanzikGaugeActionF;
-typedef SymanzikGaugeAction<ConjugateGimplD>       ConjugateSymanzikGaugeActionD;
-
-typedef ScalarAction<ScalarImplR>                  ScalarActionR;
-typedef ScalarAction<ScalarImplF>                  ScalarActionF;
-typedef ScalarAction<ScalarImplD>                  ScalarActionD;
-
-typedef ScalarInteractionAction<ScalarAdjImplR>    ScalarAdjActionR;
-typedef ScalarInteractionAction<ScalarAdjImplF>    ScalarAdjActionF;
-typedef ScalarInteractionAction<ScalarAdjImplD>    ScalarAdjActionD;
-
-}}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Explicit explicit template instantiation is still required in the .cc files
@@ -128,36 +43,6 @@ typedef ScalarInteractionAction<ScalarAdjImplD> ScalarAdjActionD
// for EVERY .cc file. This define centralises the list and restores global push of impl cases
////////////////////////////////////////////////////////////////////////////////////////////////////

-#define FermOp4dVecTemplateInstantiate(A) \
-  template class A<WilsonImplF>; \
-  template class A<WilsonImplD>; \
-  template class A<ZWilsonImplF>; \
-  template class A<ZWilsonImplD>; \
-  template class A<GparityWilsonImplF>; \
-  template class A<GparityWilsonImplD>;
-
-#define AdjointFermOpTemplateInstantiate(A) \
-  template class A<WilsonAdjImplF>; \
-  template class A<WilsonAdjImplD>;
-
-#define TwoIndexFermOpTemplateInstantiate(A) \
-  template class A<WilsonTwoIndexSymmetricImplF>; \
-  template class A<WilsonTwoIndexSymmetricImplD>;
-
-#define FermOp5dVecTemplateInstantiate(A) \
-  template class A<DomainWallVec5dImplF>; \
-  template class A<DomainWallVec5dImplD>; \
-  template class A<ZDomainWallVec5dImplF>; \
-  template class A<ZDomainWallVec5dImplD>;
-
-#define FermOpTemplateInstantiate(A) \
- FermOp4dVecTemplateInstantiate(A) \
- FermOp5dVecTemplateInstantiate(A)
-
-#define GparityFermOpTemplateInstantiate(A)

////////////////////////////////////////////
// Fermion operators / actions
////////////////////////////////////////////
@@ -165,9 +50,9 @@ typedef ScalarInteractionAction<ScalarAdjImplD> ScalarAdjActionD
#include <Grid/qcd/action/fermion/WilsonFermion.h>       // 4d wilson like
#include <Grid/qcd/action/fermion/WilsonTMFermion.h>     // 4d wilson like
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>     // 5d base used by all 5d overlap types

//#include <Grid/qcd/action/fermion/CloverFermion.h>
+#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion.h>
+#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
#include <Grid/qcd/action/fermion/CayleyFermion5D.h>     // Cayley types
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
@@ -178,14 +63,16 @@ typedef ScalarInteractionAction<ScalarAdjImplD> ScalarAdjActionD
#include <Grid/qcd/action/fermion/ShamirZolotarevFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h>

#include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>               // Continued fraction
#include <Grid/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h>

#include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>                 // Partial fraction
#include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h>
#include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h>
+///////////////////////////////////////////////////////////////////////////////
+// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
+///////////////////////////////////////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/g5HermitianLinop.h>

////////////////////////////////////////////////////////////////////////////////////////////////////
// More maintainable to maintain the following typedef list centrally, as more "impl" targets
@@ -289,27 +176,19 @@ typedef MobiusFermion<GparityWilsonImplR> GparityMobiusFermionR;
typedef MobiusFermion<GparityWilsonImplF> GparityMobiusFermionF;
typedef MobiusFermion<GparityWilsonImplD> GparityMobiusFermionD;

+typedef ImprovedStaggeredFermion<StaggeredImplR> ImprovedStaggeredFermionR;
+typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
+typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;
+
+typedef ImprovedStaggeredFermion5D<StaggeredImplR> ImprovedStaggeredFermion5DR;
+typedef ImprovedStaggeredFermion5D<StaggeredImplF> ImprovedStaggeredFermion5DF;
+typedef ImprovedStaggeredFermion5D<StaggeredImplD> ImprovedStaggeredFermion5DD;
+
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplR> ImprovedStaggeredFermionVec5dR;
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplF> ImprovedStaggeredFermionVec5dF;
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplD> ImprovedStaggeredFermionVec5dD;

}}
-///////////////////////////////////////////////////////////////////////////////
-// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
-///////////////////////////////////////////////////////////////////////////////
-#include <Grid/qcd/action/fermion/g5HermitianLinop.h>
-
-////////////////////////////////////////
-// Pseudo fermion combinations for HMC
-////////////////////////////////////////
-#include <Grid/qcd/action/pseudofermion/EvenOddSchurDifferentiable.h>
-
-#include <Grid/qcd/action/pseudofermion/TwoFlavour.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourRatio.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOdd.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOddRatio.h>
-
-#include <Grid/qcd/action/pseudofermion/OneFlavourRational.h>
-#include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
-#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRational.h>
-#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRationalRatio.h>
-
#endif
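The ImprovedStaggered* typedefs added above expose, at three precisions, the Naik-improved staggered operator whose headers are included earlier in the file. Schematically (the standard form, sketched here for orientation rather than quoted from the code):

\[ (D_{\rm stag}\psi)(x) = m\,\psi(x) + \sum_{\mu} \eta_\mu(x)\Big[ c_1\big(U_\mu(x)\,\psi(x{+}\hat\mu) - U_\mu^\dagger(x{-}\hat\mu)\,\psi(x{-}\hat\mu)\big) + c_3\big(W_\mu(x)\,\psi(x{+}3\hat\mu) - W_\mu^\dagger(x{-}3\hat\mu)\,\psi(x{-}3\hat\mu)\big) \Big], \]

with fat one-hop links U, three-hop (Naik) links W, and staggered phases \(\eta_\mu\); the corresponding doubled-gauge-field construction appears in StaggeredImpl::DoubleStore further down.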
lib/qcd/action/fermion/FermionCore.h (new file, 80 lines)
@@ -0,0 +1,80 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef  GRID_QCD_FERMION_CORE_H
+#define  GRID_QCD_FERMION_CORE_H
+
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/ActionCore.h>
+
+////////////////////////////////////////////
+// Fermion prereqs
+////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/WilsonCompressor.h>     //used by all wilson type fermions
+#include <Grid/qcd/action/fermion/FermionOperatorImpl.h>
+#include <Grid/qcd/action/fermion/FermionOperator.h>
+#include <Grid/qcd/action/fermion/WilsonKernels.h>        //used by all wilson type fermions
+#include <Grid/qcd/action/fermion/StaggeredKernels.h>     //used by all wilson type fermions
+
+#define FermOpStaggeredTemplateInstantiate(A) \
+  template class A<StaggeredImplF>; \
+  template class A<StaggeredImplD>;
+
+#define FermOpStaggeredVec5dTemplateInstantiate(A) \
+  template class A<StaggeredVec5dImplF>; \
+  template class A<StaggeredVec5dImplD>;
+
+#define FermOp4dVecTemplateInstantiate(A) \
+  template class A<WilsonImplF>; \
+  template class A<WilsonImplD>; \
+  template class A<ZWilsonImplF>; \
+  template class A<ZWilsonImplD>; \
+  template class A<GparityWilsonImplF>; \
+  template class A<GparityWilsonImplD>;
+
+#define AdjointFermOpTemplateInstantiate(A) \
+  template class A<WilsonAdjImplF>; \
+  template class A<WilsonAdjImplD>;
+
+#define TwoIndexFermOpTemplateInstantiate(A) \
+  template class A<WilsonTwoIndexSymmetricImplF>; \
+  template class A<WilsonTwoIndexSymmetricImplD>;
+
+#define FermOp5dVecTemplateInstantiate(A) \
+  template class A<DomainWallVec5dImplF>; \
+  template class A<DomainWallVec5dImplD>; \
+  template class A<ZDomainWallVec5dImplF>; \
+  template class A<ZDomainWallVec5dImplD>;
+
+#define FermOpTemplateInstantiate(A) \
+ FermOp4dVecTemplateInstantiate(A) \
+ FermOp5dVecTemplateInstantiate(A)
+
+#define GparityFermOpTemplateInstantiate(A)
+
+#endif
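These macros centralise the per-Impl explicit instantiation lists so that each fermion .cc file names its targets once. A typical (assumed, not shown in this diff) use at the bottom of a translation unit:

    // Sketch of assumed usage, e.g. in ImprovedStaggeredFermion.cc:
    // expands to explicit instantiations for StaggeredImplF and StaggeredImplD.
    FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion);

Adding a new Impl target then means extending one macro here instead of editing every .cc file, which is the stated point of the centralised list.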
@@ -194,8 +194,7 @@ namespace QCD {
      GaugeLinkField tmp(mat._grid);
      tmp = zero;

-PARALLEL_FOR_LOOP
-      for(int sss=0;sss<tmp._grid->oSites();sss++){
+      parallel_for(int sss=0;sss<tmp._grid->oSites();sss++){
        int sU=sss;
        for(int s=0;s<Ls;s++){
          int sF = s+Ls*sU;
@@ -235,11 +234,13 @@ class DomainWallVec5dImpl : public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
      typedef Lattice<SiteSpinor>     FermionField;
      typedef Lattice<SitePropagator> PropagatorField;

+      /////////////////////////////////////////////////
      // Make the doubled gauge field a *scalar*
+      /////////////////////////////////////////////////
      typedef iImplDoubledGaugeField<typename Simd::scalar_type> SiteDoubledGaugeField; // This is a scalar
      typedef iImplGaugeField<typename Simd::scalar_type>        SiteScalarGaugeField;  // scalar
      typedef iImplGaugeLink<typename Simd::scalar_type>         SiteScalarGaugeLink;   // scalar

      typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;

      typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
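For orientation in the G-parity hunks that follow: GparityWilsonImpl::DoubleStore builds a two-flavour doubled gauge field in which the second flavour component carries the complex-conjugated links, with the conjugate and a sign swapped in across the G-parity boundary. Schematically (a sketch of the layout, not a quotation),

\[ \mathcal{U}_\mu(x) = \big(\,U_\mu(x),\; U_\mu^*(x)\,\big), \]

which is why each of the patched parallel_for loops writes U[ss] into flavour component 0 and Uconj[ss]/Utmp[ss] into component 1.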
@ -482,8 +483,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
|
|||||||
Uconj = where(coor==neglink,-Uconj,Uconj);
|
Uconj = where(coor==neglink,-Uconj,Uconj);
|
||||||
}
|
}
|
||||||
|
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(auto ss=U.begin();ss<U.end();ss++){
|
||||||
for(auto ss=U.begin();ss<U.end();ss++){
|
|
||||||
Uds[ss](0)(mu) = U[ss]();
|
Uds[ss](0)(mu) = U[ss]();
|
||||||
Uds[ss](1)(mu) = Uconj[ss]();
|
Uds[ss](1)(mu) = Uconj[ss]();
|
||||||
}
|
}
|
||||||
@ -496,8 +496,8 @@ PARALLEL_FOR_LOOP
|
|||||||
Utmp = where(coor==0,Uconj,Utmp);
|
Utmp = where(coor==0,Uconj,Utmp);
|
||||||
}
|
}
|
||||||
|
|
||||||
PARALLEL_FOR_LOOP
|
|
||||||
for(auto ss=U.begin();ss<U.end();ss++){
|
parallel_for(auto ss=U.begin();ss<U.end();ss++){
|
||||||
Uds[ss](0)(mu+4) = Utmp[ss]();
|
Uds[ss](0)(mu+4) = Utmp[ss]();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -506,8 +506,7 @@ PARALLEL_FOR_LOOP
|
|||||||
Utmp = where(coor==0,U,Utmp);
|
Utmp = where(coor==0,U,Utmp);
|
||||||
}
|
}
|
||||||
|
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(auto ss=U.begin();ss<U.end();ss++){
|
||||||
for(auto ss=U.begin();ss<U.end();ss++){
|
|
||||||
Uds[ss](1)(mu+4) = Utmp[ss]();
|
Uds[ss](1)(mu+4) = Utmp[ss]();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -521,8 +520,7 @@ PARALLEL_FOR_LOOP
|
|||||||
GaugeLinkField link(mat._grid);
|
GaugeLinkField link(mat._grid);
|
||||||
// use lorentz for flavour as hack.
|
// use lorentz for flavour as hack.
|
||||||
auto tmp = TraceIndex<SpinIndex>(outerProduct(Btilde, A));
|
auto tmp = TraceIndex<SpinIndex>(outerProduct(Btilde, A));
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(auto ss = tmp.begin(); ss < tmp.end(); ss++) {
|
||||||
for (auto ss = tmp.begin(); ss < tmp.end(); ss++) {
|
|
||||||
link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1));
|
link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1));
|
||||||
}
|
}
|
||||||
PokeIndex<LorentzIndex>(mat, link, mu);
|
PokeIndex<LorentzIndex>(mat, link, mu);
|
||||||
@ -535,8 +533,7 @@ PARALLEL_FOR_LOOP
|
|||||||
|
|
||||||
GaugeLinkField tmp(mat._grid);
|
GaugeLinkField tmp(mat._grid);
|
||||||
tmp = zero;
|
tmp = zero;
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss = 0; ss < tmp._grid->oSites(); ss++) {
|
||||||
for (int ss = 0; ss < tmp._grid->oSites(); ss++) {
|
|
||||||
for (int s = 0; s < Ls; s++) {
|
for (int s = 0; s < Ls; s++) {
|
||||||
int sF = s + Ls * ss;
|
int sF = s + Ls * ss;
|
||||||
auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde[sF], Atilde[sF]));
|
auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde[sF], Atilde[sF]));
|
||||||
@ -549,6 +546,323 @@ PARALLEL_FOR_LOOP
|
|||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Single flavour one component spinors with colour index
|
||||||
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
|
template <class S, class Representation = FundamentalRepresentation >
|
||||||
|
class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
typedef RealD _Coeff_t ;
|
||||||
|
static const int Dimension = Representation::Dimension;
|
||||||
|
typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;
|
      //Necessary?
      constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}

      const bool LsVectorised=false;
      typedef _Coeff_t Coeff_t;

      INHERIT_GIMPL_TYPES(Gimpl);

      template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
      template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
      template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
      template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
      template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;

      typedef iImplScalar<Simd>            SiteComplex;
      typedef iImplSpinor<Simd>            SiteSpinor;
      typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
      typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
      typedef iImplPropagator<Simd>        SitePropagator;

      typedef Lattice<SiteComplex>           ComplexField;
      typedef Lattice<SiteSpinor>            FermionField;
      typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
      typedef Lattice<SitePropagator>        PropagatorField;

      typedef SimpleCompressor<SiteSpinor> Compressor;
      typedef StaggeredImplParams ImplParams;
      typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;

      ImplParams Params;

      StaggeredImpl(const ImplParams &p = ImplParams()) : Params(p){};

      inline void multLink(SiteSpinor &phi,
                           const SiteDoubledGaugeField &U,
                           const SiteSpinor &chi,
                           int mu){
        mult(&phi(), &U(mu), &chi());
      }
      inline void multLinkAdd(SiteSpinor &phi,
                              const SiteDoubledGaugeField &U,
                              const SiteSpinor &chi,
                              int mu){
        mac(&phi(), &U(mu), &chi());
      }

      template <class ref>
      inline void loadLinkElement(Simd &reg, ref &memory) {
        reg = memory;
      }

      inline void DoubleStore(GridBase *GaugeGrid,
                              DoubledGaugeField &UUUds, // for Naik term
                              DoubledGaugeField &Uds,
                              const GaugeField &Uthin,
                              const GaugeField &Ufat) {
        conformable(Uds._grid, GaugeGrid);
        conformable(Uthin._grid, GaugeGrid);
        conformable(Ufat._grid, GaugeGrid);
        GaugeLinkField U(GaugeGrid);
        GaugeLinkField UU(GaugeGrid);
        GaugeLinkField UUU(GaugeGrid);
        GaugeLinkField Udag(GaugeGrid);
        GaugeLinkField UUUdag(GaugeGrid);
        for (int mu = 0; mu < Nd; mu++) {

          // Staggered Phase.
          Lattice<iScalar<vInteger> > coor(GaugeGrid);
          Lattice<iScalar<vInteger> > x(GaugeGrid); LatticeCoordinate(x,0);
          Lattice<iScalar<vInteger> > y(GaugeGrid); LatticeCoordinate(y,1);
          Lattice<iScalar<vInteger> > z(GaugeGrid); LatticeCoordinate(z,2);
          Lattice<iScalar<vInteger> > t(GaugeGrid); LatticeCoordinate(t,3);

          Lattice<iScalar<vInteger> > lin_z(GaugeGrid); lin_z=x+y;
          Lattice<iScalar<vInteger> > lin_t(GaugeGrid); lin_t=x+y+z;

          ComplexField phases(GaugeGrid); phases=1.0;

          if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
          if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
          if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);
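          // These are the standard Kawamoto-Smit phases
          // eta_mu(x) = (-1)^(x_0 + ... + x_{mu-1}); eta_0 is identically one,
          // which is why the mu==0 links need no sign flip above.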
          // 1 hop based on fat links
          U    = PeekIndex<LorentzIndex>(Ufat, mu);
          Udag = adj( Cshift(U, mu, -1));

          U    = U    *phases;
          Udag = Udag *phases;

          PokeIndex<LorentzIndex>(Uds, U, mu);
          PokeIndex<LorentzIndex>(Uds, Udag, mu + 4);

          // 3 hop based on thin links. Crazy huh ?
          U   = PeekIndex<LorentzIndex>(Uthin, mu);
          UU  = Gimpl::CovShiftForward(U,mu,U);
          UUU = Gimpl::CovShiftForward(U,mu,UU);

          UUUdag = adj( Cshift(UUU, mu, -3));

          UUU    = UUU    *phases;
          UUUdag = UUUdag *phases;

          PokeIndex<LorentzIndex>(UUUds, UUU, mu);
          PokeIndex<LorentzIndex>(UUUds, UUUdag, mu+4);

        }
      }
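      // Layout of the doubled fields: directions 0..3 carry the forward links
      // (fat one-hop in Uds, triple-product three-hop in UUUds) and directions
      // 4..7 carry the corresponding backward links U^dag shifted back by one
      // and three sites, so the kernels address both hopping senses by index.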
      inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
        GaugeLinkField link(mat._grid);
        link = TraceIndex<SpinIndex>(outerProduct(Btilde,A));
        PokeIndex<LorentzIndex>(mat,link,mu);
      }

      inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
        assert (0);
        // Must never hit
      }
    };
    /////////////////////////////////////////////////////////////////////////////
    // Single flavour one component spinors with colour index. 5d vec
    /////////////////////////////////////////////////////////////////////////////
    template <class S, class Representation = FundamentalRepresentation >
    class StaggeredVec5dImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {

    public:

      typedef RealD  _Coeff_t ;
      static const int Dimension = Representation::Dimension;
      typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;

      //Necessary?
      constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}

      const bool LsVectorised=true;
      typedef _Coeff_t Coeff_t;

      INHERIT_GIMPL_TYPES(Gimpl);

      template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
      template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
      template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
      template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
      template <typename vtype> using iImplGaugeField        = iVector<iScalar<iMatrix<vtype, Dimension> >, Nd>;
      template <typename vtype> using iImplGaugeLink         = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
      template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;

      // Make the doubled gauge field a *scalar*
      typedef iImplDoubledGaugeField<typename Simd::scalar_type> SiteDoubledGaugeField; // This is a scalar
      typedef iImplGaugeField<typename Simd::scalar_type>        SiteScalarGaugeField;  // scalar
      typedef iImplGaugeLink<typename Simd::scalar_type>         SiteScalarGaugeLink;   // scalar
      typedef iImplPropagator<Simd>                              SitePropagator;

      typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
      typedef Lattice<SitePropagator>        PropagatorField;

      typedef iImplScalar<Simd>     SiteComplex;
      typedef iImplSpinor<Simd>     SiteSpinor;
      typedef iImplHalfSpinor<Simd> SiteHalfSpinor;

      typedef Lattice<SiteComplex> ComplexField;
      typedef Lattice<SiteSpinor>  FermionField;

      typedef SimpleCompressor<SiteSpinor> Compressor;
      typedef StaggeredImplParams ImplParams;
      typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;

      ImplParams Params;

      StaggeredVec5dImpl(const ImplParams &p = ImplParams()) : Params(p){};

      template <class ref>
      inline void loadLinkElement(Simd &reg, ref &memory) {
        vsplat(reg, memory);
      }
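      // With Ls folded into the SIMD lanes every lane needs the same 4d link,
      // so a scalar link element is broadcast (vsplat) across the vector
      // instead of being loaded lane by lane.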
      inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
                           const SiteHalfSpinor &chi, int mu) {
        SiteGaugeLink UU;
        for (int i = 0; i < Dimension; i++) {
          for (int j = 0; j < Dimension; j++) {
            vsplat(UU()()(i, j), U(mu)()(i, j));
          }
        }
        mult(&phi(), &UU(), &chi());
      }
      inline void multLinkAdd(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
                              const SiteHalfSpinor &chi, int mu) {
        SiteGaugeLink UU;
        for (int i = 0; i < Dimension; i++) {
          for (int j = 0; j < Dimension; j++) {
            vsplat(UU()()(i, j), U(mu)()(i, j));
          }
        }
        mac(&phi(), &UU(), &chi());
      }
      inline void DoubleStore(GridBase *GaugeGrid,
                              DoubledGaugeField &UUUds, // for Naik term
                              DoubledGaugeField &Uds,
                              const GaugeField &Uthin,
                              const GaugeField &Ufat)
      {
        GridBase * InputGrid = Uthin._grid;
        conformable(InputGrid,Ufat._grid);

        GaugeLinkField U(InputGrid);
        GaugeLinkField UU(InputGrid);
        GaugeLinkField UUU(InputGrid);
        GaugeLinkField Udag(InputGrid);
        GaugeLinkField UUUdag(InputGrid);

        for (int mu = 0; mu < Nd; mu++) {

          // Staggered Phase.
          Lattice<iScalar<vInteger> > coor(InputGrid);
          Lattice<iScalar<vInteger> > x(InputGrid); LatticeCoordinate(x,0);
          Lattice<iScalar<vInteger> > y(InputGrid); LatticeCoordinate(y,1);
          Lattice<iScalar<vInteger> > z(InputGrid); LatticeCoordinate(z,2);
          Lattice<iScalar<vInteger> > t(InputGrid); LatticeCoordinate(t,3);

          Lattice<iScalar<vInteger> > lin_z(InputGrid); lin_z=x+y;
          Lattice<iScalar<vInteger> > lin_t(InputGrid); lin_t=x+y+z;

          ComplexField phases(InputGrid); phases=1.0;

          if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
          if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
          if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);

          // 1 hop based on fat links
          U    = PeekIndex<LorentzIndex>(Ufat, mu);
          Udag = adj( Cshift(U, mu, -1));

          U    = U    *phases;
          Udag = Udag *phases;

          for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
            SiteScalarGaugeLink   ScalarU;
            SiteDoubledGaugeField ScalarUds;

            std::vector<int> lcoor;
            GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
            peekLocalSite(ScalarUds, Uds, lcoor);

            peekLocalSite(ScalarU, U, lcoor);
            ScalarUds(mu) = ScalarU();

            peekLocalSite(ScalarU, Udag, lcoor);
            ScalarUds(mu + 4) = ScalarU();

            pokeLocalSite(ScalarUds, Uds, lcoor);
          }

          // 3 hop based on thin links. Crazy huh ?
          U   = PeekIndex<LorentzIndex>(Uthin, mu);
          UU  = Gimpl::CovShiftForward(U,mu,U);
          UUU = Gimpl::CovShiftForward(U,mu,UU);

          UUUdag = adj( Cshift(UUU, mu, -3));

          UUU    = UUU    *phases;
          UUUdag = UUUdag *phases;

          for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {

            SiteScalarGaugeLink   ScalarU;
            SiteDoubledGaugeField ScalarUds;

            std::vector<int> lcoor;
            GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);

            peekLocalSite(ScalarUds, UUUds, lcoor);

            peekLocalSite(ScalarU, UUU, lcoor);
            ScalarUds(mu) = ScalarU();

            peekLocalSite(ScalarU, UUUdag, lcoor);
            ScalarUds(mu + 4) = ScalarU();

            pokeLocalSite(ScalarUds, UUUds, lcoor);
          }

        }
      }
      inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
        assert(0);
      }

      inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
        assert (0);
      }
    };
    typedef WilsonImpl<vComplex,  FundamentalRepresentation > WilsonImplR;  // Real.. whichever prec
    typedef WilsonImpl<vComplexF, FundamentalRepresentation > WilsonImplF;  // Float
    typedef WilsonImpl<vComplexD, FundamentalRepresentation > WilsonImplD;  // Double

@@ -577,6 +891,14 @@ PARALLEL_FOR_LOOP

    typedef GparityWilsonImpl<vComplexF, Nc> GparityWilsonImplF;  // Float
    typedef GparityWilsonImpl<vComplexD, Nc> GparityWilsonImplD;  // Double

    typedef StaggeredImpl<vComplex,  FundamentalRepresentation > StaggeredImplR;  // Real.. whichever prec
    typedef StaggeredImpl<vComplexF, FundamentalRepresentation > StaggeredImplF;  // Float
    typedef StaggeredImpl<vComplexD, FundamentalRepresentation > StaggeredImplD;  // Double

    typedef StaggeredVec5dImpl<vComplex,  FundamentalRepresentation > StaggeredVec5dImplR;  // Real.. whichever prec
    typedef StaggeredVec5dImpl<vComplexF, FundamentalRepresentation > StaggeredVec5dImplF;  // Float
    typedef StaggeredVec5dImpl<vComplexD, FundamentalRepresentation > StaggeredVec5dImplD;  // Double

  }}

#endif
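The Impl classes above are pure policy types: a concrete action is obtained by
instantiating the fermion templates on one of these typedefs. A minimal usage
sketch, assuming grids, gauge fields and the mass are prepared elsewhere
(ImprovedStaggeredFermionD is the double-precision instantiation defined with
the action below; all field names here are hypothetical):

    typedef typename ImprovedStaggeredFermionD::FermionField FermionField;

    // Grid/RBGrid are a GridCartesian / GridRedBlackCartesian pair,
    // Uthin/Ufat the thin and fattened gauge configurations.
    ImprovedStaggeredFermionD Ds(Uthin, Ufat, Grid, RBGrid, mass); // c1,c2,u0 default to 9/8, -1/24, 1.0
    FermionField src(&Grid), res(&Grid);
    Ds.M(src, res);   // res = (m + Dhop) src on the full grid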
403  lib/qcd/action/fermion/ImprovedStaggeredFermion.cc  Normal file
@@ -0,0 +1,403 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
#include <Grid.h>

namespace Grid {
namespace QCD {

const std::vector<int>
    ImprovedStaggeredFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3});
const std::vector<int>
    ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});
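// Sixteen stencil legs per site: one-hop (+/-1) and three-hop Naik (+/-3)
// shifts in each of the four directions, matching npoint = 16 in the header.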
/////////////////////////////////
// Constructor and gauge import
/////////////////////////////////

template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid,
                                                         RealD _mass,
                                                         const ImplParams &p)
    : Kernels(p),
      _grid(&Fgrid),
      _cbgrid(&Hgrid),
      Stencil(&Fgrid, npoint, Even, directions, displacements),
      StencilEven(&Hgrid, npoint, Even, directions, displacements),  // source is Even
      StencilOdd(&Hgrid, npoint, Odd, directions, displacements),    // source is Odd
      mass(_mass),
      Lebesgue(_grid),
      LebesgueEvenOdd(_cbgrid),
      Umu(&Fgrid),
      UmuEven(&Hgrid),
      UmuOdd(&Hgrid),
      UUUmu(&Fgrid),
      UUUmuEven(&Hgrid),
      UUUmuOdd(&Hgrid),
      _tmp(&Hgrid)
{
}

template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
                                                         GridRedBlackCartesian &Hgrid, RealD _mass,
                                                         RealD _c1, RealD _c2, RealD _u0,
                                                         const ImplParams &p)
    : ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,p)
{
  c1=_c1;
  c2=_c2;
  u0=_u0;
  ImportGauge(_Uthin,_Ufat);
}
template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin,GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
                                                         GridRedBlackCartesian &Hgrid, RealD _mass,
                                                         const ImplParams &p)
    : ImprovedStaggeredFermion(Fgrid,Hgrid,_mass,p)
{
  ImportGaugeSimple(_Utriple,_Ufat);
}
////////////////////////////////////////////////////////////
// Momentum space propagator should be
// https://arxiv.org/pdf/hep-lat/9712010.pdf
//
// mom space action.
// gamma_mu i ( c1 sin pmu + c2 sin 3 pmu ) + m
//
// must track through staggered flavour/spin reduction in literature to
// turn to free propagator for the one component chi field, a la page 4/5
// of above link, to implement a Fourier based solver.
////////////////////////////////////////////////////////////
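// A quick check of the improvement coefficients used below (c1 = 9/8,
// c2 = -1/24):
//   c1 sin(p) + c2 sin(3p) = (9/8)(p - p^3/6 + ...) - (1/24)(3p - 27 p^3/6 + ...)
//                          = p + O(p^5),
// so the O(a^2) error of the naive first derivative cancels at tree level.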
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin)
{
  ImportGauge(_Uthin,_Uthin);
};
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const GaugeField &_Utriple,const GaugeField &_Ufat)
{
  /////////////////////////////////////////////////////////////////
  // Trivial import; phases and fattening and such like preapplied
  /////////////////////////////////////////////////////////////////
  GaugeLinkField U(GaugeGrid());

  for (int mu = 0; mu < Nd; mu++) {

    U = PeekIndex<LorentzIndex>(_Utriple, mu);
    PokeIndex<LorentzIndex>(UUUmu, U, mu );

    U = adj( Cshift(U, mu, -3));
    PokeIndex<LorentzIndex>(UUUmu, -U, mu+4 );

    U = PeekIndex<LorentzIndex>(_Ufat, mu);
    PokeIndex<LorentzIndex>(Umu, U, mu);

    U = adj( Cshift(U, mu, -1));
    PokeIndex<LorentzIndex>(Umu, -U, mu+4);

  }
  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd ,   Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat)
{
  GaugeLinkField U(GaugeGrid());

  ////////////////////////////////////////////////////////
  // Double Store should take two fields for Naik and one hop separately.
  ////////////////////////////////////////////////////////
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat );

  ////////////////////////////////////////////////////////
  // Apply scale factors to get the right fermion Kinetic term
  // Could pass coeffs into the double store to save work.
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) )
  ////////////////////////////////////////////////////////
  for (int mu = 0; mu < Nd; mu++) {

    U = PeekIndex<LorentzIndex>(Umu, mu);
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu );

    U = PeekIndex<LorentzIndex>(Umu, mu+4);
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);

    U = PeekIndex<LorentzIndex>(UUUmu, mu);
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu );

    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
  }

  std::cout << " Umu " << Umu._odata[0]<<std::endl;
  std::cout << " UUUmu " << UUUmu._odata[0]<<std::endl;
  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd ,   Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}
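// The powers of u0 above are tadpole-improvement factors: every link in a path
// is divided by u0, giving c1/u0 for the one-hop term and c2/u0^3 for the
// three-hop Naik term, while the +/-0.5 factors build the symmetric difference
// quoted in the comment.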
/////////////////////////////
// Implement the interface
/////////////////////////////

template <class Impl>
RealD ImprovedStaggeredFermion<Impl>::M(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerNo);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
RealD ImprovedStaggeredFermion<Impl>::Mdag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerYes);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Meooe(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerNo);
  } else {
    DhopOE(in, out, DaggerNo);
  }
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerYes);
  } else {
    DhopOE(in, out, DaggerYes);
  }
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(mass);
  out = scal * in;
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0 / (mass)) * in;
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeInvDag(const FermionField &in,
                                                 FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in, out);
}
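// The site-diagonal part of the staggered operator is the bare mass alone, so
// Mooee and MooeeInv are trivial scalings by m and 1/m; this is what makes the
// even-odd (Schur) preconditioning of this action so cheap.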
///////////////////////////////////
// Internal
///////////////////////////////////

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU,
                                                   GaugeField & mat,
                                                   const FermionField &A, const FermionField &B, int dag) {
  assert((dag == DaggerNo) || (dag == DaggerYes));

  Compressor compressor;

  FermionField Btilde(B._grid);
  FermionField Atilde(B._grid);
  Atilde = A;

  st.HaloExchange(B, compressor);

  for (int mu = 0; mu < Nd; mu++) {

    ////////////////////////
    // Call the single hop
    ////////////////////////
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < B._grid->oSites(); sss++) {
      Kernels::DhopDir(st, U, UUU, st.CommBuf(), sss, sss, B, Btilde, mu,1);
    }

    // Force in three link terms
    //
    //    Impl::InsertForce4D(mat, Btilde, Atilde, mu);
    //
    // dU_ac(x)/dt = i p_ab U_bc(x)
    //
    // => dS_f/dt = dS_f/dU_ac(x) . dU_ac(x)/dt = i p_ab U_bc(x) dS_f/dU_ac(x)
    //
    // One link: form fragments S_f = A U B
    //
    //   write Btilde = U(x) B(x+mu)
    //
    //   mat+= TraceIndex<SpinIndex>(outerProduct(Btilde,A));
    //
    // Three link: form fragments S_f = A UUU B
    //
    //   mat+= outer ( A,   UUUB) <-- Best take DhopDeriv with one link or identity matrix
    //   mat+= outer ( AU,  UUB)  <-- and then use covariant cshift?
    //   mat+= outer ( AUU, UB)   <-- Returned from call to DhopDir

    assert(0);// need to figure out the force interface with a blasted three link term.

  }
}
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _grid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  mat.checkerboard = U.checkerboard;

  DerivInternal(Stencil, Umu, UUUmu, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _cbgrid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  assert(V.checkerboard == Even);
  assert(U.checkerboard == Odd);
  mat.checkerboard = Odd;

  DerivInternal(StencilEven, UmuOdd, UUUmuOdd, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _cbgrid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  assert(V.checkerboard == Odd);
  assert(U.checkerboard == Even);
  mat.checkerboard = Even;

  DerivInternal(StencilOdd, UmuEven, UUUmuEven, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _grid);  // verifies full grid
  conformable(in._grid, out._grid);

  out.checkerboard = in.checkerboard;

  DhopInternal(Stencil, Lebesgue, Umu, UUUmu, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

  assert(in.checkerboard == Even);
  out.checkerboard = Odd;

  DhopInternal(StencilEven, LebesgueEvenOdd, UmuOdd, UUUmuOdd, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopEO(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

  assert(in.checkerboard == Odd);
  out.checkerboard = Even;

  DhopInternal(StencilOdd, LebesgueEvenOdd, UmuEven, UUUmuEven, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) {

  Compressor compressor;
  Stencil.HaloExchange(in, compressor);

  PARALLEL_FOR_LOOP
  for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sss, sss, in, out, dir, disp);
  }
};

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
                                                  DoubledGaugeField &U,
                                                  DoubledGaugeField &UUU,
                                                  const FermionField &in,
                                                  FermionField &out, int dag) {
  assert((dag == DaggerNo) || (dag == DaggerYes));

  Compressor compressor;
  st.HaloExchange(in, compressor);

  if (dag == DaggerYes) {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
    }
  } else {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
    }
  }
};

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion);

  //AdjointFermOpTemplateInstantiate(ImprovedStaggeredFermion);
  //TwoIndexFermOpTemplateInstantiate(ImprovedStaggeredFermion);

}}
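Taken together, the half-checkerboard routines above compose into the operator
m^2 - D_oe D_eo (the red-black Schur complement scaled by m) that an even-odd
preconditioned solver would invert. A sketch of that composition, under the
same assumptions (and the same hypothetical names) as the earlier usage sketch:

    FermionField src_o(&RBGrid), tmp_e(&RBGrid), res_o(&RBGrid), tmp_o(&RBGrid);
    src_o.checkerboard = Odd;

    Ds.Meooe(src_o, tmp_e);   // tmp_e = D_eo src_o   (even result)
    Ds.Meooe(tmp_e, res_o);   // res_o = D_oe D_eo src_o   (odd result)

    typename FermionField::scalar_type m2(Ds.mass*Ds.mass);
    tmp_o = m2 * src_o;       // same scalar*field idiom as Mooee above
    res_o = tmp_o - res_o;    // (m^2 - D_oe D_eo) src_o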
167  lib/qcd/action/fermion/ImprovedStaggeredFermion.h  Normal file
@@ -0,0 +1,167 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggered.h

    Copyright (C) 2015

    Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_IMPR_STAG_FERMION_H
#define GRID_QCD_IMPR_STAG_FERMION_H

namespace Grid {

namespace QCD {

class ImprovedStaggeredFermionStatic {
 public:
  static const std::vector<int> directions;
  static const std::vector<int> displacements;
  static const int npoint = 16;
};

template <class Impl>
class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedStaggeredFermionStatic {
 public:
  INHERIT_IMPL_TYPES(Impl);
  typedef StaggeredKernels<Impl> Kernels;

  FermionField _tmp;
  FermionField &tmp(void) { return _tmp; }

  ///////////////////////////////////////////////////////////////
  // Implement the abstract base
  ///////////////////////////////////////////////////////////////
  GridBase *GaugeGrid(void) { return _grid; }
  GridBase *GaugeRedBlackGrid(void) { return _cbgrid; }
  GridBase *FermionGrid(void) { return _grid; }
  GridBase *FermionRedBlackGrid(void) { return _cbgrid; }

  //////////////////////////////////////////////////////////////////
  // override multiply; cut the number of routines by passing a dagger
  // argument, and also make the interface more uniformly consistent
  //////////////////////////////////////////////////////////////////
  RealD M(const FermionField &in, FermionField &out);
  RealD Mdag(const FermionField &in, FermionField &out);

  /////////////////////////////////////////////////////////
  // half checkerboard operations
  /////////////////////////////////////////////////////////
  void Meooe(const FermionField &in, FermionField &out);
  void MeooeDag(const FermionField &in, FermionField &out);
  void Mooee(const FermionField &in, FermionField &out);
  void MooeeDag(const FermionField &in, FermionField &out);
  void MooeeInv(const FermionField &in, FermionField &out);
  void MooeeInvDag(const FermionField &in, FermionField &out);

  ////////////////////////
  // Derivative interface
  ////////////////////////
  // Interface calls an internal routine
  void DhopDeriv  (GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
  void DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
  void DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);

  ///////////////////////////////////////////////////////////////
  // non-hermitian hopping term; half cb or both
  ///////////////////////////////////////////////////////////////
  void Dhop  (const FermionField &in, FermionField &out, int dag);
  void DhopOE(const FermionField &in, FermionField &out, int dag);
  void DhopEO(const FermionField &in, FermionField &out, int dag);

  ///////////////////////////////////////////////////////////////
  // Multigrid assistance; force term uses too
  ///////////////////////////////////////////////////////////////
  void Mdir(const FermionField &in, FermionField &out, int dir, int disp);
  void DhopDir(const FermionField &in, FermionField &out, int dir, int disp);

  ///////////////////////////////////////////////////////////////
  // Extra methods added by derived
  ///////////////////////////////////////////////////////////////
  void DerivInternal(StencilImpl &st,
                     DoubledGaugeField &U, DoubledGaugeField &UUU,
                     GaugeField &mat,
                     const FermionField &A, const FermionField &B, int dag);

  void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
                    const FermionField &in, FermionField &out, int dag);

  // Constructor
  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
                           GridRedBlackCartesian &Hgrid, RealD _mass,
                           RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0, RealD _u0=1.0,
                           const ImplParams &p = ImplParams());

  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
                           GridRedBlackCartesian &Hgrid, RealD _mass,
                           const ImplParams &p = ImplParams());

  ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass,
                           const ImplParams &p = ImplParams());

  // DoubleStore impl dependent
  void ImportGaugeSimple(const GaugeField &_Utriple, const GaugeField &_Ufat);
  void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);
  void ImportGauge(const GaugeField &_Uthin);

  ///////////////////////////////////////////////////////////////
  // Data members required to support the functionality
  ///////////////////////////////////////////////////////////////

  // protected:
 public:
  // any other parameters of action ???

  RealD mass;
  RealD u0;
  RealD c1;
  RealD c2;

  GridBase *_grid;
  GridBase *_cbgrid;

  // Defines the stencils for even and odd
  StencilImpl Stencil;
  StencilImpl StencilEven;
  StencilImpl StencilOdd;

  // Copy of the gauge field , with even and odd subsets
  DoubledGaugeField Umu;
  DoubledGaugeField UmuEven;
  DoubledGaugeField UmuOdd;

  DoubledGaugeField UUUmu;
  DoubledGaugeField UUUmuEven;
  DoubledGaugeField UUUmuOdd;

  LebesgueOrder Lebesgue;
  LebesgueOrder LebesgueEvenOdd;
};

typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;

}
}
#endif
355  lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc  Normal file
@@ -0,0 +1,355 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
#include <Grid/perfmon/PerfCount.h>

namespace Grid {
namespace QCD {

// S-direction is INNERMOST and takes no part in the parity.
const std::vector<int>
ImprovedStaggeredFermion5DStatic::directions({1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4});
const std::vector<int>
ImprovedStaggeredFermion5DStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});

// 5d lattice for DWF.
template<class Impl>
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin,GaugeField &_Ufat,
                                                             GridCartesian         &FiveDimGrid,
                                                             GridRedBlackCartesian &FiveDimRedBlackGrid,
                                                             GridCartesian         &FourDimGrid,
                                                             GridRedBlackCartesian &FourDimRedBlackGrid,
                                                             RealD _mass,
                                                             RealD _c1,RealD _c2, RealD _u0,
                                                             const ImplParams &p) :
  Kernels(p),
  _FiveDimGrid        (&FiveDimGrid),
  _FiveDimRedBlackGrid(&FiveDimRedBlackGrid),
  _FourDimGrid        (&FourDimGrid),
  _FourDimRedBlackGrid(&FourDimRedBlackGrid),
  Stencil    (&FiveDimGrid,npoint,Even,directions,displacements),
  StencilEven(&FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even
  StencilOdd (&FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd
  mass(_mass),
  c1(_c1),
  c2(_c2),
  u0(_u0),
  Umu(&FourDimGrid),
  UmuEven(&FourDimRedBlackGrid),
  UmuOdd (&FourDimRedBlackGrid),
  UUUmu(&FourDimGrid),
  UUUmuEven(&FourDimRedBlackGrid),
  UUUmuOdd(&FourDimRedBlackGrid),
  Lebesgue(&FourDimGrid),
  LebesgueEvenOdd(&FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
{

  // some assertions
  assert(FiveDimGrid._ndimension==5);
  assert(FourDimGrid._ndimension==4);
  assert(FourDimRedBlackGrid._ndimension==4);
  assert(FiveDimRedBlackGrid._ndimension==5);
  assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction

  // extent of fifth dim and not spread out
  Ls=FiveDimGrid._fdimensions[0];
  assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
  assert(FiveDimGrid._processors[0]         ==1);
  assert(FiveDimRedBlackGrid._processors[0] ==1);

  // Other dimensions must match the decomposition of the four-D fields
  for(int d=0;d<4;d++){
    assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
    assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);
    assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);

    assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
    assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
    assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);

    assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);
    assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
  }

  if (Impl::LsVectorised) {

    int nsimd = Simd::Nsimd();

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimGrid._simd_layout[0]        ==nsimd);
    assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd);

    for(int d=0;d<4;d++){
      assert(FourDimGrid._simd_layout[d]==1);
      assert(FourDimRedBlackGrid._simd_layout[d]==1);
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==1);
    }

  } else {

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimRedBlackGrid._simd_layout[0]==1);
    assert(FiveDimGrid._simd_layout[0]        ==1);

  }

  // Allocate the required comms buffer
  ImportGauge(_Uthin,_Ufat);
}
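// With Impl::LsVectorised the assertions above put ALL SIMD lanes along the
// fifth dimension: the 4d layout is scalar in every direction, Ls fills the
// vector width, and since every s-slice sees the same 4d link the Impl can
// broadcast (vsplat) one scalar link across the lanes.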
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin)
{
  ImportGauge(_Uthin,_Uthin);
};
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat)
{
  ////////////////////////////////////////////////////////
  // Double Store should take two fields for Naik and one hop separately.
  ////////////////////////////////////////////////////////
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat );

  ////////////////////////////////////////////////////////
  // Apply scale factors to get the right fermion Kinetic term
  // Could pass coeffs into the double store to save work.
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) )
  ////////////////////////////////////////////////////////
  for (int mu = 0; mu < Nd; mu++) {

    auto U = PeekIndex<LorentzIndex>(Umu, mu);
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu );

    U = PeekIndex<LorentzIndex>(Umu, mu+4);
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);

    U = PeekIndex<LorentzIndex>(UUUmu, mu);
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu );

    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
  }

  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd ,   Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,int dir5,int disp)
{
  int dir = dir5-1; // Maps to the ordering above in "directions" that is passed to stencil
                    // we drop off the innermost fifth dimension

  Compressor compressor;
  Stencil.HaloExchange(in,compressor);

  parallel_for(int ss=0;ss<Umu._grid->oSites();ss++){
    for(int s=0;s<Ls;s++){
      int sU=ss;
      int sF = s+Ls*sU;
      Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sF, sU, in, out, dir, disp);
    }
  }
};
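// Indexing convention: sU runs over 4d gauge sites while the fermion index is
// sF = s + Ls*sU, with the fifth coordinate s innermost, so all Ls slices at
// one 4d site share the same links and halo data.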
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DerivInternal(StencilImpl & st,
                                                     DoubledGaugeField & U,
                                                     DoubledGaugeField & UUU,
                                                     GaugeField &mat,
                                                     const FermionField &A,
                                                     const FermionField &B,
                                                     int dag)
{
  // No force terms in multi-rhs solver staggered
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDeriv(GaugeField &mat,
                                                 const FermionField &A,
                                                 const FermionField &B,
                                                 int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDerivEO(GaugeField &mat,
                                                   const FermionField &A,
                                                   const FermionField &B,
                                                   int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDerivOE(GaugeField &mat,
                                                   const FermionField &A,
                                                   const FermionField &B,
                                                   int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
                                                    DoubledGaugeField & U,DoubledGaugeField & UUU,
                                                    const FermionField &in, FermionField &out,int dag)
{
  Compressor compressor;
  int LLs = in._grid->_rdimensions[0];
  st.HaloExchange(in,compressor);

  // Dhop takes the 4d grid from U, and makes a 5d index for fermion
  if (dag == DaggerYes) {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU=ss;
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU, in, out);
    }
  } else {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU=ss;
      Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out);
    }
  }
}
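// The kernels are handed LLs fifth-dimension slices per 4d site: the s-slices
// behave as independent right-hand sides sharing one gauge field and a single
// halo exchange, which is what makes this the multi-RHS staggered operator.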
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
  conformable(in._grid,out._grid); // drops the cb check

  assert(in.checkerboard==Even);
  out.checkerboard = Odd;

  DhopInternal(StencilEven,LebesgueEvenOdd,UmuOdd,UUUmuOdd,in,out,dag);
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionRedBlackGrid());    // verifies half grid
  conformable(in._grid,out._grid); // drops the cb check

  assert(in.checkerboard==Odd);
  out.checkerboard = Even;

  DhopInternal(StencilOdd,LebesgueEvenOdd,UmuEven,UUUmuEven,in,out,dag);
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionGrid()); // verifies full grid
  conformable(in._grid,out._grid);

  out.checkerboard = in.checkerboard;

  DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag);
}

/////////////////////////////////////////////////////////////////////////
// Implement the general interface. Here we use SAME mass on all slices
/////////////////////////////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}
template <class Impl>
RealD ImprovedStaggeredFermion5D<Impl>::M(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerNo);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
RealD ImprovedStaggeredFermion5D<Impl>::Mdag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerYes);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Meooe(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerNo);
  } else {
    DhopOE(in, out, DaggerNo);
  }
}
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerYes);
  } else {
    DhopOE(in, out, DaggerYes);
  }
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(mass);
  out = scal * in;
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0 / (mass)) * in;
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeInvDag(const FermionField &in,
                                                   FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in, out);
}

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D);
FermOpStaggeredVec5dTemplateInstantiate(ImprovedStaggeredFermion5D);

}}
167
lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h
Normal file
167
lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h
Normal file
@ -0,0 +1,167 @@
|
|||||||
|
|
||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h
|
||||||
|
|
||||||
|
Copyright (C) 2015
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: AzusaYamaguchi <ayamaguc@staffmail.ed.ac.uk>
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#ifndef GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H
|
||||||
|
#define GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H
|
||||||
|
|
||||||
|
namespace Grid {
|
||||||
|
namespace QCD {
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// This is the 4d red black case appropriate to support
|
||||||
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
class ImprovedStaggeredFermion5DStatic {
|
||||||
|
public:
|
||||||
|
// S-direction is INNERMOST and takes no part in the parity.
|
||||||
|
static const std::vector<int> directions;
|
||||||
|
static const std::vector<int> displacements;
|
||||||
|
const int npoint = 16;
|
||||||
|
};
|
||||||
|
|
||||||
|
template<class Impl>
|
||||||
|
class ImprovedStaggeredFermion5D : public StaggeredKernels<Impl>, public ImprovedStaggeredFermion5DStatic
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
INHERIT_IMPL_TYPES(Impl);
|
||||||
|
typedef StaggeredKernels<Impl> Kernels;
|
||||||
|
|
||||||
|
FermionField _tmp;
|
||||||
|
FermionField &tmp(void) { return _tmp; }
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////
|
||||||
|
// Implement the abstract base
|
||||||
|
///////////////////////////////////////////////////////////////
|
GridBase *GaugeGrid(void)           { return _FourDimGrid ;}
GridBase *GaugeRedBlackGrid(void)   { return _FourDimRedBlackGrid ;}
GridBase *FermionGrid(void)         { return _FiveDimGrid;}
GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid;}

// full checkerboard operations; leave unimplemented as abstract for now
RealD M    (const FermionField &in, FermionField &out);
RealD Mdag (const FermionField &in, FermionField &out);

// half checkerboard operations
void Meooe       (const FermionField &in, FermionField &out);
void Mooee       (const FermionField &in, FermionField &out);
void MooeeInv    (const FermionField &in, FermionField &out);

void MeooeDag    (const FermionField &in, FermionField &out);
void MooeeDag    (const FermionField &in, FermionField &out);
void MooeeInvDag (const FermionField &in, FermionField &out);

void Mdir   (const FermionField &in, FermionField &out, int dir, int disp);
void DhopDir(const FermionField &in, FermionField &out, int dir, int disp);

// These can be overridden by fancy 5d chiral actions
void DhopDeriv  (GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
void DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
void DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);

// Implement the non-hermitian hopping term; half checkerboard or both
void Dhop  (const FermionField &in, FermionField &out, int dag);
void DhopOE(const FermionField &in, FermionField &out, int dag);
void DhopEO(const FermionField &in, FermionField &out, int dag);

///////////////////////////////////////////////////////////////
// New methods added
///////////////////////////////////////////////////////////////
void DerivInternal(StencilImpl &st,
                   DoubledGaugeField &U,
                   DoubledGaugeField &UUU,
                   GaugeField &mat,
                   const FermionField &A,
                   const FermionField &B,
                   int dag);

void DhopInternal(StencilImpl &st,
                  LebesgueOrder &lo,
                  DoubledGaugeField &U,
                  DoubledGaugeField &UUU,
                  const FermionField &in,
                  FermionField &out,
                  int dag);

// Constructors
ImprovedStaggeredFermion5D(GaugeField &_Uthin,
                           GaugeField &_Ufat,
                           GridCartesian         &FiveDimGrid,
                           GridRedBlackCartesian &FiveDimRedBlackGrid,
                           GridCartesian         &FourDimGrid,
                           GridRedBlackCartesian &FourDimRedBlackGrid,
                           double _mass,
                           RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0, RealD _u0=1.0,
                           const ImplParams &p = ImplParams());

// DoubleStore
void ImportGauge(const GaugeField &_U);
void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);

///////////////////////////////////////////////////////////////
// Data members required to support the functionality
///////////////////////////////////////////////////////////////
public:

GridBase *_FourDimGrid;
GridBase *_FourDimRedBlackGrid;
GridBase *_FiveDimGrid;
GridBase *_FiveDimRedBlackGrid;

RealD mass;
RealD c1;
RealD c2;
RealD u0;
int Ls;

// Defines the stencils for even and odd
StencilImpl Stencil;
StencilImpl StencilEven;
StencilImpl StencilOdd;

// Copy of the gauge field, with even and odd subsets
DoubledGaugeField Umu;
DoubledGaugeField UmuEven;
DoubledGaugeField UmuOdd;

DoubledGaugeField UUUmu;
DoubledGaugeField UUUmuEven;
DoubledGaugeField UUUmuOdd;

LebesgueOrder Lebesgue;
LebesgueOrder LebesgueEvenOdd;

// Comms buffer
std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;

};

}}

#endif
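For orientation, a minimal construction sketch for this operator follows. It is not part of the patch: the SpaceTimeGrid factory helpers follow their use elsewhere in Grid, and Uthin/Ufat (thin and fat links) plus the ImprovedStaggeredFermion5DR typedef are assumed to be provided by the caller.

// Hedged sketch: build the 4d/5d grids and construct the improved staggered
// operator. c1 and c2 default to the Naik coefficients 9/8 and -1/24;
// u0=1.0 means no tadpole rescaling.
const int Ls = 8;
GridCartesian         *UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                   GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian         *FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

RealD mass = 0.01;
ImprovedStaggeredFermion5DR Ds(Uthin,Ufat,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass);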
@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_MOBIUS_FERMION_H
 #define GRID_QCD_MOBIUS_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H
 #define GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CAYLEY_TANH_FERMION_H
 #define OVERLAP_WILSON_CAYLEY_TANH_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H
 #define OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H
 #define OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -26,7 +26,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>
 
 namespace Grid {
 namespace QCD {

@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_PARTIAL_FRACTION_H
 #define GRID_QCD_PARTIAL_FRACTION_H
 
+#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
+
 namespace Grid {
 
 namespace QCD {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_SCALED_SHAMIR_FERMION_H
 #define GRID_QCD_SCALED_SHAMIR_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H
 #define GRID_QCD_SHAMIR_ZOLOTAREV_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {
276 lib/qcd/action/fermion/StaggeredKernels.cc Normal file
@ -0,0 +1,276 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/StaggeredKernels.cc

    Copyright (C) 2015

Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>

namespace Grid {
namespace QCD {

int StaggeredKernelsStatic::Opt = StaggeredKernelsStatic::OptGeneric;

template <class Impl>
StaggeredKernels<Impl>::StaggeredKernels(const ImplParams &p) : Base(p){};

////////////////////////////////////////////
// Generic implementation; move to different file?
////////////////////////////////////////////

template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
                                           SiteSpinor *buf, int sF,
                                           int sU, const FermionField &in, SiteSpinor &out, int threeLink) {
  const SiteSpinor *chi_p;
  SiteSpinor chi;
  SiteSpinor Uchi;
  StencilEntry *SE;
  int ptype;
  int skew = 0;
  if (threeLink) skew = 8;  // the three-link (Naik) neighbours occupy the second set of 8 stencil entries

  ///////////////////////////
  // Xp
  ///////////////////////////
  SE = st.GetEntry(ptype, Xp+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLink(Uchi, U._odata[sU], *chi_p, Xp);

  ///////////////////////////
  // Yp
  ///////////////////////////
  SE = st.GetEntry(ptype, Yp+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Yp);

  ///////////////////////////
  // Zp
  ///////////////////////////
  SE = st.GetEntry(ptype, Zp+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zp);

  ///////////////////////////
  // Tp
  ///////////////////////////
  SE = st.GetEntry(ptype, Tp+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tp);

  ///////////////////////////
  // Xm
  ///////////////////////////
  SE = st.GetEntry(ptype, Xm+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Xm);

  ///////////////////////////
  // Ym
  ///////////////////////////
  SE = st.GetEntry(ptype, Ym+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Ym);

  ///////////////////////////
  // Zm
  ///////////////////////////
  SE = st.GetEntry(ptype, Zm+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Zm);

  ///////////////////////////
  // Tm
  ///////////////////////////
  SE = st.GetEntry(ptype, Tm+skew, sF);
  if (SE->_is_local) {
    if (SE->_permute) {
      chi_p = &chi;
      permute(chi, in._odata[SE->_offset], ptype);
    } else {
      chi_p = &in._odata[SE->_offset];
    }
  } else {
    chi_p = &buf[SE->_offset];
  }
  Impl::multLinkAdd(Uchi, U._odata[sU], *chi_p, Tm);

  vstream(out, Uchi);
};

template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
                                         SiteSpinor *buf, int LLs, int sU,
                                         const FermionField &in, FermionField &out) {
  SiteSpinor naik;
  SiteSpinor naive;
  int oneLink  =0;
  int threeLink=1;
  int dag=1;
  switch(Opt) {
#ifdef AVX512
    // FIXME: move the sign into the Asm routine
  case OptInlineAsm:
    DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
    for(int s=0;s<LLs;s++) {
      int sF=s+LLs*sU;
      out._odata[sF]=-out._odata[sF];
    }
    break;
#endif
  case OptHandUnroll:
    DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
    break;
  case OptGeneric:
    for(int s=0;s<LLs;s++){
      int sF=s+LLs*sU;
      DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
      DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
      out._odata[sF] =-naive-naik;
    }
    break;
  default:
    std::cout<<"Oops Opt = "<<Opt<<std::endl;
    assert(0);
    break;
  }
};

template <class Impl>
void StaggeredKernels<Impl>::DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
                                      SiteSpinor *buf, int LLs,
                                      int sU, const FermionField &in, FermionField &out)
{
  int oneLink  =0;
  int threeLink=1;
  SiteSpinor naik;
  SiteSpinor naive;
  int dag=0;
  switch(Opt) {
#ifdef AVX512
  case OptInlineAsm:
    DhopSiteAsm(st,lo,U,UUU,buf,LLs,sU,in,out);
    break;
#endif
  case OptHandUnroll:
    DhopSiteHand(st,lo,U,UUU,buf,LLs,sU,in,out,dag);
    break;
  case OptGeneric:
    for(int s=0;s<LLs;s++){
      int sF=LLs*sU+s;
      // assert(sF<in._odata.size());
      // assert(sU< U._odata.size());
      // assert(sF>=0); assert(sU>=0);
      DhopSiteDepth(st,lo,U,buf,sF,sU,in,naive,oneLink);
      DhopSiteDepth(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
      out._odata[sF] =naive+naik;
    }
    break;
  default:
    std::cout<<"Oops Opt = "<<Opt<<std::endl;
    assert(0);
    break;
  }
};

template <class Impl>
void StaggeredKernels<Impl>::DhopDir( StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf, int sF,
                                      int sU, const FermionField &in, FermionField &out, int dir, int disp)
{
  // Disp should be either +1,-1,+3,-3
  // What about "dag" ?
  // Because we work out pU . dS/dU
  // U
  assert(0);
}

FermOpStaggeredTemplateInstantiate(StaggeredKernels);
FermOpStaggeredVec5dTemplateInstantiate(StaggeredKernels);

}}
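The Opt switch above dispatches at run time between the generic, hand-unrolled and inline-assembly kernels. A hedged usage sketch follows (only the static Opt member and the Dhop signature are taken from this patch; Ds, src and result are illustrative names):

// Select a kernel variant before applying the operator; OptInlineAsm is only
// compiled in under AVX512, so fall back to the hand-unrolled kernel elsewhere.
#ifdef AVX512
StaggeredKernelsStatic::Opt = StaggeredKernelsStatic::OptInlineAsm;
#else
StaggeredKernelsStatic::Opt = StaggeredKernelsStatic::OptHandUnroll;
#endif
Ds.Dhop(src,result,0);   // 0 = no dagger; uses the kernel selected above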
83 lib/qcd/action/fermion/StaggeredKernels.h Normal file
@ -0,0 +1,83 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/StaggeredKernels.h

    Copyright (C) 2015

Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_STAGGERED_KERNELS_H
#define GRID_QCD_STAGGERED_KERNELS_H

namespace Grid {
namespace QCD {

////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Helper routines that implement the Staggered stencil for a single site.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class StaggeredKernelsStatic {
 public:
  enum { OptGeneric, OptHandUnroll, OptInlineAsm };
  // S-direction is INNERMOST and takes no part in the parity.
  static int Opt;  // these are a temporary hack
};

template<class Impl> class StaggeredKernels : public FermionOperator<Impl> , public StaggeredKernelsStatic {
 public:

  INHERIT_IMPL_TYPES(Impl);
  typedef FermionOperator<Impl> Base;

 public:

  void DhopDir(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
               int sF, int sU, const FermionField &in, FermionField &out, int dir,int disp);

  void DhopSiteDepth(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
                     int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink);

  void DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, SiteSpinor * buf,
                         int sF, int sU, const FermionField &in, SiteSpinor &out,int threeLink);

  void DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
                    int LLs, int sU, const FermionField &in, FermionField &out, int dag);

  void DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
                   int LLs, int sU, const FermionField &in, FermionField &out);

  void DhopSite(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor * buf,
                int sF, int sU, const FermionField &in, FermionField &out);

  void DhopSiteDag(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU, SiteSpinor *buf,
                   int LLs, int sU, const FermionField &in, FermionField &out);

 public:

  StaggeredKernels(const ImplParams &p = ImplParams());

};

}}

#endif
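The hunk above only declares the per-site kernels; the outer loop that drives them lives in ImprovedStaggeredFermion5D::DhopInternal. A hedged sketch of that loop structure, written as a free function (the PARALLEL_FOR_LOOP macro and the _grid->oSites() accessor follow Grid conventions of this era; the function name DhopAllSites is illustrative only):

// Walk every outer (4d) site sU and apply the one-link plus Naik stencil to
// all LLs fifth-dimension slices; buf is the stencil's halo receive buffer.
template<class Impl>
void DhopAllSites(StaggeredKernels<Impl> &kernels,
                  typename Impl::StencilImpl &st, LebesgueOrder &lo,
                  typename Impl::DoubledGaugeField &U,
                  typename Impl::DoubledGaugeField &UUU,
                  typename Impl::SiteSpinor *buf, int LLs,
                  const typename Impl::FermionField &in,
                  typename Impl::FermionField &out)
{
  PARALLEL_FOR_LOOP
  for (int sU = 0; sU < U._grid->oSites(); sU++) {
    kernels.DhopSite(st, lo, U, UUU, buf, LLs, sU, in, out);
  }
}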
920 lib/qcd/action/fermion/StaggeredKernelsAsm.cc Normal file
@ -0,0 +1,920 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/StaggeredKernelsAsm.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid.h>

#ifdef AVX512
#include <simd/Intel512common.h>
#include <simd/Intel512avx.h>
#endif

// Interleave operations from two directions.
// This looks just like a 2-spin multiply and reuses the same sequence as the Wilson
// kernel, but the spin index becomes a mu index instead.
#define Chi_00 %zmm0
|
||||||
|
#define Chi_01 %zmm1
|
||||||
|
#define Chi_02 %zmm2
|
||||||
|
#define Chi_10 %zmm3
|
||||||
|
#define Chi_11 %zmm4
|
||||||
|
#define Chi_12 %zmm5
|
||||||
|
#define Chi_20 %zmm6
|
||||||
|
#define Chi_21 %zmm7
|
||||||
|
#define Chi_22 %zmm8
|
||||||
|
#define Chi_30 %zmm9
|
||||||
|
#define Chi_31 %zmm10
|
||||||
|
#define Chi_32 %zmm11
|
||||||
|
|
||||||
|
#define UChi_00 %zmm12
|
||||||
|
#define UChi_01 %zmm13
|
||||||
|
#define UChi_02 %zmm14
|
||||||
|
#define UChi_10 %zmm15
|
||||||
|
#define UChi_11 %zmm16
|
||||||
|
#define UChi_12 %zmm17
|
||||||
|
#define UChi_20 %zmm18
|
||||||
|
#define UChi_21 %zmm19
|
||||||
|
#define UChi_22 %zmm20
|
||||||
|
#define UChi_30 %zmm21
|
||||||
|
#define UChi_31 %zmm22
|
||||||
|
#define UChi_32 %zmm23
|
||||||
|
|
||||||
|
#define pChi_00 %%zmm0
|
||||||
|
#define pChi_01 %%zmm1
|
||||||
|
#define pChi_02 %%zmm2
|
||||||
|
#define pChi_10 %%zmm3
|
||||||
|
#define pChi_11 %%zmm4
|
||||||
|
#define pChi_12 %%zmm5
|
||||||
|
#define pChi_20 %%zmm6
|
||||||
|
#define pChi_21 %%zmm7
|
||||||
|
#define pChi_22 %%zmm8
|
||||||
|
#define pChi_30 %%zmm9
|
||||||
|
#define pChi_31 %%zmm10
|
||||||
|
#define pChi_32 %%zmm11
|
||||||
|
|
||||||
|
#define pUChi_00 %%zmm12
|
||||||
|
#define pUChi_01 %%zmm13
|
||||||
|
#define pUChi_02 %%zmm14
|
||||||
|
#define pUChi_10 %%zmm15
|
||||||
|
#define pUChi_11 %%zmm16
|
||||||
|
#define pUChi_12 %%zmm17
|
||||||
|
#define pUChi_20 %%zmm18
|
||||||
|
#define pUChi_21 %%zmm19
|
||||||
|
#define pUChi_22 %%zmm20
|
||||||
|
#define pUChi_30 %%zmm21
|
||||||
|
#define pUChi_31 %%zmm22
|
||||||
|
#define pUChi_32 %%zmm23
|
||||||
|
|
||||||
|
#define T0 %zmm24
|
||||||
|
#define T1 %zmm25
|
||||||
|
#define T2 %zmm26
|
||||||
|
#define T3 %zmm27
|
||||||
|
|
||||||
|
#define Z00 %zmm26
|
||||||
|
#define Z10 %zmm27
|
||||||
|
#define Z0 Z00
|
||||||
|
#define Z1 %zmm28
|
||||||
|
#define Z2 %zmm29
|
||||||
|
|
||||||
|
#define Z3 %zmm30
|
||||||
|
#define Z4 %zmm31
|
||||||
|
#define Z5 Chi_31
|
||||||
|
#define Z6 Chi_32
|
||||||
|
|
||||||
|
#define MULT_ADD_LS(g0,g1,g2,g3) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" \
|
||||||
|
"movq %2, %%r10 \n\t" \
|
||||||
|
"movq %3, %%r11 \n\t" : : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\
|
||||||
|
asm ( \
|
||||||
|
VSHUF(Chi_00,T0) VSHUF(Chi_10,T1) \
|
||||||
|
VSHUF(Chi_20,T2) VSHUF(Chi_30,T3) \
|
||||||
|
VMADDSUBIDUP(0,%r8,T0,UChi_00) VMADDSUBIDUP(0,%r9,T1,UChi_10) \
|
||||||
|
VMADDSUBIDUP(3,%r8,T0,UChi_01) VMADDSUBIDUP(3,%r9,T1,UChi_11) \
|
||||||
|
VMADDSUBIDUP(6,%r8,T0,UChi_02) VMADDSUBIDUP(6,%r9,T1,UChi_12) \
|
||||||
|
VMADDSUBIDUP(0,%r10,T2,UChi_20) VMADDSUBIDUP(0,%r11,T3,UChi_30) \
|
||||||
|
VMADDSUBIDUP(3,%r10,T2,UChi_21) VMADDSUBIDUP(3,%r11,T3,UChi_31) \
|
||||||
|
VMADDSUBIDUP(6,%r10,T2,UChi_22) VMADDSUBIDUP(6,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \
|
||||||
|
VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \
|
||||||
|
VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \
|
||||||
|
VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \
|
||||||
|
VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \
|
||||||
|
VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \
|
||||||
|
VSHUF(Chi_01,T0) VSHUF(Chi_11,T1) \
|
||||||
|
VSHUF(Chi_21,T2) VSHUF(Chi_31,T3) \
|
||||||
|
VMADDSUBIDUP(1,%r8,T0,UChi_00) VMADDSUBIDUP(1,%r9,T1,UChi_10) \
|
||||||
|
VMADDSUBIDUP(4,%r8,T0,UChi_01) VMADDSUBIDUP(4,%r9,T1,UChi_11) \
|
||||||
|
VMADDSUBIDUP(7,%r8,T0,UChi_02) VMADDSUBIDUP(7,%r9,T1,UChi_12) \
|
||||||
|
VMADDSUBIDUP(1,%r10,T2,UChi_20) VMADDSUBIDUP(1,%r11,T3,UChi_30) \
|
||||||
|
VMADDSUBIDUP(4,%r10,T2,UChi_21) VMADDSUBIDUP(4,%r11,T3,UChi_31) \
|
||||||
|
VMADDSUBIDUP(7,%r10,T2,UChi_22) VMADDSUBIDUP(7,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \
|
||||||
|
VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \
|
||||||
|
VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \
|
||||||
|
VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \
|
||||||
|
VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \
|
||||||
|
VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \
|
||||||
|
VSHUF(Chi_02,T0) VSHUF(Chi_12,T1) \
|
||||||
|
VSHUF(Chi_22,T2) VSHUF(Chi_32,T3) \
|
||||||
|
VMADDSUBIDUP(2,%r8,T0,UChi_00) VMADDSUBIDUP(2,%r9,T1,UChi_10) \
|
||||||
|
VMADDSUBIDUP(5,%r8,T0,UChi_01) VMADDSUBIDUP(5,%r9,T1,UChi_11) \
|
||||||
|
VMADDSUBIDUP(8,%r8,T0,UChi_02) VMADDSUBIDUP(8,%r9,T1,UChi_12) \
|
||||||
|
VMADDSUBIDUP(2,%r10,T2,UChi_20) VMADDSUBIDUP(2,%r11,T3,UChi_30) \
|
||||||
|
VMADDSUBIDUP(5,%r10,T2,UChi_21) VMADDSUBIDUP(5,%r11,T3,UChi_31) \
|
||||||
|
VMADDSUBIDUP(8,%r10,T2,UChi_22) VMADDSUBIDUP(8,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \
|
||||||
|
VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \
|
||||||
|
VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \
|
||||||
|
VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \
|
||||||
|
VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \
|
||||||
|
VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) );
|
||||||
|
|
||||||
|
#define MULT_LS(g0,g1,g2,g3) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" \
|
||||||
|
"movq %2, %%r10 \n\t" \
|
||||||
|
"movq %3, %%r11 \n\t" : : "r"(g0), "r"(g1), "r"(g2), "r"(g3) : "%r8","%r9","%r10","%r11" );\
|
||||||
|
asm ( \
|
||||||
|
VSHUF(Chi_00,T0) VSHUF(Chi_10,T1) \
|
||||||
|
VSHUF(Chi_20,T2) VSHUF(Chi_30,T3) \
|
||||||
|
VMULIDUP(0,%r8,T0,UChi_00) VMULIDUP(0,%r9,T1,UChi_10) \
|
||||||
|
VMULIDUP(3,%r8,T0,UChi_01) VMULIDUP(3,%r9,T1,UChi_11) \
|
||||||
|
VMULIDUP(6,%r8,T0,UChi_02) VMULIDUP(6,%r9,T1,UChi_12) \
|
||||||
|
VMULIDUP(0,%r10,T2,UChi_20) VMULIDUP(0,%r11,T3,UChi_30) \
|
||||||
|
VMULIDUP(3,%r10,T2,UChi_21) VMULIDUP(3,%r11,T3,UChi_31) \
|
||||||
|
VMULIDUP(6,%r10,T2,UChi_22) VMULIDUP(6,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(0,%r8,Chi_00,UChi_00) VMADDSUBRDUP(0,%r9,Chi_10,UChi_10) \
|
||||||
|
VMADDSUBRDUP(3,%r8,Chi_00,UChi_01) VMADDSUBRDUP(3,%r9,Chi_10,UChi_11) \
|
||||||
|
VMADDSUBRDUP(6,%r8,Chi_00,UChi_02) VMADDSUBRDUP(6,%r9,Chi_10,UChi_12) \
|
||||||
|
VMADDSUBRDUP(0,%r10,Chi_20,UChi_20) VMADDSUBRDUP(0,%r11,Chi_30,UChi_30) \
|
||||||
|
VMADDSUBRDUP(3,%r10,Chi_20,UChi_21) VMADDSUBRDUP(3,%r11,Chi_30,UChi_31) \
|
||||||
|
VMADDSUBRDUP(6,%r10,Chi_20,UChi_22) VMADDSUBRDUP(6,%r11,Chi_30,UChi_32) \
|
||||||
|
VSHUF(Chi_01,T0) VSHUF(Chi_11,T1) \
|
||||||
|
VSHUF(Chi_21,T2) VSHUF(Chi_31,T3) \
|
||||||
|
VMADDSUBIDUP(1,%r8,T0,UChi_00) VMADDSUBIDUP(1,%r9,T1,UChi_10) \
|
||||||
|
VMADDSUBIDUP(4,%r8,T0,UChi_01) VMADDSUBIDUP(4,%r9,T1,UChi_11) \
|
||||||
|
VMADDSUBIDUP(7,%r8,T0,UChi_02) VMADDSUBIDUP(7,%r9,T1,UChi_12) \
|
||||||
|
VMADDSUBIDUP(1,%r10,T2,UChi_20) VMADDSUBIDUP(1,%r11,T3,UChi_30) \
|
||||||
|
VMADDSUBIDUP(4,%r10,T2,UChi_21) VMADDSUBIDUP(4,%r11,T3,UChi_31) \
|
||||||
|
VMADDSUBIDUP(7,%r10,T2,UChi_22) VMADDSUBIDUP(7,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(1,%r8,Chi_01,UChi_00) VMADDSUBRDUP(1,%r9,Chi_11,UChi_10) \
|
||||||
|
VMADDSUBRDUP(4,%r8,Chi_01,UChi_01) VMADDSUBRDUP(4,%r9,Chi_11,UChi_11) \
|
||||||
|
VMADDSUBRDUP(7,%r8,Chi_01,UChi_02) VMADDSUBRDUP(7,%r9,Chi_11,UChi_12) \
|
||||||
|
VMADDSUBRDUP(1,%r10,Chi_21,UChi_20) VMADDSUBRDUP(1,%r11,Chi_31,UChi_30) \
|
||||||
|
VMADDSUBRDUP(4,%r10,Chi_21,UChi_21) VMADDSUBRDUP(4,%r11,Chi_31,UChi_31) \
|
||||||
|
VMADDSUBRDUP(7,%r10,Chi_21,UChi_22) VMADDSUBRDUP(7,%r11,Chi_31,UChi_32) \
|
||||||
|
VSHUF(Chi_02,T0) VSHUF(Chi_12,T1) \
|
||||||
|
VSHUF(Chi_22,T2) VSHUF(Chi_32,T3) \
|
||||||
|
VMADDSUBIDUP(2,%r8,T0,UChi_00) VMADDSUBIDUP(2,%r9,T1,UChi_10) \
|
||||||
|
VMADDSUBIDUP(5,%r8,T0,UChi_01) VMADDSUBIDUP(5,%r9,T1,UChi_11) \
|
||||||
|
VMADDSUBIDUP(8,%r8,T0,UChi_02) VMADDSUBIDUP(8,%r9,T1,UChi_12) \
|
||||||
|
VMADDSUBIDUP(2,%r10,T2,UChi_20) VMADDSUBIDUP(2,%r11,T3,UChi_30) \
|
||||||
|
VMADDSUBIDUP(5,%r10,T2,UChi_21) VMADDSUBIDUP(5,%r11,T3,UChi_31) \
|
||||||
|
VMADDSUBIDUP(8,%r10,T2,UChi_22) VMADDSUBIDUP(8,%r11,T3,UChi_32) \
|
||||||
|
VMADDSUBRDUP(2,%r8,Chi_02,UChi_00) VMADDSUBRDUP(2,%r9,Chi_12,UChi_10) \
|
||||||
|
VMADDSUBRDUP(5,%r8,Chi_02,UChi_01) VMADDSUBRDUP(5,%r9,Chi_12,UChi_11) \
|
||||||
|
VMADDSUBRDUP(8,%r8,Chi_02,UChi_02) VMADDSUBRDUP(8,%r9,Chi_12,UChi_12) \
|
||||||
|
VMADDSUBRDUP(2,%r10,Chi_22,UChi_20) VMADDSUBRDUP(2,%r11,Chi_32,UChi_30) \
|
||||||
|
VMADDSUBRDUP(5,%r10,Chi_22,UChi_21) VMADDSUBRDUP(5,%r11,Chi_32,UChi_31) \
|
||||||
|
VMADDSUBRDUP(8,%r10,Chi_22,UChi_22) VMADDSUBRDUP(8,%r11,Chi_32,UChi_32) );
|
||||||
|
|
||||||
|
#define MULT_ADD_XYZTa(g0,g1) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9");\
|
||||||
|
__asm__ ( \
|
||||||
|
VSHUF(Chi_00,T0) \
|
||||||
|
VSHUF(Chi_10,T1) \
|
||||||
|
VMOVIDUP(0,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(3,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(6,%r8,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T0,UChi_00) \
|
||||||
|
VMADDSUB(Z1,T0,UChi_01) \
|
||||||
|
VMADDSUB(Z2,T0,UChi_02) \
|
||||||
|
\
|
||||||
|
VMOVIDUP(0,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(3,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(6,%r9,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T1,UChi_10) \
|
||||||
|
VMADDSUB(Z1,T1,UChi_11) \
|
||||||
|
VMADDSUB(Z2,T1,UChi_12) \
|
||||||
|
\
|
||||||
|
\
|
||||||
|
VMOVRDUP(0,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(3,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(6,%r8,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_00,UChi_00)/*rr * ir = ri rr*/ \
|
||||||
|
VMADDSUB(Z4,Chi_00,UChi_01) \
|
||||||
|
VMADDSUB(Z5,Chi_00,UChi_02) \
|
||||||
|
\
|
||||||
|
VMOVRDUP(0,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(3,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(6,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_10,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_10,UChi_11)\
|
||||||
|
VMADDSUB(Z5,Chi_10,UChi_12) \
|
||||||
|
\
|
||||||
|
\
|
||||||
|
VMOVIDUP(1,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(4,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(7,%r8,Z2 ) \
|
||||||
|
VSHUF(Chi_01,T0) \
|
||||||
|
VMADDSUB(Z0,T0,UChi_00) \
|
||||||
|
VMADDSUB(Z1,T0,UChi_01) \
|
||||||
|
VMADDSUB(Z2,T0,UChi_02) \
|
||||||
|
\
|
||||||
|
VMOVIDUP(1,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(4,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(7,%r9,Z2 ) \
|
||||||
|
VSHUF(Chi_11,T1) \
|
||||||
|
VMADDSUB(Z0,T1,UChi_10) \
|
||||||
|
VMADDSUB(Z1,T1,UChi_11) \
|
||||||
|
VMADDSUB(Z2,T1,UChi_12) \
|
||||||
|
\
|
||||||
|
VMOVRDUP(1,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(4,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(7,%r8,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_01,UChi_00) \
|
||||||
|
VMADDSUB(Z4,Chi_01,UChi_01) \
|
||||||
|
VMADDSUB(Z5,Chi_01,UChi_02) \
|
||||||
|
\
|
||||||
|
VMOVRDUP(1,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(4,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(7,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_11,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_11,UChi_11) \
|
||||||
|
VMADDSUB(Z5,Chi_11,UChi_12) \
|
||||||
|
\
|
||||||
|
VSHUF(Chi_02,T0) \
|
||||||
|
VSHUF(Chi_12,T1) \
|
||||||
|
VMOVIDUP(2,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(5,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(8,%r8,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T0,UChi_00) \
|
||||||
|
VMADDSUB(Z1,T0,UChi_01) \
|
||||||
|
VMADDSUB(Z2,T0,UChi_02) \
|
||||||
|
VMOVIDUP(2,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(5,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(8,%r9,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T1,UChi_10) \
|
||||||
|
VMADDSUB(Z1,T1,UChi_11) \
|
||||||
|
VMADDSUB(Z2,T1,UChi_12) \
|
||||||
|
/*55*/ \
|
||||||
|
VMOVRDUP(2,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(5,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(8,%r8,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_02,UChi_00) \
|
||||||
|
VMADDSUB(Z4,Chi_02,UChi_01) \
|
||||||
|
VMADDSUB(Z5,Chi_02,UChi_02) \
|
||||||
|
VMOVRDUP(2,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(5,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(8,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_12,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_12,UChi_11) \
|
||||||
|
VMADDSUB(Z5,Chi_12,UChi_12) \
|
||||||
|
/*61 insns*/ );
|
||||||
|
|
||||||
|
#define MULT_ADD_XYZT(g0,g1) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9");\
|
||||||
|
__asm__ ( \
|
||||||
|
VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \
|
||||||
|
VRDUP(Chi_00,T0) VIDUP(Chi_00,Chi_00) \
|
||||||
|
VRDUP(Chi_10,T1) VIDUP(Chi_10,Chi_10) \
|
||||||
|
VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \
|
||||||
|
VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \
|
||||||
|
VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \
|
||||||
|
VSHUFMEM(6,%r8,Z00) VSHUFMEM(6,%r9,Z10) \
|
||||||
|
VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \
|
||||||
|
VMADDMEM(0,%r8,T0,UChi_00) VMADDMEM(0,%r9,T1,UChi_10) \
|
||||||
|
VMADDMEM(3,%r8,T0,UChi_01) VMADDMEM(3,%r9,T1,UChi_11) \
|
||||||
|
VMADDMEM(6,%r8,T0,UChi_02) VMADDMEM(6,%r9,T1,UChi_12) \
|
||||||
|
VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \
|
||||||
|
VRDUP(Chi_01,T0) VIDUP(Chi_01,Chi_01) \
|
||||||
|
VRDUP(Chi_11,T1) VIDUP(Chi_11,Chi_11) \
|
||||||
|
VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \
|
||||||
|
VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \
|
||||||
|
VSHUFMEM(7,%r8,Z00) VSHUFMEM(7,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \
|
||||||
|
VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10) \
|
||||||
|
VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11) \
|
||||||
|
VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12) \
|
||||||
|
VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \
|
||||||
|
VRDUP(Chi_02,T0) VIDUP(Chi_02,Chi_02) \
|
||||||
|
VRDUP(Chi_12,T1) VIDUP(Chi_12,Chi_12) \
|
||||||
|
VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \
|
||||||
|
VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \
|
||||||
|
VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \
|
||||||
|
VMADDSUBMEM(2,%r8,T0,Z1) VMADDSUBMEM(2,%r9,T1,Z2) \
|
||||||
|
VMADDSUBMEM(5,%r8,T0,Z3) VMADDSUBMEM(5,%r9,T1,Z4) \
|
||||||
|
VMADDSUBMEM(8,%r8,T0,Z5) VMADDSUBMEM(8,%r9,T1,Z6) \
|
||||||
|
VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \
|
||||||
|
VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \
|
||||||
|
VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) );
|
||||||
|
|
||||||
|
#define MULT_XYZT(g0,g1) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9" ); \
|
||||||
|
__asm__ ( \
|
||||||
|
VSHUF(Chi_00,T0) \
|
||||||
|
VSHUF(Chi_10,T1) \
|
||||||
|
VMOVIDUP(0,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(3,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(6,%r8,Z2 ) \
|
||||||
|
/*6*/ \
|
||||||
|
VMUL(Z0,T0,UChi_00) \
|
||||||
|
VMUL(Z1,T0,UChi_01) \
|
||||||
|
VMUL(Z2,T0,UChi_02) \
|
||||||
|
VMOVIDUP(0,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(3,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(6,%r9,Z2 ) \
|
||||||
|
VMUL(Z0,T1,UChi_10) \
|
||||||
|
VMUL(Z1,T1,UChi_11) \
|
||||||
|
VMUL(Z2,T1,UChi_12) \
|
||||||
|
VMOVRDUP(0,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(3,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(6,%r8,Z5 ) \
|
||||||
|
/*18*/ \
|
||||||
|
VMADDSUB(Z3,Chi_00,UChi_00) \
|
||||||
|
VMADDSUB(Z4,Chi_00,UChi_01)\
|
||||||
|
VMADDSUB(Z5,Chi_00,UChi_02) \
|
||||||
|
VMOVRDUP(0,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(3,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(6,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_10,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_10,UChi_11)\
|
||||||
|
VMADDSUB(Z5,Chi_10,UChi_12) \
|
||||||
|
VMOVIDUP(1,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(4,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(7,%r8,Z2 ) \
|
||||||
|
/*28*/ \
|
||||||
|
VSHUF(Chi_01,T0) \
|
||||||
|
VMADDSUB(Z0,T0,UChi_00) \
|
||||||
|
VMADDSUB(Z1,T0,UChi_01) \
|
||||||
|
VMADDSUB(Z2,T0,UChi_02) \
|
||||||
|
VMOVIDUP(1,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(4,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(7,%r9,Z2 ) \
|
||||||
|
VSHUF(Chi_11,T1) \
|
||||||
|
VMADDSUB(Z0,T1,UChi_10) \
|
||||||
|
VMADDSUB(Z1,T1,UChi_11) \
|
||||||
|
VMADDSUB(Z2,T1,UChi_12) \
|
||||||
|
VMOVRDUP(1,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(4,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(7,%r8,Z5 ) \
|
||||||
|
/*38*/ \
|
||||||
|
VMADDSUB(Z3,Chi_01,UChi_00) \
|
||||||
|
VMADDSUB(Z4,Chi_01,UChi_01) \
|
||||||
|
VMADDSUB(Z5,Chi_01,UChi_02) \
|
||||||
|
VMOVRDUP(1,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(4,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(7,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_11,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_11,UChi_11) \
|
||||||
|
VMADDSUB(Z5,Chi_11,UChi_12) \
|
||||||
|
/*48*/ \
|
||||||
|
VSHUF(Chi_02,T0) \
|
||||||
|
VSHUF(Chi_12,T1) \
|
||||||
|
VMOVIDUP(2,%r8,Z0 ) \
|
||||||
|
VMOVIDUP(5,%r8,Z1 ) \
|
||||||
|
VMOVIDUP(8,%r8,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T0,UChi_00) \
|
||||||
|
VMADDSUB(Z1,T0,UChi_01) \
|
||||||
|
VMADDSUB(Z2,T0,UChi_02) \
|
||||||
|
VMOVIDUP(2,%r9,Z0 ) \
|
||||||
|
VMOVIDUP(5,%r9,Z1 ) \
|
||||||
|
VMOVIDUP(8,%r9,Z2 ) \
|
||||||
|
VMADDSUB(Z0,T1,UChi_10) \
|
||||||
|
VMADDSUB(Z1,T1,UChi_11) \
|
||||||
|
VMADDSUB(Z2,T1,UChi_12) \
|
||||||
|
/*55*/ \
|
||||||
|
VMOVRDUP(2,%r8,Z3 ) \
|
||||||
|
VMOVRDUP(5,%r8,Z4 ) \
|
||||||
|
VMOVRDUP(8,%r8,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_02,UChi_00) \
|
||||||
|
VMADDSUB(Z4,Chi_02,UChi_01) \
|
||||||
|
VMADDSUB(Z5,Chi_02,UChi_02) \
|
||||||
|
VMOVRDUP(2,%r9,Z3 ) \
|
||||||
|
VMOVRDUP(5,%r9,Z4 ) \
|
||||||
|
VMOVRDUP(8,%r9,Z5 ) \
|
||||||
|
VMADDSUB(Z3,Chi_12,UChi_10) \
|
||||||
|
VMADDSUB(Z4,Chi_12,UChi_11) \
|
||||||
|
VMADDSUB(Z5,Chi_12,UChi_12) \
|
||||||
|
/*61 insns*/ );
|
||||||
|
|
||||||
|
#define MULT_XYZTa(g0,g1) \
|
||||||
|
asm ( "movq %0, %%r8 \n\t" \
|
||||||
|
"movq %1, %%r9 \n\t" : : "r"(g0), "r"(g1) : "%r8","%r9" ); \
|
||||||
|
__asm__ ( \
|
||||||
|
VSHUFMEM(0,%r8,Z00) VSHUFMEM(0,%r9,Z10) \
|
||||||
|
VRDUP(Chi_00,T0) VIDUP(Chi_00,Chi_00) \
|
||||||
|
VRDUP(Chi_10,T1) VIDUP(Chi_10,Chi_10) \
|
||||||
|
VMUL(Z00,Chi_00,Z1) VMUL(Z10,Chi_10,Z2) \
|
||||||
|
VSHUFMEM(3,%r8,Z00) VSHUFMEM(3,%r9,Z10) \
|
||||||
|
VMUL(Z00,Chi_00,Z3) VMUL(Z10,Chi_10,Z4) \
|
||||||
|
VSHUFMEM(6,%r8,Z00) VSHUFMEM(6,%r9,Z10) \
|
||||||
|
VMUL(Z00,Chi_00,Z5) VMUL(Z10,Chi_10,Z6) \
|
||||||
|
VMULMEM(0,%r8,T0,UChi_00) VMULMEM(0,%r9,T1,UChi_10) \
|
||||||
|
VMULMEM(3,%r8,T0,UChi_01) VMULMEM(3,%r9,T1,UChi_11) \
|
||||||
|
VMULMEM(6,%r8,T0,UChi_02) VMULMEM(6,%r9,T1,UChi_12) \
|
||||||
|
VSHUFMEM(1,%r8,Z00) VSHUFMEM(1,%r9,Z10) \
|
||||||
|
VRDUP(Chi_01,T0) VIDUP(Chi_01,Chi_01) \
|
||||||
|
VRDUP(Chi_11,T1) VIDUP(Chi_11,Chi_11) \
|
||||||
|
VMADD(Z00,Chi_01,Z1) VMADD(Z10,Chi_11,Z2) \
|
||||||
|
VSHUFMEM(4,%r8,Z00) VSHUFMEM(4,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_01,Z3) VMADD(Z10,Chi_11,Z4) \
|
||||||
|
VSHUFMEM(7,%r8,Z00) VSHUFMEM(7,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_01,Z5) VMADD(Z10,Chi_11,Z6) \
|
||||||
|
VMADDMEM(1,%r8,T0,UChi_00) VMADDMEM(1,%r9,T1,UChi_10) \
|
||||||
|
VMADDMEM(4,%r8,T0,UChi_01) VMADDMEM(4,%r9,T1,UChi_11) \
|
||||||
|
VMADDMEM(7,%r8,T0,UChi_02) VMADDMEM(7,%r9,T1,UChi_12) \
|
||||||
|
VSHUFMEM(2,%r8,Z00) VSHUFMEM(2,%r9,Z10) \
|
||||||
|
VRDUP(Chi_02,T0) VIDUP(Chi_02,Chi_02) \
|
||||||
|
VRDUP(Chi_12,T1) VIDUP(Chi_12,Chi_12) \
|
||||||
|
VMADD(Z00,Chi_02,Z1) VMADD(Z10,Chi_12,Z2) \
|
||||||
|
VSHUFMEM(5,%r8,Z00) VSHUFMEM(5,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_02,Z3) VMADD(Z10,Chi_12,Z4) \
|
||||||
|
VSHUFMEM(8,%r8,Z00) VSHUFMEM(8,%r9,Z10) \
|
||||||
|
VMADD(Z00,Chi_02,Z5) VMADD(Z10,Chi_12,Z6) \
|
||||||
|
VMADDSUBMEM(2,%r8,T0,Z1) VMADDSUBMEM(2,%r9,T1,Z2) \
|
||||||
|
VMADDSUBMEM(5,%r8,T0,Z3) VMADDSUBMEM(5,%r9,T1,Z4) \
|
||||||
|
VMADDSUBMEM(8,%r8,T0,Z5) VMADDSUBMEM(8,%r9,T1,Z6) \
|
||||||
|
VADD(Z1,UChi_00,UChi_00) VADD(Z2,UChi_10,UChi_10) \
|
||||||
|
VADD(Z3,UChi_01,UChi_01) VADD(Z4,UChi_11,UChi_11) \
|
||||||
|
VADD(Z5,UChi_02,UChi_02) VADD(Z6,UChi_12,UChi_12) );
|
||||||
|
|
||||||
|
|
||||||
|
#define LOAD_CHI(a0,a1,a2,a3) \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_00) \
|
||||||
|
VLOAD(1,%%r8,pChi_01) \
|
||||||
|
VLOAD(2,%%r8,pChi_02) \
|
||||||
|
: : "r" (a0) : "%r8" ); \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_10) \
|
||||||
|
VLOAD(1,%%r8,pChi_11) \
|
||||||
|
VLOAD(2,%%r8,pChi_12) \
|
||||||
|
: : "r" (a1) : "%r8" ); \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_20) \
|
||||||
|
VLOAD(1,%%r8,pChi_21) \
|
||||||
|
VLOAD(2,%%r8,pChi_22) \
|
||||||
|
: : "r" (a2) : "%r8" ); \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_30) \
|
||||||
|
VLOAD(1,%%r8,pChi_31) \
|
||||||
|
VLOAD(2,%%r8,pChi_32) \
|
||||||
|
: : "r" (a3) : "%r8" );
|
||||||
|
|
||||||
|
#define LOAD_CHIa(a0,a1) \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_00) \
|
||||||
|
VLOAD(1,%%r8,pChi_01) \
|
||||||
|
VLOAD(2,%%r8,pChi_02) \
|
||||||
|
: : "r" (a0) : "%r8" ); \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VLOAD(0,%%r8,pChi_10) \
|
||||||
|
VLOAD(1,%%r8,pChi_11) \
|
||||||
|
VLOAD(2,%%r8,pChi_12) \
|
||||||
|
: : "r" (a1) : "%r8" );
|
||||||
|
|
||||||
|
#define PF_CHI(a0)
|
||||||
|
#define PF_CHIa(a0) \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VPREFETCH1(0,%%r8) \
|
||||||
|
VPREFETCH1(1,%%r8) \
|
||||||
|
VPREFETCH1(2,%%r8) \
|
||||||
|
: : "r" (a0) : "%r8" ); \
|
||||||
|
|
||||||
|
#define PF_GAUGE_XYZT(a0)
|
||||||
|
#define PF_GAUGE_XYZTa(a0) \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VPREFETCH1(0,%%r8) \
|
||||||
|
VPREFETCH1(1,%%r8) \
|
||||||
|
VPREFETCH1(2,%%r8) \
|
||||||
|
VPREFETCH1(3,%%r8) \
|
||||||
|
VPREFETCH1(4,%%r8) \
|
||||||
|
VPREFETCH1(5,%%r8) \
|
||||||
|
VPREFETCH1(6,%%r8) \
|
||||||
|
VPREFETCH1(7,%%r8) \
|
||||||
|
VPREFETCH1(8,%%r8) \
|
||||||
|
: : "r" (a0) : "%r8" ); \
|
||||||
|
|
||||||
|
#define PF_GAUGE_LS(a0)
|
||||||
|
#define PF_GAUGE_LSa(a0) \
|
||||||
|
asm ( \
|
||||||
|
"movq %0, %%r8 \n\t" \
|
||||||
|
VPREFETCH1(0,%%r8) \
|
||||||
|
VPREFETCH1(1,%%r8) \
|
||||||
|
: : "r" (a0) : "%r8" ); \
|
||||||
|
|
||||||
|
|
||||||
|
#define REDUCE(out) \
|
||||||
|
asm ( \
|
||||||
|
VADD(UChi_00,UChi_10,UChi_00) \
|
||||||
|
VADD(UChi_01,UChi_11,UChi_01) \
|
||||||
|
VADD(UChi_02,UChi_12,UChi_02) \
|
||||||
|
VADD(UChi_30,UChi_20,UChi_30) \
|
||||||
|
VADD(UChi_31,UChi_21,UChi_31) \
|
||||||
|
VADD(UChi_32,UChi_22,UChi_32) \
|
||||||
|
VADD(UChi_00,UChi_30,UChi_00) \
|
||||||
|
VADD(UChi_01,UChi_31,UChi_01) \
|
||||||
|
VADD(UChi_02,UChi_32,UChi_02) ); \
|
||||||
|
asm ( \
|
||||||
|
VSTORE(0,%0,pUChi_00) \
|
||||||
|
VSTORE(1,%0,pUChi_01) \
|
||||||
|
VSTORE(2,%0,pUChi_02) \
|
||||||
|
: : "r" (out) : "memory" );
|
||||||
|
|
||||||
|
#define REDUCEa(out) \
|
||||||
|
asm ( \
|
||||||
|
VADD(UChi_00,UChi_10,UChi_00) \
|
||||||
|
VADD(UChi_01,UChi_11,UChi_01) \
|
||||||
|
VADD(UChi_02,UChi_12,UChi_02) ); \
|
||||||
|
asm ( \
|
||||||
|
VSTORE(0,%0,pUChi_00) \
|
||||||
|
VSTORE(1,%0,pUChi_01) \
|
||||||
|
VSTORE(2,%0,pUChi_02) \
|
||||||
|
: : "r" (out) : "memory" );
|
||||||
|
|
||||||
|
#define PERMUTE_DIR(dir) \
|
||||||
|
permute##dir(Chi_0,Chi_0);\
|
||||||
|
permute##dir(Chi_1,Chi_1);\
|
||||||
|
permute##dir(Chi_2,Chi_2);
|
||||||
|
|
||||||
|
namespace Grid {
|
||||||
|
namespace QCD {
|
||||||
|
|
||||||
|
template <class Impl>
|
||||||
|
void StaggeredKernels<Impl>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
|
||||||
|
DoubledGaugeField &U,
|
||||||
|
DoubledGaugeField &UUU,
|
||||||
|
SiteSpinor *buf, int LLs,
|
||||||
|
int sU, const FermionField &in, FermionField &out)
|
||||||
|
{
|
||||||
|
assert(0);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
//#define CONDITIONAL_MOVE(l,o,out) if ( l ) { out = (uint64_t) &in._odata[o] ; } else { out =(uint64_t) &buf[o]; }
|
||||||
|
|
||||||
|
#define CONDITIONAL_MOVE(l,o,out) { const SiteSpinor *ptr = l? in_p : buf; out = (uint64_t) &ptr[o]; }
|
||||||
|
|
||||||
|
#define PREPARE_XYZT(X,Y,Z,T,skew,UU) \
|
||||||
|
PREPARE(X,Y,Z,T,skew,UU); \
|
||||||
|
PF_GAUGE_XYZT(gauge0); \
|
||||||
|
PF_GAUGE_XYZT(gauge1); \
|
||||||
|
PF_GAUGE_XYZT(gauge2); \
|
||||||
|
PF_GAUGE_XYZT(gauge3);
|
||||||
|
|
||||||
|
#define PREPARE_LS(X,Y,Z,T,skew,UU) \
|
||||||
|
PREPARE(X,Y,Z,T,skew,UU); \
|
||||||
|
PF_GAUGE_LS(gauge0); \
|
||||||
|
PF_GAUGE_LS(gauge1); \
|
||||||
|
PF_GAUGE_LS(gauge2); \
|
||||||
|
PF_GAUGE_LS(gauge3);
|
||||||
|
|
||||||
|
#define PREPARE(X,Y,Z,T,skew,UU) \
|
||||||
|
SE0=st.GetEntry(ptype,X+skew,sF); \
|
||||||
|
o0 = SE0->_offset; \
|
||||||
|
l0 = SE0->_is_local; \
|
||||||
|
p0 = SE0->_permute; \
|
||||||
|
CONDITIONAL_MOVE(l0,o0,addr0); \
|
||||||
|
PF_CHI(addr0); \
|
||||||
|
\
|
||||||
|
SE1=st.GetEntry(ptype,Y+skew,sF); \
|
||||||
|
o1 = SE1->_offset; \
|
||||||
|
l1 = SE1->_is_local; \
|
||||||
|
p1 = SE1->_permute; \
|
||||||
|
CONDITIONAL_MOVE(l1,o1,addr1); \
|
||||||
|
PF_CHI(addr1); \
|
||||||
|
\
|
||||||
|
SE2=st.GetEntry(ptype,Z+skew,sF); \
|
||||||
|
o2 = SE2->_offset; \
|
||||||
|
l2 = SE2->_is_local; \
|
||||||
|
p2 = SE2->_permute; \
|
||||||
|
CONDITIONAL_MOVE(l2,o2,addr2); \
|
||||||
|
PF_CHI(addr2); \
|
||||||
|
\
|
||||||
|
SE3=st.GetEntry(ptype,T+skew,sF); \
|
||||||
|
o3 = SE3->_offset; \
|
||||||
|
l3 = SE3->_is_local; \
|
||||||
|
p3 = SE3->_permute; \
|
||||||
|
CONDITIONAL_MOVE(l3,o3,addr3); \
|
||||||
|
PF_CHI(addr3); \
|
||||||
|
\
|
||||||
|
gauge0 =(uint64_t)&UU._odata[sU]( X ); \
|
||||||
|
gauge1 =(uint64_t)&UU._odata[sU]( Y ); \
|
||||||
|
gauge2 =(uint64_t)&UU._odata[sU]( Z ); \
|
||||||
|
gauge3 =(uint64_t)&UU._odata[sU]( T );
|
||||||
|
|
||||||
|
// This is the single precision 5th direction vectorised kernel
|
||||||
|
#include <simd/Intel512single.h>
|
||||||
|
template <> void StaggeredKernels<StaggeredVec5dImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
|
||||||
|
DoubledGaugeField &U,
|
||||||
|
DoubledGaugeField &UUU,
|
||||||
|
SiteSpinor *buf, int LLs,
|
||||||
|
int sU, const FermionField &in, FermionField &out)
|
||||||
|
{
|
||||||
|
#ifdef AVX512
|
||||||
|
uint64_t gauge0,gauge1,gauge2,gauge3;
|
||||||
|
uint64_t addr0,addr1,addr2,addr3;
|
||||||
|
const SiteSpinor *in_p; in_p = &in._odata[0];
|
||||||
|
|
||||||
|
int o0,o1,o2,o3; // offsets
|
||||||
|
int l0,l1,l2,l3; // local
|
||||||
|
int p0,p1,p2,p3; // perm
|
||||||
|
int ptype;
|
||||||
|
StencilEntry *SE0;
|
||||||
|
StencilEntry *SE1;
|
||||||
|
StencilEntry *SE2;
|
||||||
|
StencilEntry *SE3;
|
||||||
|
|
||||||
|
for(int s=0;s<LLs;s++){
|
||||||
|
|
||||||
|
int sF=s+LLs*sU;
|
||||||
|
// Xp, Yp, Zp, Tp
|
||||||
|
PREPARE(Xp,Yp,Zp,Tp,0,U);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xm,Ym,Zm,Tm,0,U);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xp,Yp,Zp,Tp,8,UUU);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xm,Ym,Zm,Tm,8,UUU);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
addr0 = (uint64_t) &out._odata[sF];
|
||||||
|
REDUCE(addr0);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
assert(0);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
#include <simd/Intel512double.h>
|
||||||
|
template <> void StaggeredKernels<StaggeredVec5dImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
|
||||||
|
DoubledGaugeField &U,
|
||||||
|
DoubledGaugeField &UUU,
|
||||||
|
SiteSpinor *buf, int LLs,
|
||||||
|
int sU, const FermionField &in, FermionField &out)
|
||||||
|
{
|
||||||
|
#ifdef AVX512
|
||||||
|
uint64_t gauge0,gauge1,gauge2,gauge3;
|
||||||
|
uint64_t addr0,addr1,addr2,addr3;
|
||||||
|
const SiteSpinor *in_p; in_p = &in._odata[0];
|
||||||
|
|
||||||
|
int o0,o1,o2,o3; // offsets
|
||||||
|
int l0,l1,l2,l3; // local
|
||||||
|
int p0,p1,p2,p3; // perm
|
||||||
|
int ptype;
|
||||||
|
StencilEntry *SE0;
|
||||||
|
StencilEntry *SE1;
|
||||||
|
StencilEntry *SE2;
|
||||||
|
StencilEntry *SE3;
|
||||||
|
|
||||||
|
for(int s=0;s<LLs;s++){
|
||||||
|
int sF=s+LLs*sU;
|
||||||
|
// Xp, Yp, Zp, Tp
|
||||||
|
PREPARE(Xp,Yp,Zp,Tp,0,U);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xm,Ym,Zm,Tm,0,U);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xp,Yp,Zp,Tp,8,UUU);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
PREPARE(Xm,Ym,Zm,Tm,8,UUU);
|
||||||
|
LOAD_CHI(addr0,addr1,addr2,addr3);
|
||||||
|
MULT_ADD_LS(gauge0,gauge1,gauge2,gauge3);
|
||||||
|
|
||||||
|
addr0 = (uint64_t) &out._odata[sF];
|
||||||
|
REDUCE(addr0);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
assert(0);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#define PERMUTE_DIR3 __asm__ ( \
|
||||||
|
VPERM3(Chi_00,Chi_00) \
|
||||||
|
VPERM3(Chi_01,Chi_01) \
|
||||||
|
VPERM3(Chi_02,Chi_02) );
|
||||||
|
|
||||||
|
#define PERMUTE_DIR2 __asm__ ( \
|
||||||
|
VPERM2(Chi_10,Chi_10) \
|
||||||
|
VPERM2(Chi_11,Chi_11) \
|
||||||
|
VPERM2(Chi_12,Chi_12) );
|
||||||
|
|
||||||
|
#define PERMUTE_DIR1 __asm__ ( \
|
||||||
|
VPERM1(Chi_00,Chi_00) \
|
||||||
|
VPERM1(Chi_01,Chi_01) \
|
||||||
|
VPERM1(Chi_02,Chi_02) );
|
||||||
|
|
||||||
|
#define PERMUTE_DIR0 __asm__ ( \
|
||||||
|
VPERM0(Chi_10,Chi_10) \
|
||||||
|
VPERM0(Chi_11,Chi_11) \
|
||||||
|
VPERM0(Chi_12,Chi_12) );
|
||||||
|
|
||||||
|
#define PERMUTE01 \
|
||||||
|
if ( p0 ) { PERMUTE_DIR3; }\
|
||||||
|
if ( p1 ) { PERMUTE_DIR2; }
|
||||||
|
|
||||||
|
#define PERMUTE23 \
|
||||||
|
if ( p2 ) { PERMUTE_DIR1; }\
|
||||||
|
if ( p3 ) { PERMUTE_DIR0; }
|
||||||
|
|
||||||
|
// This is the single precision 5th direction vectorised kernel
|
||||||
|
|
||||||
|
#include <simd/Intel512single.h>
|
||||||
|
template <> void StaggeredKernels<StaggeredImplF>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
|
||||||
|
DoubledGaugeField &U,
|
||||||
|
DoubledGaugeField &UUU,
|
||||||
|
SiteSpinor *buf, int LLs,
|
||||||
|
int sU, const FermionField &in, FermionField &out)
|
||||||
|
{
|
||||||
|
#ifdef AVX512
|
||||||
|
uint64_t gauge0,gauge1,gauge2,gauge3;
|
||||||
|
uint64_t addr0,addr1,addr2,addr3;
|
||||||
|
const SiteSpinor *in_p; in_p = &in._odata[0];
|
||||||
|
|
||||||
|
int o0,o1,o2,o3; // offsets
|
||||||
|
int l0,l1,l2,l3; // local
|
||||||
|
int p0,p1,p2,p3; // perm
|
||||||
|
int ptype;
|
||||||
|
StencilEntry *SE0;
|
||||||
|
StencilEntry *SE1;
|
||||||
|
StencilEntry *SE2;
|
||||||
|
StencilEntry *SE3;
|
||||||
|
|
||||||
|
for(int s=0;s<LLs;s++){
|
||||||
|
|
||||||
|
int sF=s+LLs*sU;
|
||||||
|
// Xp, Yp, Zp, Tp
|
||||||
|
PREPARE(Xp,Yp,Zp,Tp,0,U);
|
||||||
|
LOAD_CHIa(addr0,addr1);
|
||||||
|
PERMUTE01;
|
||||||
|
MULT_XYZT(gauge0,gauge1);
|
||||||
|
LOAD_CHIa(addr2,addr3);
|
||||||
|
      PERMUTE23;
      MULT_ADD_XYZT(gauge2,gauge3);

      PREPARE(Xm,Ym,Zm,Tm,0,U);
      LOAD_CHIa(addr0,addr1);
      PERMUTE01;
      MULT_ADD_XYZT(gauge0,gauge1);
      LOAD_CHIa(addr2,addr3);
      PERMUTE23;
      MULT_ADD_XYZT(gauge2,gauge3);

      PREPARE(Xp,Yp,Zp,Tp,8,UUU);
      LOAD_CHIa(addr0,addr1);
      PERMUTE01;
      MULT_ADD_XYZT(gauge0,gauge1);
      LOAD_CHIa(addr2,addr3);
      PERMUTE23;
      MULT_ADD_XYZT(gauge2,gauge3);

      PREPARE(Xm,Ym,Zm,Tm,8,UUU);
      LOAD_CHIa(addr0,addr1);
      PERMUTE01;
      MULT_ADD_XYZT(gauge0,gauge1);
      LOAD_CHIa(addr2,addr3);
      PERMUTE23;
      MULT_ADD_XYZT(gauge2,gauge3);

      addr0 = (uint64_t) &out._odata[sF];
      REDUCEa(addr0);
    }
#else
  assert(0);
#endif
}

#include <simd/Intel512double.h>

template <> void StaggeredKernels<StaggeredImplD>::DhopSiteAsm(StencilImpl &st, LebesgueOrder &lo,
                                                               DoubledGaugeField &U,
                                                               DoubledGaugeField &UUU,
                                                               SiteSpinor *buf, int LLs,
                                                               int sU, const FermionField &in, FermionField &out)
{
#ifdef AVX512
  uint64_t gauge0,gauge1,gauge2,gauge3;
  uint64_t addr0,addr1,addr2,addr3;

  const SiteSpinor *in_p; in_p = &in._odata[0];

  int o0,o1,o2,o3; // offsets
  int l0,l1,l2,l3; // local
  int p0,p1,p2,p3; // perm
  int ptype;
  StencilEntry *SE0;
  StencilEntry *SE1;
  StencilEntry *SE2;
  StencilEntry *SE3;

  for(int s=0;s<LLs;s++){

    int sF=s+LLs*sU;
    // Xp, Yp, Zp, Tp
    PREPARE(Xp,Yp,Zp,Tp,0,U);
    LOAD_CHIa(addr0,addr1);
    PERMUTE01;
    MULT_XYZT(gauge0,gauge1);
    LOAD_CHIa(addr2,addr3);
    PERMUTE23;
    MULT_ADD_XYZT(gauge2,gauge3);

    PREPARE(Xm,Ym,Zm,Tm,0,U);
    LOAD_CHIa(addr0,addr1);
    PERMUTE01;
    MULT_ADD_XYZT(gauge0,gauge1);
    LOAD_CHIa(addr2,addr3);
    PERMUTE23;
    MULT_ADD_XYZT(gauge2,gauge3);

    PREPARE(Xp,Yp,Zp,Tp,8,UUU);
    LOAD_CHIa(addr0,addr1);
    PERMUTE01;
    MULT_ADD_XYZT(gauge0,gauge1);
    LOAD_CHIa(addr2,addr3);
    PERMUTE23;
    MULT_ADD_XYZT(gauge2,gauge3);

    PREPARE(Xm,Ym,Zm,Tm,8,UUU);
    LOAD_CHIa(addr0,addr1);
    PERMUTE01;
    MULT_ADD_XYZT(gauge0,gauge1);
    LOAD_CHIa(addr2,addr3);
    PERMUTE23;
    MULT_ADD_XYZT(gauge2,gauge3);

    addr0 = (uint64_t) &out._odata[sF];
    REDUCEa(addr0);
  }
#else
  assert(0);
#endif
}

#define KERNEL_INSTANTIATE(CLASS,FUNC,IMPL) \
template void CLASS<IMPL>::FUNC(StencilImpl &st, LebesgueOrder &lo, \
                                DoubledGaugeField &U, \
                                DoubledGaugeField &UUU, \
                                SiteSpinor *buf, int LLs, \
                                int sU, const FermionField &in, FermionField &out);

KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplD);
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredImplF);
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredVec5dImplD);
KERNEL_INSTANTIATE(StaggeredKernels,DhopSiteAsm,StaggeredVec5dImplF);

}}
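The macro pipeline above is dense, so here is a standalone scalar sketch of what one PREPARE / LOAD_CHIa / MULT_ADD_XYZT / REDUCEa pass computes per site. This is my own illustration, not Grid code; multAdd and dhopSite are invented names, and the SIMD vectorisation, addressing, and permutes are deliberately omitted.

// Scalar analogue (illustration only): accumulate U(x,mu)*chi over the eight
// one-link directions and UUU(x,mu)*chi over the eight three-link (Naik)
// directions, then store the reduced result, which is the job of REDUCEa.
#include <array>
#include <complex>

using Colour = std::array<std::complex<double>,3>;
using Link   = std::array<std::array<std::complex<double>,3>,3>;

// y += M * x : what each MULT_ADD_XYZT contributes for one direction.
static void multAdd(const Link &M, const Colour &x, Colour &y) {
  for (int r=0; r<3; r++)
    for (int c=0; c<3; c++)
      y[r] += M[r][c] * x[c];
}

Colour dhopSite(const std::array<Link,8>   &U,        // one-link fields
                const std::array<Link,8>   &UUU,      // three-link fields
                const std::array<Colour,8> &chiOne,   // chi(x +/- mu)
                const std::array<Colour,8> &chiThree) // chi(x +/- 3mu)
{
  Colour acc{};                        // zero-initialised accumulator
  for (int mu=0; mu<8; mu++) {
    multAdd(U[mu],   chiOne[mu],   acc);
    multAdd(UUU[mu], chiThree[mu], acc);
  }
  return acc;                          // REDUCEa stores this to out._odata[sF]
}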
322
lib/qcd/action/fermion/StaggeredKernelsHand.cc
Normal file
@ -0,0 +1,322 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/StaggeredKernelsHand.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid.h>
#define REGISTER

#define LOAD_CHI(b)                             \
  const SiteSpinor & ref (b[offset]);           \
  Chi_0=ref()()(0);\
  Chi_1=ref()()(1);\
  Chi_2=ref()()(2);

// To splat or not to splat depends on the implementation
#define MULT(A,UChi)                            \
  auto & ref(U._odata[sU](A));                  \
  Impl::loadLinkElement(U_00,ref()(0,0));       \
  Impl::loadLinkElement(U_10,ref()(1,0));       \
  Impl::loadLinkElement(U_20,ref()(2,0));       \
  Impl::loadLinkElement(U_01,ref()(0,1));       \
  Impl::loadLinkElement(U_11,ref()(1,1));       \
  Impl::loadLinkElement(U_21,ref()(2,1));       \
  Impl::loadLinkElement(U_02,ref()(0,2));       \
  Impl::loadLinkElement(U_12,ref()(1,2));       \
  Impl::loadLinkElement(U_22,ref()(2,2));       \
  UChi ## _0  = U_00*Chi_0;\
  UChi ## _1  = U_10*Chi_0;\
  UChi ## _2  = U_20*Chi_0;\
  UChi ## _0 += U_01*Chi_1;\
  UChi ## _1 += U_11*Chi_1;\
  UChi ## _2 += U_21*Chi_1;\
  UChi ## _0 += U_02*Chi_2;\
  UChi ## _1 += U_12*Chi_2;\
  UChi ## _2 += U_22*Chi_2;

#define MULT_ADD(A,UChi)                        \
  auto & ref(U._odata[sU](A));                  \
  Impl::loadLinkElement(U_00,ref()(0,0));       \
  Impl::loadLinkElement(U_10,ref()(1,0));       \
  Impl::loadLinkElement(U_20,ref()(2,0));       \
  Impl::loadLinkElement(U_01,ref()(0,1));       \
  Impl::loadLinkElement(U_11,ref()(1,1));       \
  Impl::loadLinkElement(U_21,ref()(2,1));       \
  Impl::loadLinkElement(U_02,ref()(0,2));       \
  Impl::loadLinkElement(U_12,ref()(1,2));       \
  Impl::loadLinkElement(U_22,ref()(2,2));       \
  UChi ## _0 += U_00*Chi_0;\
  UChi ## _1 += U_10*Chi_0;\
  UChi ## _2 += U_20*Chi_0;\
  UChi ## _0 += U_01*Chi_1;\
  UChi ## _1 += U_11*Chi_1;\
  UChi ## _2 += U_21*Chi_1;\
  UChi ## _0 += U_02*Chi_2;\
  UChi ## _1 += U_12*Chi_2;\
  UChi ## _2 += U_22*Chi_2;


#define PERMUTE_DIR(dir)                        \
  permute##dir(Chi_0,Chi_0);\
  permute##dir(Chi_1,Chi_1);\
  permute##dir(Chi_2,Chi_2);
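The `UChi ## _0` and `permute##dir` constructs above rely on preprocessor token pasting, which is how MULT(Xp,even) targets the even_0..even_2 accumulators and PERMUTE_DIR(3) resolves to permute3. A minimal self-contained demonstration of the idiom (my sketch, hypothetical names):

#include <cstdio>

#define DECLARE_TRIPLE(name)   double name##_0, name##_1, name##_2;
#define SET_TRIPLE(name,a,b,c) name##_0=(a); name##_1=(b); name##_2=(c);

int main(void) {
  DECLARE_TRIPLE(even)            // expands to: double even_0, even_1, even_2;
  SET_TRIPLE(even, 1.0, 2.0, 3.0) // writes the three spliced identifiers
  std::printf("%g %g %g\n", even_0, even_1, even_2);
  return 0;
}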
namespace Grid {
namespace QCD {


template <class Impl>
void StaggeredKernels<Impl>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,DoubledGaugeField &UUU,
                                          SiteSpinor *buf, int LLs,
                                          int sU, const FermionField &in, FermionField &out, int dag)
{
  SiteSpinor naik;
  SiteSpinor naive;
  int oneLink  =0;
  int threeLink=1;
  int skew(0);
  Real scale(1.0);

  if(dag) scale = -1.0;

  for(int s=0;s<LLs;s++){
    int sF=s+LLs*sU;
    DhopSiteDepthHand(st,lo,U,buf,sF,sU,in,naive,oneLink);
    DhopSiteDepthHand(st,lo,UUU,buf,sF,sU,in,naik,threeLink);
    out._odata[sF] =scale*(naive+naik);
  }
}

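In operator form, the loop body assembles (schematically; my summary, with the staggered phases and improvement coefficients already absorbed into the doubled one-link field U and three-link Naik field UUU):

\[
  (D\,\chi)(x) \;=\; \sum_{\mu=\pm 1}^{\pm 4}
      \Big[\, U_\mu(x)\,\chi(x+\hat\mu) \;+\; U^{(3)}_\mu(x)\,\chi(x+3\hat\mu) \,\Big],
  \qquad D^\dagger = -D .
\]

The anti-hermiticity of the hopping term is why the dagger case is handled by the single overall scale = -1.0 rather than by a second kernel.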
template <class Impl>
void StaggeredKernels<Impl>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U,
                                               SiteSpinor *buf, int sF,
                                               int sU, const FermionField &in, SiteSpinor &out,int threeLink)
{
  typedef typename Simd::scalar_type S;
  typedef typename Simd::vector_type V;

  REGISTER Simd even_0; // 12 regs on knc
  REGISTER Simd even_1;
  REGISTER Simd even_2;
  REGISTER Simd odd_0;  // 12 regs on knc
  REGISTER Simd odd_1;
  REGISTER Simd odd_2;

  REGISTER Simd Chi_0;  // two spinor; 6 regs
  REGISTER Simd Chi_1;
  REGISTER Simd Chi_2;

  REGISTER Simd U_00;   // two rows of U matrix
  REGISTER Simd U_10;
  REGISTER Simd U_20;
  REGISTER Simd U_01;
  REGISTER Simd U_11;
  REGISTER Simd U_21;   // 2 reg left.
  REGISTER Simd U_02;
  REGISTER Simd U_12;
  REGISTER Simd U_22;

  int skew = 0;
  if (threeLink) skew=8;

  int offset,local,perm, ptype;
  StencilEntry *SE;

  // Xp
  SE=st.GetEntry(ptype,Xp+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT(Xp,even);
  }

  // Yp
  SE=st.GetEntry(ptype,Yp+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT(Yp,odd);
  }


  // Zp
  SE=st.GetEntry(ptype,Zp+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Zp,even);
  }

  // Tp
  SE=st.GetEntry(ptype,Tp+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Tp,odd);
  }

  // Xm
  SE=st.GetEntry(ptype,Xm+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(3); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Xm,even);
  }


  // Ym
  SE=st.GetEntry(ptype,Ym+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(2); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Ym,odd);
  }

  // Zm
  SE=st.GetEntry(ptype,Zm+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(1); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Zm,even);
  }

  // Tm
  SE=st.GetEntry(ptype,Tm+skew,sF);
  offset = SE->_offset;
  local  = SE->_is_local;
  perm   = SE->_permute;

  if ( local ) {
    LOAD_CHI(in._odata);
    if ( perm) {
      PERMUTE_DIR(0); // T==0, Z==1, Y==2, X==3 expect 1,2,2,2 simd layout etc...
    }
  } else {
    LOAD_CHI(buf);
  }
  {
    MULT_ADD(Tm,odd);
  }

  vstream(out()()(0),even_0+odd_0);
  vstream(out()()(1),even_1+odd_1);
  vstream(out()()(2),even_2+odd_2);

}

#define DHOP_SITE_HAND_INSTANTIATE(IMPL)                                \
  template void StaggeredKernels<IMPL>::DhopSiteHand(StencilImpl &st, LebesgueOrder &lo, \
                                                     DoubledGaugeField &U,DoubledGaugeField &UUU, \
                                                     SiteSpinor *buf, int LLs, \
                                                     int sU, const FermionField &in, FermionField &out, int dag);

#define DHOP_SITE_DEPTH_HAND_INSTANTIATE(IMPL)                          \
  template void StaggeredKernels<IMPL>::DhopSiteDepthHand(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, \
                                                          SiteSpinor *buf, int sF, \
                                                          int sU, const FermionField &in, SiteSpinor &out,int threeLink) ;

DHOP_SITE_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_HAND_INSTANTIATE(StaggeredVec5dImplF);

DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredImplF);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplD);
DHOP_SITE_DEPTH_HAND_INSTANTIATE(StaggeredVec5dImplF);

}}
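These macros are the standard explicit-instantiation pattern for templates whose definitions live in a .cc file rather than a header. A minimal version of the same trick (my sketch, hypothetical names; header and source shown together):

// kern.h -- declaration only; the definition is deliberately not visible here.
template <class Impl> struct Kern { static void apply(); };

// kern.cc -- definition plus explicit instantiations, one per supported Impl.
#include <cstdio>
template <class Impl> void Kern<Impl>::apply() { std::printf("%zu\n", sizeof(Impl)); }

#define KERN_INSTANTIATE(IMPL) template struct Kern<IMPL>;
KERN_INSTANTIATE(float)   // emits Kern<float>::apply into this object file
KERN_INSTANTIATE(double)  // emits Kern<double>::apply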
@ -171,6 +171,8 @@ namespace QCD {
  class WilsonStencil : public CartesianStencil<vobj,cobj> {
  public:

    typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

    WilsonStencil(GridBase *grid,
                  int npoints,
                  int checkerboard,
@ -179,75 +181,74 @@ namespace QCD {
    { };

    template < class compressor>
    void HaloExchangeOpt(const Lattice<vobj> &source,compressor &compress)
    {
      std::vector<std::vector<CommsRequest_t> > reqs;
      HaloExchangeOptGather(source,compress);
      this->CommunicateBegin(reqs);
      this->calls++;
      this->CommunicateComplete(reqs);
      this->CommsMerge();
    }

    template < class compressor>
    void HaloExchangeOptGather(const Lattice<vobj> &source,compressor &compress)
    {
      this->calls++;
      this->Mergers.resize(0);
      this->Packets.resize(0);
      this->HaloGatherOpt(source,compress);
    }

    template < class compressor>
    void HaloGatherOpt(const Lattice<vobj> &source,compressor &compress)
    {
      this->_grid->StencilBarrier();
      // conformable(source._grid,_grid);
      assert(source._grid==this->_grid);
      this->halogtime-=usecond();

      this->u_comm_offset=0;

      int dag = compress.dag;

      WilsonXpCompressor<cobj,vobj> XpCompress;
      WilsonYpCompressor<cobj,vobj> YpCompress;
      WilsonZpCompressor<cobj,vobj> ZpCompress;
      WilsonTpCompressor<cobj,vobj> TpCompress;
      WilsonXmCompressor<cobj,vobj> XmCompress;
      WilsonYmCompressor<cobj,vobj> YmCompress;
      WilsonZmCompressor<cobj,vobj> ZmCompress;
      WilsonTmCompressor<cobj,vobj> TmCompress;

      // Gather all comms buffers
      // for(int point = 0 ; point < _npoints; point++) {
      //   compress.Point(point);
      //   HaloGatherDir(source,compress,point,face_idx);
      // }
      int face_idx=0;
      if ( dag ) {
        // std::cout << " Optimised Dagger compress " <<std::endl;
        this->HaloGatherDir(source,XpCompress,Xp,face_idx);
        this->HaloGatherDir(source,YpCompress,Yp,face_idx);
        this->HaloGatherDir(source,ZpCompress,Zp,face_idx);
        this->HaloGatherDir(source,TpCompress,Tp,face_idx);
        this->HaloGatherDir(source,XmCompress,Xm,face_idx);
        this->HaloGatherDir(source,YmCompress,Ym,face_idx);
        this->HaloGatherDir(source,ZmCompress,Zm,face_idx);
        this->HaloGatherDir(source,TmCompress,Tm,face_idx);
      } else {
        this->HaloGatherDir(source,XmCompress,Xp,face_idx);
        this->HaloGatherDir(source,YmCompress,Yp,face_idx);
        this->HaloGatherDir(source,ZmCompress,Zp,face_idx);
        this->HaloGatherDir(source,TmCompress,Tp,face_idx);
        this->HaloGatherDir(source,XpCompress,Xm,face_idx);
        this->HaloGatherDir(source,YpCompress,Ym,face_idx);
        this->HaloGatherDir(source,ZpCompress,Zm,face_idx);
        this->HaloGatherDir(source,TpCompress,Tm,face_idx);
      }
      this->face_table_computed=1;
      assert(this->u_comm_offset==this->_unified_buffer_size);
      this->halogtime+=usecond();
    }
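The refactor above splits the halo exchange into gather / begin / complete / merge phases precisely so that a caller can slide compute between begin and complete. A standalone sketch of the shape of that protocol (my illustration; Face, gather, commsMerge, and the std::async stand-in for MPI traffic are all hypothetical):

#include <future>
#include <vector>

struct Face { std::vector<double> data; };   // stands in for Packets/Mergers

struct HaloExchangeSketch {
  std::vector<Face> packets;

  void gather()     { /* pack boundary sites into packets */ }
  void commsMerge() { /* unpack received faces into the halo */ }

  std::future<void> communicateBegin() {     // post the sends/receives
    return std::async(std::launch::async, []{ /* MPI traffic here */ });
  }
  void communicateComplete(std::future<void> &req) { req.get(); }  // wait

  void exchange() {                          // HaloExchangeOpt-style composition
    gather();
    auto req = communicateBegin();
    // ... interior compute could be overlapped here ...
    communicateComplete(req);
    commsMerge();
  }
};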
@ -1,3 +1,4 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

@ -29,15 +30,14 @@ See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/WilsonFermion.h>

namespace Grid {
namespace QCD {

const std::vector<int> WilsonFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3});
const std::vector<int> WilsonFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1});
int WilsonFermionStatic::HandOptDslash;

/////////////////////////////////
@ -52,10 +52,8 @@ WilsonFermion<Impl>::WilsonFermion(GaugeField &_Umu, GridCartesian &Fgrid,
      _grid(&Fgrid),
      _cbgrid(&Hgrid),
      Stencil(&Fgrid, npoint, Even, directions, displacements),
      StencilEven(&Hgrid, npoint, Even, directions,displacements), // source is Even
      StencilOdd(&Hgrid, npoint, Odd, directions,displacements),   // source is Odd
      mass(_mass),
      Lebesgue(_grid),
      LebesgueEvenOdd(_cbgrid),
@ -113,42 +111,40 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  }
}

template <class Impl>
void WilsonFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(4.0 + mass);
  out = scal * in;
}

template <class Impl>
void WilsonFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0/(4.0+mass))*in;
}

template<class Impl>
void WilsonFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in,out);
}

template<class Impl>
void WilsonFermion<Impl>::MomentumSpacePropagator(FermionField &out, const FermionField &in,RealD _m)
{
  typedef typename FermionField::vector_type vector_type;
  typedef typename FermionField::scalar_type ScalComplex;
  typedef Lattice<iSinglet<vector_type> > LatComplex;

  // what type LatticeComplex
  conformable(_grid,out._grid);

  Gamma::Algebra Gmu [] = {
    Gamma::Algebra::GammaX,
    Gamma::Algebra::GammaY,
@ -191,7 +187,7 @@ void WilsonFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {

  out = num*denom; // [ -i gmu sin k + 2 sin^2 k/2 + m] / [ sin^2 k + (2 sin^2 k/2 + m)^2 ]

}


///////////////////////////////////
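Written out, the comment on the `out = num*denom` line is the free Wilson propagator in lattice momentum space (my transcription of the in-code formula, in lattice units):

\[
  \tilde S(k) \;=\;
  \frac{-\,i\sum_\mu \gamma_\mu \sin k_\mu \;+\; \Big( 2\sum_\mu \sin^2\!\tfrac{k_\mu}{2} + m \Big)}
       {\sum_\mu \sin^2 k_\mu \;+\; \Big( 2\sum_\mu \sin^2\!\tfrac{k_\mu}{2} + m \Big)^{\!2}} .
\]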
@ -222,9 +218,8 @@ void WilsonFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U,
  ////////////////////////
  // Call the single hop
  ////////////////////////
  parallel_for (int sss = 0; sss < B._grid->oSites(); sss++) {
    Kernels::DhopDir(st, U, st.CommBuf(), sss, sss, B, Btilde, mu, gamma);
  }

  //////////////////////////////////////////////////
@ -273,8 +268,7 @@ void WilsonFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, co
}

template <class Impl>
void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _grid);  // verifies full grid
  conformable(in._grid, out._grid);

@ -284,8 +278,7 @@ void WilsonFermion<Impl>::Dhop(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

@ -296,8 +289,7 @@ void WilsonFermion<Impl>::DhopOE(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

@ -308,14 +300,12 @@ void WilsonFermion<Impl>::DhopEO(const FermionField &in, FermionField &out,
}

template <class Impl>
void WilsonFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template <class Impl>
void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) {
  int skip = (disp == 1) ? 0 : 1;
  int dirdisp = dir + skip * 4;
  int gamma = dir + (1 - skip) * 4;
@ -324,16 +314,13 @@ void WilsonFermion<Impl>::DhopDir(const FermionField &in, FermionField &out,
};

template <class Impl>
void WilsonFermion<Impl>::DhopDirDisp(const FermionField &in, FermionField &out,int dirdisp, int gamma, int dag) {
  Compressor compressor(dag);

  Stencil.HaloExchange(in, compressor);

  parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DhopDir(Stencil, Umu, Stencil.CommBuf(), sss, sss, in, out, dirdisp, gamma);
  }
};

@ -348,16 +335,12 @@ void WilsonFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
  st.HaloExchange(in, compressor);

  if (dag == DaggerYes) {
    parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSiteDag(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
    }
  } else {
    parallel_for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSite(st, lo, U, st.CommBuf(), sss, sss, 1, 1, in, out);
    }
  }
};
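The recurring mechanical change in this file is PARALLEL_FOR_LOOP plus a plain for collapsing into parallel_for. Grid's actual definition lives in its own threading header; a simplified stand-in (my sketch) shows why the two spellings are equivalent:

#ifdef _OPENMP
#define parallel_for _Pragma("omp parallel for") for
#else
#define parallel_for for          // serial fallback when OpenMP is absent
#endif

#include <cstdio>

int main(void) {
  double sum[8] = {0};
  parallel_for (int ss = 0; ss < 8; ss++) {   // iterations are independent
    sum[ss] = 2.0 * ss;
  }
  for (int ss = 0; ss < 8; ss++) std::printf("%g ", sum[ss]);
  std::printf("\n");
  return 0;
}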
@ -30,8 +30,9 @@ Author: Guido Cossu <guido.cossu@ed.ac.uk>
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
#include <Grid/perfmon/PerfCount.h>

namespace Grid {
namespace QCD {
@ -64,71 +65,55 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
  LebesgueEvenOdd(_FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
{
  // some assertions
  assert(FiveDimGrid._ndimension==5);
  assert(FourDimGrid._ndimension==4);
  assert(FourDimRedBlackGrid._ndimension==4);
  assert(FiveDimRedBlackGrid._ndimension==5);
  assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction

  // extent of fifth dim and not spread out
  Ls=FiveDimGrid._fdimensions[0];
  assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
  assert(FiveDimGrid._processors[0]         ==1);
  assert(FiveDimRedBlackGrid._processors[0] ==1);

  // Other dimensions must match the decomposition of the four-D fields
  for(int d=0;d<4;d++){

    assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
    assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);
    assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);

    assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
    assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
    assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);

    assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);
    assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
  }

  if (Impl::LsVectorised) {

    int nsimd = Simd::Nsimd();

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimGrid._simd_layout[0]        ==nsimd);
    assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd);

    for(int d=0;d<4;d++){
      assert(FourDimGrid._simd_layout[d]==1);
      assert(FourDimRedBlackGrid._simd_layout[d]==1);
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==1);
    }

  } else {

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimRedBlackGrid._simd_layout[0]==1);
    assert(FiveDimGrid._simd_layout[0]        ==1);

  }

  // Allocate the required comms buffer
@ -141,34 +126,37 @@ void WilsonFermion5D<Impl>::Report(void)
  std::vector<int> latt = GridDefaultLatt();
  RealD volume = Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
  RealD NP = _FourDimGrid->_Nprocessors;
  RealD NN = _FourDimGrid->NodeCount();

  if ( DhopCalls > 0 ) {
    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of DhopEO Calls   : " << DhopCalls << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D TotalTime   /Calls       : " << DhopTotalTime   / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime    /Calls       : " << DhopCommTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D FaceTime    /Calls       : " << DhopFaceTime    / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime1/Calls       : " << DhopComputeTime / DhopCalls << " us" << std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime2/Calls       : " << DhopComputeTime2/ DhopCalls << " us" << std::endl;

    // Average the compute time
    _FourDimGrid->GlobalSum(DhopComputeTime);
    DhopComputeTime/=NP;
    RealD mflops = 1344*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call                : " << mflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank       : " << mflops/NP << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node       : " << mflops/NN << std::endl;

    RealD Fullmflops = 1344*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
    std::cout << GridLogMessage << "Average mflops/s per call (full)         : " << Fullmflops << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
    std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;

  }

  if ( DerivCalls > 0 ) {
    std::cout << GridLogMessage << "#### Deriv calls report "<< std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Number of Deriv Calls    : " <<DerivCalls <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D CommTime/Calls           : " <<DerivCommTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D ComputeTime/Calls        : " <<DerivComputeTime/DerivCalls<<" us" <<std::endl;
    std::cout << GridLogMessage << "WilsonFermion5D Dhop ComputeTime/Calls   : " <<DerivDhopComputeTime/DerivCalls<<" us" <<std::endl;

    RealD mflops = 144*volume*DerivCalls/DerivDhopComputeTime;
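For reference, the flop-rate lines in Report() compute (my reading of the code; times are in microseconds, so the result lands in Mflop/s):

\[
  \mathrm{mflops} \;=\; \frac{1344 \cdot V \cdot N_{\mathrm{calls}}}{2\,T},
  \qquad V = L_s \prod_{\mu=0}^{3} L_\mu ,
\]

where 1344 is the conventional flop count per site of the Wilson dslash, the factor 2 accounts for red-black checkerboarding, and T is DhopComputeTime for the headline number or DhopTotalTime for the "(full)" variants; dividing by NP or NN then gives the per-rank and per-node figures.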
@ -191,6 +179,9 @@ void WilsonFermion5D<Impl>::ZeroCounters(void) {
  DhopCalls       = 0;
  DhopCommTime    = 0;
  DhopComputeTime = 0;
  DhopComputeTime2= 0;
  DhopFaceTime    = 0;
  DhopTotalTime   = 0;

  DerivCalls       = 0;
  DerivCommTime    = 0;
@ -231,15 +222,11 @@ void WilsonFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,in
  assert(dirdisp<=7);
  assert(dirdisp>=0);

  parallel_for(int ss=0;ss<Umu._grid->oSites();ss++){
    for(int s=0;s<Ls;s++){
      int sU=ss;
      int sF = s+Ls*sU;
      Kernels::DhopDir(Stencil,Umu,Stencil.CommBuf(),sF,sU,in,out,dirdisp,gamma);
    }
  }
};
@ -284,15 +271,19 @@ void WilsonFermion5D<Impl>::DerivInternal(StencilImpl & st,
  ////////////////////////

  DerivDhopComputeTime -= usecond();
  parallel_for (int sss = 0; sss < U._grid->oSites(); sss++) {
    for (int s = 0; s < Ls; s++) {
      int sU = sss;
      int sF = s + Ls * sU;

      assert(sF < B._grid->oSites());
      assert(sU < U._grid->oSites());

      Kernels::DhopDir(st, U, st.CommBuf(), sF, sU, B, Btilde, mu, gamma);

      ////////////////////////////
      // spin trace outer product
      ////////////////////////////
    }
  }
  ////////////////////////////
@ -357,6 +348,86 @@ template<class Impl>
void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
                                         DoubledGaugeField & U,
                                         const FermionField &in, FermionField &out,int dag)
{
  DhopTotalTime-=usecond();
#ifdef GRID_OMP
  if ( WilsonKernelsStatic::Comms == WilsonKernelsStatic::CommsAndCompute )
    DhopInternalOverlappedComms(st,lo,U,in,out,dag);
  else
#endif
    DhopInternalSerialComms(st,lo,U,in,out,dag);
  DhopTotalTime+=usecond();
}

template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, LebesgueOrder &lo,
                                                        DoubledGaugeField & U,
                                                        const FermionField &in, FermionField &out,int dag)
{
#ifdef GRID_OMP
  //  assert((dag==DaggerNo) ||(dag==DaggerYes));
  typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

  Compressor compressor(dag);

  int LLs = in._grid->_rdimensions[0];
  int len =  U._grid->oSites();

  DhopFaceTime-=usecond();
  st.HaloExchangeOptGather(in,compressor);
  DhopFaceTime+=usecond();
  std::vector<std::vector<CommsRequest_t> > reqs;

#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me = omp_get_thread_num();
    int myoff, mywork;

    GridThread::GetWork(len,me-1,mywork,myoff,nthreads-1);
    int sF = LLs * myoff;

    if ( me == 0 ) {
      DhopCommTime-=usecond();
      st.CommunicateBegin(reqs);
      st.CommunicateComplete(reqs);
      DhopCommTime+=usecond();
    } else {
      // Interior links in stencil
      if ( me==1 ) DhopComputeTime-=usecond();
      if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      else                  Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      if ( me==1 ) DhopComputeTime+=usecond();
    }
  }

  DhopFaceTime-=usecond();
  st.CommsMerge();
  DhopFaceTime+=usecond();

#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me = omp_get_thread_num();
    int myoff, mywork;

    GridThread::GetWork(len,me,mywork,myoff,nthreads);
    int sF = LLs * myoff;

    // Exterior links in stencil
    if ( me==0 ) DhopComputeTime2-=usecond();
    if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1);
    else                  Kernels::DhopSite   (st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,0,1);
    if ( me==0 ) DhopComputeTime2+=usecond();
  } // end parallel region
#else
  assert(0);
#endif
}

template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalSerialComms(StencilImpl & st, LebesgueOrder &lo,
                                                    DoubledGaugeField & U,
                                                    const FermionField &in, FermionField &out,int dag)
{
  //  assert((dag==DaggerNo) ||(dag==DaggerYes));
  Compressor compressor(dag);

@ -364,45 +435,23 @@ void WilsonFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
  int LLs = in._grid->_rdimensions[0];

  DhopCommTime-=usecond();
  st.HaloExchangeOpt(in,compressor);
  DhopCommTime+=usecond();

  DhopComputeTime-=usecond();
  // Dhop takes the 4d grid from U, and makes a 5d index for fermion
  if (dag == DaggerYes) {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
  } else {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU = ss;
      int sF = LLs * sU;
      Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out);
    }
  }
  DhopComputeTime+=usecond();
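The overlapped path above dedicates OpenMP thread 0 to communication while the remaining threads split the interior sites, then lets every thread join a second pass over the exterior once the halo has arrived. A standalone OpenMP sketch of that partitioning (my illustration; getWork stands in for GridThread::GetWork; compile with -fopenmp):

#include <omp.h>
#include <cstdio>
#include <vector>

// Contiguous chunking: worker `me` of `nthreads` gets [off, off+work).
static void getWork(int len, int me, int nthreads, int &work, int &off) {
  int chunk = (len + nthreads - 1) / nthreads;
  off  = me * chunk;
  work = (off >= len) ? 0 : (off + chunk > len ? len - off : chunk);
}

int main(void) {
  const int len = 1024;
  std::vector<double> field(len, 1.0);

#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me       = omp_get_thread_num();
    int work, off;
    if (me == 0) {
      /* communicate: post + wait for halo traffic (thread 0 only) */
    } else {
      // interior split over threads 1..nthreads-1; like the kernel above,
      // this phase assumes at least two threads are available
      getWork(len, me-1, nthreads-1, work, off);
      for (int i = off; i < off+work; i++) field[i] *= 2.0;
    }
  }
  /* the CommsMerge() equivalent would run here */
#pragma omp parallel
  {
    int nthreads = omp_get_num_threads();
    int me       = omp_get_thread_num();
    int work, off;
    getWork(len, me, nthreads, work, off);   // exterior: all threads
    for (int i = off; i < off+work; i++) field[i] += 1.0;
  }
  std::printf("field[0] = %g\n", field[0]);
  return 0;
}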
@ -31,7 +31,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#ifndef  GRID_QCD_WILSON_FERMION_5D_H
#define  GRID_QCD_WILSON_FERMION_5D_H

#include <Grid/perfmon/Stat.h>

namespace Grid {
namespace QCD {
@ -82,6 +82,9 @@ namespace QCD {
    double DhopCalls;
    double DhopCommTime;
    double DhopComputeTime;
    double DhopComputeTime2;
    double DhopFaceTime;
    double DhopTotalTime;

    double DerivCalls;
    double DerivCommTime;
@ -146,6 +149,20 @@ namespace QCD {
                          FermionField &out,
                          int dag);

    void DhopInternalOverlappedComms(StencilImpl & st,
                                     LebesgueOrder &lo,
                                     DoubledGaugeField &U,
                                     const FermionField &in,
                                     FermionField &out,
                                     int dag);

    void DhopInternalSerialComms(StencilImpl & st,
                                 LebesgueOrder &lo,
                                 DoubledGaugeField &U,
                                 const FermionField &in,
                                 FermionField &out,
                                 int dag);

    // Constructors
    WilsonFermion5D(GaugeField &_Umu,
                    GridCartesian         &FiveDimGrid,
Some files were not shown because too many files have changed in this diff.