commit e4a105a30b
Merge branch 'feature/qed-fvol' of https://github.com/paboyle/Grid into feature/qed-fvol

File: .gitignore (vendored; 14 lines added)
@@ -104,4 +104,16 @@ lib/fftw/*
 # libtool macros #
 ##################
 m4/lt*
 m4/libtool.m4
+
+# Buck files #
+##############
+.buck*
+buck-out
+BUCK
+make-bin-BUCK.sh
+
+# generated sources #
+#####################
+lib/qcd/spin/gamma-gen/*.h
+lib/qcd/spin/gamma-gen/*.cc

File: .travis.yml (inferred from context; the replacement for the removed line was not preserved in this capture)

@@ -102,5 +102,5 @@ script:
     - ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=mpi-auto
     - make -j4
     - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then mpirun.openmpi -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi
-    - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then mpirun -n 2 ./benchmarks/Benchmark_dwf --threads 1 --mpi 2.1.1.1; fi

File: benchmarks/Benchmark_comms.cc (inferred from context)

@@ -48,9 +48,9 @@ int main (int argc, char ** argv)
   std::cout<<GridLogMessage << "= Benchmarking concurrent halo exchange in "<<nmu<<" dimensions"<<std::endl;
   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
   std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;
-  int maxlat=16;
-  for(int lat=4;lat<=maxlat;lat+=2){
-    for(int Ls=1;Ls<=16;Ls*=2){
+  int maxlat=24;
+  for(int lat=4;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=32;Ls*=2){

       std::vector<int> latt_size  ({lat*mpi_layout[0],
                                     lat*mpi_layout[1],
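
Note: the sweep above changes from lat = 4,6,...,16 with Ls = 1,2,4,8,16 (7 x 5 = 35 points) to lat = 4,8,...,24 with Ls = 8,16,32 (6 x 3 = 18 points) — fewer combinations, pushed towards larger local volumes and production-like Ls. The same change is repeated in the three loops below.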

@@ -124,8 +124,8 @@ int main (int argc, char ** argv)
   std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;


-  for(int lat=4;lat<=maxlat;lat+=2){
-    for(int Ls=1;Ls<=16;Ls*=2){
+  for(int lat=4;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=32;Ls*=2){

       std::vector<int> latt_size  ({lat,lat,lat,lat});

@@ -194,14 +194,14 @@ int main (int argc, char ** argv)
   }


-  Nloop=100;
+  Nloop=10;
   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
   std::cout<<GridLogMessage << "= Benchmarking concurrent STENCIL halo exchange in "<<nmu<<" dimensions"<<std::endl;
   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
   std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

-  for(int lat=4;lat<=maxlat;lat+=2){
-    for(int Ls=1;Ls<=16;Ls*=2){
+  for(int lat=4;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=32;Ls*=2){

       std::vector<int> latt_size  ({lat*mpi_layout[0],
                                     lat*mpi_layout[1],

@@ -281,8 +281,8 @@ int main (int argc, char ** argv)
   std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
   std::cout<<GridLogMessage << " L "<<"\t\t"<<" Ls "<<"\t\t"<<"bytes"<<"\t\t"<<"MB/s uni"<<"\t\t"<<"MB/s bidi"<<std::endl;

-  for(int lat=4;lat<=maxlat;lat+=2){
-    for(int Ls=1;Ls<=16;Ls*=2){
+  for(int lat=4;lat<=maxlat;lat+=4){
+    for(int Ls=8;Ls<=32;Ls*=2){

       std::vector<int> latt_size  ({lat*mpi_layout[0],
                                     lat*mpi_layout[1],

@@ -324,8 +324,8 @@ int main (int argc, char ** argv)
                                       (void *)&rbuf[mu][0],
                                       recv_from_rank,
                                       bytes);
-        //  Grid.StencilSendToRecvFromComplete(requests);
-        //  requests.resize(0);
+        Grid.StencilSendToRecvFromComplete(requests);
+        requests.resize(0);

         comm_proc = mpi_layout[mu]-1;

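
Note: un-commenting StencilSendToRecvFromComplete, and the requests.resize(0) that follows it, means each exchange is now driven to completion and its request list cleared before the reverse-direction transfer runs; as I read it, the timed loop previously measured only the initiation of the stencil transfers.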

File: benchmarks/Benchmark_dwf.cc (inferred from context)

@@ -37,27 +37,27 @@ struct scal {
   d internal;
 };

-  Gamma::GammaMatrix Gmu [] = {
-    Gamma::GammaX,
-    Gamma::GammaY,
-    Gamma::GammaZ,
-    Gamma::GammaT
-  };
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };

 typedef WilsonFermion5D<DomainWallVec5dImplR> WilsonFermion5DR;
 typedef WilsonFermion5D<DomainWallVec5dImplF> WilsonFermion5DF;
 typedef WilsonFermion5D<DomainWallVec5dImplD> WilsonFermion5DD;


 int main (int argc, char ** argv)
 {
   Grid_init(&argc,&argv);


   int threads = GridThread::GetThreads();
   std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;

   std::vector<int> latt4 = GridDefaultLatt();
-  const int Ls=8;
+  const int Ls=16;
   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
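
Note: the Gamma::GammaMatrix -> Gamma::Algebra change recurs in several files below and tracks the move to generated gamma matrices (see the new lib/qcd/spin/gamma-gen entries in .gitignore). A minimal sketch of the usage shift, written as illustration rather than taken from the commit:

    // Old interface: gamma labels were enumerators directly on Gamma.
    //   Gamma::GammaMatrix g = Gamma::GammaX;

    // New interface: labels live in the nested Gamma::Algebra enum,
    // and a Gamma object is constructed from an algebra element.
    Gamma::Algebra label = Gamma::Algebra::GammaX;
    Gamma gx(label);          // then multiply as before: gx * psi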

@@ -71,35 +71,66 @@ int main (int argc, char ** argv)

   std::vector<int> seeds4({1,2,3,4});
   std::vector<int> seeds5({5,6,7,8});

+  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
   GridParallelRNG          RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);
+  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
   GridParallelRNG          RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
+  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;

   LatticeFermion src   (FGrid); random(RNG5,src);
+#if 0
+  src = zero;
+  {
+    std::vector<int> origin({0,0,0,latt4[2]-1,0});
+    SpinColourVectorF tmp;
+    tmp=zero;
+    tmp()(0)(0)=Complex(-2.0,0.0);
+    std::cout << " source site 0 " << tmp<<std::endl;
+    pokeSite(tmp,src,origin);
+  }
+#else
+  RealD N2 = 1.0/::sqrt(norm2(src));
+  src = src*N2;
+#endif


   LatticeFermion result(FGrid); result=zero;
   LatticeFermion    ref(FGrid);    ref=zero;
   LatticeFermion    tmp(FGrid);
   LatticeFermion    err(FGrid);

+  std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
   LatticeGaugeField Umu(UGrid);
-  random(RNG4,Umu);
+  SU3::HotConfiguration(RNG4,Umu);
+  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
-  LatticeGaugeField Umu5d(FGrid);
+#if 0
+  Umu=1.0;
+  for(int mu=0;mu<Nd;mu++){
+    LatticeColourMatrix ttmp(UGrid);
+    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
+    //    if (mu !=2 ) ttmp = 0;
+    //    ttmp = ttmp* pow(10.0,mu);
+    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
+  }
+  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
+#endif

+  ////////////////////////////////////
+  // Naive wilson implementation
+  ////////////////////////////////////
   // replicate across fifth dimension
+  LatticeGaugeField Umu5d(FGrid);
+  std::vector<LatticeColourMatrix> U(4,FGrid);
   for(int ss=0;ss<Umu._grid->oSites();ss++){
     for(int s=0;s<Ls;s++){
       Umu5d._odata[Ls*ss+s] = Umu._odata[ss];
     }
   }

-  ////////////////////////////////////
-  // Naive wilson implementation
-  ////////////////////////////////////
-  std::vector<LatticeColourMatrix> U(4,FGrid);
   for(int mu=0;mu<Nd;mu++){
     U[mu] = PeekIndex<LorentzIndex>(Umu5d,mu);
   }
+  std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;

   if (1)
   {

@@ -121,6 +152,7 @@ int main (int argc, char ** argv)

   RealD NP = UGrid->_Nprocessors;

+  std::cout << GridLogMessage << "Creating action operator " << std::endl;
   DomainWallFermionR Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);

   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

@@ -136,10 +168,11 @@ int main (int argc, char ** argv)
   if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;

-  int ncall =100;
+  int ncall =1000;
   if (1) {
     FGrid->Barrier();
     Dw.ZeroCounters();
+    Dw.Dhop(src,result,0);
     double t0=usecond();
     for(int i=0;i<ncall;i++){
       __SSC_START;
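
Note: here and in the DhopEO sections below, one untimed operator application is inserted before the first usecond() read, so one-off setup cost does not pollute the average. The pattern, as it appears in the commit:

    Dw.Dhop(src,result,0);     // warm-up call, outside the timed region
    double t0=usecond();
    for(int i=0;i<ncall;i++){
      Dw.Dhop(src,result,0);   // timed region; average = (t1-t0)/ncall microseconds
    }
    double t1=usecond();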

@@ -153,12 +186,22 @@ int main (int argc, char ** argv)
     double flops=1344*volume*ncall;

     std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-    std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+    //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
+    //    std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
     std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
     std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
     err = ref-result;
     std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;

+    /*
+    if(( norm2(err)>1.0e-4) ) {
+      std::cout << "RESULT\n " << result<<std::endl;
+      std::cout << "REF \n " << ref <<std::endl;
+      std::cout << "ERR \n " << err <<std::endl;
+      FGrid->Barrier();
+      exit(-1);
+    }
+    */
     assert (norm2(err)< 1.0e-4 );
     Dw.Report();
   }

@@ -182,21 +225,13 @@ int main (int argc, char ** argv)
     LatticeFermion sresult(sFGrid);

     WilsonFermion5DR sDw(Umu,*sFGrid,*sFrbGrid,*sUGrid,*sUrbGrid,M5);

-    for(int x=0;x<latt4[0];x++){
-      for(int y=0;y<latt4[1];y++){
-        for(int z=0;z<latt4[2];z++){
-          for(int t=0;t<latt4[3];t++){
-            for(int s=0;s<Ls;s++){
-              std::vector<int> site({s,x,y,z,t});
-              SpinColourVector tmp;
-              peekSite(tmp,src,site);
-              pokeSite(tmp,ssrc,site);
-            }}}}}
+    localConvert(src,ssrc);

     std::cout<<GridLogMessage<< "src norms "<< norm2(src)<<" " <<norm2(ssrc)<<std::endl;
     FGrid->Barrier();
-    double t0=usecond();
+    sDw.Dhop(ssrc,sresult,0);
     sDw.ZeroCounters();
+    double t0=usecond();
     for(int i=0;i<ncall;i++){
       __SSC_START;
       sDw.Dhop(ssrc,sresult,0);
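
Note: the five nested per-site peek/poke loops collapse into a single localConvert(src,ssrc) call, which performs the same layout conversion between the standard and s-vectorised fields in one pass. Illustrative use, with the field types from this file:

    LatticeFermion  src(FGrid);    // standard 5d layout
    LatticeFermion ssrc(sFGrid);   // fifth-dimension-vectorised layout
    localConvert(src,ssrc);        // copy src into ssrc, converting layout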

@@ -210,46 +245,47 @@ int main (int argc, char ** argv)
     std::cout<<GridLogMessage << "Called Dw s_inner "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
     std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
     std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
+    //    std::cout<<GridLogMessage<< "res norms "<< norm2(result)<<" " <<norm2(sresult)<<std::endl;
     sDw.Report();

-    if(0){
-      for(int i=0;i< PerformanceCounter::NumTypes(); i++ ){
-        sDw.Dhop(ssrc,sresult,0);
-        PerformanceCounter Counter(i);
-        Counter.Start();
-        sDw.Dhop(ssrc,sresult,0);
-        Counter.Stop();
-        Counter.Report();
-      }
-    }
-
-    std::cout<<GridLogMessage<< "res norms "<< norm2(result)<<" " <<norm2(sresult)<<std::endl;
-
     RealD sum=0;
-    for(int x=0;x<latt4[0];x++){
-      for(int y=0;y<latt4[1];y++){
-        for(int z=0;z<latt4[2];z++){
-          for(int t=0;t<latt4[3];t++){
-            for(int s=0;s<Ls;s++){
-              std::vector<int> site({s,x,y,z,t});
-              SpinColourVector normal, simd;
-              peekSite(normal,result,site);
-              peekSite(simd,sresult,site);
-              sum=sum+norm2(normal-simd);
-              if (norm2(normal-simd) > 1.0e-6 ) {
-                std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" "<<norm2(normal-simd)<<std::endl;
-                std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" normal "<<normal<<std::endl;
-                std::cout << "site "<<x<<","<<y<<","<<z<<","<<t<<","<<s<<" simd "<<simd<<std::endl;
-              }
-            }}}}}
-    std::cout<<GridLogMessage<<" difference between normal and simd is "<<sum<<std::endl;
-    assert (sum< 1.0e-4 );
-
-    if (1) {
+    err=zero;
+    localConvert(sresult,err);
+    err = err - ref;
+    sum = norm2(err);
+    std::cout<<GridLogMessage<<" difference between normal ref and simd is "<<sum<<std::endl;
+    if(sum > 1.0e-4 ){
+      std::cout<< "sD REF\n " <<ref << std::endl;
+      std::cout<< "sD ERR \n " <<err <<std::endl;
+    }
+    //    assert(sum < 1.0e-4);

+    err=zero;
+    localConvert(sresult,err);
+    err = err - result;
+    sum = norm2(err);
+    std::cout<<GridLogMessage<<" difference between normal result and simd is "<<sum<<std::endl;
+    if(sum > 1.0e-4 ){
+      std::cout<< "sD REF\n " <<result << std::endl;
+      std::cout<< "sD ERR \n " << err <<std::endl;
+    }
+    assert(sum < 1.0e-4);

+    if(1){
+      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
+      std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
+      std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
+      if ( sizeof(Real)==4 ) std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
+      if ( sizeof(Real)==8 ) std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
+      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric )
+        std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
+      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll)
+        std::cout << GridLogMessage<< "* Using Nc=3 WilsonKernels" <<std::endl;
+      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm )
+        std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
+      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

       LatticeFermion sr_eo(sFGrid);

       LatticeFermion ssrc_e (sFrbGrid);
       LatticeFermion ssrc_o (sFrbGrid);
       LatticeFermion sr_e   (sFrbGrid);

@@ -257,33 +293,23 @@ int main (int argc, char ** argv)

       pickCheckerboard(Even,ssrc_e,ssrc);
       pickCheckerboard(Odd,ssrc_o,ssrc);
-      setCheckerboard(sr_eo,ssrc_o);
-      setCheckerboard(sr_eo,ssrc_e);
+      //  setCheckerboard(sr_eo,ssrc_o);
+      //  setCheckerboard(sr_eo,ssrc_e);

       sr_e = zero;
       sr_o = zero;

-      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
-      std::cout << GridLogMessage<< "* Benchmarking WilsonFermion5D<DomainWallVec5dImplR>::DhopEO "<<std::endl;
-      std::cout << GridLogMessage<< "* Vectorising fifth dimension by "<<vComplex::Nsimd()<<std::endl;
-      if ( sizeof(Real)==4 ) std::cout << GridLogMessage<< "* SINGLE precision "<<std::endl;
-      if ( sizeof(Real)==8 ) std::cout << GridLogMessage<< "* DOUBLE precision "<<std::endl;
-      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptGeneric ) std::cout << GridLogMessage<< "* Using GENERIC Nc WilsonKernels" <<std::endl;
-      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptHandUnroll) std::cout << GridLogMessage<< "* Using Nc=3 WilsonKernels" <<std::endl;
-      if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
-      std::cout << GridLogMessage<< "*********************************************************" <<std::endl;

       FGrid->Barrier();
+      sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
       sDw.ZeroCounters();
-      sDw.stat.init("DhopEO");
+      //      sDw.stat.init("DhopEO");
       double t0=usecond();
       for (int i = 0; i < ncall; i++) {
         sDw.DhopEO(ssrc_o, sr_e, DaggerNo);
       }
       double t1=usecond();
       FGrid->Barrier();
-      sDw.stat.print();
+      //      sDw.stat.print();

       double volume=Ls;  for(int mu=0;mu<Nd;mu++) volume=volume*latt4[mu];
       double flops=(1344.0*volume*ncall)/2;

@@ -298,22 +324,26 @@ int main (int argc, char ** argv)

       pickCheckerboard(Even,ssrc_e,sresult);
       pickCheckerboard(Odd ,ssrc_o,sresult);

       ssrc_e = ssrc_e - sr_e;
       RealD error = norm2(ssrc_e);

       std::cout<<GridLogMessage << "sE norm diff "<< norm2(ssrc_e)<< " vec nrm"<<norm2(sr_e) <<std::endl;
-      ssrc_o = ssrc_o - sr_o;

+      ssrc_o = ssrc_o - sr_o;
       error+= norm2(ssrc_o);
       std::cout<<GridLogMessage << "sO norm diff "<< norm2(ssrc_o)<< " vec nrm"<<norm2(sr_o) <<std::endl;
-      if(error>1.0e-4) {
+      if(( error>1.0e-4) ) {
         setCheckerboard(ssrc,ssrc_o);
         setCheckerboard(ssrc,ssrc_e);
-        std::cout<< ssrc << std::endl;
+        std::cout<< "DIFF\n " <<ssrc << std::endl;
+        setCheckerboard(ssrc,sr_o);
+        setCheckerboard(ssrc,sr_e);
+        std::cout<< "CBRESULT\n " <<ssrc << std::endl;
+        std::cout<< "RESULT\n " <<sresult<< std::endl;
       }
+      assert(error<1.0e-4);
     }


   }

   if (1)

@@ -321,28 +351,33 @@ int main (int argc, char ** argv)
     ref = zero;
     for(int mu=0;mu<Nd;mu++){

-      //    ref =  src - Gamma(Gamma::GammaX)* src ; // 1+gamma_x
+      //    ref =  src - Gamma(Gamma::Algebra::GammaX)* src ; // 1+gamma_x
       tmp = U[mu]*Cshift(src,mu+1,1);
       for(int i=0;i<ref._odata.size();i++){
         ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
       }

       tmp =adj(U[mu])*src;
       tmp =Cshift(tmp,mu+1,-1);
       for(int i=0;i<ref._odata.size();i++){
         ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
       }
     }
     ref = -0.5*ref;
   }
+  //  dump=1;
   Dw.Dhop(src,result,1);
   std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
   std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
-  std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-  std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+  std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
+  std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
   err = ref-result;
-  std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-  assert(norm2(err)<1.0e-4);
+  std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
+  if((norm2(err)>1.0e-4)){
+    std::cout<< "DAG RESULT\n " <<ref << std::endl;
+    std::cout<< "DAG sRESULT\n " <<result << std::endl;
+    std::cout<< "DAG ERR \n " << err <<std::endl;
+  }
   LatticeFermion src_e (FrbGrid);
   LatticeFermion src_o (FrbGrid);
   LatticeFermion r_e   (FrbGrid);

@@ -350,13 +385,18 @@ int main (int argc, char ** argv)
   LatticeFermion r_eo  (FGrid);


-  std::cout<<GridLogMessage << "Calling Deo and Doe and assert Deo+Doe == Dunprec"<<std::endl;
+  std::cout<<GridLogMessage << "Calling Deo and Doe and //assert Deo+Doe == Dunprec"<<std::endl;
   pickCheckerboard(Even,src_e,src);
   pickCheckerboard(Odd,src_o,src);

   std::cout<<GridLogMessage << "src_e"<<norm2(src_e)<<std::endl;
   std::cout<<GridLogMessage << "src_o"<<norm2(src_o)<<std::endl;


+  // S-direction is INNERMOST and takes no part in the parity.
+  static int Opt;   // these are a temporary hack
+  static int Comms; // these are a temporary hack

   std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
   std::cout << GridLogMessage<< "* Benchmarking DomainWallFermionR::DhopEO "<<std::endl;
   std::cout << GridLogMessage<< "* Vectorising space-time by "<<vComplex::Nsimd()<<std::endl;
|
|||||||
{
|
{
|
||||||
Dw.ZeroCounters();
|
Dw.ZeroCounters();
|
||||||
FGrid->Barrier();
|
FGrid->Barrier();
|
||||||
|
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||||
double t0=usecond();
|
double t0=usecond();
|
||||||
for(int i=0;i<ncall;i++){
|
for(int i=0;i<ncall;i++){
|
||||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||||

@@ -396,14 +437,19 @@ int main (int argc, char ** argv)

   err = r_eo-result;
   std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
-  assert(norm2(err)<1.0e-4);
+  if((norm2(err)>1.0e-4)){
+    std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
+    std::cout<< "Deo REF\n " <<result << std::endl;
+    std::cout<< "Deo ERR \n " << err <<std::endl;
+  }

   pickCheckerboard(Even,src_e,err);
   pickCheckerboard(Odd,src_o,err);
   std::cout<<GridLogMessage << "norm diff even "<< norm2(src_e)<<std::endl;
   std::cout<<GridLogMessage << "norm diff odd "<< norm2(src_o)<<std::endl;
-  assert(norm2(src_e)<1.0e-4);
-  assert(norm2(src_o)<1.0e-4);
+  //assert(norm2(src_e)<1.0e-4);
+  //assert(norm2(src_o)<1.0e-4);

   Grid_finalize();
 }

File: likely benchmarks/Benchmark_dwf_sweep.cc (inferred from the benchDw declaration; file header lost)

@@ -37,11 +37,11 @@ struct scal {
   d internal;
 };

-  Gamma::GammaMatrix Gmu [] = {
-    Gamma::GammaX,
-    Gamma::GammaY,
-    Gamma::GammaZ,
-    Gamma::GammaT
-  };
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };

 void benchDw(std::vector<int> & L, int Ls, int threads, int report =0 );

@@ -77,8 +77,7 @@ int main (int argc, char ** argv)
   }

   double start=usecond();
-PARALLEL_FOR_LOOP
-  for(int t=0;t<threads;t++){
+  parallel_for(int t=0;t<threads;t++){

     sum[t] = x[t]._odata[0];
     for(int i=0;i<Nloop;i++){
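
Note: the PARALLEL_FOR_LOOP macro plus a bare for is folded into the single parallel_for construct here. A minimal sketch of the idiom (my illustration; in Grid the construct wraps an OpenMP-parallelised loop):

    // Old style: annotation macro on the preceding line.
    // PARALLEL_FOR_LOOP
    // for(int t=0;t<threads;t++){ ... }

    // New style: one construct carries both the pragma and the loop header.
    parallel_for(int t=0;t<threads;t++){
      // per-thread work, exactly as in the old loop body
    }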

File: benchmarks/Benchmark_staggered.cc (new file, 134 lines)

@@ -0,0 +1,134 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./benchmarks/Benchmark_staggered.cc
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+    *************************************************************************************/
+    /* END LEGAL */
+#include <Grid/Grid.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+int main (int argc, char ** argv)
+{
+  Grid_init(&argc,&argv);
+
+  std::vector<int> latt_size   = GridDefaultLatt();
+  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+  std::vector<int> mpi_layout  = GridDefaultMpi();
+  GridCartesian               Grid(latt_size,simd_layout,mpi_layout);
+  GridRedBlackCartesian     RBGrid(latt_size,simd_layout,mpi_layout);
+
+  int threads = GridThread::GetThreads();
+  std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
+  std::cout<<GridLogMessage << "Grid floating point word size is REALF"<< sizeof(RealF)<<std::endl;
+  std::cout<<GridLogMessage << "Grid floating point word size is REALD"<< sizeof(RealD)<<std::endl;
+  std::cout<<GridLogMessage << "Grid floating point word size is REAL"<< sizeof(Real)<<std::endl;
+
+  std::vector<int> seeds({1,2,3,4});
+  GridParallelRNG          pRNG(&Grid);
+  pRNG.SeedFixedIntegers(seeds);
+  //  pRNG.SeedRandomDevice();
+
+  typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
+  typename ImprovedStaggeredFermionR::ImplParams params;
+
+  FermionField src   (&Grid); random(pRNG,src);
+  FermionField result(&Grid); result=zero;
+  FermionField    ref(&Grid);    ref=zero;
+  FermionField    tmp(&Grid);    tmp=zero;
+  FermionField    err(&Grid);    tmp=zero;
+  LatticeGaugeField Umu(&Grid); random(pRNG,Umu);
+  std::vector<LatticeColourMatrix> U(4,&Grid);
+
+  double volume=1;
+  for(int mu=0;mu<Nd;mu++){
+    volume=volume*latt_size[mu];
+  }
+
+  // Only one non-zero (y)
+#if 0
+  Umu=zero;
+  Complex cone(1.0,0.0);
+  for(int nn=0;nn<Nd;nn++){
+    random(pRNG,U[nn]);
+    if(1) {
+      if (nn!=2) { U[nn]=zero; std::cout<<GridLogMessage << "zeroing gauge field in dir "<<nn<<std::endl; }
+      //      else { U[nn]= cone;std::cout<<GridLogMessage << "unit gauge field in dir "<<nn<<std::endl; }
+      else { std::cout<<GridLogMessage << "random gauge field in dir "<<nn<<std::endl; }
+    }
+    PokeIndex<LorentzIndex>(Umu,U[nn],nn);
+  }
+#endif
+
+  for(int mu=0;mu<Nd;mu++){
+    U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
+  }
+  ref = zero;
+  /*
+  { // Naive wilson implementation
+    ref = zero;
+    for(int mu=0;mu<Nd;mu++){
+      //    ref =  src + Gamma(Gamma::GammaX)* src ; // 1-gamma_x
+      tmp = U[mu]*Cshift(src,mu,1);
+      for(int i=0;i<ref._odata.size();i++){
+        ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
+      }
+
+      tmp =adj(U[mu])*src;
+      tmp =Cshift(tmp,mu,-1);
+      for(int i=0;i<ref._odata.size();i++){
+        ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
+      }
+    }
+  }
+  ref = -0.5*ref;
+  */
+
+  RealD mass=0.1;
+  RealD c1=9.0/8.0;
+  RealD c2=-1.0/24.0;
+  RealD u0=1.0;
+  ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
+
+  std::cout<<GridLogMessage << "Calling Ds"<<std::endl;
+  int ncall=1000;
+  double t0=usecond();
+  for(int i=0;i<ncall;i++){
+    Ds.Dhop(src,result,0);
+  }
+  double t1=usecond();
+  double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 +  == 1146
+
+  std::cout<<GridLogMessage << "Called Ds"<<std::endl;
+  std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
+  std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
+  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
+  err = ref-result;
+  std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
+
+  Grid_finalize();
+}
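
Note: two details in the new file are worth flagging. First, `FermionField err(&Grid); tmp=zero;` is almost certainly a copy-paste slip for `err=zero;`, carried down from the `tmp` line above. Second, the flop count works out as the comment claims: 16*(3*(6+8+8)) = 16*66 = 1056, plus 15*3*2 = 90, gives 1146 flops per site per call, consistent with `== 66*16 + == 1146`.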

File: benchmarks/Benchmark_wilson.cc (inferred from context)

@@ -37,11 +37,11 @@ struct scal {
   d internal;
 };

-  Gamma::GammaMatrix Gmu [] = {
-    Gamma::GammaX,
-    Gamma::GammaY,
-    Gamma::GammaZ,
-    Gamma::GammaT
-  };
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };

 bool overlapComms = false;
|
|||||||
{ // Naive wilson implementation
|
{ // Naive wilson implementation
|
||||||
ref = zero;
|
ref = zero;
|
||||||
for(int mu=0;mu<Nd;mu++){
|
for(int mu=0;mu<Nd;mu++){
|
||||||
// ref = src + Gamma(Gamma::GammaX)* src ; // 1-gamma_x
|
// ref = src + Gamma(Gamma::Algebra::GammaX)* src ; // 1-gamma_x
|
||||||
tmp = U[mu]*Cshift(src,mu,1);
|
tmp = U[mu]*Cshift(src,mu,1);
|
||||||
for(int i=0;i<ref._odata.size();i++){
|
for(int i=0;i<ref._odata.size();i++){
|
||||||
ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
|
ref._odata[i]+= tmp._odata[i] - Gamma(Gmu[mu])*tmp._odata[i]; ;
|
||||||
@ -159,7 +159,7 @@ int main (int argc, char ** argv)
|
|||||||
ref = zero;
|
ref = zero;
|
||||||
for(int mu=0;mu<Nd;mu++){
|
for(int mu=0;mu<Nd;mu++){
|
||||||
|
|
||||||
// ref = src - Gamma(Gamma::GammaX)* src ; // 1+gamma_x
|
// ref = src - Gamma(Gamma::Algebra::GammaX)* src ; // 1+gamma_x
|
||||||
tmp = U[mu]*Cshift(src,mu,1);
|
tmp = U[mu]*Cshift(src,mu,1);
|
||||||
for(int i=0;i<ref._odata.size();i++){
|
for(int i=0;i<ref._odata.size();i++){
|
||||||
ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
|
ref._odata[i]+= tmp._odata[i] + Gamma(Gmu[mu])*tmp._odata[i]; ;
|
||||||
|

File: a second Wilson benchmark (file header lost; likely benchmarks/Benchmark_wilson_sweep.cc)

@@ -30,11 +30,11 @@ struct scal {
   d internal;
 };

-  Gamma::GammaMatrix Gmu [] = {
-    Gamma::GammaX,
-    Gamma::GammaY,
-    Gamma::GammaZ,
-    Gamma::GammaT
-  };
+  Gamma::Algebra Gmu [] = {
+    Gamma::Algebra::GammaX,
+    Gamma::Algebra::GammaY,
+    Gamma::Algebra::GammaZ,
+    Gamma::Algebra::GammaT
+  };

 bool overlapComms = false;

File: configure.ac

@@ -6,7 +6,7 @@ AC_CANONICAL_TARGET
 AM_INIT_AUTOMAKE(subdir-objects)
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_SRCDIR([lib/Grid.h])
-AC_CONFIG_HEADERS([lib/Config.h])
+AC_CONFIG_HEADERS([lib/Config.h],[sed -i 's|PACKAGE_|GRID_|' lib/Config.h])
 m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])

 ############### Checks for programs

@@ -319,7 +319,7 @@ AM_CONDITIONAL(BUILD_COMMS_MPI3L, [ test "${comms_type}X" == "mpi3lX" ] )
 AM_CONDITIONAL(BUILD_COMMS_NONE, [ test "${comms_type}X" == "noneX" ])

 ############### RNG selection
-AC_ARG_ENABLE([rng],[AC_HELP_STRING([--enable-rng=ranlux48|mt19937],\
+AC_ARG_ENABLE([rng],[AC_HELP_STRING([--enable-rng=ranlux48|mt19937|sitmo],\
               [Select Random Number Generator to be used])],\
               [ac_RNG=${enable_rng}],[ac_RNG=ranlux48])

@@ -330,6 +330,9 @@ case ${ac_RNG} in
      mt19937)
       AC_DEFINE([RNG_MT19937],[1],[RNG_MT19937] )
       ;;
+     sitmo)
+      AC_DEFINE([RNG_SITMO],[1],[RNG_SITMO] )
+      ;;
      *)
       AC_MSG_ERROR([${ac_RNG} unsupported --enable-rng option]);
       ;;
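
Note: two build-system changes here. AC_CONFIG_HEADERS now rewrites the generated lib/Config.h at configure time, renaming autoconf's PACKAGE_ macros to a GRID_ prefix (presumably so the installed header cannot collide with another package's config macros), and sitmo joins ranlux48 and mt19937 as an --enable-rng choice, defining RNG_SITMO.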

File: extras/Hadrons/Global.hpp (inferred from the include guard)

@@ -160,6 +160,15 @@ std::string typeName(void)
     return typeName(typeIdPt<T>());
 }

+// default writers/readers
+#ifdef HAVE_HDF5
+typedef Hdf5Reader CorrReader;
+typedef Hdf5Writer CorrWriter;
+#else
+typedef XmlReader CorrReader;
+typedef XmlWriter CorrWriter;
+#endif
+
 END_HADRONS_NAMESPACE

 #endif // Hadrons_Global_hpp_
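
Note: with these aliases a module can serialise correlators without hard-coding a backend; the ChargedProp hunk below swaps its concrete Hdf5Writer for exactly this. Sketch of the intended use (illustrative; `result` stands for any serialisable object):

    CorrWriter writer(filename);      // Hdf5Writer if HAVE_HDF5, else XmlWriter
    write(writer, "meson", result);   // identical call either way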

File: extras/Hadrons/Modules/MContraction/Meson.hpp (inferred from context)

@@ -45,9 +45,11 @@ class MesonPar: Serializable
 {
 public:
     GRID_SERIALIZABLE_CLASS_MEMBERS(MesonPar,
                                     std::string, q1,
                                     std::string, q2,
-                                    std::string, output);
+                                    std::string, output,
+                                    Gamma::Algebra, gammaSource,
+                                    Gamma::Algebra, gammaSink);
 };

 template <typename FImpl1, typename FImpl2>

@@ -59,8 +61,7 @@ public:
     class Result: Serializable
     {
     public:
-        GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
-                                        std::vector<std::vector<std::vector<Complex>>>, corr);
+        GRID_SERIALIZABLE_CLASS_MEMBERS(Result, std::vector<Complex>, corr);
     };
 public:
     // constructor

@@ -114,29 +115,17 @@ void TMeson<FImpl1, FImpl2>::execute(void)
     PropagatorField1 &q1 = *env().template getObject<PropagatorField1>(par().q1);
     PropagatorField2 &q2 = *env().template getObject<PropagatorField2>(par().q2);
     LatticeComplex c(env().getGrid());
-    SpinMatrix g[Ns*Ns], g5;
+    Gamma gSrc(par().gammaSource), gSnk(par().gammaSink);
+    Gamma g5(Gamma::Algebra::Gamma5);
     std::vector<TComplex> buf;
     Result result;

-    g5 = makeGammaProd(Ns*Ns - 1);
-    result.corr.resize(Ns*Ns);
-    for (unsigned int i = 0; i < Ns*Ns; ++i)
-    {
-        g[i] = makeGammaProd(i);
-    }
-    for (unsigned int iSink = 0; iSink < Ns*Ns; ++iSink)
-    {
-        result.corr[iSink].resize(Ns*Ns);
-        for (unsigned int iSrc = 0; iSrc < Ns*Ns; ++iSrc)
-        {
-            c = trace(g[iSink]*q1*g[iSrc]*g5*adj(q2)*g5);
-            sliceSum(c, buf, Tp);
-            result.corr[iSink][iSrc].resize(buf.size());
-            for (unsigned int t = 0; t < buf.size(); ++t)
-            {
-                result.corr[iSink][iSrc][t] = TensorRemove(buf[t]);
-            }
-        }
-    }
+    c = trace(gSnk*q1*adj(gSrc)*g5*adj(q2)*g5);
+    sliceSum(c, buf, Tp);
+    result.corr.resize(buf.size());
+    for (unsigned int t = 0; t < buf.size(); ++t)
+    {
+        result.corr[t] = TensorRemove(buf[t]);
+    }
     write(writer, "meson", result);
 }
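
Note: the execute() rewrite above replaces the blanket scan over all Ns*Ns x Ns*Ns = 256 source/sink gamma combinations with the single user-specified (gammaSource, gammaSink) pair, so Result::corr collapses from a rank-3 array to one vector over time slices. The g5 factors around adj(q2) implement the standard gamma5-hermiticity trick: the backward propagator is recovered as g5 * adj(q2) * g5 rather than by a second inversion.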

File: Hadrons ChargedProp module (file header lost)

@@ -177,7 +177,7 @@ void TChargedProp::execute(void)
         LOG(Message) << "Saving zero-momentum projection to '"
                      << filename << "'..." << std::endl;

-        Hdf5Writer writer(filename);
+        CorrWriter writer(filename);
         std::vector<TComplex> vecBuf;
         std::vector<Complex>  result;

File: Hadrons SeqGamma source module (file header lost; inferred from TSeqGamma)

@@ -60,11 +60,11 @@ class SeqGammaPar: Serializable
 {
 public:
     GRID_SERIALIZABLE_CLASS_MEMBERS(SeqGammaPar,
                                     std::string,    q,
                                     unsigned int,   tA,
                                     unsigned int,   tB,
-                                    unsigned int,   gamma,
+                                    Gamma::Algebra, gamma,
                                     std::string,    mom);
 };

 template <typename FImpl>

@@ -140,11 +140,10 @@ void TSeqGamma<FImpl>::execute(void)
     PropagatorField &q = *env().template getObject<PropagatorField>(par().q);
     Lattice<iScalar<vInteger>> t(env().getGrid());
     LatticeComplex ph(env().getGrid()), coor(env().getGrid());
-    SpinMatrix g;
+    Gamma g(par().gamma);
     std::vector<Real> p;
     Complex i(0.0,1.0);

-    g = makeGammaProd(par().gamma);
     p = strToVec<Real>(par().mom);
     ph = zero;
     for(unsigned int mu = 0; mu < env().getNd(); mu++)

@@ -154,7 +153,7 @@ void TSeqGamma<FImpl>::execute(void)
     }
     ph = exp(i*ph);
     LatticeCoordinate(t, Tp);
-    src = where((t >= par().tA) and (t <= par().tB), g*ph*q, 0.*q);
+    src = where((t >= par().tA) and (t <= par().tB), ph*(g*q), 0.*q);
 }

 END_MODULE_NAMESPACE
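
Note: the regrouping g*ph*q -> ph*(g*q) in the where() clause is not cosmetic: with g now a Gamma object rather than a SpinMatrix, applying the gamma structure to the propagator first and then scaling by the complex phase field keeps the product well defined for the new operand types (my reading of the change; the commit itself carries no comment).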

File: lib/Grid.h (52 lines changed)

@@ -38,52 +38,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_H
 #define GRID_H

-///////////////////
-// Std C++ dependencies
-///////////////////
-#include <cassert>
-#include <complex>
-#include <vector>
-#include <iostream>
-#include <iomanip>
-#include <random>
-#include <functional>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <signal.h>
-#include <ctime>
-#include <sys/time.h>
-#include <chrono>
-
-///////////////////
-// Grid headers
-///////////////////
-#include "Config.h"
-#include <Grid/Timer.h>
-#include <Grid/PerfCount.h>
-#include <Grid/Log.h>
-#include <Grid/AlignedAllocator.h>
-#include <Grid/Simd.h>
-#include <Grid/serialisation/Serialisation.h>
-#include <Grid/Threads.h>
-#include <Grid/Lexicographic.h>
-#include <Grid/Init.h>
-#include <Grid/Communicator.h>
-#include <Grid/Cartesian.h>
-#include <Grid/Tensors.h>
-#include <Grid/Lattice.h>
-#include <Grid/Cshift.h>
-#include <Grid/Stencil.h>
-#include <Grid/Algorithms.h>
-#include <Grid/parallelIO/BinaryIO.h>
-#include <Grid/FFT.h>
-
-#include <Grid/qcd/QCD.h>
-#include <Grid/parallelIO/NerscIO.h>
-#include <Grid/qcd/hmc/NerscCheckpointer.h>
-#include <Grid/qcd/hmc/HmcRunner.h>
-
-
-
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/Action.h>
+#include <Grid/qcd/smearing/Smearing.h>
+#include <Grid/qcd/hmc/HMC_aggregate.h>

 #endif

File: lib/GridCore.h (new file, 81 lines)

@@ -0,0 +1,81 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/Grid.h
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+    *************************************************************************************/
+    /* END LEGAL */
+//
+//  Grid.h
+//  simd
+//
+//  Created by Peter Boyle on 09/05/2014.
+//  Copyright (c) 2014 University of Edinburgh. All rights reserved.
+//
+
+#ifndef GRID_BASE_H
+#define GRID_BASE_H
+
+///////////////////
+// Std C++ dependencies
+///////////////////
+#include <cassert>
+#include <complex>
+#include <vector>
+#include <iostream>
+#include <iomanip>
+#include <random>
+#include <functional>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <ctime>
+#include <sys/time.h>
+#include <chrono>
+
+///////////////////
+// Grid headers
+///////////////////
+#include "Config.h"
+
+#include <Grid/perfmon/Timer.h>
+#include <Grid/perfmon/PerfCount.h>
+#include <Grid/log/Log.h>
+#include <Grid/allocator/AlignedAllocator.h>
+#include <Grid/simd/Simd.h>
+#include <Grid/serialisation/Serialisation.h>
+#include <Grid/threads/Threads.h>
+#include <Grid/util/Util.h>
+#include <Grid/communicator/Communicator.h>
+#include <Grid/cartesian/Cartesian.h>
+#include <Grid/tensors/Tensors.h>
+#include <Grid/lattice/Lattice.h>
+#include <Grid/cshift/Cshift.h>
+#include <Grid/stencil/Stencil.h>
+#include <Grid/parallelIO/BinaryIO.h>
+#include <Grid/algorithms/Algorithms.h>
+
+#endif
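
Note: the net effect of this pair of files is a layering split: lib/Grid.h becomes a thin umbrella, the non-QCD core lives in the new lib/GridCore.h (guard GRID_BASE_H), and the QCD core is gathered by lib/GridQCDcore.h below. A client needing only lattice/comms machinery can now include the core alone (illustrative):

    #include <Grid/GridCore.h>     // lattices, SIMD, comms; no QCD types
    // #include <Grid/Grid.h>      // or everything, as before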

File: lib/GridQCDcore.h (inferred from the GRID_QCD_CORE_H guard; header comment updated from ./lib/qcd/hmc/HMC.cc to ./lib/Grid.h)

@@ -2,12 +2,12 @@

     Grid physics library, www.github.com/paboyle/Grid

-    Source file: ./lib/qcd/hmc/HMC.cc
+    Source file: ./lib/Grid.h

     Copyright (C) 2015

 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: neo <cossu@post.kek.jp>
+Author: azusayamaguchi <ayamaguc@YAMAKAZE.local>
 Author: paboyle <paboyle@ph.ed.ac.uk>

     This program is free software; you can redistribute it and/or modify

@@ -27,10 +27,16 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
     See the full license in the file "LICENSE" in the top level distribution directory
     *************************************************************************************/
     /* END LEGAL */
-#include <Grid.h>
+#ifndef GRID_QCD_CORE_H
+#define GRID_QCD_CORE_H

-namespace Grid{
-  namespace QCD{
+/////////////////////////
+// Core Grid QCD headers
+/////////////////////////
+#include <Grid/GridCore.h>
+#include <Grid/qcd/QCD.h>
+#include <Grid/qcd/spin/Spin.h>
+#include <Grid/qcd/utils/Utils.h>
+#include <Grid/qcd/representations/Representations.h>

-  }
-}
+#endif

Binary file not shown.
@ -1,154 +0,0 @@
-/*************************************************************************************
-
-    Grid physics library, www.github.com/paboyle/Grid
-
-    Source file: ./lib/Old/Tensor_peek.h
-
-    Copyright (C) 2015
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-    See the full license in the file "LICENSE" in the top level distribution directory
-*************************************************************************************/
-/*  END LEGAL */
-#ifndef GRID_MATH_PEEK_H
-#define GRID_MATH_PEEK_H
-namespace Grid {
-
-//////////////////////////////////////////////////////////////////////////////
-// Peek on a specific index; returns a scalar in that index, tensor inherits rest
-//////////////////////////////////////////////////////////////////////////////
-// If we hit the right index, return scalar with no further recursion
-
-//template<int Level> inline ComplexF peekIndex(const ComplexF arg) { return arg;}
-//template<int Level> inline ComplexD peekIndex(const ComplexD arg) { return arg;}
-//template<int Level> inline RealF peekIndex(const RealF arg) { return arg;}
-//template<int Level> inline RealD peekIndex(const RealD arg) { return arg;}
-#if 0
-// Scalar peek, no indices
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-auto peekIndex(const iScalar<vtype> &arg) -> iScalar<vtype>
-{
-  return arg;
-}
-// Vector peek, one index
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-auto peekIndex(const iVector<vtype,N> &arg,int i) -> iScalar<vtype> // Index matches
-{
-  iScalar<vtype> ret;     // return scalar
-  ret._internal = arg._internal[i];
-  return ret;
-}
-// Matrix peek, two indices
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) -> iScalar<vtype>
-{
-  iScalar<vtype> ret;     // return scalar
-  ret._internal = arg._internal[i][j];
-  return ret;
-}
-
-/////////////
-// No match peek for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
-/////////////
-// scalar
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iScalar<vtype> &arg) -> iScalar<decltype(peekIndex<Level>(arg._internal))>
-{
-  iScalar<decltype(peekIndex<Level>(arg._internal))> ret;
-  ret._internal= peekIndex<Level>(arg._internal);
-  return ret;
-}
-template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iScalar<vtype> &arg,int i) -> iScalar<decltype(peekIndex<Level>(arg._internal,i))>
-{
-  iScalar<decltype(peekIndex<Level>(arg._internal,i))> ret;
-  ret._internal=peekIndex<Level>(arg._internal,i);
-  return ret;
-}
-template<int Level,class vtype, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iScalar<vtype> &arg,int i,int j) -> iScalar<decltype(peekIndex<Level>(arg._internal,i,j))>
-{
-  iScalar<decltype(peekIndex<Level>(arg._internal,i,j))> ret;
-  ret._internal=peekIndex<Level>(arg._internal,i,j);
-  return ret;
-}
-// vector
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iVector<vtype,N> &arg) -> iVector<decltype(peekIndex<Level>(arg._internal[0])),N>
-{
-  iVector<decltype(peekIndex<Level>(arg._internal[0])),N> ret;
-  for(int ii=0;ii<N;ii++){
-    ret._internal[ii]=peekIndex<Level>(arg._internal[ii]);
-  }
-  return ret;
-}
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iVector<vtype,N> &arg,int i) -> iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N>
-{
-  iVector<decltype(peekIndex<Level>(arg._internal[0],i)),N> ret;
-  for(int ii=0;ii<N;ii++){
-    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i);
-  }
-  return ret;
-}
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iVector<vtype,N> &arg,int i,int j) -> iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N>
-{
-  iVector<decltype(peekIndex<Level>(arg._internal[0],i,j)),N> ret;
-  for(int ii=0;ii<N;ii++){
-    ret._internal[ii]=peekIndex<Level>(arg._internal[ii],i,j);
-  }
-  return ret;
-}
-
-// matrix
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iMatrix<vtype,N> &arg) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N>
-{
-  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0])),N> ret;
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj]);// Could avoid this because peeking a scalar is dumb
-  }}
-  return ret;
-}
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iMatrix<vtype,N> &arg,int i) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N>
-{
-  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i)),N> ret;
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i);
-  }}
-  return ret;
-}
-template<int Level,class vtype,int N, typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-auto peekIndex(const iMatrix<vtype,N> &arg,int i,int j) -> iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N>
-{
-  iMatrix<decltype(peekIndex<Level>(arg._internal[0][0],i,j)),N> ret;
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    ret._internal[ii][jj]=peekIndex<Level>(arg._internal[ii][jj],i,j);
-  }}
-  return ret;
-}
-#endif
-
-
-}
-#endif
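Note on the deleted peek/poke routines: they implement compile-time dispatch on tensor nesting depth. Each wrapper (iScalar, iVector, iMatrix) carries a TensorLevel one higher than its payload; when the requested Level matches, enable_if selects the "index matches" overload and strips that layer, otherwise the "no match" overload keeps the outer shape and forwards the peek one level down. The following standalone toy uses invented Leaf/iVec types (not Grid code) to show the same mechanism:

#include <array>
#include <iostream>
#include <type_traits>

// Leaf terminates the recursion; nesting level 0.
struct Leaf { double v; static constexpr int TensorLevel = 0; };

// A vector tensor wrapping any inner tensor; nesting level grows by one.
template <class inner, int N>
struct iVec {
  std::array<inner, N> _internal;
  static constexpr int TensorLevel = inner::TensorLevel + 1;
};

// Index matches: strip this layer and return the selected element.
template <int Level, class inner, int N,
          typename std::enable_if<iVec<inner, N>::TensorLevel == Level, int>::type = 0>
inner peekIndex(const iVec<inner, N> &arg, int i) {
  return arg._internal[i];
}

// No match: keep the iVec shape and forward the peek one level down.
template <int Level, class inner, int N,
          typename std::enable_if<iVec<inner, N>::TensorLevel != Level, int>::type = 0>
auto peekIndex(const iVec<inner, N> &arg, int i)
    -> iVec<decltype(peekIndex<Level>(arg._internal[0], i)), N> {
  iVec<decltype(peekIndex<Level>(arg._internal[0], i)), N> ret;
  for (int ii = 0; ii < N; ii++)
    ret._internal[ii] = peekIndex<Level>(arg._internal[ii], i);
  return ret;
}

int main() {
  iVec<iVec<Leaf, 2>, 3> t{};                // level-2 tensor of level-1 vectors
  t._internal[1]._internal[0].v = 42.0;
  auto row = peekIndex<2>(t, 1);             // peek the outer (level-2) index
  std::cout << row._internal[0].v << "\n";   // prints 42
  auto col = peekIndex<1>(t, 0);             // peek the inner index of every outer slot
  std::cout << col._internal[1].v << "\n";   // prints 42
}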
@ -1,127 +0,0 @@
-/*************************************************************************************
-
-    Grid physics library, www.github.com/paboyle/Grid
-
-    Source file: ./lib/Old/Tensor_poke.h
-
-    Copyright (C) 2015
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-    See the full license in the file "LICENSE" in the top level distribution directory
-*************************************************************************************/
-/*  END LEGAL */
-#ifndef GRID_MATH_POKE_H
-#define GRID_MATH_POKE_H
-namespace Grid {
-
-//////////////////////////////////////////////////////////////////////////////
-// Poke a specific index;
-//////////////////////////////////////////////////////////////////////////////
-#if 0
-// Scalar poke
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-void pokeIndex(iScalar<vtype> &ret, const iScalar<vtype> &arg)
-{
-  ret._internal = arg._internal;
-}
-// Vector poke, one index
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-void pokeIndex(iVector<vtype,N> &ret, const iScalar<vtype> &arg,int i)
-{
-  ret._internal[i] = arg._internal;
-}
-//Matrix poke, two indices
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel == Level >::type * =nullptr> inline
-void pokeIndex(iMatrix<vtype,N> &ret, const iScalar<vtype> &arg,int i,int j)
-{
-  ret._internal[i][j] = arg._internal;
-}
-
-/////////////
-// No match poke for scalar,vector,matrix must forward on either 0,1,2 args. Must have 9 routines with notvalue
-/////////////
-// scalar
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal))> &arg)
-{
-  pokeIndex<Level>(ret._internal,arg._internal);
-}
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0))> &arg, int i)
-{
-  pokeIndex<Level>(ret._internal,arg._internal,i);
-}
-template<int Level,class vtype,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iScalar<vtype> &ret, const iScalar<decltype(peekIndex<Level>(ret._internal,0,0))> &arg,int i,int j)
-{
-  pokeIndex<Level>(ret._internal,arg._internal,i,j);
-}
-
-// Vector
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iVector<vtype,N> &ret, iVector<decltype(peekIndex<Level>(ret._internal)),N> &arg)
-{
-  for(int ii=0;ii<N;ii++){
-    pokeIndex<Level>(ret._internal[ii],arg._internal[ii]);
-  }
-}
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
-{
-  for(int ii=0;ii<N;ii++){
-    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i);
-  }
-}
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg,int i,int j)
-{
-  for(int ii=0;ii<N;ii++){
-    pokeIndex<Level>(ret._internal[ii],arg._internal[ii],i,j);
-  }
-}
-
-// Matrix
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal)),N> &arg)
-{
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj]);
-  }}
-}
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0)),N> &arg,int i)
-{
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i);
-  }}
-}
-template<int Level,class vtype,int N,typename std::enable_if< iScalar<vtype>::TensorLevel != Level >::type * =nullptr> inline
-void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(peekIndex<Level>(ret._internal,0,0)),N> &arg, int i,int j)
-{
-  for(int ii=0;ii<N;ii++){
-  for(int jj=0;jj<N;jj++){
-    pokeIndex<Level>(ret._internal[ii][jj],arg._internal[ii][jj],i,j);
-  }}
-}
-#endif
-
-}
-#endif
@ -42,15 +42,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <Grid/algorithms/iterative/ConjugateResidual.h>
 #include <Grid/algorithms/iterative/NormalEquations.h>
 #include <Grid/algorithms/iterative/SchurRedBlack.h>
 
 #include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
 #include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
 
 // Lanczos support
 #include <Grid/algorithms/iterative/MatrixUtils.h>
 #include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
 
 #include <Grid/algorithms/CoarsenedMatrix.h>
+#include <Grid/algorithms/FFT.h>
 
 // Eigen/lanczos
 // EigCg
@ -267,8 +267,7 @@ namespace Grid {
     SimpleCompressor<siteVector> compressor;
     Stencil.HaloExchange(in,compressor);
 
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<Grid()->oSites();ss++){
+    parallel_for(int ss=0;ss<Grid()->oSites();ss++){
       siteVector res = zero;
       siteVector nbr;
       int ptype;
@ -380,8 +379,7 @@ PARALLEL_FOR_LOOP
       Subspace.ProjectToSubspace(oProj,oblock);
       // blockProject(iProj,iblock,Subspace.subspace);
       // blockProject(oProj,oblock,Subspace.subspace);
-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<Grid()->oSites();ss++){
+      parallel_for(int ss=0;ss<Grid()->oSites();ss++){
 	for(int j=0;j<nbasis;j++){
 	  if( disp!= 0 ) {
 	    A[p]._odata[ss](j,i) = oProj._odata[ss](j);
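The two-line PARALLEL_FOR_LOOP/for pairs above collapse to one parallel_for token because the macro folds the OpenMP pragma and the loop keyword together. A minimal sketch of how such a macro can be built (an assumption about the macro's shape, not a copy of Grid's threads header):

#include <cstdio>
#ifdef _OPENMP
#define parallel_for _Pragma("omp parallel for") for
#else
#define parallel_for for
#endif

int main() {
  static double data[1024];
  parallel_for(int ss = 0; ss < 1024; ss++) {  // expands to "#pragma omp parallel for" + for(...)
    data[ss] = 2.0 * ss;
  }
  std::printf("%f\n", data[512]);              // prints 1024.000000
  return 0;
}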
@ -25,7 +25,7 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
     See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /*  END LEGAL */
-#include <Grid.h>
+#include <Grid/GridCore.h>
 
 namespace Grid {
 double MultiShiftFunction::approx(double x)
@ -20,7 +20,7 @@
 #include<iomanip>
 #include<cassert>
 
-#include<algorithms/approx/Remez.h>
+#include<Grid/algorithms/approx/Remez.h>
 
 // Constructor
 AlgRemez::AlgRemez(double lower, double upper, long precision)
@ -45,6 +45,8 @@ class ConjugateGradient : public OperatorFunction<Field> {
   // Defaults true.
   RealD Tolerance;
   Integer MaxIterations;
+  Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
+
   ConjugateGradient(RealD tol, Integer maxit, bool err_on_no_conv = true)
       : Tolerance(tol),
         MaxIterations(maxit),
@ -155,13 +157,14 @@ class ConjugateGradient : public OperatorFunction<Field> {
         std::cout << std::endl;
 
         if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);
+        IterationsToComplete = k;
         return;
       }
     }
     std::cout << GridLogMessage << "ConjugateGradient did NOT converge"
               << std::endl;
     if (ErrorOnNoConverge) assert(0);
+    IterationsToComplete = k;
   }
 };
 }
@ -35,6 +35,7 @@ namespace Grid {
   class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
   public:
     RealD   Tolerance;
+    RealD   InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
     Integer MaxInnerIterations;
     Integer MaxOuterIterations;
     GridBase* SinglePrecGrid; //Grid for single-precision fields
@ -42,12 +43,16 @@ namespace Grid {
     LinearOperatorBase<FieldF> &Linop_f;
     LinearOperatorBase<FieldD> &Linop_d;
 
+    Integer TotalInnerIterations; //Number of inner CG iterations
+    Integer TotalOuterIterations; //Number of restarts
+    Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
+
     //Option to speed up *inner single precision* solves using a LinearFunction that produces a guess
     LinearFunction<FieldF> *guesser;
 
     MixedPrecisionConjugateGradient(RealD tol, Integer maxinnerit, Integer maxouterit, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d) :
       Linop_f(_Linop_f), Linop_d(_Linop_d),
-      Tolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
+      Tolerance(tol), InnerTolerance(tol), MaxInnerIterations(maxinnerit), MaxOuterIterations(maxouterit), SinglePrecGrid(_sp_grid),
       OuterLoopNormMult(100.), guesser(NULL){ };
 
     void useGuesser(LinearFunction<FieldF> &g){
@ -55,6 +60,8 @@ namespace Grid {
     }
 
     void operator() (const FieldD &src_d_in, FieldD &sol_d){
+      TotalInnerIterations = 0;
+
       GridStopWatch TotalTimer;
       TotalTimer.Start();
@ -74,7 +81,7 @@ namespace Grid {
       FieldD src_d(DoublePrecGrid);
       src_d = src_d_in; //source for next inner iteration, computed from residual during operation
 
-      RealD inner_tol = Tolerance;
+      RealD inner_tol = InnerTolerance;
 
       FieldF src_f(SinglePrecGrid);
       src_f.checkerboard = cb;
@ -89,7 +96,9 @@ namespace Grid {
 
       GridStopWatch PrecChangeTimer;
 
-      for(Integer outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++){
+      Integer &outer_iter = TotalOuterIterations; //so it will be equal to the final iteration count
+
+      for(outer_iter = 0; outer_iter < MaxOuterIterations; outer_iter++){
 	//Compute double precision rsd and also new RHS vector.
 	Linop_d.HermOp(sol_d, tmp_d);
 	RealD norm = axpy_norm(src_d, -1., tmp_d, src_d_in); //src_d is residual vector
@ -117,6 +126,7 @@ namespace Grid {
 	InnerCGtimer.Start();
 	CG_f(Linop_f, src_f, sol_f);
 	InnerCGtimer.Stop();
+	TotalInnerIterations += CG_f.IterationsToComplete;
 
 	//Convert sol back to double and add to double prec solution
 	PrecChangeTimer.Start();
@ -131,9 +141,11 @@ namespace Grid {
 
       ConjugateGradient<FieldD> CG_d(Tolerance, MaxInnerIterations);
       CG_d(Linop_d, src_d_in, sol_d);
+      TotalFinalStepIterations = CG_d.IterationsToComplete;
 
       TotalTimer.Stop();
-      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Total " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
+      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Inner CG iterations " << TotalInnerIterations << " Restarts " << TotalOuterIterations << " Final CG iterations " << TotalFinalStepIterations << std::endl;
+      std::cout<<GridLogMessage<<"MixedPrecisionConjugateGradient: Total time " << TotalTimer.Elapsed() << " Precision change " << PrecChangeTimer.Elapsed() << " Inner CG total " << InnerCGtimer.Elapsed() << std::endl;
     }
   };
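The restart structure instrumented above is defect correction: only residual formation and solution accumulation happen in double precision, while the expensive inner solve runs in single precision and restarts from the updated residual. A standalone toy of that outer loop (a 4x4 SPD system with a few Jacobi sweeps standing in for the inner CG_f; illustrative, not Grid code):

#include <cstdio>
#include <vector>

static const int N = 4;
// A small SPD (diagonally dominant) matrix standing in for Linop_d/Linop_f.
static const double A[N][N] = {{4,1,0,0},{1,4,1,0},{0,1,4,1},{0,0,1,4}};

void HermOp(const std::vector<double>& x, std::vector<double>& y) {
  for (int i = 0; i < N; i++) {
    double s = 0;
    for (int j = 0; j < N; j++) s += A[i][j] * x[j];
    y[i] = s;
  }
}

// Inner "solver": a few Jacobi sweeps, entirely in single precision.
void InnerSolve(const std::vector<float>& b, std::vector<float>& x) {
  x.assign(N, 0.f);
  for (int sweep = 0; sweep < 20; sweep++)
    for (int i = 0; i < N; i++) {
      float s = b[i];
      for (int j = 0; j < N; j++) if (j != i) s -= (float)A[i][j] * x[j];
      x[i] = s / (float)A[i][i];
    }
}

int main() {
  std::vector<double> b = {1,2,3,4}, sol(N,0.0), r(N), tmp(N);
  std::vector<float>  r_f(N), corr_f(N);
  for (int outer = 0; outer < 10; outer++) {              // MaxOuterIterations
    HermOp(sol, tmp);                                     // double-precision residual
    double rn = 0;
    for (int i = 0; i < N; i++) { r[i] = b[i] - tmp[i]; rn += r[i]*r[i]; }
    std::printf("outer %d |r|^2 = %g\n", outer, rn);
    if (rn < 1e-24) break;                                // outer tolerance (squared)
    for (int i = 0; i < N; i++) r_f[i] = (float)r[i];     // precision change
    InnerSolve(r_f, corr_f);                              // inner single-precision solve
    for (int i = 0; i < N; i++) sol[i] += (double)corr_f[i]; // accumulate correction
  }
  return 0;
}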
@ -36,7 +36,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <iomanip>
 #include <complex>
 #include <typeinfo>
-#include <Grid.h>
+#include <Grid/Grid.h>
 
 
 /** Sign function **/
@ -1,7 +1,7 @@
 
 
 
-#include <Grid/Grid.h>
+#include <Grid/GridCore.h>
 
 namespace Grid {
 
@ -13,9 +13,10 @@ void *PointerCache::Insert(void *ptr,size_t bytes) {
 
   if (bytes < 4096 ) return NULL;
 
-#ifdef _OPENMP
+#ifdef GRID_OMP
   assert(omp_in_parallel()==0);
 #endif
 
   void * ret = NULL;
   int v = -1;
@ -52,7 +52,7 @@ public:
 
   // Physics Grid information.
   std::vector<int> _simd_layout;// Which dimensions get relayed out over simd lanes.
-  std::vector<int> _fdimensions;// Global dimensions of array prior to cb removal
+  std::vector<int> _fdimensions;// (full) Global dimensions of array prior to cb removal
   std::vector<int> _gdimensions;// Global dimensions of array after cb removal
   std::vector<int> _ldimensions;// local dimensions of array with processor images removed
   std::vector<int> _rdimensions;// Reduced local dimensions with simd lane images and processor images removed
@ -77,7 +77,7 @@ public:
   // GridCartesian / GridRedBlackCartesian
   ////////////////////////////////////////////////////////////////
   virtual int CheckerBoarded(int dim)=0;
-  virtual int CheckerBoard(std::vector<int> &site)=0;
+  virtual int CheckerBoard(const std::vector<int> &site)=0;
   virtual int CheckerBoardDestination(int source_cb,int shift,int dim)=0;
   virtual int CheckerBoardShift(int source_cb,int dim,int shift,int osite)=0;
   virtual int CheckerBoardShiftForCB(int source_cb,int dim,int shift,int cb)=0;
@ -121,7 +121,6 @@ public:
     Lexicographic::CoorFromIndex(coor,Oindex,_rdimensions);
   }
 
-
   //////////////////////////////////////////////////////////
   // SIMD lane addressing
   //////////////////////////////////////////////////////////
@ -207,16 +206,16 @@ public:
     std::vector<int> lcoor;
     GlobalCoorToProcessorCoorLocalCoor(pcoor,lcoor,gcoor);
     rank = RankFromProcessorCoor(pcoor);
-
+    /*
     std::vector<int> cblcoor(lcoor);
     for(int d=0;d<cblcoor.size();d++){
       if( this->CheckerBoarded(d) ) {
 	cblcoor[d] = lcoor[d]/2;
       }
     }
-
-    i_idx= iIndex(cblcoor);// this does not imply divide by 2 on checker dim
-    o_idx= oIndex(lcoor);  // this implies divide by 2 on checkerdim
+    */
+    i_idx= iIndex(lcoor);
+    o_idx= oIndex(lcoor);
   }
 
   void RankIndexToGlobalCoor(int rank, int o_idx, int i_idx , std::vector<int> &gcoor)
@ -49,7 +49,7 @@ public:
     virtual int CheckerBoarded(int dim){
       return 0;
     }
-    virtual int CheckerBoard(std::vector<int> &site){
+    virtual int CheckerBoard(const std::vector<int> &site){
       return 0;
     }
     virtual int CheckerBoardDestination(int cb,int shift,int dim){
@ -49,7 +49,7 @@ public:
     if( dim==_checker_dim) return 1;
     else return 0;
   }
-  virtual int CheckerBoard(std::vector<int> &site){
+  virtual int CheckerBoard(const std::vector<int> &site){
    int linear=0;
    assert(site.size()==_ndimension);
    for(int d=0;d<_ndimension;d++){
@ -25,7 +25,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
     See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /*  END LEGAL */
-#include "Grid.h"
+#include <Grid/GridCore.h>
+
 
 namespace Grid {
 
 ///////////////////////////////////////////////////////////////
@ -33,6 +34,7 @@ namespace Grid {
 ///////////////////////////////////////////////////////////////
 void * CartesianCommunicator::ShmCommBuf;
 uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024;
+CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
 
 /////////////////////////////////
 // Alloc, free shmem region
@ -88,7 +90,9 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
 
 #if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L)
 
-void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+int CartesianCommunicator::NodeCount(void)    { return ProcessorCount();};
+
+double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
 						void *xmit,
 						int xmit_to_rank,
 						void *recv,
@ -96,6 +100,7 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_
 						int bytes)
 {
   SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
+  return 2.0*bytes;
 }
 void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
 {
@ -116,6 +116,12 @@ class CartesianCommunicator {
   // Implemented in Communicator_base.C
   /////////////////////////////////
   static void * ShmCommBuf;
+
+  // Isend/Irecv/Wait, or Sendrecv blocking
+  enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
+  static CommunicatorPolicy_t CommunicatorPolicy;
+  static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }
+
   size_t heap_top;
   size_t heap_bytes;
 
@ -148,6 +154,7 @@ class CartesianCommunicator {
   const std::vector<int> & ThisProcessorCoor(void) ;
   const std::vector<int> & ProcessorGrid(void) ;
   int                      ProcessorCount(void) ;
+  int                      NodeCount(void) ;
 
   ////////////////////////////////////////////////////////////////////////////////
   // very VERY rarely (Log, serial RNG) we need world without a grid
@ -200,7 +207,7 @@ class CartesianCommunicator {
 
   void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
 
-  void StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+  double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
 			 void *xmit,
 			 int xmit_to_rank,
 			 void *recv,
@ -25,7 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
     See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /*  END LEGAL */
-#include "Grid.h"
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/ActionCore.h>
 #include <mpi.h>
 
 namespace Grid {
@ -39,9 +41,13 @@ MPI_Comm CartesianCommunicator::communicator_world;
 // Should error check all MPI calls.
 void CartesianCommunicator::Init(int *argc, char ***argv) {
   int flag;
+  int provided;
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
-    MPI_Init(argc,argv);
+    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
+    if ( provided != MPI_THREAD_MULTIPLE ) {
+      QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute;
+    }
  }
  MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
  ShmInitGeneric();
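Requesting MPI_THREAD_MULTIPLE at init is what permits communication calls from several threads at once; when the MPI library grants a lower thread level, the code above falls back to non-overlapped comms-then-compute. A minimal standalone illustration of the same init pattern:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  int flag, provided;
  MPI_Initialized(&flag);            // coexist with libraries that already called MPI_Init
  if (!flag) {
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided != MPI_THREAD_MULTIPLE)
      std::printf("only thread level %d granted; disable threaded comms overlap\n", provided);
  }
  MPI_Comm world;
  MPI_Comm_dup(MPI_COMM_WORLD, &world); // private communicator, as in the diff
  MPI_Finalize();
  return 0;
}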
@ -152,24 +158,34 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
 						int from,
 						int bytes)
 {
-  MPI_Request xrq;
-  MPI_Request rrq;
-  int rank = _processor;
+  int myrank = _processor;
   int ierr;
-  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
-  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-
-  assert(ierr==0);
-
-  list.push_back(xrq);
-  list.push_back(rrq);
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
+    MPI_Request xrq;
+    MPI_Request rrq;
+
+    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
+
+    assert(ierr==0);
+    list.push_back(xrq);
+    list.push_back(rrq);
+  } else {
+    // Give the CPU to MPI immediately; can use threads to overlap optionally
+    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
+		      recv,bytes,MPI_CHAR,from, from,
+		      communicator,MPI_STATUS_IGNORE);
+    assert(ierr==0);
+  }
 }
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
-  int nreq=list.size();
-  std::vector<MPI_Status> status(nreq);
-  int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
-  assert(ierr==0);
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
+    int nreq=list.size();
+    std::vector<MPI_Status> status(nreq);
+    int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
+    assert(ierr==0);
+  }
 }
 
 void CartesianCommunicator::Barrier(void)
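The CommunicatorPolicy switch chooses between posting Irecv/Isend pairs completed later by Waitall (Concurrent) and a single blocking MPI_Sendrecv that hands the CPU to MPI immediately (Sequential). A standalone ring-exchange sketch of the two paths (toy code, not the Grid implementation):

#include <mpi.h>
#include <cassert>
#include <vector>

enum Policy { Concurrent, Sequential };

void ring_exchange(Policy p, MPI_Comm comm, char* xmit, char* recv,
                   int bytes, int dest, int from, int me) {
  if (p == Concurrent) {
    MPI_Request xrq, rrq;
    int ierr  = MPI_Irecv(recv, bytes, MPI_CHAR, from, from, comm, &rrq);
    ierr     |= MPI_Isend(xmit, bytes, MPI_CHAR, dest, me,   comm, &xrq);
    assert(ierr == 0);
    MPI_Request reqs[2] = {xrq, rrq};
    MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);   // complete both requests
  } else {
    int ierr = MPI_Sendrecv(xmit, bytes, MPI_CHAR, dest, me,
                            recv, bytes, MPI_CHAR, from, from,
                            comm, MPI_STATUS_IGNORE);  // blocking combined call
    assert(ierr == 0);
  }
}

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int me, np;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);
  MPI_Comm_size(MPI_COMM_WORLD, &np);
  std::vector<char> out(64, (char)me), in(64);
  int dest = (me + 1) % np, from = (me + np - 1) % np;
  ring_exchange(Concurrent, MPI_COMM_WORLD, out.data(), in.data(), 64, dest, from, me);
  ring_exchange(Sequential, MPI_COMM_WORLD, out.data(), in.data(), 64, dest, from, me);
  MPI_Finalize();
  return 0;
}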
@ -1,4 +1,4 @@
 /*************************************************************************************
 
     Grid physics library, www.github.com/paboyle/Grid
 
@ -25,9 +25,23 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
     See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /*  END LEGAL */
-#include "Grid.h"
+#include <Grid/GridCore.h>
+
 #include <mpi.h>
+
+#include <semaphore.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/mman.h>
+//#include <zlib.h>
+#ifndef SHM_HUGETLB
+#define SHM_HUGETLB 04000
+#endif
 
 namespace Grid {
 
 ///////////////////////////////////////////////////////////////////////////////////////////////////
@ -50,6 +64,10 @@ std::vector<int> CartesianCommunicator::GroupRanks;
 std::vector<int> CartesianCommunicator::MyGroup;
 std::vector<void *> CartesianCommunicator::ShmCommBufs;
 
+int CartesianCommunicator::NodeCount(void)    { return GroupSize;};
+
+
+#undef FORCE_COMMS
 void *CartesianCommunicator::ShmBufferSelf(void)
 {
   return ShmCommBufs[ShmRank];
@ -57,6 +75,9 @@ void *CartesianCommunicator::ShmBufferSelf(void)
 void *CartesianCommunicator::ShmBuffer(int rank)
 {
   int gpeer = GroupRanks[rank];
+#ifdef FORCE_COMMS
+  return NULL;
+#endif
   if (gpeer == MPI_UNDEFINED){
     return NULL;
   } else {
@ -65,7 +86,13 @@ void *CartesianCommunicator::ShmBuffer(int rank)
 }
 void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
 {
+  static int count =0;
   int gpeer = GroupRanks[rank];
+  assert(gpeer!=ShmRank); // never send to self
+  assert(rank!=WorldRank);// never send to self
+#ifdef FORCE_COMMS
+  return NULL;
+#endif
   if (gpeer == MPI_UNDEFINED){
     return NULL;
   } else {
@ -76,16 +103,27 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p)
 }
 
 void CartesianCommunicator::Init(int *argc, char ***argv) {
 
   int flag;
+  int provided;
+  //  mtrace();
 
   MPI_Initialized(&flag); // needed to coexist with other libs apparently
   if ( !flag ) {
-    MPI_Init(argc,argv);
+    MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
+    assert (provided == MPI_THREAD_MULTIPLE);
   }
 
+  Grid_quiesce_nodes();
+
   MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
   MPI_Comm_rank(communicator_world,&WorldRank);
   MPI_Comm_size(communicator_world,&WorldSize);
 
+  if ( WorldRank == 0 ) {
+    std::cout << GridLogMessage<< "Initialising MPI "<< WorldRank <<"/"<<WorldSize <<std::endl;
+  }
 
   /////////////////////////////////////////////////////////////////////
   // Split into groups that can share memory
   /////////////////////////////////////////////////////////////////////
@ -131,7 +169,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   ///////////////////////////////////////////////////////////////////
   int ierr=MPI_Allreduce(MPI_IN_PLACE,&leaders_1hot[0],WorldSize,MPI_INT,MPI_SUM,communicator_world);
   assert(ierr==0);
-
   ///////////////////////////////////////////////////////////////////
   // find the group leaders world rank
   ///////////////////////////////////////////////////////////////////
@ -141,7 +178,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
       leaders_group[group++] = l;
     }
   }
-
   ///////////////////////////////////////////////////////////////////
   // Identify the rank of the group in which I (and my leader) live
   ///////////////////////////////////////////////////////////////////
@ -152,39 +188,114 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     }
   }
   assert(GroupRank!=-1);
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
   // allocate the shared window for our group
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
+  MPI_Barrier(ShmComm);
+
   ShmCommBuf = 0;
-  ierr = MPI_Win_allocate_shared(MAX_MPI_SHM_BYTES,1,MPI_INFO_NULL,ShmComm,&ShmCommBuf,&ShmWindow);
-  assert(ierr==0);
-  // KNL hack -- force to numa-domain 1 in flat
-#if 0
-  //#include <numaif.h>
-  for(uint64_t page=0;page<MAX_MPI_SHM_BYTES;page+=4096){
-    void *pages = (void *) ( page + ShmCommBuf );
-    int status;
-    int flags=MPOL_MF_MOVE_ALL;
-    int nodes=1; // numa domain == MCDRAM
-    unsigned long count=1;
-    ierr= move_pages(0,count, &pages,&nodes,&status,flags);
-    if (ierr && (page==0)) perror("numa relocate command failed");
-  }
-#endif
-  MPI_Win_lock_all (MPI_MODE_NOCHECK, ShmWindow);
-
+  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+  // Plan: allocate a fixed SHM region. Scratch that is just used via some scheme during stencil comms, with no allocate free.
+  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
   ShmCommBufs.resize(ShmSize);
-  for(int r=0;r<ShmSize;r++){
-    MPI_Aint sz;
-    int dsp_unit;
-    MPI_Win_shared_query (ShmWindow, r, &sz, &dsp_unit, &ShmCommBufs[r]);
+#if 1
+  char shm_name [NAME_MAX];
+  if ( ShmRank == 0 ) {
+    for(int r=0;r<ShmSize;r++){
+
+      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
+
+      sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
+
+      shm_unlink(shm_name);
+      int fd=shm_open(shm_name,O_RDWR|O_CREAT,0660);
+      if ( fd < 0 ) { perror("failed shm_open"); assert(0); }
+      ftruncate(fd, size);
+
+      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+      if ( ptr == MAP_FAILED ) {  perror("failed mmap"); assert(0); }
+      assert(((uint64_t)ptr&0x3F)==0);
+      ShmCommBufs[r] =ptr;
+
+    }
   }
+
+  MPI_Barrier(ShmComm);
+
+  if ( ShmRank != 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES ;
+
+      sprintf(shm_name,"/Grid_mpi3_shm_%d_%d",GroupRank,r);
+
+      int fd=shm_open(shm_name,O_RDWR,0660);
+      if ( fd<0 ) { perror("failed shm_open"); assert(0); }
+
+      void * ptr =  mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+      if ( ptr == MAP_FAILED ) {  perror("failed mmap"); assert(0); }
+      assert(((uint64_t)ptr&0x3F)==0);
+      ShmCommBufs[r] =ptr;
+    }
+  }
+
+#else
+  std::vector<int> shmids(ShmSize);
+
+  if ( ShmRank == 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
+      key_t key   = 0x4545 + r;
+      if ((shmids[r]= shmget(key,size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
+	int errsv = errno;
+	printf("Errno %d\n",errsv);
+	perror("shmget");
+	exit(1);
+      }
+      printf("shmid: 0x%x\n", shmids[r]);
+    }
+  }
+  MPI_Barrier(ShmComm);
+  MPI_Bcast(&shmids[0],ShmSize*sizeof(int),MPI_BYTE,0,ShmComm);
+  MPI_Barrier(ShmComm);
+
+  for(int r=0;r<ShmSize;r++){
+    ShmCommBufs[r] = (uint64_t *)shmat(shmids[r], NULL,0);
+    if (ShmCommBufs[r] == (uint64_t *)-1) {
+      perror("Shared memory attach failure");
+      shmctl(shmids[r], IPC_RMID, NULL);
+      exit(2);
+    }
+    printf("shmaddr: %p\n", ShmCommBufs[r]);
+  }
+  MPI_Barrier(ShmComm);
+  // Mark for clean up
+  for(int r=0;r<ShmSize;r++){
+    shmctl(shmids[r], IPC_RMID,(struct shmid_ds *)NULL);
+  }
+  MPI_Barrier(ShmComm);
+
+#endif
+  ShmCommBuf         = ShmCommBufs[ShmRank];
+
+  MPI_Barrier(ShmComm);
+  if ( ShmRank == 0 ) {
+    for(int r=0;r<ShmSize;r++){
+      uint64_t * check = (uint64_t *) ShmCommBufs[r];
+      check[0] = GroupRank;
+      check[1] = r;
+      check[2] = 0x5A5A5A;
+    }
+  }
+
+  MPI_Barrier(ShmComm);
+  for(int r=0;r<ShmSize;r++){
+    uint64_t * check = (uint64_t *) ShmCommBufs[r];
+
+    assert(check[0]==GroupRank);
+    assert(check[1]==r);
+    assert(check[2]==0x5A5A5A);
+
+  }
+  MPI_Barrier(ShmComm);
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
   // Verbose for now
   //////////////////////////////////////////////////////////////////////////////////////////////////////////
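The new allocation path replaces MPI_Win_allocate_shared with named POSIX shared memory: rank 0 of each node creates, truncates, and mmaps one region per local rank, and after a barrier the remaining ranks map the same names. A minimal single-process sketch of those system calls (link with -lrt on Linux):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const char* name = "/demo_shm_region";      // Grid uses one name per (group,rank)
  const size_t size = 1 << 20;

  shm_unlink(name);                           // remove any stale region
  int fd = shm_open(name, O_RDWR | O_CREAT, 0660);
  if (fd < 0) { perror("shm_open"); return 1; }
  if (ftruncate(fd, size) != 0) { perror("ftruncate"); return 1; }

  void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  if (ptr == MAP_FAILED) { perror("mmap"); return 1; }
  assert(((uint64_t)ptr & 0x3F) == 0);        // cache-line aligned, as the diff asserts

  ((char*)ptr)[0] = 0x5A;                     // visible to any process mapping the same name
  std::printf("mapped %zu bytes at %p\n", size, ptr);

  munmap(ptr, size);
  close(fd);
  shm_unlink(name);
  return 0;
}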
@ -192,7 +303,7 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
   std::cout<<GridLogMessage<< "Grid MPI-3 configuration: detected ";
   std::cout<< WorldSize << " Ranks " ;
   std::cout<< GroupSize << " Nodes " ;
-  std::cout<<  ShmSize << " with ranks-per-node "<<std::endl;
+  std::cout<< " with "<< ShmSize << " ranks-per-node "<<std::endl;
 
   std::cout<<GridLogMessage <<"Grid MPI-3 configuration: allocated shared memory region of size ";
   std::cout<<std::hex << MAX_MPI_SHM_BYTES <<" ShmCommBuf address = "<<ShmCommBuf << std::dec<<std::endl;
@ -207,7 +318,6 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     if(g!=ShmSize-1) std::cout<<",";
     else std::cout<<"}"<<std::endl;
   }
-
 }
 
 for(int g=0;g<GroupSize;g++){
@ -216,23 +326,21 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
     if ( (ShmRank == 0) && (GroupRank==g) ) {
       std::cout<<MyGroup[r];
       if(r<ShmSize-1) std::cout<<",";
-      else std::cout<<"}"<<std::endl;
+      else std::cout<<"}"<<std::endl<<std::flush;
     }
     MPI_Barrier(communicator_world);
   }
 }
 
 assert(ShmSetup==0); ShmSetup=1;
 }
 
-
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Want to implement some magic ... Group sub-cubes into those on same node
 ////////////////////////////////////////////////////////////////////////////////////////////////////////////
-void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
+void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &dest,int &source)
 {
-  std::vector<int> coor = _processor_coor;
+  std::vector<int> coor = _processor_coor;  // my coord
 
   assert(std::abs(shift) <_processors[dim]);
 
   coor[dim] = (_processor_coor[dim] + shift + _processors[dim])%_processors[dim];
@ -242,28 +350,32 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
   coor[dim] = (_processor_coor[dim] - shift + _processors[dim])%_processors[dim];
   Lexicographic::IndexFromCoor(coor,dest,_processors);
   dest = LexicographicToWorldRank[dest];
-}
+}// rank is world rank.
 
 int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
 {
   int rank;
   Lexicographic::IndexFromCoor(coor,rank,_processors);
   rank = LexicographicToWorldRank[rank];
   return rank;
-}
+}// rank is world rank
 
 void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
 {
-  Lexicographic::CoorFromIndex(coor,rank,_processors);
-  rank = LexicographicToWorldRank[rank];
+  int lr=-1;
+  for(int r=0;r<WorldSize;r++){// map world Rank to lexico and then to coor
+    if( LexicographicToWorldRank[r]==rank) lr = r;
+  }
+  assert(lr!=-1);
+  Lexicographic::CoorFromIndex(coor,lr,_processors);
 }
 
 CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
 {
   int ierr;
   communicator=communicator_world;
 
   _ndimension = processors.size();
 
   ////////////////////////////////////////////////////////////////
   // Assert power of two shm_size.
   ////////////////////////////////////////////////////////////////
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
assert(log2size != -1);
|
assert(log2size != -1);
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////
|
||||||
// Identify subblock of ranks on node spreading across dims
|
// Identify subblock of ranks on node spreading across dims
|
||||||
// in a maximally symmetrical way
|
// in a maximally symmetrical way
|
||||||
////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////
|
||||||
int dim = 0;
|
|
||||||
|
|
||||||
std::vector<int> WorldDims = processors;
|
std::vector<int> WorldDims = processors;
|
||||||
|
|
||||||
ShmDims.resize(_ndimension,1);
|
ShmDims.resize (_ndimension,1);
|
||||||
GroupDims.resize(_ndimension);
|
GroupDims.resize(_ndimension);
|
||||||
|
ShmCoor.resize (_ndimension);
|
||||||
ShmCoor.resize(_ndimension);
|
|
||||||
GroupCoor.resize(_ndimension);
|
GroupCoor.resize(_ndimension);
|
||||||
WorldCoor.resize(_ndimension);
|
WorldCoor.resize(_ndimension);
|
||||||
|
|
||||||
|
int dim = 0;
|
||||||
for(int l2=0;l2<log2size;l2++){
|
for(int l2=0;l2<log2size;l2++){
|
||||||
while ( WorldDims[dim] / ShmDims[dim] <= 1 ) dim=(dim+1)%_ndimension;
|
while ( (WorldDims[dim] / ShmDims[dim]) <= 1 ) dim=(dim+1)%_ndimension;
|
||||||
ShmDims[dim]*=2;
|
ShmDims[dim]*=2;
|
||||||
dim=(dim+1)%_ndimension;
|
dim=(dim+1)%_ndimension;
|
||||||
}
|
}
|
||||||
@ -304,6 +414,29 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
     GroupDims[d] = WorldDims[d]/ShmDims[d];
   }
 
+  ////////////////////////////////////////////////////////////////
+  // Verbose
+  ////////////////////////////////////////////////////////////////
+#if 0
+  std::cout<< GridLogMessage << "MPI-3 usage "<<std::endl;
+  std::cout<< GridLogMessage << "SHM   ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< ShmDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+
+  std::cout<< GridLogMessage << "Group ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< GroupDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+
+  std::cout<< GridLogMessage<<"World ";
+  for(int d=0;d<_ndimension;d++){
+    std::cout<< WorldDims[d] <<" ";
+  }
+  std::cout<< std::endl;
+#endif
   ////////////////////////////////////////////////////////////////
   // Check processor counts match
   ////////////////////////////////////////////////////////////////
@ -317,29 +450,57 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
|
|||||||
|
|
||||||
////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////
|
||||||
// Establish mapping between lexico physics coord and WorldRank
|
// Establish mapping between lexico physics coord and WorldRank
|
||||||
//
|
|
||||||
////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////
|
||||||
LexicographicToWorldRank.resize(WorldSize,0);
|
|
||||||
Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims);
|
Lexicographic::CoorFromIndex(GroupCoor,GroupRank,GroupDims);
|
||||||
Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims);
|
Lexicographic::CoorFromIndex(ShmCoor,ShmRank,ShmDims);
|
||||||
for(int d=0;d<_ndimension;d++){
|
for(int d=0;d<_ndimension;d++){
|
||||||
WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d];
|
WorldCoor[d] = GroupCoor[d]*ShmDims[d]+ShmCoor[d];
|
||||||
}
|
}
|
||||||
_processor_coor = WorldCoor;
|
_processor_coor = WorldCoor;
|
||||||
|
_processor = WorldRank;
|
||||||
int lexico;
|
|
||||||
Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
|
|
||||||
LexicographicToWorldRank[lexico]=WorldRank;
|
|
||||||
_processor = lexico;
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////
|
||||||
// global sum Lexico to World mapping
|
// global sum Lexico to World mapping
|
||||||
///////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////
|
||||||
|
int lexico;
|
||||||
|
LexicographicToWorldRank.resize(WorldSize,0);
|
||||||
|
Lexicographic::IndexFromCoor(WorldCoor,lexico,WorldDims);
|
||||||
|
LexicographicToWorldRank[lexico] = WorldRank;
|
||||||
ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator);
|
ierr=MPI_Allreduce(MPI_IN_PLACE,&LexicographicToWorldRank[0],WorldSize,MPI_INT,MPI_SUM,communicator);
|
||||||
assert(ierr==0);
|
assert(ierr==0);
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
|
for(int i=0;i<WorldSize;i++){
|
||||||
|
|
||||||
|
int wr = LexicographicToWorldRank[i];
|
||||||
|
// int wr = i;
|
||||||
|
|
||||||
|
std::vector<int> coor(_ndimension);
|
||||||
|
ProcessorCoorFromRank(wr,coor); // from world rank
|
||||||
|
int ck = RankFromProcessorCoor(coor);
|
||||||
|
assert(ck==wr);
|
||||||
|
|
||||||
|
if ( wr == WorldRank ) {
|
||||||
|
for(int j=0;j<coor.size();j++) {
|
||||||
|
assert(coor[j] == _processor_coor[j]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
std::cout << GridLogMessage<< " Lexicographic "<<i;
|
||||||
|
std::cout << " MPI rank "<<wr;
|
||||||
|
std::cout << " Coor ";
|
||||||
|
for(int j=0;j<coor.size();j++) std::cout << coor[j];
|
||||||
|
std::cout<< std::endl;
|
||||||
|
*/
|
||||||
|
/////////////////////////////////////////////////////
|
||||||
|
// Check everyone agrees on everyone elses coords
|
||||||
|
/////////////////////////////////////////////////////
|
||||||
|
std::vector<int> mcoor = coor;
|
||||||
|
this->Broadcast(0,(void *)&mcoor[0],mcoor.size()*sizeof(int));
|
||||||
|
for(int d = 0 ; d< _ndimension; d++) {
|
||||||
|
assert(coor[d] == mcoor[d]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
void CartesianCommunicator::GlobalSum(uint32_t &u){
|
||||||
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
|
||||||
assert(ierr==0);
|
assert(ierr==0);
|
||||||
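The rank verification above leans on the lexicographic packing of a coordinate into a single integer and back (coordinate 0 running fastest). A minimal standalone version of that round trip, using the same packing convention rather than Grid's Lexicographic class:

    #include <cassert>
    #include <vector>

    // index = c[0] + dims[0]*(c[1] + dims[1]*(c[2] + ...))
    int IndexFromCoor(const std::vector<int> &coor, const std::vector<int> &dims) {
      int index = 0;
      for (int d = (int)dims.size() - 1; d >= 0; d--) index = index * dims[d] + coor[d];
      return index;
    }

    std::vector<int> CoorFromIndex(int index, const std::vector<int> &dims) {
      std::vector<int> coor(dims.size());
      for (size_t d = 0; d < dims.size(); d++) {
        coor[d] = index % dims[d];
        index /= dims[d];
      }
      return coor;
    }

    int main() {
      std::vector<int> dims = {4, 4, 4, 8};
      std::vector<int> coor = {1, 2, 3, 5};
      int idx = IndexFromCoor(coor, dims);
      assert(CoorFromIndex(idx, dims) == coor); // round trip is the identity
      return 0;
    }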
@@ -367,8 +528,6 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
   int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
   assert(ierr==0);
 }
-
-
 // Basic Halo comms primitive
 void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                            int dest,
@@ -377,10 +536,14 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                            int bytes)
 {
   std::vector<CommsRequest_t> reqs(0);
+  //  unsigned long  xcrc = crc32(0L, Z_NULL, 0);
+  //  unsigned long  rcrc = crc32(0L, Z_NULL, 0);
+  //  xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
   SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
   SendToRecvFromComplete(reqs);
+  //  rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
+  //  printf("proc %d SendToRecvFrom %d bytes %lx %lx\n",_processor,bytes,xcrc,rcrc);
 }

 void CartesianCommunicator::SendRecvPacket(void *xmit,
                                            void *recv,
                                            int sender,
@@ -397,7 +560,6 @@ void CartesianCommunicator::SendRecvPacket(void *xmit,
     MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
   }
 }
-
 // Basic Halo comms primitive
 void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                 void *xmit,
@@ -406,95 +568,29 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                 int from,
                                                 int bytes)
 {
-#if 0
-  this->StencilBarrier();
-
-  MPI_Request xrq;
-  MPI_Request rrq;
-
-  static int sequence;
-
+  int myrank = _processor;
   int ierr;
-  int tag;
-  int check;
-
-  assert(dest != _processor);
-  assert(from != _processor);
-
-  int gdest = GroupRanks[dest];
-  int gfrom = GroupRanks[from];
-  int gme   = GroupRanks[_processor];
-
-  sequence++;
-
-  char *from_ptr = (char *)ShmCommBufs[ShmRank];
-
-  int small = (bytes<MAX_MPI_SHM_BYTES);
-
-  typedef uint64_t T;
-  int words = bytes/sizeof(T);
-
-  assert(((size_t)bytes &(sizeof(T)-1))==0);
-  assert(gme == ShmRank);
-
-  if ( small && (gdest !=MPI_UNDEFINED) ) {
-
-    char *to_ptr   = (char *)ShmCommBufs[gdest];
-
-    assert(gme != gdest);
-
-    T *ip = (T *)xmit;
-    T *op = (T *)to_ptr;
-PARALLEL_FOR_LOOP
-    for(int w=0;w<words;w++) {
-      op[w]=ip[w];
-    }
-
-    bcopy(&_processor,&to_ptr[bytes],sizeof(_processor));
-    bcopy(&  sequence,&to_ptr[bytes+4],sizeof(sequence));
-  } else {
-    ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
+  if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
+    MPI_Request xrq;
+    MPI_Request rrq;
+    ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
     assert(ierr==0);
     list.push_back(xrq);
-  }
-
-  this->StencilBarrier();
-
-  if (small && (gfrom !=MPI_UNDEFINED) ) {
-    T *ip = (T *)from_ptr;
-    T *op = (T *)recv;
-PARALLEL_FOR_LOOP
-    for(int w=0;w<words;w++) {
-      op[w]=ip[w];
-    }
-    bcopy(&from_ptr[bytes]  ,&tag  ,sizeof(tag));
-    bcopy(&from_ptr[bytes+4],&check,sizeof(check));
-    assert(check==sequence);
-    assert(tag==from);
-  } else {
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-    assert(ierr==0);
     list.push_back(rrq);
+  } else {
+    // Give the CPU to MPI immediately; can use threads to overlap optionally
+    ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
+                      recv,bytes,MPI_CHAR,from, from,
                      communicator,MPI_STATUS_IGNORE);
+    assert(ierr==0);
   }
-
-  this->StencilBarrier();
-
-#else
-  MPI_Request xrq;
-  MPI_Request rrq;
-  int rank = _processor;
-  int ierr;
-  ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
-  ierr|=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-
-  assert(ierr==0);
-
-  list.push_back(xrq);
-  list.push_back(rrq);
-#endif
 }
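The rewritten SendToRecvFromBegin dispatches on a communicator policy: the concurrent path posts a non-blocking MPI_Irecv/MPI_Isend pair whose completion is deferred to a later MPI_Waitall, while the sequential path hands the CPU to MPI immediately with a blocking MPI_Sendrecv. A self-contained sketch of the same dispatch (the Policy enum and the calling convention here are illustrative, not Grid's types):

    #include <mpi.h>
    #include <cassert>
    #include <vector>

    enum Policy { Concurrent, Sequential };

    // Post one halo exchange under either policy. Under Concurrent, the two
    // requests accumulate in 'reqs' and must later be completed with
    // MPI_Waitall; under Sequential the call blocks right here.
    void HaloExchange(Policy policy, MPI_Comm comm, int me, int dest, int from,
                      void *xmit, void *recv, int bytes,
                      std::vector<MPI_Request> &reqs) {
      int ierr = 0;
      if (policy == Concurrent) {
        MPI_Request xrq, rrq;
        ierr  = MPI_Irecv(recv, bytes, MPI_CHAR, from, from, comm, &rrq);
        ierr |= MPI_Isend(xmit, bytes, MPI_CHAR, dest, me,   comm, &xrq);
        reqs.push_back(rrq);
        reqs.push_back(xrq);
      } else {
        ierr = MPI_Sendrecv(xmit, bytes, MPI_CHAR, dest, me,
                            recv, bytes, MPI_CHAR, from, from,
                            comm, MPI_STATUS_IGNORE);
      }
      assert(ierr == 0);
    }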

-void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                        void *xmit,
                                                        int dest,
                                                        void *recv,
@@ -505,57 +601,63 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
   MPI_Request rrq;

   int ierr;

-  assert(dest != _processor);
-  assert(from != _processor);

   int gdest = GroupRanks[dest];
   int gfrom = GroupRanks[from];
   int gme   = GroupRanks[_processor];

-  assert(gme == ShmRank);
+  assert(dest != _processor);
+  assert(from != _processor);
+  assert(gme == ShmRank);
+  double off_node_bytes=0.0;

+#ifdef FORCE_COMMS
+  gdest = MPI_UNDEFINED;
+  gfrom = MPI_UNDEFINED;
+#endif
+  if ( gfrom ==MPI_UNDEFINED) {
+    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
+    assert(ierr==0);
+    list.push_back(rrq);
+    off_node_bytes+=bytes;
+  }

   if ( gdest == MPI_UNDEFINED ) {
     ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
     assert(ierr==0);
     list.push_back(xrq);
-  }
-
-  if ( gfrom ==MPI_UNDEFINED) {
-    ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
-    assert(ierr==0);
-    list.push_back(rrq);
+    off_node_bytes+=bytes;
   }

+  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
+    this->StencilSendToRecvFromComplete(list);
+  }
+
+  return off_node_bytes;
 }
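Changing the return type to double lets callers separate genuine network traffic from intra-node shared-memory transfers when quoting bandwidth: only bytes whose partner rank is off-node (GroupRanks[...] == MPI_UNDEFINED) are counted. A tiny self-contained illustration of the accounting this enables (values hypothetical):

    #include <iostream>

    // Bandwidth accounting as enabled by the new double return value:
    // bytes that stayed on-node via shared memory are excluded.
    int main() {
      double off_node_bytes = 3.2e6; // e.g. returned by StencilSendToRecvFromBegin
      double usecs = 410.0;          // measured around Begin()+Complete()
      // bytes per microsecond is numerically equal to MB/s (decimal)
      std::cout << "off-node rate " << off_node_bytes / usecs << " MB/s\n";
      return 0;
    }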

-void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
+void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
 {
-  SendToRecvFromComplete(list);
+  SendToRecvFromComplete(waitall);
 }

 void CartesianCommunicator::StencilBarrier(void)
 {
-  MPI_Win_sync (ShmWindow);
   MPI_Barrier  (ShmComm);
-  MPI_Win_sync (ShmWindow);
 }

 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
   int nreq=list.size();
+
+  if (nreq==0) return;

   std::vector<MPI_Status> status(nreq);
   int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
   assert(ierr==0);
+  list.resize(0);
 }

 void CartesianCommunicator::Barrier(void)
 {
   int ierr = MPI_Barrier(communicator);
   assert(ierr==0);
 }

 void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
 {
   int ierr=MPI_Bcast(data,
@@ -565,7 +667,11 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
                      communicator);
   assert(ierr==0);
 }
+int CartesianCommunicator::RankWorld(void){
+  int r;
+  MPI_Comm_rank(communicator_world,&r);
+  return r;
+}
 void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
 {
   int ierr= MPI_Bcast(data,

@@ -27,6 +27,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 #include "Grid.h"
 #include <mpi.h>
+//#include <numaif.h>

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 /// Workarounds:
@@ -42,19 +43,27 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include <fcntl.h>
 #include <unistd.h>
 #include <limits.h>

 typedef sem_t *Grid_semaphore;

+#error /*THis is deprecated*/
+
+#if 0
 #define SEM_INIT(S)      S = sem_open(sem_name,0,0600,0); assert ( S != SEM_FAILED );
 #define SEM_INIT_EXCL(S) sem_unlink(sem_name); S = sem_open(sem_name,O_CREAT|O_EXCL,0600,0); assert ( S != SEM_FAILED );
 #define SEM_POST(S) assert ( sem_post(S) == 0 );
 #define SEM_WAIT(S) assert ( sem_wait(S) == 0 );
+#else
+#define SEM_INIT(S) ;
+#define SEM_INIT_EXCL(S) ;
+#define SEM_POST(S) ;
+#define SEM_WAIT(S) ;
+#endif
 #include <sys/mman.h>

 namespace Grid {

-enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL };
+enum { COMMAND_ISEND, COMMAND_IRECV, COMMAND_WAITALL, COMMAND_SENDRECV };

 struct Descriptor {
   uint64_t buf;
@@ -62,6 +71,12 @@ struct Descriptor {
   int rank;
   int tag;
   int command;
+  uint64_t xbuf;
+  uint64_t rbuf;
+  int xtag;
+  int rtag;
+  int src;
+  int dest;
   MPI_Request request;
 };

@@ -94,18 +109,14 @@ public:

  void SemInit(void) {
    sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
-    // printf("SEM_NAME: %s \n",sem_name);
    SEM_INIT(sem_head);
    sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
-    // printf("SEM_NAME: %s \n",sem_name);
    SEM_INIT(sem_tail);
  }
  void SemInitExcl(void) {
    sprintf(sem_name,"/Grid_mpi3_sem_head_%d",universe_rank);
-    // printf("SEM_INIT_EXCL: %s \n",sem_name);
    SEM_INIT_EXCL(sem_head);
    sprintf(sem_name,"/Grid_mpi3_sem_tail_%d",universe_rank);
-    // printf("SEM_INIT_EXCL: %s \n",sem_name);
    SEM_INIT_EXCL(sem_tail);
  }
  void WakeUpDMA(void) {
@@ -125,6 +136,13 @@ public:
    while(1){
      WaitForCommand();
      //      std::cout << "Getting command "<<std::endl;
+#if 0
+      _mm_monitor((void *)&state->head,0,0);
+      int s=state->start;
+      if ( s != state->head ) {
+        _mm_mwait(0,0);
+      }
+#endif
      Event();
    }
  }
@@ -132,6 +150,7 @@ public:
  int Event (void) ;

  uint64_t QueueCommand(int command,void *buf, int bytes, int hashtag, MPI_Comm comm,int u_rank) ;
+  void QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) ;

  void WaitAll() {
    //    std::cout << "Queueing WAIT command  "<<std::endl;
@@ -141,7 +160,7 @@ public:
    //    std::cout << "Waiting from semaphore "<<std::endl;
    WaitForComplete();
    //    std::cout << "Checking FIFO is empty "<<std::endl;
-    assert ( state->tail == state->head );
+    while ( state->tail != state->head );
  }
};

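The command queue between a compute rank and its DMA slave is a single-producer/single-consumer ring: the producer advances head, the consumer advances tail, and WaitAll now spins until the two meet instead of asserting. A minimal sketch of that ring discipline (plain std::atomic and busy-waiting; the real queue lives in shared memory between processes):

    #include <atomic>
    #include <cassert>

    const int QSIZE = 64;                     // ring size
    #define PERI_PLUS(p) (((p) + 1) % QSIZE)  // periodic increment, as in the diff

    struct Ring {
      std::atomic<int> head{0}; // next slot the producer will fill
      std::atomic<int> tail{0}; // next slot the consumer will drain
      int slots[QSIZE];

      void push(int cmd) {                    // producer side
        int h = head.load(), next = PERI_PLUS(h);
        while (tail.load() == next) {}        // block until FIFO has space
        slots[h] = cmd;
        head.store(next);
      }
      bool pop(int &cmd) {                    // consumer side
        int t = tail.load();
        if (t == head.load()) return false;   // empty
        cmd = slots[t];
        tail.store(PERI_PLUS(t));
        return true;
      }
      void waitall() {                        // drain, like the new WaitAll
        while (tail.load() != head.load()) {}
      }
    };

    int main() {
      Ring q;
      q.push(42);
      int c;
      bool ok = q.pop(c);
      assert(ok && c == 42);
      q.waitall(); // returns immediately once the ring is drained
      return 0;
    }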
@@ -196,6 +215,12 @@ public:
    //    std::cout << "Waking up DMA "<< slave<<std::endl;
  };

+  static void QueueSendRecv(int slave,void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src)
+  {
+    Slaves[slave].QueueSendRecv(xbuf,rbuf,bytes,xtag,rtag,comm,dest,src);
+    Slaves[slave].WakeUpDMA();
+  }
+
  static void QueueRecv(int slave, void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
    //    std::cout<< " Queueing recv "<< bytes<< " slave "<< slave << " from comm "<<rank <<std::endl;
    Slaves[slave].QueueCommand(COMMAND_IRECV,buf,bytes,tag,comm,rank);
@@ -226,6 +251,28 @@ public:
    return;
  };

+  static void QueueRoundRobinSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
+    uint8_t * cxbuf = (uint8_t *) xbuf;
+    uint8_t * crbuf = (uint8_t *) rbuf;
+    static int rrp=0;
+    int procs = VerticalSize-1;
+    int myoff=0;
+    int mywork=bytes;
+    QueueSendRecv(rrp+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
+    rrp = rrp+1;
+    if ( rrp == (VerticalSize-1) ) rrp = 0;
+  }
+
+  static void QueueMultiplexedSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src) {
+    uint8_t * cxbuf = (uint8_t *) xbuf;
+    uint8_t * crbuf = (uint8_t *) rbuf;
+    int mywork, myoff, procs;
+    procs = VerticalSize-1;
+    for(int s=0;s<procs;s++) {
+      GetWork(bytes,s,mywork,myoff,procs);
+      QueueSendRecv(s+1,&cxbuf[myoff],&crbuf[myoff],mywork,xtag,rtag,comm,dest,src);
+    }
+  };
  static void QueueMultiplexedSend(void *buf, int bytes, int tag, MPI_Comm comm,int rank) {
    uint8_t * cbuf = (uint8_t *) buf;
    int mywork, myoff, procs;
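QueueMultiplexedSendRecv splits one logical transfer across the slaves, giving slave s the byte range computed by GetWork. The exact GetWork convention is Grid-internal; a standalone sketch of the standard even-block partition with remainder handling:

    #include <cassert>
    #include <iostream>

    // Partition nwork bytes over nproc workers: the first (nwork % nproc)
    // workers get one extra byte, so every byte is owned by exactly one worker.
    void GetWorkSketch(int nwork, int me, int &mywork, int &myoff, int nproc) {
      int basework = nwork / nproc;
      int backfill = nwork % nproc;
      mywork = basework + (me < backfill ? 1 : 0);
      myoff  = basework * me + (me < backfill ? me : backfill);
    }

    int main() {
      int total = 0;
      for (int s = 0; s < 3; s++) {
        int w, o;
        GetWorkSketch(10, s, w, o, 3); // 10 bytes over 3 slaves -> 4,3,3
        std::cout << "slave " << s << " off " << o << " work " << w << "\n";
        total += w;
      }
      assert(total == 10); // nothing lost, nothing duplicated
      return 0;
    }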
@@ -275,6 +322,7 @@ std::vector<void *>            MPIoffloadEngine::VerticalShmBufs;
 std::vector<std::vector<int> > MPIoffloadEngine::UniverseRanks;
 std::vector<int>               MPIoffloadEngine::UserCommunicatorToWorldRanks;

+int CartesianCommunicator::NodeCount(void)    { return HorizontalSize;};
 int MPIoffloadEngine::ShmSetup = 0;

 void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
@@ -370,12 +418,22 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
      ftruncate(fd, size);

      VerticalShmBufs[r] = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

      if ( VerticalShmBufs[r] == MAP_FAILED ) {
        perror("failed mmap");
        assert(0);
      }

+      /*
+      for(uint64_t page=0;page<size;page+=4096){
+        void *pages = (void *) ( page + (uint64_t)VerticalShmBufs[r] );
+        int status;
+        int flags=MPOL_MF_MOVE_ALL;
+        int nodes=1; // numa domain == MCDRAM
+        unsigned long count=1;
+        ierr= move_pages(0,count, &pages,&nodes,&status,flags);
+        if (ierr && (page==0)) perror("numa relocate command failed");
+      }
+      */
      uint64_t * check = (uint64_t *) VerticalShmBufs[r];
      check[0] = WorldRank;
      check[1] = r;
@@ -404,7 +462,7 @@ void MPIoffloadEngine::CommunicatorInit (MPI_Comm &communicator_world,
        uint64_t * check = (uint64_t *) VerticalShmBufs[r];
        assert(check[0]== WorldRank);
        assert(check[1]== r);
-        std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
+        //        std::cerr<<"SHM "<<r<<" " <<VerticalShmBufs[r]<<std::endl;
      }
    }
 #endif
@@ -542,6 +600,8 @@ int Slave::Event (void) {
  static int head_last;
  static int start_last;
  int ierr;
+  MPI_Status stat;
+  static int i=0;

  ////////////////////////////////////////////////////
  // Try to advance the start pointers
@@ -550,11 +610,6 @@ int Slave::Event (void) {
  if ( s != state->head ) {
    switch ( state->Descrs[s].command ) {
    case COMMAND_ISEND:
-      /*
-      std::cout<< " Send "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
-               << " to " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
-               << " Comm " << MPIoffloadEngine::communicator_universe<< " me " <<universe_rank<< std::endl;
-      */
      ierr = MPI_Isend((void *)(state->Descrs[s].buf+base),
                       state->Descrs[s].bytes,
                       MPI_CHAR,
@@ -568,11 +623,6 @@ int Slave::Event (void) {
      break;

    case COMMAND_IRECV:
-      /*
-      std::cout<< " Recv "<<s << " ptr "<< state<<" "<< state->Descrs[s].buf<< "["<<state->Descrs[s].bytes<<"]"
-               << " from " << state->Descrs[s].rank<< " tag" << state->Descrs[s].tag
-               << " Comm " << MPIoffloadEngine::communicator_universe<< " me "<< universe_rank<< std::endl;
-      */
      ierr=MPI_Irecv((void *)(state->Descrs[s].buf+base),
                     state->Descrs[s].bytes,
                     MPI_CHAR,
@@ -588,10 +638,32 @@ int Slave::Event (void) {
      return 1;
      break;

+    case COMMAND_SENDRECV:
+
+      //      fprintf(stderr,"Sendrecv ->%d %d : <-%d %d \n",state->Descrs[s].dest, state->Descrs[s].xtag+i*10,state->Descrs[s].src, state->Descrs[s].rtag+i*10);
+
+      ierr=MPI_Sendrecv((void *)(state->Descrs[s].xbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].dest, state->Descrs[s].xtag+i*10,
+                        (void *)(state->Descrs[s].rbuf+base), state->Descrs[s].bytes, MPI_CHAR, state->Descrs[s].src , state->Descrs[s].rtag+i*10,
+                        MPIoffloadEngine::communicator_universe,MPI_STATUS_IGNORE);
+
+      assert(ierr==0);
+
+      //      fprintf(stderr,"Sendrecv done %d %d\n",ierr,i);
+      //      MPI_Barrier(MPIoffloadEngine::HorizontalComm);
+      //      fprintf(stderr,"Barrier\n");
+      i++;
+
+      state->start = PERI_PLUS(s);
+
+      return 1;
+      break;
+
    case COMMAND_WAITALL:

      for(int t=state->tail;t!=s; t=PERI_PLUS(t) ){
-        MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
+        if ( state->Descrs[t].command != COMMAND_SENDRECV ) {
+          MPI_Wait((MPI_Request *)&state->Descrs[t].request,MPI_STATUS_IGNORE);
+        }
      };
      s=PERI_PLUS(s);
      state->start = s;
@@ -613,6 +685,45 @@ int Slave::Event (void) {
 // External interaction with the queue
 //////////////////////////////////////////////////////////////////////////////

+void Slave::QueueSendRecv(void *xbuf, void *rbuf, int bytes, int xtag, int rtag, MPI_Comm comm,int dest,int src)
+{
+  int head =state->head;
+  int next = PERI_PLUS(head);
+
+  // Set up descriptor
+  int worldrank;
+  int hashtag;
+  MPI_Comm communicator;
+  MPI_Request request;
+  uint64_t relative;
+
+  relative = (uint64_t)xbuf - base;
+  state->Descrs[head].xbuf    = relative;
+
+  relative= (uint64_t)rbuf - base;
+  state->Descrs[head].rbuf    = relative;
+
+  state->Descrs[head].bytes   = bytes;
+
+  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,xtag,comm,dest);
+  state->Descrs[head].dest    = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
+  state->Descrs[head].xtag    = hashtag;
+
+  MPIoffloadEngine::MapCommRankToWorldRank(hashtag,worldrank,rtag,comm,src);
+  state->Descrs[head].src     = MPIoffloadEngine::UniverseRanks[worldrank][vertical_rank];
+  state->Descrs[head].rtag    = hashtag;
+
+  state->Descrs[head].command = COMMAND_SENDRECV;
+
+  // Block until FIFO has space
+  while( state->tail==next );
+
+  // Msync on weak order architectures
+
+  // Advance pointer
+  state->head = next;
+
+};
 uint64_t Slave::QueueCommand(int command,void *buf, int bytes, int tag, MPI_Comm comm,int commrank)
 {
  /////////////////////////////////////////
@@ -812,19 +923,22 @@ void CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
  assert( (recv_i >= shm) && (recv_i+bytes <= shm+MAX_MPI_SHM_BYTES) );
  assert(from!=_processor);
  assert(dest!=_processor);
-  MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
-  MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
-}

+  MPIoffloadEngine::QueueMultiplexedSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);
+
+  //MPIoffloadEngine::QueueRoundRobinSendRecv(xmit,recv,bytes,_processor,from,communicator,dest,from);
+
+  //MPIoffloadEngine::QueueMultiplexedSend(xmit,bytes,_processor,communicator,dest);
+  //MPIoffloadEngine::QueueMultiplexedRecv(recv,bytes,from,communicator,from);
+}

 void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
  MPIoffloadEngine::WaitAll();
+  //this->Barrier();
 }

-void CartesianCommunicator::StencilBarrier(void)
-{
-}
+void CartesianCommunicator::StencilBarrier(void) { }

 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {

@@ -25,7 +25,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include "Grid.h"
+#include <Grid/GridCore.h>
+
 namespace Grid {

 ///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -87,6 +88,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
 {
  assert(0);
 }
+
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
  assert(0);
@@ -97,7 +99,7 @@ void CartesianCommunicator::Barrier(void){}
 void CartesianCommunicator::Broadcast(int root,void* data, int bytes) {}
 void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes) { }
 int  CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor) {  return 0;}
-void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){  coor = _processor_coor ;}
+void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor){  coor = _processor_coor; }
 void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
 {
  source =0;
@@ -25,8 +25,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include "Grid.h"
+#include <Grid/Grid.h>
 #include <mpp/shmem.h>
+#include <array>

 namespace Grid {

@@ -51,7 +52,7 @@ typedef struct HandShake_t {
 } HandShake;

 std::array<long,_SHMEM_REDUCE_SYNC_SIZE> make_psync_init(void) {
-  array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
+  std::array<long,_SHMEM_REDUCE_SYNC_SIZE> ret;
  ret.fill(SHMEM_SYNC_VALUE);
  return ret;
 }
@@ -109,7 +110,7 @@ void CartesianCommunicator::GlobalSum(uint32_t &u){

  source = u;
  dest   = 0;
-  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
  shmem_barrier_all(); // necessary?
  u = dest;
 }
@@ -125,7 +126,7 @@ void CartesianCommunicator::GlobalSum(uint64_t &u){

  source = u;
  dest   = 0;
-  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_longlong_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
  shmem_barrier_all(); // necessary?
  u = dest;
 }
@@ -137,7 +138,8 @@ void CartesianCommunicator::GlobalSum(float &f){

  source = f;
  dest   =0.0;
-  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+  shmem_barrier_all();
  f = dest;
 }
 void CartesianCommunicator::GlobalSumVector(float *f,int N)
@@ -148,14 +150,16 @@ void CartesianCommunicator::GlobalSumVector(float *f,int N)
  static std::array<long,_SHMEM_REDUCE_SYNC_SIZE> psync =  psync_init;

  if ( shmem_addr_accessible(f,_processor) ){
-    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync);
+    shmem_float_sum_to_all(f,f,N,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
    return;
  }

  for(int i=0;i<N;i++){
    dest   =0.0;
    source = f[i];
-    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+    shmem_float_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
    f[i] = dest;
  }
 }
@@ -168,7 +172,8 @@ void CartesianCommunicator::GlobalSum(double &d)

  source = d;
  dest   = 0;
-  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+  shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+  shmem_barrier_all();
  d = dest;
 }
 void CartesianCommunicator::GlobalSumVector(double *d,int N)
@@ -180,14 +185,16 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)


  if ( shmem_addr_accessible(d,_processor) ){
-    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync);
+    shmem_double_sum_to_all(d,d,N,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
    return;
  }

  for(int i=0;i<N;i++){
    source = d[i];
    dest   =0.0;
-    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync);
+    shmem_double_sum_to_all(&dest,&source,1,0,0,_Nprocessors,llwrk,psync.data());
+    shmem_barrier_all();
    d[i] = dest;
  }
 }
@@ -282,11 +289,13 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
  SHMEM_VET(recv);
  //  shmem_putmem_nb(recv,xmit,bytes,dest,NULL);
  shmem_putmem(recv,xmit,bytes,dest);

+  if ( CommunicatorPolicy == CommunicatorPolicySequential ) shmem_barrier_all();
 }
 void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
 {
  //  shmem_quiet();      // I'm done
-  shmem_barrier_all();// He's done too
+  if( CommunicatorPolicy == CommunicatorPolicyConcurrent ) shmem_barrier_all();// He's done too
 }
 void CartesianCommunicator::Barrier(void)
 {
@@ -301,13 +310,13 @@ void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
  int words = bytes/4;

  if ( shmem_addr_accessible(data,_processor) ){
-    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32(data,data,words,root,0,0,shmem_n_pes(),psync.data());
    return;
  }

  for(int w=0;w<words;w++){
    word = array[w];
-    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
    if ( shmem_my_pe() != root ) {
      array[w] = word;
    }
@@ -325,13 +334,17 @@ void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)

  for(int w=0;w<words;w++){
    word = array[w];
-    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync);
+    shmem_broadcast32((void *)&word,(void *)&word,1,root,0,0,shmem_n_pes(),psync.data());
    if ( shmem_my_pe() != root ) {
      array[w]= word;
    }
    shmem_barrier_all();
  }
 }

+int CartesianCommunicator::RankWorld(void){
+  return shmem_my_pe();
+}

 }

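The SHMEM collectives require a pSync work array initialized to SHMEM_SYNC_VALUE; this patch standardizes on a std::array template filled once and handed to the API via .data(). The pattern in isolation (the size and sync-value constants here are stand-ins; real code uses _SHMEM_REDUCE_SYNC_SIZE and SHMEM_SYNC_VALUE from shmem.h):

    #include <array>

    const int REDUCE_SYNC_SIZE = 256;   // stand-in for _SHMEM_REDUCE_SYNC_SIZE
    const long SYNC_VALUE = 0;          // stand-in for SHMEM_SYNC_VALUE

    std::array<long, REDUCE_SYNC_SIZE> make_psync_init(void) {
      std::array<long, REDUCE_SYNC_SIZE> ret;
      ret.fill(SYNC_VALUE);             // every element must start at SYNC_VALUE
      return ret;
    }

    static std::array<long, REDUCE_SYNC_SIZE> psync_init = make_psync_init();

    int main() {
      // Each call site copies the initialized template and passes the raw
      // pointer to the SHMEM API, e.g. shmem_*_sum_to_all(..., psync.data()).
      static std::array<long, REDUCE_SYNC_SIZE> psync = psync_init;
      return psync.data() != nullptr ? 0 : 1;
    }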

@@ -1,5 +1,4 @@
-/*************************************************************************************
 /*************************************************************************************

     Grid physics library, www.github.com/paboyle/Grid

@@ -53,15 +52,13 @@ Gather_plane_simple (const Lattice<vobj> &rhs,commVector<cobj> &buffer,int dimen
    cbmask = 0x3;
  }

-  int so  = plane*rhs._grid->_ostride[dimension]; // base offset for start of plane
+  int so=plane*rhs._grid->_ostride[dimension]; // base offset for start of plane

  int e1=rhs._grid->_slice_nblock[dimension];
  int e2=rhs._grid->_slice_block[dimension];

  int stride=rhs._grid->_slice_stride[dimension];
  if ( cbmask == 0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o  = n*stride;
        int bo = n*e2;
@@ -74,14 +71,13 @@ PARALLEL_NESTED_LOOP2
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o  = n*stride;
-        int ocb=1<<rhs._grid->CheckerBoardFromOindexTable(o+b);
+        int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
        if ( ocb &cbmask ) {
          table.push_back(std::pair<int,int> (bo++,o+b));
        }
      }
    }
-PARALLEL_FOR_LOOP
-    for(int i=0;i<table.size();i++){
+    parallel_for(int i=0;i<table.size();i++){
      buffer[off+table[i].first]=compress(rhs._odata[so+table[i].second]);
    }
  }
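In the checkerboarded branch the gather runs in two phases: a serial pass builds a (destination, source) index table, then a parallel pass streams through it, keeping the hot loop branch-free. A minimal standalone version of the same two-phase pattern, with plain std::vector and a toy even/odd predicate in place of Grid's lattice types:

    #include <cassert>
    #include <utility>
    #include <vector>

    // Phase 1 (serial): record which elements pass the mask and where they go.
    // Phase 2 (parallelisable): branch-free copy through the table.
    std::vector<int> GatherMasked(const std::vector<int> &src, int cbmask) {
      std::vector<std::pair<int, int>> table; // (dest slot, source index)
      int bo = 0;
      for (int i = 0; i < (int)src.size(); i++) {
        int ocb = 1 << (i & 1);               // toy checkerboard of site i
        if (ocb & cbmask) table.push_back({bo++, i});
      }
      std::vector<int> buffer(bo);
      for (int i = 0; i < (int)table.size(); i++) {  // parallel_for in Grid
        buffer[table[i].first] = src[table[i].second];
      }
      return buffer;
    }

    int main() {
      std::vector<int> v = {10, 11, 12, 13, 14, 15};
      std::vector<int> odd = GatherMasked(v, 0x2);   // picks odd sites
      assert(odd.size() == 3 && odd[0] == 11 && odd[2] == 15);
      return 0;
    }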
@@ -105,29 +101,30 @@ Gather_plane_extract(const Lattice<vobj> &rhs,std::vector<typename cobj::scalar_
  int e1=rhs._grid->_slice_nblock[dimension];
  int e2=rhs._grid->_slice_block[dimension];
  int n1=rhs._grid->_slice_stride[dimension];
-  int n2=rhs._grid->_slice_block[dimension];
  if ( cbmask ==0x3){
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){

        int o      = n*n1;
-        int offset = b+n*n2;
+        int offset = b+n*e2;

        cobj temp =compress(rhs._odata[so+o+b]);
        extract<cobj>(temp,pointers,offset);

      }
    }
  } else {

-    assert(0); //Fixme think this is buggy
-    for(int n=0;n<e1;n++){
+    // Case of SIMD split AND checker dim cannot currently be hit, except in
+    // Test_cshift_red_black code.
+    std::cout << " Dense packed buffer WARNING " <<std::endl;
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
-        int o=n*rhs._grid->_slice_stride[dimension];
+        int o=n*n1;
        int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);
-        int offset = b+n*rhs._grid->_slice_block[dimension];
+        int offset = b+n*e2;

        if ( ocb & cbmask ) {
          cobj temp =compress(rhs._odata[so+o+b]);
@@ -171,10 +168,10 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,commVector<vo

  int e1=rhs._grid->_slice_nblock[dimension];
  int e2=rhs._grid->_slice_block[dimension];
+  int stride=rhs._grid->_slice_stride[dimension];

  if ( cbmask ==0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o   =n*rhs._grid->_slice_stride[dimension];
        int bo  =n*rhs._grid->_slice_block[dimension];
@@ -182,17 +179,21 @@ PARALLEL_NESTED_LOOP2
      }
    }
  } else {
+    std::vector<std::pair<int,int> > table;
    int bo=0;
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o   =n*rhs._grid->_slice_stride[dimension];
-        int bo  =n*rhs._grid->_slice_block[dimension];
        int ocb=1<<rhs._grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
        if ( ocb & cbmask ) {
-          rhs._odata[so+o+b]=buffer[bo++];
+          table.push_back(std::pair<int,int> (so+o+b,bo++));
        }
      }
    }
+    parallel_for(int i=0;i<table.size();i++){
+      //      std::cout << "Rcv"<< table[i].first << " " << table[i].second << " " <<buffer[table[i].second]<<std::endl;
+      rhs._odata[table[i].first]=buffer[table[i].second];
+    }
  }
 }

@@ -213,8 +214,7 @@ PARALLEL_NESTED_LOOP2
  int e2=rhs._grid->_slice_block[dimension];

  if(cbmask ==0x3 ) {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o      = n*rhs._grid->_slice_stride[dimension];
        int offset = b+n*rhs._grid->_slice_block[dimension];
@@ -222,7 +222,11 @@ PARALLEL_NESTED_LOOP2
      }
    }
  } else {
-    assert(0); // think this is buggy FIXME
+    // Case of SIMD split AND checker dim cannot currently be hit, except in
+    // Test_cshift_red_black code.
+    //    std::cout << "Scatter_plane merge assert(0); think this is buggy FIXME "<< std::endl;// think this is buggy FIXME
+    std::cout<<" Unthreaded warning -- buffer is not densely packed ??"<<std::endl;
    for(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){
        int o      = n*rhs._grid->_slice_stride[dimension];
@@ -254,8 +258,7 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
  int e2=rhs._grid->_slice_block[dimension];
  int stride = rhs._grid->_slice_stride[dimension];
  if(cbmask == 0x3 ){
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){

        int o =n*stride+b;
@@ -264,8 +267,7 @@ PARALLEL_NESTED_LOOP2
      }
    }
  } else {
-PARALLEL_NESTED_LOOP2
-    for(int n=0;n<e1;n++){
+    parallel_for_nest2(int n=0;n<e1;n++){
      for(int b=0;b<e2;b++){

        int o =n*stride+b;
@@ -295,8 +297,8 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
  int e1=rhs._grid->_slice_nblock[dimension];
  int e2=rhs._grid->_slice_block [dimension];
  int stride = rhs._grid->_slice_stride[dimension];
-PARALLEL_NESTED_LOOP2
-  for(int n=0;n<e1;n++){
+  parallel_for_nest2(int n=0;n<e1;n++){
    for(int b=0;b<e2;b++){

      int o  =n*stride;
@@ -338,8 +340,8 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
  // Map to always positive shift modulo global full dimension.
  shift = (shift+fd)%fd;

-  ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
  // the permute type
+  ret.checkerboard = grid->CheckerBoardDestination(rhs.checkerboard,shift,dimension);
  int permute_dim =grid->PermuteDim(dimension);
  int permute_type=grid->PermuteType(dimension);
  int permute_type_dist;
@@ -348,7 +350,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice

    int o   = 0;
    int bo  = x * grid->_ostride[dimension];
-
    int cb= (cbmask==0x2)? Odd : Even;

    int sshift = grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,cb);
@@ -361,9 +362,23 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
  // wrap is whether sshift > rd.
  //  num is sshift mod rd.
  //
+  //  shift 7
+  //
+  //  XoXo YcYc
+  //  oXoX cYcY
+  //  XoXo YcYc
+  //  oXoX cYcY
+  //
+  //  sshift --
+  //
+  //  XX YY ; 3
+  //  XX YY ; 0
+  //  XX YY ; 3
+  //  XX YY ; 0
+  //
  int permute_slice=0;
  if(permute_dim){
-    int wrap = sshift/rd;
+    int wrap = sshift/rd; wrap=wrap % ly;
    int num = sshift%rd;

    if ( x< rd-num ) permute_slice=wrap;
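The added reduction wrap = wrap % ly matters once sshift wraps the reduced dimension more than once: only the wrap count modulo the simd-lane layout ly decides whether a slice needs a permute, which is what the XX/YY diagram above is counting. A tiny numeric check of the rule (rd, ly and the else branch follow the pattern of the surrounding code; values are hypothetical):

    #include <iostream>

    // For a reduced dimension of rd slices split over ly simd lanes, decide
    // whether local slice x must be permuted for a given sub-shift sshift.
    int PermuteSlice(int sshift, int x, int rd, int ly) {
      int wrap = (sshift / rd) % ly; // the patch adds the "% ly" reduction
      int num  = sshift % rd;
      if (x < rd - num) return wrap;
      else              return (wrap + 1) % ly;
    }

    int main() {
      int rd = 2, ly = 2;
      for (int sshift = 0; sshift < 8; sshift++) // covers the "shift 7" case
        std::cout << "sshift " << sshift
                  << " slice0 " << PermuteSlice(sshift, 0, rd, ly)
                  << " slice1 " << PermuteSlice(sshift, 1, rd, ly) << "\n";
      return 0;
    }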
@@ -375,7 +390,6 @@ template<class vobj> Lattice<vobj> Cshift_local(Lattice<vobj> &ret,const Lattice
    } else {
      permute_type_dist = permute_type;
    }
-
  }

  if ( permute_slice ) Copy_plane_permute(ret,rhs,dimension,x,sx,cbmask,permute_type_dist);
@@ -74,7 +74,6 @@ template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &r
  sshift[1] = rhs._grid->CheckerBoardShiftForCB(rhs.checkerboard,dimension,shift,Odd);
-
  //  std::cout << "Cshift_comms dim "<<dimension<<"cb "<<rhs.checkerboard<<"shift "<<shift<<" sshift " << sshift[0]<<" "<<sshift[1]<<std::endl;

  if ( sshift[0] == sshift[1] ) {
    //    std::cout << "Single pass Cshift_comms" <<std::endl;
    Cshift_comms(ret,rhs,dimension,shift,0x3);
@@ -154,10 +153,14 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
                         (void *)&recv_buf[0],
                         recv_from_rank,
                         bytes);
-      //      for(int i=0;i<words;i++){
-      //        std::cout << "SendRecv ["<<i<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
-      //      }
+      grid->Barrier();
+      /*
+      for(int i=0;i<send_buf.size();i++){
+        assert(recv_buf.size()==buffer_size);
+        assert(send_buf.size()==buffer_size);
+        std::cout << "SendRecv_Cshift_comms ["<<i<<" "<< dimension<<"] snd "<<send_buf[i]<<" rcv " << recv_buf[i] << "  0x" << cbmask<<std::endl;
+      }
+      */
      Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
    }
  }
@@ -243,7 +246,14 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
                               (void *)&recv_buf_extract[i][0],
                               recv_from_rank,
                               bytes);
+        /*
+        for(int w=0;w<recv_buf_extract[i].size();w++){
+          assert(recv_buf_extract[i].size()==buffer_size);
+          assert(send_buf_extract[i].size()==buffer_size);
+          std::cout << "SendRecv_Cshift_comms ["<<w<<" "<< dimension<<"] recv "<<recv_buf_extract[i][w]<<" send " << send_buf_extract[nbr_lane][w] << cbmask<<std::endl;
+        }
+        */
+        grid->Barrier();
        rpointers[i] = &recv_buf_extract[i][0];
      } else {
        rpointers[i] = &send_buf_extract[nbr_lane][0];
@@ -39,8 +39,7 @@ namespace Grid {
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,rhs);
    conformable(lhs,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      mult(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,rhs);
    conformable(lhs,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      mac(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -73,8 +71,7 @@ PARALLEL_FOR_LOOP
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,rhs);
    conformable(lhs,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      sub(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -89,8 +86,7 @@ PARALLEL_FOR_LOOP
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,rhs);
    conformable(lhs,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      add(&tmp,&lhs._odata[ss],&rhs._odata[ss]);
@@ -108,8 +104,7 @@ PARALLEL_FOR_LOOP
  void mult(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
    ret.checkerboard = lhs.checkerboard;
    conformable(lhs,ret);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
      obj1 tmp;
      mult(&tmp,&lhs._odata[ss],&rhs);
      vstream(ret._odata[ss],tmp);
@@ -120,8 +115,7 @@ PARALLEL_FOR_LOOP
  void mac(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,lhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
      obj1 tmp;
      mac(&tmp,&lhs._odata[ss],&rhs);
      vstream(ret._odata[ss],tmp);
@@ -132,8 +126,7 @@ PARALLEL_FOR_LOOP
  void sub(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
    ret.checkerboard = lhs.checkerboard;
    conformable(ret,lhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      sub(&tmp,&lhs._odata[ss],&rhs);
@@ -147,8 +140,7 @@ PARALLEL_FOR_LOOP
  void add(Lattice<obj1> &ret,const Lattice<obj2> &lhs,const obj3 &rhs){
    ret.checkerboard = lhs.checkerboard;
    conformable(lhs,ret);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
 #ifdef STREAMING_STORES
      obj1 tmp;
      add(&tmp,&lhs._odata[ss],&rhs);
@@ -166,8 +158,7 @@ PARALLEL_FOR_LOOP
  void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
void mult(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
||||||
ret.checkerboard = rhs.checkerboard;
|
ret.checkerboard = rhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<rhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mult(&tmp,&lhs,&rhs._odata[ss]);
|
mult(&tmp,&lhs,&rhs._odata[ss]);
|
||||||
@ -182,8 +173,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
void mac(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
||||||
ret.checkerboard = rhs.checkerboard;
|
ret.checkerboard = rhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<rhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
mac(&tmp,&lhs,&rhs._odata[ss]);
|
mac(&tmp,&lhs,&rhs._odata[ss]);
|
||||||
@ -198,8 +188,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
void sub(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
||||||
ret.checkerboard = rhs.checkerboard;
|
ret.checkerboard = rhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<rhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
sub(&tmp,&lhs,&rhs._odata[ss]);
|
sub(&tmp,&lhs,&rhs._odata[ss]);
|
||||||
@ -213,8 +202,7 @@ PARALLEL_FOR_LOOP
|
|||||||
void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
void add(Lattice<obj1> &ret,const obj2 &lhs,const Lattice<obj3> &rhs){
|
||||||
ret.checkerboard = rhs.checkerboard;
|
ret.checkerboard = rhs.checkerboard;
|
||||||
conformable(ret,rhs);
|
conformable(ret,rhs);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<rhs._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
obj1 tmp;
|
obj1 tmp;
|
||||||
add(&tmp,&lhs,&rhs._odata[ss]);
|
add(&tmp,&lhs,&rhs._odata[ss]);
|
||||||
@ -230,8 +218,7 @@ PARALLEL_FOR_LOOP
|
|||||||
ret.checkerboard = x.checkerboard;
|
ret.checkerboard = x.checkerboard;
|
||||||
conformable(ret,x);
|
conformable(ret,x);
|
||||||
conformable(x,y);
|
conformable(x,y);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<x._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<x._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
vobj tmp = a*x._odata[ss]+y._odata[ss];
|
vobj tmp = a*x._odata[ss]+y._odata[ss];
|
||||||
vstream(ret._odata[ss],tmp);
|
vstream(ret._odata[ss],tmp);
|
||||||
@ -245,8 +232,7 @@ PARALLEL_FOR_LOOP
|
|||||||
ret.checkerboard = x.checkerboard;
|
ret.checkerboard = x.checkerboard;
|
||||||
conformable(ret,x);
|
conformable(ret,x);
|
||||||
conformable(x,y);
|
conformable(x,y);
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int ss=0;ss<x._grid->oSites();ss++){
|
||||||
for(int ss=0;ss<x._grid->oSites();ss++){
|
|
||||||
#ifdef STREAMING_STORES
|
#ifdef STREAMING_STORES
|
||||||
vobj tmp = a*x._odata[ss]+b*y._odata[ss];
|
vobj tmp = a*x._odata[ss]+b*y._odata[ss];
|
||||||
vstream(ret._odata[ss],tmp);
|
vstream(ret._odata[ss],tmp);
|
||||||
|
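Every hunk in this file makes the same substitution: the two-line `PARALLEL_FOR_LOOP` pragma plus `for` becomes a single `parallel_for` statement. A plausible reconstruction of such a macro over OpenMP (the actual definition lives in Grid's Threads.h and may differ in the pragma details):

    #include <cstdio>

    // Hypothetical reconstruction: parallel_for expands to the OpenMP pragma
    // followed by the `for` keyword, so call sites read as one statement.
    #ifdef _OPENMP
    #define PARALLEL_FOR_LOOP _Pragma("omp parallel for schedule(static)")
    #else
    #define PARALLEL_FOR_LOOP
    #endif
    #define parallel_for PARALLEL_FOR_LOOP for

    int main(void) {
      static double out[1024];
      parallel_for(int ss = 0; ss < 1024; ss++) {  // same shape as the call sites above
        out[ss] = 2.0 * ss;
      }
      std::printf("out[7]=%g\n", out[7]);
      return 0;
    }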
@@ -121,8 +121,7 @@ public:
       assert( (cb==Odd) || (cb==Even));
       checkerboard=cb;

-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
       vobj tmp = eval(ss,expr);
       vstream(_odata[ss] ,tmp);
@@ -144,8 +143,7 @@ PARALLEL_FOR_LOOP
       assert( (cb==Odd) || (cb==Even));
       checkerboard=cb;

-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
       vobj tmp = eval(ss,expr);
       vstream(_odata[ss] ,tmp);
@@ -167,8 +165,7 @@ PARALLEL_FOR_LOOP
       assert( (cb==Odd) || (cb==Even));
       checkerboard=cb;

-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
       //vobj tmp = eval(ss,expr);
       vstream(_odata[ss] ,eval(ss,expr));
@@ -191,8 +188,7 @@ PARALLEL_FOR_LOOP
       checkerboard=cb;

       _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
       vobj tmp = eval(ss,expr);
       vstream(_odata[ss] ,tmp);
@@ -213,8 +209,7 @@ PARALLEL_FOR_LOOP
       checkerboard=cb;

       _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
 #ifdef STREAMING_STORES
       vobj tmp = eval(ss,expr);
       vstream(_odata[ss] ,tmp);
@@ -235,8 +230,7 @@ PARALLEL_FOR_LOOP
       checkerboard=cb;

       _odata.resize(_grid->oSites());
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
       vstream(_odata[ss] ,eval(ss,expr));
     }
   };
@@ -258,8 +252,7 @@ PARALLEL_FOR_LOOP
     _grid = r._grid;
     checkerboard = r.checkerboard;
     _odata.resize(_grid->oSites());// essential
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
       _odata[ss]=r._odata[ss];
     }
   }
@@ -269,8 +262,7 @@ PARALLEL_FOR_LOOP
     virtual ~Lattice(void) = default;

     template<class sobj> strong_inline Lattice<vobj> & operator = (const sobj & r){
-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<_grid->oSites();ss++){
+      parallel_for(int ss=0;ss<_grid->oSites();ss++){
         this->_odata[ss]=r;
       }
       return *this;
@@ -279,8 +271,7 @@ PARALLEL_FOR_LOOP
     this->checkerboard = r.checkerboard;
     conformable(*this,r);

-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<_grid->oSites();ss++){
+    parallel_for(int ss=0;ss<_grid->oSites();ss++){
       this->_odata[ss]=r._odata[ss];
     }
     return *this;
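The `STREAMING_STORES` branches above build each site result in a temporary and hand it to `vstream`, a non-temporal store path for write-once data. A standalone sketch of the underlying idea with SSE2 intrinsics (hypothetical helper names, not Grid's actual `vstream`):

    #include <immintrin.h>

    // Sketch: compute the result in registers, then write it with a
    // non-temporal store so write-once output does not displace cache lines.
    // Assumes `out` is 16-byte aligned and n is even (illustration only).
    inline void stream_store(double *dst, __m128d value) {
      _mm_stream_pd(dst, value);
    }

    void scaled_copy(double *out, const double *in, long n, double a) {
      __m128d va = _mm_set1_pd(a);
      for (long i = 0; i + 2 <= n; i += 2) {
        __m128d tmp = _mm_mul_pd(va, _mm_loadu_pd(in + i)); // register temporary
        stream_store(out + i, tmp);                         // stream it out, bypassing cache
      }
      _mm_sfence(); // make the streaming stores globally visible
    }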
@@ -45,90 +45,87 @@ namespace Grid {
   //////////////////////////////////////////////////////////////////////////
   template<class vfunctor,class lobj,class robj>
   inline Lattice<vInteger> LLComparison(vfunctor op,const Lattice<lobj> &lhs,const Lattice<robj> &rhs)
   {
     Lattice<vInteger> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       ret._odata[ss]=op(lhs._odata[ss],rhs._odata[ss]);
     }
     return ret;
   }
   //////////////////////////////////////////////////////////////////////////
   // compare lattice to scalar
   //////////////////////////////////////////////////////////////////////////
   template<class vfunctor,class lobj,class robj>
   inline Lattice<vInteger> LSComparison(vfunctor op,const Lattice<lobj> &lhs,const robj &rhs)
   {
     Lattice<vInteger> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
       ret._odata[ss]=op(lhs._odata[ss],rhs);
     }
     return ret;
   }
   //////////////////////////////////////////////////////////////////////////
   // compare scalar to lattice
   //////////////////////////////////////////////////////////////////////////
   template<class vfunctor,class lobj,class robj>
   inline Lattice<vInteger> SLComparison(vfunctor op,const lobj &lhs,const Lattice<robj> &rhs)
   {
     Lattice<vInteger> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       ret._odata[ss]=op(lhs,rhs._odata[ss]); // scalar lhs against each site of rhs
     }
     return ret;
   }

   //////////////////////////////////////////////////////////////////////////
   // Map to functors
   //////////////////////////////////////////////////////////////////////////
   // Less than
   template<class lobj,class robj>
   inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(vlt<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator < (const Lattice<lobj> & lhs, const robj & rhs) {
     return LSComparison(vlt<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator < (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(vlt<lobj,robj>(),lhs,rhs);
   }

   // Less than equal
   template<class lobj,class robj>
   inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(vle<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator <= (const Lattice<lobj> & lhs, const robj & rhs) {
     return LSComparison(vle<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator <= (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(vle<lobj,robj>(),lhs,rhs);
   }

   // Greater than
   template<class lobj,class robj>
   inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(vgt<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator > (const Lattice<lobj> & lhs, const robj & rhs) {
     return LSComparison(vgt<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator > (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(vgt<lobj,robj>(),lhs,rhs);
   }

   // Greater than equal
   template<class lobj,class robj>
   inline Lattice<vInteger> operator >= (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(vge<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
@@ -136,38 +133,37 @@ PARALLEL_FOR_LOOP
     return LSComparison(vge<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator >= (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(vge<lobj,robj>(),lhs,rhs);
   }

   // equal
   template<class lobj,class robj>
   inline Lattice<vInteger> operator == (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(veq<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator == (const Lattice<lobj> & lhs, const robj & rhs) {
     return LSComparison(veq<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator == (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(veq<lobj,robj>(),lhs,rhs);
   }

   // not equal
   template<class lobj,class robj>
   inline Lattice<vInteger> operator != (const Lattice<lobj> & lhs, const Lattice<robj> & rhs) {
     return LLComparison(vne<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator != (const Lattice<lobj> & lhs, const robj & rhs) {
     return LSComparison(vne<lobj,robj>(),lhs,rhs);
   }
   template<class lobj,class robj>
   inline Lattice<vInteger> operator != (const lobj & lhs, const Lattice<robj> & rhs) {
     return SLComparison(vne<lobj,robj>(),lhs,rhs);
   }

 }
 #endif
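The comparison functors above return a per-site `Lattice<vInteger>` mask rather than a single boolean. A hedged usage sketch, assuming a Grid build with an initialized grid and RNG (the typedef names come from Grid's QCD layer):

    #include <Grid/Grid.h>
    using namespace Grid;
    using namespace Grid::QCD;

    // Sketch only: build a per-site mask from operator>, one of the
    // functor-mapped comparisons declared above.
    void mask_example(GridCartesian *grid, GridParallelRNG &rng) {
      LatticeReal r(grid);
      random(rng, r);                    // uniform value on each site
      LatticeInteger mask = (r > 0.5);   // per-site vInteger mask
      // mask can then drive predicated updates, e.g. where(mask, a, b).
    }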
@@ -34,47 +34,42 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>

 namespace Grid {

   /////////////////////////////////////////////////////
   // Non site, reduced locally reduced routines
   /////////////////////////////////////////////////////

   // localNorm2,
   template<class vobj>
   inline auto localNorm2 (const Lattice<vobj> &rhs)-> Lattice<typename vobj::tensor_reduced>
   {
     Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       ret._odata[ss]=innerProduct(rhs._odata[ss],rhs._odata[ss]);
     }
     return ret;
   }

   // localInnerProduct
   template<class vobj>
   inline auto localInnerProduct (const Lattice<vobj> &lhs,const Lattice<vobj> &rhs) -> Lattice<typename vobj::tensor_reduced>
   {
     Lattice<typename vobj::tensor_reduced> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       ret._odata[ss]=innerProduct(lhs._odata[ss],rhs._odata[ss]);
     }
     return ret;
   }

   // outerProduct Scalar x Scalar -> Scalar
   //              Vector x Vector -> Matrix
   template<class ll,class rr>
   inline auto outerProduct (const Lattice<ll> &lhs,const Lattice<rr> &rhs) -> Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))>
   {
     Lattice<decltype(outerProduct(lhs._odata[0],rhs._odata[0]))> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       ret._odata[ss]=outerProduct(lhs._odata[ss],rhs._odata[ss]);
     }
     return ret;
   }

 }

 #endif
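`localNorm2` and `localInnerProduct` reduce within each site only; the result is still a lattice. A hedged sketch contrasting this with a global reduction, again assuming an initialized Grid environment:

    #include <Grid/Grid.h>
    using namespace Grid;
    using namespace Grid::QCD;

    // Sketch only: site-local versus global reductions.
    void local_vs_global(GridCartesian *grid, GridParallelRNG &rng) {
      LatticeColourMatrix U(grid);
      random(rng, U);
      // Per-site |U|^2: still a lattice, one scalar per site.
      auto persite = localNorm2(U);
      // Global reduction over all sites and all ranks: a single number.
      RealD total = norm2(U);
      std::cout << GridLogMessage << "norm2 = " << total << std::endl;
    }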
@@ -37,8 +37,7 @@ namespace Grid {
   inline Lattice<vobj> operator -(const Lattice<vobj> &r)
   {
     Lattice<vobj> ret(r._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<r._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<r._grid->oSites();ss++){
       vstream(ret._odata[ss], -r._odata[ss]);
     }
     return ret;
@@ -74,8 +73,7 @@ PARALLEL_FOR_LOOP
   inline auto operator * (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs*rhs._odata[0])>
   {
     Lattice<decltype(lhs*rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       decltype(lhs*rhs._odata[0]) tmp=lhs*rhs._odata[ss];
       vstream(ret._odata[ss],tmp);
       //      ret._odata[ss]=lhs*rhs._odata[ss];
@@ -86,8 +84,7 @@ PARALLEL_FOR_LOOP
   inline auto operator + (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs+rhs._odata[0])>
   {
     Lattice<decltype(lhs+rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       decltype(lhs+rhs._odata[0]) tmp =lhs+rhs._odata[ss];
       vstream(ret._odata[ss],tmp);
       //      ret._odata[ss]=lhs+rhs._odata[ss];
@@ -98,11 +95,9 @@ PARALLEL_FOR_LOOP
   inline auto operator - (const left &lhs,const Lattice<right> &rhs) -> Lattice<decltype(lhs-rhs._odata[0])>
   {
     Lattice<decltype(lhs-rhs._odata[0])> ret(rhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites(); ss++){
       decltype(lhs-rhs._odata[0]) tmp=lhs-rhs._odata[ss];
       vstream(ret._odata[ss],tmp);
-      //      ret._odata[ss]=lhs-rhs._odata[ss];
     }
     return ret;
   }
@@ -110,8 +105,7 @@ PARALLEL_FOR_LOOP
   inline auto operator * (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]*rhs)>
   {
     Lattice<decltype(lhs._odata[0]*rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
       decltype(lhs._odata[0]*rhs) tmp =lhs._odata[ss]*rhs;
       vstream(ret._odata[ss],tmp);
       //      ret._odata[ss]=lhs._odata[ss]*rhs;
@@ -122,8 +116,7 @@ PARALLEL_FOR_LOOP
   inline auto operator + (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]+rhs)>
   {
     Lattice<decltype(lhs._odata[0]+rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
       decltype(lhs._odata[0]+rhs) tmp=lhs._odata[ss]+rhs;
       vstream(ret._odata[ss],tmp);
       //      ret._odata[ss]=lhs._odata[ss]+rhs;
@@ -134,15 +127,12 @@ PARALLEL_FOR_LOOP
   inline auto operator - (const Lattice<left> &lhs,const right &rhs) -> Lattice<decltype(lhs._odata[0]-rhs)>
   {
     Lattice<decltype(lhs._odata[0]-rhs)> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites(); ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites(); ss++){
       decltype(lhs._odata[0]-rhs) tmp=lhs._odata[ss]-rhs;
       vstream(ret._odata[ss],tmp);
       //      ret._odata[ss]=lhs._odata[ss]-rhs;
     }
     return ret;
   }

 }
 #endif
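These overloads let a scalar stand on either side of a lattice operand, with `vstream` on the store. A hedged usage sketch (it assumes Grid's tensor arithmetic accepts the mixed scalar operand, which holds for the complex singlet types):

    #include <Grid/Grid.h>
    using namespace Grid;
    using namespace Grid::QCD;

    // Sketch only: scalar-lattice arithmetic through the overloads above.
    void overload_example(GridCartesian *grid, GridParallelRNG &rng) {
      LatticeComplex a(grid), b(grid);
      random(rng, a);
      ComplexD two(2.0, 0.0);
      b = two * a;   // scalar * lattice
      b = a + two;   // lattice + scalar
      b = -a;        // unary minus, streamed through vstream
    }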
@@ -44,22 +44,20 @@ namespace Grid {
   {
     Lattice<decltype(peekIndex<Index>(lhs._odata[0],i))> ret(lhs._grid);
     ret.checkerboard=lhs.checkerboard;
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i);
     }
     return ret;
   };
   template<int Index,class vobj>
   auto PeekIndex(const Lattice<vobj> &lhs,int i,int j) -> Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))>
   {
     Lattice<decltype(peekIndex<Index>(lhs._odata[0],i,j))> ret(lhs._grid);
     ret.checkerboard=lhs.checkerboard;
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = peekIndex<Index>(lhs._odata[ss],i,j);
     }
     return ret;
   };

   ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -68,25 +66,23 @@ PARALLEL_FOR_LOOP
   template<int Index,class vobj>
   void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0))> & rhs,int i)
   {
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i);
     }
   }
   template<int Index,class vobj>
   void PokeIndex(Lattice<vobj> &lhs,const Lattice<decltype(peekIndex<Index>(lhs._odata[0],0,0))> & rhs,int i,int j)
   {
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       pokeIndex<Index>(lhs._odata[ss],rhs._odata[ss],i,j);
     }
   }

   //////////////////////////////////////////////////////
   // Poke a scalar object into the SIMD array
   //////////////////////////////////////////////////////
   template<class vobj,class sobj>
-  void pokeSite(const sobj &s,Lattice<vobj> &l,std::vector<int> &site){
+  void pokeSite(const sobj &s,Lattice<vobj> &l,const std::vector<int> &site){

     GridBase *grid=l._grid;

@@ -120,7 +116,7 @@ PARALLEL_FOR_LOOP
   // Peek a scalar object from the SIMD array
   //////////////////////////////////////////////////////////
   template<class vobj,class sobj>
-  void peekSite(sobj &s,const Lattice<vobj> &l,std::vector<int> &site){
+  void peekSite(sobj &s,const Lattice<vobj> &l,const std::vector<int> &site){

     GridBase *grid=l._grid;

@@ -131,9 +127,6 @@ PARALLEL_FOR_LOOP

     assert( l.checkerboard == l._grid->CheckerBoard(site));

-    // FIXME
-    //  assert( sizeof(sobj)*Nsimd == sizeof(vobj));

     int rank,odx,idx;
     grid->GlobalCoorToRankIndex(rank,odx,idx,site);
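With the `const std::vector<int>&` signatures above, site coordinates can now be passed as temporaries or const data. A hedged round-trip sketch, assuming an initialized 4-d grid large enough for the coordinate:

    #include <Grid/Grid.h>
    using namespace Grid;
    using namespace Grid::QCD;

    // Sketch only: single-site access through the const-correct peekSite/pokeSite.
    void site_access(GridCartesian *grid, GridParallelRNG &rng) {
      LatticeComplex l(grid);
      random(rng, l);
      const std::vector<int> site({0, 1, 2, 3});  // global coordinate; const now accepted
      TComplex s;
      peekSite(s, l, site);   // gather the scalar from whichever rank/SIMD lane owns the site
      pokeSite(s, l, site);   // scatter it back: a round trip through the SIMD layout
    }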
@@ -40,8 +40,7 @@ namespace Grid {

   template<class vobj> inline Lattice<vobj> adj(const Lattice<vobj> &lhs){
     Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = adj(lhs._odata[ss]);
     }
     return ret;
@@ -49,13 +48,10 @@ PARALLEL_FOR_LOOP

   template<class vobj> inline Lattice<vobj> conjugate(const Lattice<vobj> &lhs){
     Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = conjugate(lhs._odata[ss]);
     }
     return ret;
   };

 }
 #endif
@@ -57,8 +57,7 @@ namespace Grid {
       sumarray[i]=zero;
     }

-PARALLEL_FOR_LOOP
-    for(int thr=0;thr<grid->SumArraySize();thr++){
+    parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
       int nwork, mywork, myoff;
       GridThread::GetWork(left._grid->oSites(),thr,mywork,myoff);

@@ -68,7 +67,7 @@ PARALLEL_FOR_LOOP
       }
       sumarray[thr]=TensorRemove(vnrm) ;
     }

     vector_type vvnrm; vvnrm=zero; // sum across threads
     for(int i=0;i<grid->SumArraySize();i++){
       vvnrm = vvnrm+sumarray[i];
@@ -114,18 +113,17 @@ PARALLEL_FOR_LOOP
       sumarray[i]=zero;
     }

-PARALLEL_FOR_LOOP
-    for(int thr=0;thr<grid->SumArraySize();thr++){
+    parallel_for(int thr=0;thr<grid->SumArraySize();thr++){
       int nwork, mywork, myoff;
       GridThread::GetWork(grid->oSites(),thr,mywork,myoff);

       vobj vvsum=zero;
       for(int ss=myoff;ss<mywork+myoff; ss++){
         vvsum = vvsum + arg._odata[ss];
       }
       sumarray[thr]=vvsum;
     }

     vobj vsum=zero; // sum across threads
     for(int i=0;i<grid->SumArraySize();i++){
       vsum = vsum+sumarray[i];
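The pattern above is a two-phase reduction: each thread accumulates a private partial over its slice of `oSites()`, then a serial loop combines the partials in fixed order so the result is reproducible run to run. A self-contained OpenMP sketch of the same structure (hypothetical names; compile with -fopenmp):

    #include <algorithm>
    #include <omp.h>
    #include <vector>

    // Two-phase deterministic reduction: per-thread partials into a slot array,
    // then a serial combine in fixed order (the same shape as the Grid code).
    double deterministic_sum(const std::vector<double> &data) {
      std::vector<double> sumarray(omp_get_max_threads(), 0.0);
    #pragma omp parallel
      {
        int nt  = omp_get_num_threads();   // actual team size
        int thr = omp_get_thread_num();
        long n = (long)data.size();
        long chunk = n / nt, rem = n % nt;
        long mywork = chunk + (thr < rem ? 1 : 0);            // this thread's share
        long myoff  = chunk * thr + std::min((long)thr, rem); // and its offset
        double acc = 0.0;
        for (long ss = myoff; ss < myoff + mywork; ss++) acc += data[ss];
        sumarray[thr] = acc;                                  // private partial
      }
      double vsum = 0.0;
      for (int i = 0; i < (int)sumarray.size(); i++) vsum += sumarray[i]; // fixed order
      return vsum;
    }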
@@ -30,9 +30,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #define GRID_LATTICE_RNG_H

 #include <random>
+#include <Grid/sitmo_rng/sitmo_prng_engine.hpp>

 namespace Grid {

+  //http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-90Ar1.pdf ?

   //////////////////////////////////////////////////////////////
   // Allow the RNG state to be less dense than the fine grid
@@ -68,6 +70,7 @@ namespace Grid {
   }

   // Wrap seed_seq to give common interface with random_device
+  // Should rather wrap random_device and have a generate
   class fixedSeed {
   public:

@@ -75,20 +78,31 @@ namespace Grid {

     std::seed_seq src;

-    fixedSeed(const std::vector<int> &seeds) : src(seeds.begin(),seeds.end()) {};
+    template<class int_type> fixedSeed(const std::vector<int_type> &seeds) : src(seeds.begin(),seeds.end()) {};

-    result_type operator () (void){
-      std::vector<result_type> list(1);
-      src.generate(list.begin(),list.end());
-      return list[0];
+    template< class RandomIt > void generate( RandomIt begin, RandomIt end ) {
+      src.generate(begin,end);
     }

   };

+  class deviceSeed {
+  public:
+
+    std::random_device rd;
+
+    typedef std::random_device::result_type result_type;
+
+    deviceSeed(void) : rd(){};
+
+    template< class RandomIt > void generate( RandomIt begin, RandomIt end ) {
+      for(RandomIt it=begin; it!=end;it++){
+        *it = rd();
+      }
+    }
+  };

   // real scalars are one component
   template<class scalar,class distribution,class generator> void fillScalar(scalar &s,distribution &dist,generator & gen)
   {
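`fixedSeed` and `deviceSeed` give `std::seed_seq` and `std::random_device` a common `generate(begin,end)` shape, so engine construction no longer cares where the entropy came from. A standalone sketch of the same adapter idea over `std::mt19937` (hypothetical names):

    #include <cstdint>
    #include <iostream>
    #include <random>
    #include <vector>

    // Adapters with one generate() interface over two entropy sources.
    struct FixedSource {                      // deterministic: wraps seed_seq
      std::seed_seq src;
      FixedSource(const std::vector<int> &s) : src(s.begin(), s.end()) {}
      template <class It> void generate(It b, It e) { src.generate(b, e); }
    };
    struct DeviceSource {                     // non-deterministic: wraps random_device
      std::random_device rd;
      template <class It> void generate(It b, It e) { for (; b != e; ++b) *b = rd(); }
    };

    template <class Source> std::mt19937 make_engine(Source &s) {
      std::vector<std::uint32_t> init(std::mt19937::state_size);
      s.generate(init.begin(), init.end());   // source-agnostic seeding
      std::seed_seq seq(init.begin(), init.end());
      return std::mt19937(seq);
    }

    int main() {
      FixedSource fs({1, 2, 3, 4});
      auto eng = make_engine(fs);             // reproducible stream
      std::cout << eng() << std::endl;
    }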
@@ -114,15 +128,19 @@ namespace Grid {
     typedef uint64_t      RngStateType;
     typedef std::ranlux48 RngEngine;
     static const int RngStateCount = 15;
-#else
+#elif RNG_MT19937
     typedef std::mt19937 RngEngine;
     typedef uint32_t     RngStateType;
     static const int     RngStateCount = std::mt19937::state_size;
+#elif RNG_SITMO
+    typedef sitmo::prng_engine RngEngine;
+    typedef uint64_t           RngStateType;
+    static const int           RngStateCount = 4;
 #endif
     std::vector<RngEngine>                             _generators;
     std::vector<std::uniform_real_distribution<RealD>> _uniform;
     std::vector<std::normal_distribution<RealD>>       _gaussian;
     std::vector<std::discrete_distribution<int32_t>>   _bernoulli;

     void GetState(std::vector<RngStateType> & saved,int gen) {
       saved.resize(RngStateCount);
@@ -150,13 +168,6 @@ namespace Grid {
     // FIXME ... do we require lockstep draws of randoms
     // from all nodes keeping seeds consistent.
     // place a barrier/broadcast in the fill routine
-    template<class source> void Seed(source &src)
-    {
-      typename source::result_type init = src();
-      CartesianCommunicator::BroadcastWorld(0,(void *)&init,sizeof(init));
-      _generators[0] = RngEngine(init);
-      _seeded=1;
-    }

     GridSerialRNG() : GridRNGbase() {
       _generators.resize(1);
@@ -239,12 +250,17 @@ namespace Grid {
       CartesianCommunicator::BroadcastWorld(0,(void *)&l,sizeof(l));
     }

+    template<class source> void Seed(source &src)
+    {
+      _generators[0] = RngEngine(src);
+      _seeded=1;
+    }
     void SeedRandomDevice(void){
-      std::random_device rd;
-      Seed(rd);
+      deviceSeed src;
+      Seed(src);
     }
     void SeedFixedIntegers(const std::vector<int> &seeds){
+      CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
       fixedSeed src(seeds);
       Seed(src);
     }
@@ -273,46 +289,6 @@ namespace Grid {
     }

-
-    // This loop could be made faster to avoid the Ahmdahl by
-    // i) seed generators on each timeslice, for x=y=z=0;
-    // ii) seed generators on each z for x=y=0
-    // iii)seed generators on each y,z for x=0
-    // iv) seed generators on each y,z,x
-    // made possible by physical indexing.
-    template<class source> void Seed(source &src)
-    {
-      std::vector<int> gcoor;
-
-      int gsites = _grid->_gsites;
-
-      typename source::result_type init = src();
-      RngEngine pseeder(init);
-      std::uniform_int_distribution<uint64_t> ui;
-
-      for(int gidx=0;gidx<gsites;gidx++){
-
-        int rank,o_idx,i_idx;
-        _grid->GlobalIndexToGlobalCoor(gidx,gcoor);
-        _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
-
-        int l_idx=generator_idx(o_idx,i_idx);
-
-        const int num_rand_seed=16;
-        std::vector<int> site_seeds(num_rand_seed);
-        for(int i=0;i<site_seeds.size();i++){
-          site_seeds[i]= ui(pseeder);
-        }
-
-        _grid->Broadcast(0,(void *)&site_seeds[0],sizeof(int)*site_seeds.size());
-
-        if( rank == _grid->ThisRank() ){
-          fixedSeed ssrc(site_seeds);
-          typename source::result_type sinit = ssrc();
-          _generators[l_idx] = RngEngine(sinit);
-        }
-      }
-      _seeded=1;
-    }

     //FIXME implement generic IO and create state save/restore
     //void SaveState(const std::string<char> &file);
@@ -331,8 +307,7 @@ namespace Grid {
       int words=sizeof(scalar_object)/sizeof(scalar_type);

-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<osites;ss++){
+      parallel_for(int ss=0;ss<osites;ss++){

         std::vector<scalar_object> buf(Nsimd);
         for(int m=0;m<multiplicity;m++) {// Draw from same generator multiplicity times
@@ -354,11 +329,75 @@ PARALLEL_FOR_LOOP
       }
     };

+    // This loop could be made faster to avoid the Amdahl bottleneck by
+    // i)   seeding generators on each timeslice, for x=y=z=0;
+    // ii)  seeding generators on each z for x=y=0
+    // iii) seeding generators on each y,z for x=0
+    // iv)  seeding generators on each y,z,x
+    // made possible by physical indexing.
+    template<class source> void Seed(source &src)
+    {
+      typedef typename source::result_type seed_t;
+      std::uniform_int_distribution<seed_t> uid;
+
+      int numseed=4;
+      int gsites = _grid->_gsites;
+      std::vector<seed_t> site_init(numseed);
+      std::vector<int> gcoor;
+
+      // Master RngEngine
+      std::vector<seed_t> master_init(numseed); src.generate(master_init.begin(),master_init.end());
+      _grid->Broadcast(0,(void *)&master_init[0],sizeof(seed_t)*numseed);
+      fixedSeed master_seed(master_init);
+      RngEngine master_engine(master_seed);
+
+      // Per node RngEngine
+      std::vector<seed_t> node_init(numseed);
+      for(int r=0;r<_grid->ProcessorCount();r++) {
+
+        std::vector<seed_t> rank_init(numseed);
+        for(int i=0;i<numseed;i++) rank_init[i] = uid(master_engine);
+
+        std::cout << GridLogMessage << "SeedSeq for rank "<<r;
+        for(int i=0;i<numseed;i++) std::cout<<" "<<rank_init[i];
+        std::cout <<std::endl;
+
+        if ( r==_grid->ThisRank() ) {
+          for(int i=0;i<numseed;i++) node_init[i] = rank_init[i];
+        }
+      }
+
+      ////////////////////////////////////////////////////
+      // Set up a seed_seq wrapper with these seed words
+      // and draw for each site within the node.
+      ////////////////////////////////////////////////////
+      fixedSeed node_seed(node_init);
+      RngEngine node_engine(node_seed);
+
+      for(int gidx=0;gidx<gsites;gidx++){
+        int rank,o_idx,i_idx;
+
+        _grid->GlobalIndexToGlobalCoor(gidx,gcoor);
+        _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
+
+        if( rank == _grid->ThisRank() ){
+          int l_idx=generator_idx(o_idx,i_idx);
+          for(int i=0;i<numseed;i++) site_init[i] = uid(node_engine);
+          fixedSeed site_seed(site_init);
+          _generators[l_idx] = RngEngine(site_seed);
+        }
+      }
+      _seeded=1;
+    }
     void SeedRandomDevice(void){
-      std::random_device rd;
-      Seed(rd);
+      deviceSeed src;
+      Seed(src);
     }
     void SeedFixedIntegers(const std::vector<int> &seeds){
+      CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
       fixedSeed src(seeds);
       Seed(src);
     }
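The new `Seed` is a three-level cascade: a master engine (agreed across ranks by broadcast) draws per-rank seed words, each rank expands its words into a node engine, and the node engine draws per-site seed words. A single-process sketch of the cascade, with `std::mt19937` standing in for the configurable `RngEngine` and no broadcast step:

    #include <cstdint>
    #include <random>
    #include <vector>

    // Hierarchical seeding: master engine -> per-node seed words -> per-site
    // engines. Every level is reproducible from the initial seeds alone.
    std::vector<std::mt19937> seed_sites(const std::vector<int> &master_seeds,
                                         int nodes, int my_node, int sites_per_node) {
      const int numseed = 4;
      std::uniform_int_distribution<std::uint32_t> uid;

      std::seed_seq mseq(master_seeds.begin(), master_seeds.end());
      std::mt19937 master_engine(mseq);

      // Draw per-node seed words in node order so every rank could agree on them.
      std::vector<std::uint32_t> node_init(numseed);
      for (int r = 0; r < nodes; r++) {
        std::vector<std::uint32_t> rank_init(numseed);
        for (int i = 0; i < numseed; i++) rank_init[i] = uid(master_engine);
        if (r == my_node) node_init = rank_init;
      }

      std::seed_seq nseq(node_init.begin(), node_init.end());
      std::mt19937 node_engine(nseq);

      // One decorrelated engine per local site.
      std::vector<std::mt19937> gens;
      for (int s = 0; s < sites_per_node; s++) {
        std::vector<std::uint32_t> site_init(numseed);
        for (int i = 0; i < numseed; i++) site_init[i] = uid(node_engine);
        std::seed_seq sseq(site_init.begin(), site_init.end());
        gens.emplace_back(sseq);
      }
      return gens;
    }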
@@ -42,8 +42,7 @@ namespace Grid {
     -> Lattice<decltype(trace(lhs._odata[0]))>
   {
     Lattice<decltype(trace(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = trace(lhs._odata[ss]);
     }
     return ret;
@@ -56,8 +55,7 @@ PARALLEL_FOR_LOOP
   inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<Index>(lhs._odata[0]))>
   {
     Lattice<decltype(traceIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<lhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
       ret._odata[ss] = traceIndex<Index>(lhs._odata[ss]);
     }
     return ret;
@@ -51,7 +51,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
 template<class vobj> inline void pickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full){
   half.checkerboard = cb;
   int ssh=0;
-  //PARALLEL_FOR_LOOP
+  //parallel_for
   for(int ss=0;ss<full._grid->oSites();ss++){
     std::vector<int> coor;
     int cbos;
@@ -68,7 +68,7 @@ inline void subdivides(GridBase *coarse,GridBase *fine)
 template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half){
   int cb = half.checkerboard;
   int ssh=0;
-  //PARALLEL_FOR_LOOP
+  //parallel_for
   for(int ss=0;ss<full._grid->oSites();ss++){
     std::vector<int> coor;
     int cbos;
@@ -153,8 +153,7 @@ inline void blockZAXPY(Lattice<vobj> &fineZ,
     assert(block_r[d]*coarse->_rdimensions[d]==fine->_rdimensions[d]);
   }

-PARALLEL_FOR_LOOP
-  for(int sf=0;sf<fine->oSites();sf++){
+  parallel_for(int sf=0;sf<fine->oSites();sf++){

     int sc;
     std::vector<int> coor_c(_ndimension);
@@ -186,8 +185,7 @@ template<class vobj,class CComplex>

   fine_inner = localInnerProduct(fineX,fineY);
   blockSum(coarse_inner,fine_inner);
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<coarse->oSites();ss++){
+  parallel_for(int ss=0;ss<coarse->oSites();ss++){
     CoarseInner._odata[ss] = coarse_inner._odata[ss];
   }
 }
@@ -333,9 +331,6 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
   typedef typename vobj::scalar_object sobj;
   typedef typename vvobj::scalar_object ssobj;

-  sobj s;
-  ssobj ss;

   GridBase *ig = in._grid;
   GridBase *og = out._grid;

@@ -347,10 +342,13 @@ void localConvert(const Lattice<vobj> &in,Lattice<vvobj> &out)
   for(int d=0;d<no;d++){
     assert(ig->_processors[d]  == og->_processors[d]);
     assert(ig->_ldimensions[d] == og->_ldimensions[d]);
+    assert(ig->lSites() == og->lSites());
   }

-  //PARALLEL_FOR_LOOP
-  for(int idx=0;idx<ig->lSites();idx++){
+  parallel_for(int idx=0;idx<ig->lSites();idx++){
+    sobj s;
+    ssobj ss;

     std::vector<int> lcoor(ni);
     ig->LocalIndexToLocalCoor(idx,lcoor);
     peekLocalSite(s,in,lcoor);
@@ -364,7 +362,6 @@ template<class vobj>
 void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog)
 {
   typedef typename vobj::scalar_object sobj;
-  sobj s;

   GridBase *lg = lowDim._grid;
   GridBase *hg = higherDim._grid;
@@ -386,16 +383,16 @@ void InsertSlice(Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int
   }

   // the above should guarantee that the operations are local
-PARALLEL_FOR_LOOP
-  for(int idx=0;idx<lg->lSites();idx++){
+  parallel_for(int idx=0;idx<lg->lSites();idx++){
+    sobj s;
     std::vector<int> lcoor(nl);
     std::vector<int> hcoor(nh);
     lg->LocalIndexToLocalCoor(idx,lcoor);
-    dl=0;
+    int ddl=0;
     hcoor[orthog] = slice;
     for(int d=0;d<nh;d++){
       if ( d!=orthog ) {
-        hcoor[d]=lcoor[dl++];
+        hcoor[d]=lcoor[ddl++];
       }
     }
     peekLocalSite(s,lowDim,lcoor);
@@ -407,7 +404,6 @@ template<class vobj>
 void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, int orthog)
 {
   typedef typename vobj::scalar_object sobj;
-  sobj s;

   GridBase *lg = lowDim._grid;
   GridBase *hg = higherDim._grid;
@@ -428,16 +424,16 @@ void ExtractSlice(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice, in
     }
   }
   // the above should guarantee that the operations are local
-PARALLEL_FOR_LOOP
-  for(int idx=0;idx<lg->lSites();idx++){
+  parallel_for(int idx=0;idx<lg->lSites();idx++){
+    sobj s;
     std::vector<int> lcoor(nl);
     std::vector<int> hcoor(nh);
     lg->LocalIndexToLocalCoor(idx,lcoor);
-    dl=0;
+    int ddl=0;
     hcoor[orthog] = slice;
     for(int d=0;d<nh;d++){
       if ( d!=orthog ) {
-        hcoor[d]=lcoor[dl++];
+        hcoor[d]=lcoor[ddl++];
       }
     }
     peekLocalSite(s,higherDim,hcoor);
@@ -451,7 +447,6 @@ template<class vobj>
 void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
 {
   typedef typename vobj::scalar_object sobj;
-  sobj s;

   GridBase *lg = lowDim._grid;
   GridBase *hg = higherDim._grid;
@@ -468,8 +463,8 @@ void InsertSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice
   }

   // the above should guarantee that the operations are local
-  //PARALLEL_FOR_LOOP
-  for(int idx=0;idx<lg->lSites();idx++){
+  parallel_for(int idx=0;idx<lg->lSites();idx++){
+    sobj s;
     std::vector<int> lcoor(nl);
     std::vector<int> hcoor(nh);
     lg->LocalIndexToLocalCoor(idx,lcoor);
@@ -487,7 +482,6 @@ template<class vobj>
 void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
 {
   typedef typename vobj::scalar_object sobj;
-  sobj s;

   GridBase *lg = lowDim._grid;
   GridBase *hg = higherDim._grid;
@@ -504,8 +498,8 @@ void ExtractSliceLocal(Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slic
   }

   // the above should guarantee that the operations are local
-  //PARALLEL_FOR_LOOP
-  for(int idx=0;idx<lg->lSites();idx++){
+  parallel_for(int idx=0;idx<lg->lSites();idx++){
+    sobj s;
     std::vector<int> lcoor(nl);
     std::vector<int> hcoor(nh);
     lg->LocalIndexToLocalCoor(idx,lcoor);
@@ -573,8 +567,7 @@ typename std::enable_if<isSIMDvectorized<vobj>::value && !isSIMDvectorized<sobj>
     in_grid->iCoorFromIindex(in_icoor[lane], lane);
   }

-PARALLEL_FOR_LOOP
-  for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
+  parallel_for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
|
||||||
for(int in_oidx = 0; in_oidx < in_grid->oSites(); in_oidx++){ //loop over outer index
|
|
||||||
//Assemble vector of pointers to output elements
|
//Assemble vector of pointers to output elements
|
||||||
std::vector<sobj*> out_ptrs(in_nsimd);
|
std::vector<sobj*> out_ptrs(in_nsimd);
|
||||||
|
|
||||||
@ -622,8 +615,7 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
|
|||||||
std::vector<SobjOut> in_slex_conv(in_grid->lSites());
|
std::vector<SobjOut> in_slex_conv(in_grid->lSites());
|
||||||
unvectorizeToLexOrdArray(in_slex_conv, in);
|
unvectorizeToLexOrdArray(in_slex_conv, in);
|
||||||
|
|
||||||
PARALLEL_FOR_LOOP
|
parallel_for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
|
||||||
for(int out_oidx=0;out_oidx<out_grid->oSites();out_oidx++){
|
|
||||||
std::vector<int> out_ocoor(ndim);
|
std::vector<int> out_ocoor(ndim);
|
||||||
out_grid->oCoorFromOindex(out_ocoor, out_oidx);
|
out_grid->oCoorFromOindex(out_ocoor, out_oidx);
|
||||||
|
|
||||||
@ -641,10 +633,6 @@ void precisionChange(Lattice<VobjOut> &out, const Lattice<VobjIn> &in){
|
|||||||
merge(out._odata[out_oidx], ptrs, 0);
|
merge(out._odata[out_oidx], ptrs, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
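
Note: the recurring change in this file replaces the two-line PARALLEL_FOR_LOOP + for idiom with a single parallel_for statement, and moves the per-site temporaries (sobj s; etc.) inside the loop body so each thread gets its own copy. A minimal sketch of how such a macro can be built on OpenMP follows; this is illustrative only, Grid's actual definition lives in its threading header and may differ in detail.

    // Sketch of an OpenMP-backed parallel_for macro (illustrative,
    // not Grid's exact definition).
    #ifdef _OPENMP
    #define parallel_for _Pragma("omp parallel for") for
    #else
    #define parallel_for for
    #endif

    // Usage, as in the hunks above; loop-local variables are per-thread:
    //   parallel_for(int idx=0; idx<N; idx++){ sobj s; /* ... */ }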
@@ -40,27 +40,24 @@ namespace Grid {
     ////////////////////////////////////////////////////////////////////////////////////////////////////
     template<class vobj>
     inline Lattice<vobj> transpose(const Lattice<vobj> &lhs){
       Lattice<vobj> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<lhs._grid->oSites();ss++){
+      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
         ret._odata[ss] = transpose(lhs._odata[ss]);
       }
       return ret;
     };
 
     ////////////////////////////////////////////////////////////////////////////////////////////////////
     // Index level dependent transpose
     ////////////////////////////////////////////////////////////////////////////////////////////////////
     template<int Index,class vobj>
     inline auto TransposeIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))>
     {
       Lattice<decltype(transposeIndex<Index>(lhs._odata[0]))> ret(lhs._grid);
-PARALLEL_FOR_LOOP
-      for(int ss=0;ss<lhs._grid->oSites();ss++){
+      parallel_for(int ss=0;ss<lhs._grid->oSites();ss++){
         ret._odata[ss] = transposeIndex<Index>(lhs._odata[ss]);
       }
       return ret;
     };
 
 
 }
 #endif
@@ -37,8 +37,7 @@ namespace Grid {
     Lattice<obj> ret(rhs._grid);
     ret.checkerboard = rhs.checkerboard;
     conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
       ret._odata[ss]=pow(rhs._odata[ss],y);
     }
     return ret;
@@ -47,8 +46,7 @@ PARALLEL_FOR_LOOP
     Lattice<obj> ret(rhs._grid);
     ret.checkerboard = rhs.checkerboard;
     conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
      ret._odata[ss]=mod(rhs._odata[ss],y);
     }
     return ret;
@@ -58,8 +56,7 @@ PARALLEL_FOR_LOOP
     Lattice<obj> ret(rhs._grid);
     ret.checkerboard = rhs.checkerboard;
     conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
       ret._odata[ss]=div(rhs._odata[ss],y);
     }
     return ret;
@@ -69,8 +66,7 @@ PARALLEL_FOR_LOOP
     Lattice<obj> ret(rhs._grid);
     ret.checkerboard = rhs.checkerboard;
     conformable(ret,rhs);
-PARALLEL_FOR_LOOP
-    for(int ss=0;ss<rhs._grid->oSites();ss++){
+    parallel_for(int ss=0;ss<rhs._grid->oSites();ss++){
       ret._odata[ss]=Exponentiate(rhs._odata[ss],alpha, Nexp);
     }
     return ret;
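
These wrappers apply a scalar function site by site while preserving the checkerboard, checked by conformable. A usage sketch; the grid and field names here are placeholders, not part of the commit:

    // Illustrative usage (assumes an initialised LatticeComplex field 'x').
    LatticeComplex x2 = pow(x, 2.0);   // site-wise power, same checkerboard
    // mod, div and Exponentiate follow the same pattern.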
@@ -56,8 +56,7 @@ inline void whereWolf(Lattice<vobj> &ret,const Lattice<iobj> &predicate,Lattice<
   std::vector<scalar_object> truevals (Nsimd);
   std::vector<scalar_object> falsevals(Nsimd);
 
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<iftrue._grid->oSites(); ss++){
+  parallel_for(int ss=0;ss<iftrue._grid->oSites(); ss++){
 
     extract(iftrue._odata[ss]  ,truevals);
     extract(iffalse._odata[ss] ,falsevals);
@@ -29,9 +29,10 @@ See the full license in the file "LICENSE" in the top level distribution
 directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid.h>
+#include <Grid/GridCore.h>
 
 #include <cxxabi.h>
+#include <memory>
 
 namespace Grid {
@@ -35,37 +35,27 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 #endif
 #include <arpa/inet.h>
 #include <algorithm>
-// 64bit endian swap is a portability pain
-#ifndef __has_builtin         // Optional of course.
-#define __has_builtin(x) 0    // Compatibility with non-clang compilers.
-#endif
 
-#if HAVE_DECL_BE64TOH
-#undef  Grid_ntohll
-#define Grid_ntohll be64toh
-#endif
-
-#if HAVE_DECL_NTOHLL
-#undef  Grid_ntohll
-#define Grid_ntohll ntohll
-#endif
-
-#ifndef Grid_ntohll
+inline uint32_t byte_reverse32(uint32_t f) {
+  f = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  return f;
+}
+inline uint64_t byte_reverse64(uint64_t f) {
+  uint64_t g;
+  g = ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  g = g << 32;
+  f = f >> 32;
+  g|= ((f&0xFF)<<24) | ((f&0xFF00)<<8) | ((f&0xFF0000)>>8) | ((f&0xFF000000UL)>>24) ;
+  return g;
+}
 
 #if BYTE_ORDER == BIG_ENDIAN
-#define Grid_ntohll(A) (A)
+inline uint64_t Grid_ntohll(uint64_t A) { return A; }
 #else
-#if __has_builtin(__builtin_bswap64)
-#define Grid_ntohll(A) __builtin_bswap64(A)
-#else
-#error
-#endif
+inline uint64_t Grid_ntohll(uint64_t A) {
+  return byte_reverse64(A);
+}
 #endif
 
 namespace Grid {
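
The new byte_reverse32/byte_reverse64 helpers replace the previous patchwork of be64toh/ntohll/__builtin_bswap64 feature probes with plain portable shifts; byte_reverse64 reverses each 32-bit half and swaps the halves. A sanity-check sketch for the construction (a test harness, not part of the commit):

    // Test sketch for the byte-reversal helper (illustrative).
    #include <cassert>
    #include <cstdint>

    uint64_t byte_reverse64(uint64_t f);   // as introduced in the hunk above

    int main() {
      // Full 64-bit reversal, and involution: applying twice is identity.
      assert(byte_reverse64(0x0102030405060708ULL) == 0x0807060504030201ULL);
      assert(byte_reverse64(byte_reverse64(0x1122334455667788ULL))
             == 0x1122334455667788ULL);
      return 0;
    }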
@@ -195,7 +185,7 @@ class BinaryIO {
       std::vector<int> site({x,y,z,t});
 
       if (grid->IsBoss()) {
-        fin.read((char *)&file_object, sizeof(file_object));
+        fin.read((char *)&file_object, sizeof(file_object));assert( fin.fail()==0);
         bytes += sizeof(file_object);
         if (ieee32big) be32toh_v((void *)&file_object, sizeof(file_object));
         if (ieee32)    le32toh_v((void *)&file_object, sizeof(file_object));
@@ -211,11 +201,13 @@ class BinaryIO {
       std::cout<<GridLogPerformance<<"readObjectSerial: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
                << (double)bytes/ (double)timer.useconds() <<" MB/s "  <<std::endl;
 
+      grid->Broadcast(0,(void *)&csum,sizeof(csum));
       return csum;
     }
 
     template<class vobj,class fobj,class munger>
-    static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
+    static inline uint32_t writeObjectSerial(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
+                                             const std::string & format)
     {
       typedef typename vobj::scalar_object sobj;
 
@@ -231,7 +223,7 @@ class BinaryIO {
       //////////////////////////////////////////////////
       std::cout<< GridLogMessage<< "Serial write I/O "<< file<<std::endl;
       GridStopWatch timer; timer.Start();
 
       std::ofstream fout;
       if ( grid->IsBoss() ) {
         fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
@@ -255,23 +247,24 @@ class BinaryIO {
 
       if ( grid->IsBoss() ) {
 
         if(ieee32big) htobe32_v((void *)&file_object,sizeof(file_object));
         if(ieee32)    htole32_v((void *)&file_object,sizeof(file_object));
         if(ieee64big) htobe64_v((void *)&file_object,sizeof(file_object));
         if(ieee64)    htole64_v((void *)&file_object,sizeof(file_object));
 
         // NB could gather an xstrip as an optimisation.
-        fout.write((char *)&file_object,sizeof(file_object));
+        fout.write((char *)&file_object,sizeof(file_object));assert( fout.fail()==0);
         bytes+=sizeof(file_object);
       }
     }}}}
     timer.Stop();
     std::cout<<GridLogPerformance<<"writeObjectSerial: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
              << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 
+    grid->Broadcast(0,(void *)&csum,sizeof(csum));
     return csum;
   }
 
   static inline uint32_t writeRNGSerial(GridSerialRNG &serial,GridParallelRNG &parallel,std::string file,int offset)
   {
     typedef typename GridSerialRNG::RngStateType RngStateType;
@@ -305,23 +298,23 @@ class BinaryIO {
     int l_idx=parallel.generator_idx(o_idx,i_idx);
 
     if( rank == grid->ThisRank() ){
       //  std::cout << "rank" << rank<<" Getting state for index "<<l_idx<<std::endl;
       parallel.GetState(saved,l_idx);
     }
 
     grid->Broadcast(rank,(void *)&saved[0],bytes);
 
     if ( grid->IsBoss() ) {
       Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
-      fout.write((char *)&saved[0],bytes);
+      fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
     }
 
   }
 
   if ( grid->IsBoss() ) {
     serial.GetState(saved,0);
     Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
-    fout.write((char *)&saved[0],bytes);
+    fout.write((char *)&saved[0],bytes);assert( fout.fail()==0);
   }
   grid->Broadcast(0,(void *)&csum,sizeof(csum));
   return csum;
@@ -355,20 +348,20 @@ class BinaryIO {
   int l_idx=parallel.generator_idx(o_idx,i_idx);
 
   if ( grid->IsBoss() ) {
-    fin.read((char *)&saved[0],bytes);
+    fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
     Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
   }
 
   grid->Broadcast(0,(void *)&saved[0],bytes);
 
   if( rank == grid->ThisRank() ){
     parallel.SetState(saved,l_idx);
   }
 
 }
 
 if ( grid->IsBoss() ) {
-  fin.read((char *)&saved[0],bytes);
+  fin.read((char *)&saved[0],bytes);assert( fin.fail()==0);
   serial.SetState(saved,0);
   Uint32Checksum((uint32_t *)&saved[0],bytes,csum);
 }
@@ -380,7 +373,8 @@ class BinaryIO {
 
 
 template<class vobj,class fobj,class munger>
-static inline uint32_t readObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string &format)
+static inline uint32_t readObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
+                                          const std::string &format)
 {
   typedef typename vobj::scalar_object sobj;
 
@@ -415,15 +409,15 @@ class BinaryIO {
 
   if ( d == 0 ) parallel[d] = 0;
   if (parallel[d]) {
     range[d] = grid->_ldimensions[d];
     start[d] = grid->_processor_coor[d]*range[d];
     ioproc[d]= grid->_processor_coor[d];
   } else {
     range[d] = grid->_gdimensions[d];
     start[d] = 0;
     ioproc[d]= 0;
 
     if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
   }
   slice_vol = slice_vol * range[d];
 }
@@ -434,9 +428,9 @@ class BinaryIO {
   std::cout<< std::dec ;
   std::cout<< GridLogMessage<< "Parallel read I/O to "<< file << " with " <<tmp<< " IOnodes for subslice ";
   for(int d=0;d<grid->_ndimension;d++){
     std::cout<< range[d];
     if( d< grid->_ndimension-1 )
       std::cout<< " x ";
   }
   std::cout << std::endl;
 }
@@ -472,8 +466,8 @@ class BinaryIO {
   Lexicographic::CoorFromIndex(tsite,tlex,range);
 
   for(int d=0;d<nd;d++){
     lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
     gsite[d] = tsite[d]+start[d];               // global site
   }
 
   /////////////////////////
@@ -488,28 +482,28 @@ class BinaryIO {
   ////////////////////////////////
   if (myrank == iorank) {
 
     fin.seekg(offset+g_idx*sizeof(fileObj));
-    fin.read((char *)&fileObj,sizeof(fileObj));
+    fin.read((char *)&fileObj,sizeof(fileObj));assert( fin.fail()==0);
     bytes+=sizeof(fileObj);
 
     if(ieee32big) be32toh_v((void *)&fileObj,sizeof(fileObj));
     if(ieee32)    le32toh_v((void *)&fileObj,sizeof(fileObj));
     if(ieee64big) be64toh_v((void *)&fileObj,sizeof(fileObj));
     if(ieee64)    le64toh_v((void *)&fileObj,sizeof(fileObj));
 
     munge(fileObj,siteObj,csum);
 
   }
 
   // Possibly do transport through pt2pt
   if ( rank != iorank ) {
     if ( (myrank == rank) || (myrank==iorank) ) {
       grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,iorank,rank,sizeof(siteObj));
     }
   }
   // Poke at destination
   if ( myrank == rank ) {
     pokeLocalSite(siteObj,Umu,lsite);
   }
   grid->Barrier(); // necessary?
 }
@@ -520,7 +514,7 @@ class BinaryIO {
 
 timer.Stop();
 std::cout<<GridLogPerformance<<"readObjectParallel: read "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
          << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 
 return csum;
 }
@@ -529,7 +523,8 @@ class BinaryIO {
 // Parallel writer
 //////////////////////////////////////////////////////////
 template<class vobj,class fobj,class munger>
-static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,const std::string & format)
+static inline uint32_t writeObjectParallel(Lattice<vobj> &Umu,std::string file,munger munge,int offset,
+                                           const std::string & format)
 {
   typedef typename vobj::scalar_object sobj;
   GridBase *grid = Umu._grid;
@@ -558,15 +553,15 @@ class BinaryIO {
   if ( d!= grid->_ndimension-1 ) parallel[d] = 0;
 
   if (parallel[d]) {
     range[d] = grid->_ldimensions[d];
     start[d] = grid->_processor_coor[d]*range[d];
     ioproc[d]= grid->_processor_coor[d];
   } else {
     range[d] = grid->_gdimensions[d];
     start[d] = 0;
     ioproc[d]= 0;
 
     if ( grid->_processor_coor[d] != 0 ) IOnode = 0;
   }
 
   slice_vol = slice_vol * range[d];
@@ -577,13 +572,13 @@ class BinaryIO {
   grid->GlobalSum(tmp);
   std::cout<< GridLogMessage<< "Parallel write I/O from "<< file << " with " <<tmp<< " IOnodes for subslice ";
   for(int d=0;d<grid->_ndimension;d++){
     std::cout<< range[d];
     if( d< grid->_ndimension-1 )
       std::cout<< " x ";
   }
   std::cout << std::endl;
 }
 
 GridStopWatch timer; timer.Start();
 uint64_t bytes=0;
 
@@ -619,8 +614,8 @@ class BinaryIO {
   Lexicographic::CoorFromIndex(tsite,tlex,range);
 
   for(int d=0;d<nd;d++){
     lsite[d] = tsite[d]%grid->_ldimensions[d];  // local site
     gsite[d] = tsite[d]+start[d];               // global site
   }
 
 
@@ -640,36 +635,36 @@ class BinaryIO {
 
   // Pair of nodes may need to do pt2pt send
   if ( rank != iorank ) { // comms is necessary
     if ( (myrank == rank) || (myrank==iorank) ) { // and we have to do it
       // Send to IOrank
       grid->SendRecvPacket((void *)&siteObj,(void *)&siteObj,rank,iorank,sizeof(siteObj));
     }
   }
 
   grid->Barrier(); // necessary?
 
   if (myrank == iorank) {
 
     munge(siteObj,fileObj,csum);
 
     if(ieee32big) htobe32_v((void *)&fileObj,sizeof(fileObj));
     if(ieee32)    htole32_v((void *)&fileObj,sizeof(fileObj));
     if(ieee64big) htobe64_v((void *)&fileObj,sizeof(fileObj));
     if(ieee64)    htole64_v((void *)&fileObj,sizeof(fileObj));
 
     fout.seekp(offset+g_idx*sizeof(fileObj));
-    fout.write((char *)&fileObj,sizeof(fileObj));
+    fout.write((char *)&fileObj,sizeof(fileObj));assert( fout.fail()==0);
     bytes+=sizeof(fileObj);
   }
 }
 
 grid->GlobalSum(csum);
 grid->GlobalSum(bytes);
 
 timer.Stop();
 std::cout<<GridLogPerformance<<"writeObjectParallel: wrote "<< bytes <<" bytes in "<<timer.Elapsed() <<" "
          << (double)bytes/timer.useconds() <<" MB/s "  <<std::endl;
 
 return csum;
 }
 
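
A pattern worth noting across these hunks: every fin.read/fout.write now carries assert( fail()==0 ), so a short read or a failed write aborts immediately instead of silently feeding garbage into the checksum. The same idiom in isolation (an illustrative sketch, not Grid code):

    // Sketch of the checked-I/O idiom adopted above.
    #include <cassert>
    #include <fstream>

    template<class T>
    void read_checked(std::ifstream &fin, T &obj) {
      fin.read(reinterpret_cast<char*>(&obj), sizeof(T));
      assert(fin.fail() == 0);   // abort on short read or stream error
    }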
@@ -242,7 +242,6 @@ class NerscIO : public BinaryIO {
 static inline unsigned int writeHeader(NerscField &field,std::string file)
 {
   std::ofstream fout(file,std::ios::out|std::ios::in);
-
   fout.seekp(0,std::ios::beg);
   dump_nersc_header(field, fout);
   field.data_start = fout.tellp();
@@ -264,10 +263,13 @@ static inline int readHeader(std::string file,GridBase *grid,  NerscField &field
   getline(fin,line); // read one line and insist is
 
   removeWhitespace(line);
+  std::cout << GridLogMessage << "* " << line << std::endl;
 
   assert(line==std::string("BEGIN_HEADER"));
 
   do {
     getline(fin,line); // read one line
+    std::cout << GridLogMessage << "* "<<line<< std::endl;
     int eq = line.find("=");
     if(eq >0) {
       std::string key=line.substr(0,eq);
@@ -322,6 +324,8 @@ static inline int readHeader(std::string file,GridBase *grid,  NerscField &field
 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Now the meat: the object readers
 /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+#define PARALLEL_READ
+#define PARALLEL_WRITE
 
 template<class vsimd>
 static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,NerscField& header,std::string file)
@@ -345,25 +349,41 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
   // munger is a function of <floating point, Real, data_type>
   if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
     if ( ieee32 || ieee32big ) {
-      // csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
+#ifdef PARALLEL_READ
       csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
	 (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
+#else
+      csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3F>
+	 (Umu,file,Nersc3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format);
+#endif
     }
     if ( ieee64 || ieee64big ) {
-      //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
+#ifdef PARALLEL_READ
      csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
	 (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
+#else
+     csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>, LorentzColour2x3D>
+	 (Umu,file,Nersc3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format);
+#endif
     }
   } else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
    if ( ieee32 || ieee32big ) {
-     //csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
+#ifdef PARALLEL_READ
     csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
+#else
+    csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixF>
+	(Umu,file,NerscSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format);
+#endif
   }
   if ( ieee64 || ieee64big ) {
-    // csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
+#ifdef PARALLEL_READ
    csum=BinaryIO::readObjectParallel<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
	(Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
+#else
+    csum=BinaryIO::readObjectSerial<iLorentzColourMatrix<vsimd>,LorentzColourMatrixD>
+	(Umu,file,NerscSimpleMunger<LorentzColourMatrixD,LorentzColourMatrix>(),offset,format);
+#endif
   }
  } else {
    assert(0);
@@ -371,12 +391,17 @@ static inline void readConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu,
 
   NerscStatistics<GaugeField>(Umu,clone);
 
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" checksum "<<std::hex<< csum<< std::dec
+	   <<" header   "<<std::hex<<header.checksum<<std::dec <<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" plaquette "<<clone.plaquette
+	   <<" header    "<<header.plaquette<<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<<" link_trace "<<clone.link_trace
+	   <<" header   "<<header.link_trace<<std::endl;
   assert(fabs(clone.plaquette -header.plaquette ) < 1.0e-5 );
   assert(fabs(clone.link_trace-header.link_trace) < 1.0e-6 );
 
   assert(csum == header.checksum );
 
-  std::cout<<GridLogMessage <<"Read NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
+  std::cout<<GridLogMessage <<"NERSC Configuration "<<file<< " and plaquette, link trace, and checksum agree"<<std::endl;
 }
 
 template<class vsimd>
@@ -416,19 +441,11 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
   Nersc3x2unmunger<fobj2D,sobj> munge;
   BinaryIO::Uint32Checksum<vobj,fobj2D>(Umu, munge,header.checksum);
   offset = writeHeader(header,file);
+#ifdef PARALLEL_WRITE
+  csum=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
+#else
   csum=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point);
+#endif
-  std::string file1 = file+"para";
-  int offset1 = writeHeader(header,file1);
-  int csum1=BinaryIO::writeObjectParallel<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);
-  //int csum1=BinaryIO::writeObjectSerial<vobj,fobj2D>(Umu,file1,munge,offset,header.floating_point);
-
-  std::cout << GridLogMessage << " TESTING PARALLEL WRITE offsets " << offset1 << " "<< offset << std::endl;
-  std::cout << GridLogMessage << " TESTING PARALLEL WRITE csums " << csum1 << " "<<std::hex<< csum << std::dec<< std::endl;
-
-  assert(offset1==offset);
-  assert(csum1==csum);
-
 } else {
   header.floating_point = std::string("IEEE64BIG");
@@ -436,8 +453,11 @@ static inline void writeConfiguration(Lattice<iLorentzColourMatrix<vsimd> > &Umu
   NerscSimpleUnmunger<fobj3D,sobj> munge;
   BinaryIO::Uint32Checksum<vobj,fobj3D>(Umu, munge,header.checksum);
   offset = writeHeader(header,file);
-  // csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
+#ifdef PARALLEL_WRITE
   csum=BinaryIO::writeObjectParallel<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
+#else
+  csum=BinaryIO::writeObjectSerial<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point);
+#endif
 }
 
 std::cout<<GridLogMessage <<"Written NERSC Configuration "<<file<< " checksum "<<std::hex<<csum<< std::dec<<" plaq "<< header.plaquette <<std::endl;
@@ -511,8 +531,6 @@ static inline void readRNGState(GridSerialRNG &serial,GridParallelRNG & parallel
 // munger is a function of <floating point, Real, data_type>
 uint32_t csum=BinaryIO::readRNGSerial(serial,parallel,file,offset);
 
-std::cerr<<" Csum "<< csum << " "<< header.checksum <<std::endl;
-
 assert(csum == header.checksum );
 
 std::cout<<GridLogMessage <<"Read NERSC RNG file "<<file<< " format "<< data_type <<std::endl;
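
The new PARALLEL_READ/PARALLEL_WRITE macros select the parallel I/O paths at compile time, with the #else branches falling back to the serial readers and writers. A configuration sketch, for example forcing serial writes while keeping parallel reads:

    // Usage sketch: compile-time selection of the NERSC I/O path.
    #define PARALLEL_READ        // parallel configuration reads
    //#define PARALLEL_WRITE     // commented out: use writeObjectSerial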
@@ -26,8 +26,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 *************************************************************************************/
 /* END LEGAL */
 
-#include <Grid.h>
-#include <PerfCount.h>
+#include <Grid/GridCore.h>
+#include <Grid/perfmon/PerfCount.h>
 
 namespace Grid {
 
@@ -172,7 +172,7 @@ public:
   const char * name = PerformanceCounterConfigs[PCT].name;
   fd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
   if (fd == -1) {
-    fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
+    fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
     perror("Error is");
   }
   int norm = PerformanceCounterConfigs[PCT].normalisation;
@@ -181,7 +181,7 @@ public:
   name = PerformanceCounterConfigs[norm].name;
   cyclefd = perf_event_open(&pe, 0, -1, -1, 0); // pid 0, cpu -1 current process any cpu. group -1
   if (cyclefd == -1) {
-    fprintf(stderr, "Error opening leader %llx for event %s\n", pe.config,name);
+    fprintf(stderr, "Error opening leader %llx for event %s\n",(long long) pe.config,name);
     perror("Error is");
   }
 #endif
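
The fprintf change fixes a format/argument mismatch: %llx expects a long long-sized argument, while perf_event_attr.config is a 64-bit integer whose underlying type need not be long long on every ABI, so passing it uncast is not portable. The explicit cast makes the call well defined. The same fix in isolation (illustrative):

    // Sketch of the format-string fix (illustrative).
    #include <cstdio>
    #include <cstdint>

    void report_open_error(uint64_t config, const char *name) {
      // Cast to the type %llx actually expects.
      std::printf("Error opening leader %llx for event %s\n",
                  (long long)config, name);
    }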
@@ -1,11 +1,9 @@
-#include <Grid.h>
-#include <PerfCount.h>
-#include <Stat.h>
-
-
+#include <Grid/GridCore.h>
+#include <Grid/perfmon/PerfCount.h>
+#include <Grid/perfmon/Stat.h>
 
 namespace Grid {
 
 
 bool PmuStat::pmu_initialized=false;
 
@@ -14,7 +14,7 @@
 #ifndef SOURCE_PUGIXML_CPP
 #define SOURCE_PUGIXML_CPP
 
-#include <pugixml/pugixml.h>
+#include <Grid/pugixml/pugixml.h>
 
 #include <stdlib.h>
 #include <stdio.h>
@@ -29,8 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#ifndef GRID_QCD_H
-#define GRID_QCD_H
+#ifndef GRID_QCD_BASE_H
+#define GRID_QCD_BASE_H
 namespace Grid{
 
 namespace QCD {
@@ -62,7 +62,6 @@ namespace QCD {
 #define SpinIndex    1
 #define LorentzIndex 0
 
-
 // Also should make these a named enum type
 static const int DaggerNo=0;
 static const int DaggerYes=1;
@@ -494,26 +493,5 @@ namespace QCD {
 } // Grid
 
 
-#include <Grid/qcd/utils/SpaceTimeGrid.h>
-#include <Grid/qcd/spin/Dirac.h>
-#include <Grid/qcd/spin/TwoSpinor.h>
-#include <Grid/qcd/utils/LinalgUtils.h>
-#include <Grid/qcd/utils/CovariantCshift.h>
-
-// Include representations
-#include <Grid/qcd/utils/SUn.h>
-#include <Grid/qcd/utils/SUnAdjoint.h>
-#include <Grid/qcd/utils/SUnTwoIndex.h>
-#include <Grid/qcd/representations/hmc_types.h>
-
-#include <Grid/qcd/action/Actions.h>
-
-#include <Grid/qcd/smearing/Smearing.h>
-
-#include <Grid/qcd/hmc/integrators/Integrator.h>
-#include <Grid/qcd/hmc/integrators/Integrator_algorithm.h>
-#include <Grid/qcd/hmc/HMC.h>
-
-
 #endif
50
lib/qcd/action/Action.h
Normal file
@@ -0,0 +1,50 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid
+
+Source file: ./lib/qcd/action/Actions.h
+
+Copyright (C) 2015
+
+Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
+Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
+Author: neo <cossu@post.kek.jp>
+Author: paboyle <paboyle@ph.ed.ac.uk>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef GRID_QCD_ACTION_H
+#define GRID_QCD_ACTION_H
+
+////////////////////////////////////////////
+// Abstract base interface
+////////////////////////////////////////////
+#include <Grid/qcd/action/ActionCore.h>
+////////////////////////////////////////////////////////////////////////
+// Fermion actions; prevent coupling fermion.cc files to other headers
+////////////////////////////////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/Fermion.h>
+////////////////////////////////////////
+// Pseudo fermion combinations for HMC
+////////////////////////////////////////
+#include <Grid/qcd/action/pseudofermion/PseudoFermion.h>
+
+#endif
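
The new Action.h (together with ActionCore.h, added below) layers the action includes: ActionCore.h carries the bases, parameters and gauge actions that fermion .cc files need, while fermion and pseudofermion headers are aggregated only here, as the comments in the file state. Client code that wants the full stack keeps a single entry point (a usage sketch):

    // Usage sketch: one entry point for the full action stack.
    #include <Grid/qcd/action/Action.h>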
@@ -150,4 +150,5 @@ using ActionSet = std::vector<ActionLevel<GaugeField, R> >;
 
 }
 }
+
 #endif
45
lib/qcd/action/ActionCore.h
Normal file
@@ -0,0 +1,45 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid
+
+Source file: ./lib/qcd/action/ActionCore.h
+
+Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+Author: neo <cossu@post.kek.jp>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+See the full license in the file "LICENSE" in the top level distribution
+directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef QCD_ACTION_CORE
+#define QCD_ACTION_CORE
+
+#include <Grid/qcd/action/ActionBase.h>
+#include <Grid/qcd/action/ActionParams.h>
+
+////////////////////////////////////////////
+// Gauge Actions
+////////////////////////////////////////////
+#include <Grid/qcd/action/gauge/Gauge.h>
+////////////////////////////////////////////
+// Fermion prereqs
+////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/FermionCore.h>
+
+#endif
@@ -45,6 +45,10 @@ namespace QCD {
     WilsonImplParams() : overlapCommsCompute(false) {};
   };
 
+  struct StaggeredImplParams {
+    StaggeredImplParams() {};
+  };
+
   struct OneFlavourRationalParams {
     RealD  lo;
     RealD  hi;
@@ -30,8 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 
 #include <Grid/Eigen/Dense>
-#include <Grid.h>
-
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 
 namespace Grid {
 namespace QCD {
@@ -57,10 +57,23 @@ void CayleyFermion5D<Impl>::Dminus(const FermionField &psi, FermionField &chi)
 {
   int Ls=this->Ls;
 
-  this->DW(psi,this->tmp(),DaggerNo);
+  FermionField tmp_f(this->FermionGrid());
+  this->DW(psi,tmp_f,DaggerNo);
 
   for(int s=0;s<Ls;s++){
-    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
+    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
+  }
+}
+template<class Impl>
+void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
+{
+  int Ls=this->Ls;
+
+  FermionField tmp_f(this->FermionGrid());
+  this->DW(psi,tmp_f,DaggerYes);
+
+  for(int s=0;s<Ls;s++){
+    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],tmp_f,s,s);// chi = (1-c[s] D_W) psi
   }
 }
 
@@ -107,17 +120,6 @@ template<class Impl> void CayleyFermion5D<Impl>::CayleyZeroCounters(void)
 }
 
 
-template<class Impl>
-void CayleyFermion5D<Impl>::DminusDag(const FermionField &psi, FermionField &chi)
-{
-  int Ls=this->Ls;
-
-  this->DW(psi,this->tmp(),DaggerYes);
-
-  for(int s=0;s<Ls;s++){
-    axpby_ssp(chi,Coeff_t(1.0),psi,-cs[s],this->tmp(),s,s);// chi = (1-c[s] D_W) psi
-  }
-}
 template<class Impl>
 void CayleyFermion5D<Impl>::M5D   (const FermionField &psi, FermionField &chi)
 {
@@ -168,7 +170,6 @@ void CayleyFermion5D<Impl>::Mooee       (const FermionField &psi, FermionField &
   lower[0] =-mass*lower[0];
   M5D(psi,psi,chi,lower,diag,upper);
 }
-
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeDag    (const FermionField &psi, FermionField &chi)
 {
@@ -190,7 +191,12 @@ void CayleyFermion5D<Impl>::MooeeDag    (const FermionField &psi, FermionField &
       lower[s]=-cee[s-1];
     }
   }
+  // Conjugate the terms
+  for (int s=0;s<Ls;s++){
+    diag[s] =conjugate(diag[s]);
+    upper[s]=conjugate(upper[s]);
+    lower[s]=conjugate(lower[s]);
+  }
   M5Ddag(psi,psi,chi,lower,diag,upper);
 }
 
@@ -212,9 +218,23 @@ void CayleyFermion5D<Impl>::MeooeDag5D    (const FermionField &psi, FermionField
   int Ls=this->Ls;
   std::vector<Coeff_t> diag =bs;
   std::vector<Coeff_t> upper=cs;
   std::vector<Coeff_t> lower=cs;
-  upper[Ls-1]=-mass*upper[Ls-1];
-  lower[0]   =-mass*lower[0];
+  for (int s=0;s<Ls;s++){
+    if ( s== 0 ) {
+      upper[s] = cs[s+1];
+      lower[s] =-mass*cs[Ls-1];
+    } else if ( s==(Ls-1) ) {
+      upper[s] =-mass*cs[0];
+      lower[s] = cs[s-1];
+    } else {
+      upper[s] = cs[s+1];
+      lower[s] = cs[s-1];
+    }
+    upper[s] = conjugate(upper[s]);
+    lower[s] = conjugate(lower[s]);
+    diag[s]  = conjugate(diag[s]);
+  }
   M5Ddag(psi,psi,Din,lower,diag,upper);
 }
 
@@ -300,7 +320,7 @@ void CayleyFermion5D<Impl>::MDeriv  (GaugeField &mat,const FermionField &U,const
     this->DhopDeriv(mat,U,Din,dag);
   } else {
     // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
     this->DhopDeriv(mat,Din,V,dag);
   }
 };
@@ -315,7 +335,7 @@ void CayleyFermion5D<Impl>::MoeDeriv(GaugeField &mat,const FermionField &U,const
     this->DhopDerivOE(mat,U,Din,dag);
   } else {
     // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
     this->DhopDerivOE(mat,Din,V,dag);
   }
 };
@@ -330,7 +350,7 @@ void CayleyFermion5D<Impl>::MeoDeriv(GaugeField &mat,const FermionField &U,const
     this->DhopDerivEO(mat,U,Din,dag);
   } else {
     // U d/du [D_w D5]^dag V = U D5^dag d/du DW^dag Y // implicit adj on U in call
-    Meooe5D(U,Din);
+    MeooeDag5D(U,Din);
     this->DhopDerivEO(mat,Din,V,dag);
   }
 };
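
The MooeeDag and MeooeDag5D hunks above add the complex conjugation that the dagger of the banded fifth-dimension operator requires: for bands (lower, diag, upper), the adjoint satisfies (Mdag)_{s,s'} = conj(M_{s',s}), so the daggered bands are the conjugates of the transposed ones, with the -mass factors staying on the wrapped corner entries. For real Mobius coefficients the conjugation is a no-op, which is presumably why the omission was previously harmless. A scalar sketch of the band identity (illustrative, not Grid code):

    // Band identity used above: if M has bands lower[s], diag[s], upper[s],
    // then the bands of Mdag are
    //   diag_dag[s]  = conjugate(diag[s]);
    //   upper_dag[s] = conjugate(lower[s+1]);  // transposed lower band
    //   lower_dag[s] = conjugate(upper[s-1]);  // transposed upper band
    // with the boundary (s=0, s=Ls-1) entries carrying the -mass factor.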
@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
#ifndef GRID_QCD_CAYLEY_FERMION_H
|
#ifndef GRID_QCD_CAYLEY_FERMION_H
|
||||||
#define GRID_QCD_CAYLEY_FERMION_H
|
#define GRID_QCD_CAYLEY_FERMION_H
|
||||||
|
|
||||||
|
#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
|
||||||
|
|
||||||
namespace Grid {
|
namespace Grid {
|
||||||
|
|
||||||
namespace QCD {
|
namespace QCD {
|
||||||
@ -192,7 +194,9 @@ template void CayleyFermion5D< A >::M5Ddag(const FermionField &psi,const Fermion
|
|||||||
template void CayleyFermion5D< A >::MooeeInv (const FermionField &psi, FermionField &chi); \
|
template void CayleyFermion5D< A >::MooeeInv (const FermionField &psi, FermionField &chi); \
|
||||||
template void CayleyFermion5D< A >::MooeeInvDag (const FermionField &psi, FermionField &chi);
|
template void CayleyFermion5D< A >::MooeeInvDag (const FermionField &psi, FermionField &chi);
|
||||||
|
|
||||||
#define CAYLEY_DPERP_CACHE
|
#undef CAYLEY_DPERP_DENSE
|
||||||
|
#define CAYLEY_DPERP_CACHE
|
||||||
#undef CAYLEY_DPERP_LINALG
|
#undef CAYLEY_DPERP_LINALG
|
||||||
|
#define CAYLEY_DPERP_VEC
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 *************************************************************************************/
 /* END LEGAL */
 
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 
 
 namespace Grid {
@@ -54,8 +55,8 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
   // Flops = 6.0*(Nc*Ns) *Ls*vol
   M5Dcalls++;
   M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
     for(int s=0;s<Ls;s++){
       auto tmp = psi._odata[0];
       if ( s==0 ) {
@@ -98,8 +99,8 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
   // Flops = 6.0*(Nc*Ns) *Ls*vol
   M5Dcalls++;
   M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
     auto tmp = psi._odata[0];
     for(int s=0;s<Ls;s++){
       if ( s==0 ) {
@@ -137,8 +138,7 @@ void CayleyFermion5D<Impl>::MooeeInv    (const FermionField &psi, FermionField &
   MooeeInvCalls++;
   MooeeInvTime-=usecond();
 
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
     auto tmp = psi._odata[0];
 
     // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls = 12*Ls * (9) = 108*Ls flops
@@ -181,11 +181,22 @@ void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &
   assert(psi.checkerboard == psi.checkerboard);
   chi.checkerboard=psi.checkerboard;
 
+  std::vector<Coeff_t> ueec(Ls);
+  std::vector<Coeff_t> deec(Ls);
+  std::vector<Coeff_t> leec(Ls);
+  std::vector<Coeff_t> ueemc(Ls);
+  std::vector<Coeff_t> leemc(Ls);
+  for(int s=0;s<ueec.size();s++){
+    ueec[s] = conjugate(uee[s]);
+    deec[s] = conjugate(dee[s]);
+    leec[s] = conjugate(lee[s]);
+    ueemc[s]= conjugate(ueem[s]);
+    leemc[s]= conjugate(leem[s]);
+  }
   MooeeInvCalls++;
   MooeeInvTime-=usecond();
 
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
+  parallel_for(int ss=0;ss<grid->oSites();ss+=Ls){ // adds Ls
 
     auto tmp = psi._odata[0];
 
@@ -193,25 +204,25 @@ PARALLEL_FOR_LOOP
     chi[ss]=psi[ss];
     for (int s=1;s<Ls;s++){
       spProj5m(tmp,chi[ss+s-1]);
-      chi[ss+s] = psi[ss+s]-uee[s-1]*tmp;
+      chi[ss+s] = psi[ss+s]-ueec[s-1]*tmp;
     }
     // U_m^{-\dagger}
     for (int s=0;s<Ls-1;s++){
       spProj5p(tmp,chi[ss+s]);
-      chi[ss+Ls-1] = chi[ss+Ls-1] - ueem[s]*tmp;
+      chi[ss+Ls-1] = chi[ss+Ls-1] - ueemc[s]*tmp;
     }
 
     // L_m^{-\dagger} D^{-dagger}
     for (int s=0;s<Ls-1;s++){
      spProj5m(tmp,chi[ss+Ls-1]);
-     chi[ss+s] = (1.0/dee[s])*chi[ss+s]-(leem[s]/dee[Ls-1])*tmp;
+     chi[ss+s] = (1.0/deec[s])*chi[ss+s]-(leemc[s]/deec[Ls-1])*tmp;
    }
-   chi[ss+Ls-1]= (1.0/dee[Ls-1])*chi[ss+Ls-1];
+   chi[ss+Ls-1]= (1.0/deec[Ls-1])*chi[ss+Ls-1];
 
    // Apply L^{-dagger}
    for (int s=Ls-2;s>=0;s--){
      spProj5p(tmp,chi[ss+s+1]);
-     chi[ss+s] = chi[ss+s] - lee[s]*tmp;
+     chi[ss+s] = chi[ss+s] - leec[s]*tmp;
    }
  }
 
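MooeeInvDag above is the adjoint of an LDU solve: forward substitution, a diagonal solve, then back substitution. Rather than conjugating inside the hot site loop, the hunk precomputes conjugated coefficient tables (ueec, deec, leec, ueemc, leemc) once and reuses the same recursions. A simplified standalone sketch of that pattern, assuming plain unit-bidiagonal L and U and dropping the U_m/L_m border terms and chiral projectors of the real code:

#include <complex>
#include <vector>

using C = std::complex<double>;

// Solve (L D U)^dagger chi = psi for unit-bidiagonal L, diagonal D, unit-bidiagonal U.
std::vector<C> ldu_dagger_solve(const std::vector<C>& psi,
                                const std::vector<C>& lee,   // sub-diagonal of L
                                const std::vector<C>& dee,   // diagonal of D
                                const std::vector<C>& uee) { // super-diagonal of U
  int Ls = psi.size();
  std::vector<C> ueec(Ls), deec(Ls), leec(Ls);
  for (int s = 0; s < Ls; s++) {              // conjugation hoisted out of the solve
    ueec[s] = std::conj(uee[s]);
    deec[s] = std::conj(dee[s]);
    leec[s] = std::conj(lee[s]);
  }
  std::vector<C> chi(psi);
  for (int s = 1; s < Ls; s++)    chi[s] = psi[s] - ueec[s-1]*chi[s-1]; // (U')^-dagger
  for (int s = 0; s < Ls; s++)    chi[s] = chi[s] / deec[s];            // D^-dagger
  for (int s = Ls-2; s >= 0; s--) chi[s] = chi[s] - leec[s]*chi[s+1];   // L^-dagger
  return chi;
}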
@@ -30,7 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 
 #include <Grid/Eigen/Dense>
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 
 
 namespace Grid {
@@ -38,20 +39,17 @@ namespace QCD {
 /*
  * Dense matrix versions of routines
  */
 
-/*
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
 {
   this->MooeeInternal(psi,chi,DaggerYes,InverseYes);
 }
 
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeInv(const FermionField &psi, FermionField &chi)
 {
   this->MooeeInternal(psi,chi,DaggerNo,InverseYes);
 }
-*/
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv)
 {
@@ -125,9 +123,20 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
   }
 }
 
+#ifdef CAYLEY_DPERP_DENSE
+INSTANTIATE_DPERP(GparityWilsonImplF);
+INSTANTIATE_DPERP(GparityWilsonImplD);
+INSTANTIATE_DPERP(WilsonImplF);
+INSTANTIATE_DPERP(WilsonImplD);
+INSTANTIATE_DPERP(ZWilsonImplF);
+INSTANTIATE_DPERP(ZWilsonImplD);
+
 template void CayleyFermion5D<GparityWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 template void CayleyFermion5D<GparityWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 template void CayleyFermion5D<WilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
 template void CayleyFermion5D<WilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+template void CayleyFermion5D<ZWilsonImplF>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+template void CayleyFermion5D<ZWilsonImplD>::MooeeInternal(const FermionField &psi, FermionField &chi,int dag, int inv);
+#endif
 
 }}
@@ -29,7 +29,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 *************************************************************************************/
 /* END LEGAL */
 
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 
 
 namespace Grid {
@@ -47,17 +48,18 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
                            std::vector<Coeff_t> &diag,
                            std::vector<Coeff_t> &upper)
 {
+  Coeff_t one(1.0);
   int Ls=this->Ls;
   for(int s=0;s<Ls;s++){
     if ( s==0 ) {
       axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,Ls-1);
+      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,Ls-1);
     } else if ( s==(Ls-1)) {
       axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,0);
-      axpby_ssp_pplus (chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pplus (chi,one,chi,lower[s],psi,s,s-1);
     } else {
       axpby_ssp_pminus(chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pplus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pplus(chi,one,chi,lower[s],psi,s,s-1);
     }
   }
 }
@@ -69,17 +71,18 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
                            std::vector<Coeff_t> &diag,
                            std::vector<Coeff_t> &upper)
 {
+  Coeff_t one(1.0);
   int Ls=this->Ls;
   for(int s=0;s<Ls;s++){
     if ( s==0 ) {
       axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,Ls-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,Ls-1);
     } else if ( s==(Ls-1)) {
       axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,0);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
     } else {
       axpby_ssp_pplus (chi,diag[s],phi,upper[s],psi,s,s+1);
-      axpby_ssp_pminus(chi,1.0,chi,lower[s],psi,s,s-1);
+      axpby_ssp_pminus(chi,one,chi,lower[s],psi,s,s-1);
     }
   }
 }
@@ -87,62 +90,68 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeInv    (const FermionField &psi, FermionField &chi)
 {
+  Coeff_t one(1.0);
+  Coeff_t czero(0.0);
   chi.checkerboard=psi.checkerboard;
   int Ls=this->Ls;
   // Apply (L^{\prime})^{-1}
-  axpby_ssp (chi,1.0,psi,     0.0,psi,0,0);      // chi[0]=psi[0]
+  axpby_ssp (chi,one,psi,     czero,psi,0,0);      // chi[0]=psi[0]
   for (int s=1;s<Ls;s++){
-    axpby_ssp_pplus(chi,1.0,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
+    axpby_ssp_pplus(chi,one,psi,-lee[s-1],chi,s,s-1);// recursion Psi[s] -lee P_+ chi[s-1]
   }
   // L_m^{-1}
   for (int s=0;s<Ls-1;s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
-    axpby_ssp_pminus(chi,1.0,chi,-leem[s],chi,Ls-1,s);
+    axpby_ssp_pminus(chi,one,chi,-leem[s],chi,Ls-1,s);
   }
   // U_m^{-1} D^{-1}
   for (int s=0;s<Ls-1;s++){
     // Chi[s] + 1/d chi[s]
-    axpby_ssp_pplus(chi,1.0/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
+    axpby_ssp_pplus(chi,one/dee[s],chi,-ueem[s]/dee[Ls-1],chi,s,Ls-1);
   }
-  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable
+  axpby_ssp(chi,one/dee[Ls-1],chi,czero,chi,Ls-1,Ls-1); // Modest avoidable
 
   // Apply U^{-1}
   for (int s=Ls-2;s>=0;s--){
-    axpby_ssp_pminus (chi,1.0,chi,-uee[s],chi,s,s+1);  // chi[Ls]
+    axpby_ssp_pminus (chi,one,chi,-uee[s],chi,s,s+1);  // chi[Ls]
   }
 }
 
 template<class Impl>
 void CayleyFermion5D<Impl>::MooeeInvDag (const FermionField &psi, FermionField &chi)
 {
+  Coeff_t one(1.0);
+  Coeff_t czero(0.0);
   chi.checkerboard=psi.checkerboard;
   int Ls=this->Ls;
   // Apply (U^{\prime})^{-dagger}
-  axpby_ssp (chi,1.0,psi,     0.0,psi,0,0);      // chi[0]=psi[0]
+  axpby_ssp (chi,one,psi,     czero,psi,0,0);      // chi[0]=psi[0]
   for (int s=1;s<Ls;s++){
-    axpby_ssp_pminus(chi,1.0,psi,-uee[s-1],chi,s,s-1);
+    axpby_ssp_pminus(chi,one,psi,-conjugate(uee[s-1]),chi,s,s-1);
   }
   // U_m^{-\dagger}
   for (int s=0;s<Ls-1;s++){
-    axpby_ssp_pplus(chi,1.0,chi,-ueem[s],chi,Ls-1,s);
+    axpby_ssp_pplus(chi,one,chi,-conjugate(ueem[s]),chi,Ls-1,s);
   }
   // L_m^{-\dagger} D^{-dagger}
   for (int s=0;s<Ls-1;s++){
-    axpby_ssp_pminus(chi,1.0/dee[s],chi,-leem[s]/dee[Ls-1],chi,s,Ls-1);
+    axpby_ssp_pminus(chi,one/conjugate(dee[s]),chi,-conjugate(leem[s]/dee[Ls-1]),chi,s,Ls-1);
   }
-  axpby_ssp(chi,1.0/dee[Ls-1],chi,0.0,chi,Ls-1,Ls-1); // Modest avoidable
+  axpby_ssp(chi,one/conjugate(dee[Ls-1]),chi,czero,chi,Ls-1,Ls-1); // Modest avoidable
 
   // Apply L^{-dagger}
   for (int s=Ls-2;s>=0;s--){
-    axpby_ssp_pplus (chi,1.0,chi,-lee[s],chi,s,s+1);  // chi[Ls]
+    axpby_ssp_pplus (chi,one,chi,-conjugate(lee[s]),chi,s,s+1);  // chi[Ls]
  }
 }
 
 
 #ifdef CAYLEY_DPERP_LINALG
-  INSTANTIATE(WilsonImplF);
-  INSTANTIATE(WilsonImplD);
-  INSTANTIATE(GparityWilsonImplF);
-  INSTANTIATE(GparityWilsonImplD);
+  INSTANTIATE_DPERP(WilsonImplF);
+  INSTANTIATE_DPERP(WilsonImplD);
+  INSTANTIATE_DPERP(GparityWilsonImplF);
+  INSTANTIATE_DPERP(GparityWilsonImplD);
+  INSTANTIATE_DPERP(ZWilsonImplF);
+  INSTANTIATE_DPERP(ZWilsonImplD);
 #endif
 
 }
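The recurring "1.0 -> one" change above is about the templated coefficient type: constructing Coeff_t explicitly lets the same axpby calls instantiate when Coeff_t is complex (the new ZWilsonImpl targets), where a bare double literal would not deduce. A minimal self-contained sketch of the overload issue, with illustrative names:

#include <complex>

// A toy axpby with a single templated coefficient/operand type.
template<class Coeff_t>
Coeff_t axpby(Coeff_t a, Coeff_t x, Coeff_t b, Coeff_t y) { return a*x + b*y; }

int main() {
  // Real coefficients: Coeff_t one(1.0) is just 1.0.
  double r = axpby(double(1.0), 2.0, double(0.0), 3.0);
  // Complex coefficients (the ZWilson/ZMobius case): explicit construction still
  // compiles, while axpby(1.0, z, 0.0, w) would fail template deduction because
  // the arguments imply conflicting types (double vs std::complex<double>).
  std::complex<double> one(1.0), czero(0.0), z(2.0, 1.0), w(3.0, -1.0);
  std::complex<double> c = axpby(one, z, czero, w);
  (void)r; (void)c;
  return 0;
}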
@@ -30,11 +30,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 /* END LEGAL */
 
 
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/CayleyFermion5D.h>
 
 
 namespace Grid {
-namespace QCD { /*
+namespace QCD {
+/*
  * Dense matrix versions of routines
  */
 template<class Impl>
@@ -91,8 +93,7 @@ void CayleyFermion5D<Impl>::M5D(const FermionField &psi,
 
   assert(Nc==3);
 
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
+  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 #if 0
     alignas(64) SiteHalfSpinor hp;
     alignas(64) SiteHalfSpinor hm;
@@ -232,8 +233,7 @@ void CayleyFermion5D<Impl>::M5Ddag(const FermionField &psi,
 
   M5Dcalls++;
   M5Dtime-=usecond();
-PARALLEL_FOR_LOOP
-  for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
+  parallel_for(int ss=0;ss<grid->oSites();ss+=LLs){ // adds LLs
 #if 0
     alignas(64) SiteHalfSpinor hp;
     alignas(64) SiteHalfSpinor hm;
@@ -792,13 +792,11 @@ void CayleyFermion5D<Impl>::MooeeInternal(const FermionField &psi, FermionField
   MooeeInvTime-=usecond();
 
   if ( switcheroo<Coeff_t>::iscomplex() ) {
-PARALLEL_FOR_LOOP
-    for(auto site=0;site<vol;site++){
+    parallel_for(auto site=0;site<vol;site++){
       MooeeInternalZAsm(psi,chi,LLs,site,*_Matp,*_Matm);
     }
   } else {
-PARALLEL_FOR_LOOP
-    for(auto site=0;site<vol;site++){
+    parallel_for(auto site=0;site<vol;site++){
      MooeeInternalAsm(psi,chi,LLs,site,*_Matp,*_Matm);
    }
  }
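Every PARALLEL_FOR_LOOP plus bare for(...) pair in these hunks collapses into a single parallel_for(...) token, so the threading pragma can never become separated from its loop. A guess at the mechanics, sketched standalone (this is not Grid's actual Threads.h definition; the macro body here is an assumption):

#include <cstdio>
#ifdef _OPENMP
#include <omp.h>
// _Pragma lets a macro attach the OpenMP pragma and the for-keyword together.
#define parallel_for _Pragma("omp parallel for") for
#else
#define parallel_for for   // serial fallback when OpenMP is absent
#endif

int main() {
  static const int N = 1024;
  static double data[N];
  parallel_for(int ss = 0; ss < N; ss++) {  // expands to "#pragma omp parallel for" + for(...)
    data[ss] = 2.0 * ss;
  }
  printf("%f\n", data[N-1]);
  return 0;
}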
@@ -26,7 +26,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>
 
 namespace Grid {
 namespace QCD {
@@ -29,6 +29,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef  GRID_QCD_CONTINUED_FRACTION_H
 #define  GRID_QCD_CONTINUED_FRACTION_H
 
+#include <Grid/qcd/action/fermion/WilsonFermion5D.h>
 
 namespace Grid {
 
 namespace QCD {
@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef  GRID_QCD_DOMAIN_WALL_FERMION_H
 #define  GRID_QCD_DOMAIN_WALL_FERMION_H
 
-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
 
 namespace Grid {
 
@@ -2,16 +2,11 @@
 
 Grid physics library, www.github.com/paboyle/Grid
 
-Source file: ./lib/qcd/action/Actions.h
+Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
 
 Copyright (C) 2015
 
-Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
 Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
-Author: neo <cossu@post.kek.jp>
-Author: paboyle <paboyle@ph.ed.ac.uk>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -30,8 +25,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#ifndef GRID_QCD_ACTIONS_H
-#define GRID_QCD_ACTIONS_H
+#ifndef GRID_QCD_FERMION_H
+#define GRID_QCD_FERMION_H
 
 // * Linear operators             (Hermitian and non-hermitian)  .. my LinearOperator
 // * System solvers               (Hermitian and non-hermitian)  .. my OperatorFunction
@@ -108,36 +103,6 @@ typedef SymanzikGaugeAction<ConjugateGimplD>     ConjugateSymanzikGaugeAction
 // for EVERY .cc file. This define centralises the list and restores global push of impl cases
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 
-
-#define FermOp4dVecTemplateInstantiate(A) \
- template class A<WilsonImplF>; \
- template class A<WilsonImplD>; \
- template class A<ZWilsonImplF>; \
- template class A<ZWilsonImplD>; \
- template class A<GparityWilsonImplF>; \
- template class A<GparityWilsonImplD>;
-
-#define AdjointFermOpTemplateInstantiate(A) \
- template class A<WilsonAdjImplF>; \
- template class A<WilsonAdjImplD>;
-
-#define TwoIndexFermOpTemplateInstantiate(A) \
- template class A<WilsonTwoIndexSymmetricImplF>; \
- template class A<WilsonTwoIndexSymmetricImplD>;
-
-#define FermOp5dVecTemplateInstantiate(A) \
- template class A<DomainWallVec5dImplF>; \
- template class A<DomainWallVec5dImplD>; \
- template class A<ZDomainWallVec5dImplF>; \
- template class A<ZDomainWallVec5dImplD>;
-
-#define FermOpTemplateInstantiate(A) \
- FermOp4dVecTemplateInstantiate(A) \
- FermOp5dVecTemplateInstantiate(A)
-
-#define GparityFermOpTemplateInstantiate(A)
-
 ////////////////////////////////////////////
 // Fermion operators / actions
 ////////////////////////////////////////////
@@ -145,9 +110,9 @@ typedef SymanzikGaugeAction<ConjugateGimplD>     ConjugateSymanzikGaugeAction
 #include <Grid/qcd/action/fermion/WilsonFermion.h>       // 4d wilson like
 #include <Grid/qcd/action/fermion/WilsonTMFermion.h>     // 4d wilson like
 #include <Grid/qcd/action/fermion/WilsonFermion5D.h>     // 5d base used by all 5d overlap types
 
 //#include <Grid/qcd/action/fermion/CloverFermion.h>
+#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion.h>
+#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
 #include <Grid/qcd/action/fermion/CayleyFermion5D.h>     // Cayley types
 #include <Grid/qcd/action/fermion/DomainWallFermion.h>
 #include <Grid/qcd/action/fermion/DomainWallFermion.h>
@@ -158,14 +123,16 @@ typedef SymanzikGaugeAction<ConjugateGimplD>     ConjugateSymanzikGaugeAction
 #include <Grid/qcd/action/fermion/ShamirZolotarevFermion.h>
 #include <Grid/qcd/action/fermion/OverlapWilsonCayleyTanhFermion.h>
 #include <Grid/qcd/action/fermion/OverlapWilsonCayleyZolotarevFermion.h>
 
 #include <Grid/qcd/action/fermion/ContinuedFractionFermion5D.h>               // Continued fraction
 #include <Grid/qcd/action/fermion/OverlapWilsonContfracTanhFermion.h>
 #include <Grid/qcd/action/fermion/OverlapWilsonContfracZolotarevFermion.h>
 
 #include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>                 // Partial fraction
 #include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionTanhFermion.h>
 #include <Grid/qcd/action/fermion/OverlapWilsonPartialFractionZolotarevFermion.h>
+///////////////////////////////////////////////////////////////////////////////
+// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
+///////////////////////////////////////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/g5HermitianLinop.h>
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // More maintainable to maintain the following typedef list centrally, as more "impl" targets
@@ -269,23 +236,20 @@ typedef MobiusFermion<GparityWilsonImplR>  GparityMobiusFermionR;
 typedef MobiusFermion<GparityWilsonImplF>  GparityMobiusFermionF;
 typedef MobiusFermion<GparityWilsonImplD>  GparityMobiusFermionD;
 
+typedef ImprovedStaggeredFermion<StaggeredImplR> ImprovedStaggeredFermionR;
+typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
+typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;
+
+typedef ImprovedStaggeredFermion5D<StaggeredImplR> ImprovedStaggeredFermion5DR;
+typedef ImprovedStaggeredFermion5D<StaggeredImplF> ImprovedStaggeredFermion5DF;
+typedef ImprovedStaggeredFermion5D<StaggeredImplD> ImprovedStaggeredFermion5DD;
+
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplR> ImprovedStaggeredFermionVec5dR;
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplF> ImprovedStaggeredFermionVec5dF;
+typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplD> ImprovedStaggeredFermionVec5dD;
+
 }}
-///////////////////////////////////////////////////////////////////////////////
-// G5 herm -- this has to live in QCD since dirac matrix is not in the broader sector of code
-///////////////////////////////////////////////////////////////////////////////
-#include <Grid/qcd/action/fermion/g5HermitianLinop.h>
-
-////////////////////////////////////////
-// Pseudo fermion combinations for HMC
-////////////////////////////////////////
-#include <Grid/qcd/action/pseudofermion/EvenOddSchurDifferentiable.h>
-
-#include <Grid/qcd/action/pseudofermion/TwoFlavour.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourRatio.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOdd.h>
-#include <Grid/qcd/action/pseudofermion/TwoFlavourEvenOddRatio.h>
-
 #include <Grid/qcd/action/pseudofermion/OneFlavourRational.h>
 #include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
80	lib/qcd/action/fermion/FermionCore.h	Normal file
@@ -0,0 +1,80 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/* END LEGAL */
+#ifndef  GRID_QCD_FERMION_CORE_H
+#define  GRID_QCD_FERMION_CORE_H
+
+#include <Grid/GridCore.h>
+#include <Grid/GridQCDcore.h>
+#include <Grid/qcd/action/ActionCore.h>
+
+////////////////////////////////////////////
+// Fermion prereqs
+////////////////////////////////////////////
+#include <Grid/qcd/action/fermion/WilsonCompressor.h>     //used by all wilson type fermions
+#include <Grid/qcd/action/fermion/FermionOperatorImpl.h>
+#include <Grid/qcd/action/fermion/FermionOperator.h>
+#include <Grid/qcd/action/fermion/WilsonKernels.h>        //used by all wilson type fermions
+#include <Grid/qcd/action/fermion/StaggeredKernels.h>     //used by all wilson type fermions
+
+#define FermOpStaggeredTemplateInstantiate(A) \
+ template class A<StaggeredImplF>; \
+ template class A<StaggeredImplD>;
+
+#define FermOpStaggeredVec5dTemplateInstantiate(A) \
+ template class A<StaggeredVec5dImplF>; \
+ template class A<StaggeredVec5dImplD>;
+
+#define FermOp4dVecTemplateInstantiate(A) \
+ template class A<WilsonImplF>; \
+ template class A<WilsonImplD>; \
+ template class A<ZWilsonImplF>; \
+ template class A<ZWilsonImplD>; \
+ template class A<GparityWilsonImplF>; \
+ template class A<GparityWilsonImplD>;
+
+#define AdjointFermOpTemplateInstantiate(A) \
+ template class A<WilsonAdjImplF>; \
+ template class A<WilsonAdjImplD>;
+
+#define TwoIndexFermOpTemplateInstantiate(A) \
+ template class A<WilsonTwoIndexSymmetricImplF>; \
+ template class A<WilsonTwoIndexSymmetricImplD>;
+
+#define FermOp5dVecTemplateInstantiate(A) \
+ template class A<DomainWallVec5dImplF>; \
+ template class A<DomainWallVec5dImplD>; \
+ template class A<ZDomainWallVec5dImplF>; \
+ template class A<ZDomainWallVec5dImplD>;
+
+#define FermOpTemplateInstantiate(A) \
+ FermOp4dVecTemplateInstantiate(A) \
+ FermOp5dVecTemplateInstantiate(A)
+
+#define GparityFermOpTemplateInstantiate(A)
+
+#endif
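The new FermionCore.h centralises the explicit-instantiation lists that were removed from the aggregate header above. A self-contained sketch of how these macros are used (the Impl tags here are dummies; in Grid they are WilsonImplF, StaggeredImplD, and so on):

// Dummy implementation tags standing in for Grid's Impl classes.
struct ImplF {}; struct ImplD {};

// One centrally maintained list of instantiations.
#define MyFermOpTemplateInstantiate(A) \
  template class A<ImplF>;             \
  template class A<ImplD>;

template<class Impl> class MyFermionOp {
 public:
  int flavours() const { return 2; }
};

// A single macro call in one .cc file emits all explicit instantiations,
// so supporting a new Impl means editing one list instead of every file.
MyFermOpTemplateInstantiate(MyFermionOp);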
@@ -194,8 +194,7 @@ namespace QCD {
       GaugeLinkField tmp(mat._grid);
       tmp = zero;
 
-PARALLEL_FOR_LOOP
-      for(int sss=0;sss<tmp._grid->oSites();sss++){
+      parallel_for(int sss=0;sss<tmp._grid->oSites();sss++){
        int sU=sss;
        for(int s=0;s<Ls;s++){
          int sF = s+Ls*sU;
@@ -235,11 +234,13 @@ class DomainWallVec5dImpl  :  public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
     typedef Lattice<SiteSpinor> FermionField;
     typedef Lattice<SitePropagator> PropagatorField;
 
 
+    /////////////////////////////////////////////////
     // Make the doubled gauge field a *scalar*
+    /////////////////////////////////////////////////
     typedef iImplDoubledGaugeField<typename Simd::scalar_type> SiteDoubledGaugeField;  // This is a scalar
     typedef iImplGaugeField<typename Simd::scalar_type> SiteScalarGaugeField;  // scalar
     typedef iImplGaugeLink<typename Simd::scalar_type> SiteScalarGaugeLink;  // scalar
 
     typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
 
     typedef WilsonCompressor<SiteHalfSpinor, SiteSpinor> Compressor;
@@ -271,11 +272,11 @@ class DomainWallVec5dImpl  :  public PeriodicGaugeImpl< GaugeImplTypes< S,Nrepres
 
     inline void DoubleStore(GridBase *GaugeGrid, DoubledGaugeField &Uds,const GaugeField &Umu)
     {
      SiteScalarGaugeField  ScalarUmu;
      SiteDoubledGaugeField ScalarUds;
 
      GaugeLinkField U(Umu._grid);
      GaugeField  Uadj(Umu._grid);
      for (int mu = 0; mu < Nd; mu++) {
        U = PeekIndex<LorentzIndex>(Umu, mu);
        U = adj(Cshift(U, mu, -1));
@@ -333,7 +334,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
   typedef iImplPropagator<Simd> SitePropagator;
   typedef iImplHalfSpinor<Simd> SiteHalfSpinor;
   typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
 
   typedef Lattice<SiteSpinor> FermionField;
   typedef Lattice<SitePropagator> PropagatorField;
   typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
@@ -356,7 +357,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
                        StencilImpl &St) {
 
     typedef SiteHalfSpinor vobj;
     typedef typename SiteHalfSpinor::scalar_object sobj;
 
     vobj vtmp;
     sobj stmp;
@@ -445,8 +446,7 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
       Uconj = where(coor==neglink,-Uconj,Uconj);
     }
 
-PARALLEL_FOR_LOOP
-    for(auto ss=U.begin();ss<U.end();ss++){
+    parallel_for(auto ss=U.begin();ss<U.end();ss++){
       Uds[ss](0)(mu) = U[ss]();
       Uds[ss](1)(mu) = Uconj[ss]();
     }
@@ -459,8 +459,7 @@ PARALLEL_FOR_LOOP
       Utmp = where(coor==0,Uconj,Utmp);
     }
 
-PARALLEL_FOR_LOOP
-    for(auto ss=U.begin();ss<U.end();ss++){
+    parallel_for(auto ss=U.begin();ss<U.end();ss++){
      Uds[ss](0)(mu+4) = Utmp[ss]();
    }
 
@@ -469,8 +468,7 @@ PARALLEL_FOR_LOOP
      Utmp = where(coor==0,U,Utmp);
    }
 
-PARALLEL_FOR_LOOP
-    for(auto ss=U.begin();ss<U.end();ss++){
+    parallel_for(auto ss=U.begin();ss<U.end();ss++){
      Uds[ss](1)(mu+4) = Utmp[ss]();
    }
 
@@ -484,8 +482,7 @@ PARALLEL_FOR_LOOP
     GaugeLinkField link(mat._grid);
     // use lorentz for flavour as hack.
     auto tmp = TraceIndex<SpinIndex>(outerProduct(Btilde, A));
-PARALLEL_FOR_LOOP
-    for (auto ss = tmp.begin(); ss < tmp.end(); ss++) {
+    parallel_for(auto ss = tmp.begin(); ss < tmp.end(); ss++) {
       link[ss]() = tmp[ss](0, 0) - conjugate(tmp[ss](1, 1));
     }
     PokeIndex<LorentzIndex>(mat, link, mu);
@@ -498,8 +495,7 @@ PARALLEL_FOR_LOOP
 
     GaugeLinkField tmp(mat._grid);
     tmp = zero;
-PARALLEL_FOR_LOOP
-    for (int ss = 0; ss < tmp._grid->oSites(); ss++) {
+    parallel_for(int ss = 0; ss < tmp._grid->oSites(); ss++) {
       for (int s = 0; s < Ls; s++) {
         int sF = s + Ls * ss;
         auto ttmp = traceIndex<SpinIndex>(outerProduct(Btilde[sF], Atilde[sF]));
@@ -512,6 +508,323 @@ PARALLEL_FOR_LOOP
 
 };
 
 
+  /////////////////////////////////////////////////////////////////////////////
+  // Single flavour one component spinors with colour index
+  /////////////////////////////////////////////////////////////////////////////
+  template <class S, class Representation = FundamentalRepresentation >
+  class StaggeredImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
+
+    public:
+
+    typedef RealD  _Coeff_t ;
+    static const int Dimension = Representation::Dimension;
+    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;
+
+    //Necessary?
+    constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}
+
+    const bool LsVectorised=false;
+    typedef _Coeff_t Coeff_t;
+
+    INHERIT_GIMPL_TYPES(Gimpl);
+
+    template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
+    template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
+    template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
+    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
+    template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
+
+    typedef iImplScalar<Simd>            SiteComplex;
+    typedef iImplSpinor<Simd>            SiteSpinor;
+    typedef iImplHalfSpinor<Simd>        SiteHalfSpinor;
+    typedef iImplDoubledGaugeField<Simd> SiteDoubledGaugeField;
+    typedef iImplPropagator<Simd>        SitePropagator;
+
+    typedef Lattice<SiteComplex>           ComplexField;
+    typedef Lattice<SiteSpinor>            FermionField;
+    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
+    typedef Lattice<SitePropagator>        PropagatorField;
+
+    typedef SimpleCompressor<SiteSpinor> Compressor;
+    typedef StaggeredImplParams ImplParams;
+    typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;
+
+    ImplParams Params;
+
+    StaggeredImpl(const ImplParams &p = ImplParams()) : Params(p){};
+
+    inline void multLink(SiteSpinor &phi,
+                         const SiteDoubledGaugeField &U,
+                         const SiteSpinor &chi,
+                         int mu){
+      mult(&phi(), &U(mu), &chi());
+    }
+    inline void multLinkAdd(SiteSpinor &phi,
+                            const SiteDoubledGaugeField &U,
+                            const SiteSpinor &chi,
+                            int mu){
+      mac(&phi(), &U(mu), &chi());
+    }
+
+    template <class ref>
+    inline void loadLinkElement(Simd &reg, ref &memory) {
+      reg = memory;
+    }
+
+    inline void DoubleStore(GridBase *GaugeGrid,
+                            DoubledGaugeField &UUUds, // for Naik term
+                            DoubledGaugeField &Uds,
+                            const GaugeField &Uthin,
+                            const GaugeField &Ufat) {
+      conformable(Uds._grid, GaugeGrid);
+      conformable(Uthin._grid, GaugeGrid);
+      conformable(Ufat._grid, GaugeGrid);
+      GaugeLinkField U(GaugeGrid);
+      GaugeLinkField UU(GaugeGrid);
+      GaugeLinkField UUU(GaugeGrid);
+      GaugeLinkField Udag(GaugeGrid);
+      GaugeLinkField UUUdag(GaugeGrid);
+      for (int mu = 0; mu < Nd; mu++) {
+
+        // Staggered Phase.
+        Lattice<iScalar<vInteger> > coor(GaugeGrid);
+        Lattice<iScalar<vInteger> > x(GaugeGrid); LatticeCoordinate(x,0);
+        Lattice<iScalar<vInteger> > y(GaugeGrid); LatticeCoordinate(y,1);
+        Lattice<iScalar<vInteger> > z(GaugeGrid); LatticeCoordinate(z,2);
+        Lattice<iScalar<vInteger> > t(GaugeGrid); LatticeCoordinate(t,3);
+
+        Lattice<iScalar<vInteger> > lin_z(GaugeGrid); lin_z=x+y;
+        Lattice<iScalar<vInteger> > lin_t(GaugeGrid); lin_t=x+y+z;
+
+        ComplexField phases(GaugeGrid); phases=1.0;
+
+        if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
+        if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
+        if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);
+
+        // 1 hop based on fat links
+        U      = PeekIndex<LorentzIndex>(Ufat, mu);
+        Udag   = adj( Cshift(U, mu, -1));
+
+        U    = U    *phases;
+        Udag = Udag *phases;
+
+        PokeIndex<LorentzIndex>(Uds, U, mu);
+        PokeIndex<LorentzIndex>(Uds, Udag, mu + 4);
+
+        // 3 hop based on thin links. Crazy huh ?
+        U  = PeekIndex<LorentzIndex>(Uthin, mu);
+        UU = Gimpl::CovShiftForward(U,mu,U);
+        UUU= Gimpl::CovShiftForward(U,mu,UU);
+
+        UUUdag = adj( Cshift(UUU, mu, -3));
+
+        UUU    = UUU    *phases;
+        UUUdag = UUUdag *phases;
+
+        PokeIndex<LorentzIndex>(UUUds, UUU, mu);
+        PokeIndex<LorentzIndex>(UUUds, UUUdag, mu+4);
+
+      }
+    }
+
+    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
+      GaugeLinkField link(mat._grid);
+      link = TraceIndex<SpinIndex>(outerProduct(Btilde,A));
+      PokeIndex<LorentzIndex>(mat,link,mu);
+    }
+
+    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
+      assert (0);
+      // Must never hit
+    }
+  };
+
+
+  /////////////////////////////////////////////////////////////////////////////
+  // Single flavour one component spinors with colour index. 5d vec
+  /////////////////////////////////////////////////////////////////////////////
+  template <class S, class Representation = FundamentalRepresentation >
+  class StaggeredVec5dImpl : public PeriodicGaugeImpl<GaugeImplTypes<S, Representation::Dimension > > {
+
+    public:
+
+    typedef RealD  _Coeff_t ;
+    static const int Dimension = Representation::Dimension;
+    typedef PeriodicGaugeImpl<GaugeImplTypes<S, Dimension > > Gimpl;
+
+    //Necessary?
+    constexpr bool is_fundamental() const{return Dimension == Nc ? 1 : 0;}
+
+    const bool LsVectorised=true;
+
+    typedef _Coeff_t Coeff_t;
+
+    INHERIT_GIMPL_TYPES(Gimpl);
+
+    template <typename vtype> using iImplScalar            = iScalar<iScalar<iScalar<vtype> > >;
+    template <typename vtype> using iImplSpinor            = iScalar<iScalar<iVector<vtype, Dimension> > >;
+    template <typename vtype> using iImplHalfSpinor        = iScalar<iScalar<iVector<vtype, Dimension> > >;
+    template <typename vtype> using iImplDoubledGaugeField = iVector<iScalar<iMatrix<vtype, Dimension> >, Nds>;
+    template <typename vtype> using iImplGaugeField        = iVector<iScalar<iMatrix<vtype, Dimension> >, Nd>;
+    template <typename vtype> using iImplGaugeLink         = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
+    template <typename vtype> using iImplPropagator        = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
+
+    // Make the doubled gauge field a *scalar*
+    typedef iImplDoubledGaugeField<typename Simd::scalar_type> SiteDoubledGaugeField;  // This is a scalar
+    typedef iImplGaugeField<typename Simd::scalar_type> SiteScalarGaugeField;  // scalar
+    typedef iImplGaugeLink<typename Simd::scalar_type> SiteScalarGaugeLink;  // scalar
+    typedef iImplPropagator<Simd> SitePropagator;
+
+    typedef Lattice<SiteDoubledGaugeField> DoubledGaugeField;
+    typedef Lattice<SitePropagator> PropagatorField;
+
+    typedef iImplScalar<Simd>     SiteComplex;
+    typedef iImplSpinor<Simd>     SiteSpinor;
+    typedef iImplHalfSpinor<Simd> SiteHalfSpinor;
+
+
+    typedef Lattice<SiteComplex> ComplexField;
+    typedef Lattice<SiteSpinor>  FermionField;
+
+    typedef SimpleCompressor<SiteSpinor> Compressor;
+    typedef StaggeredImplParams ImplParams;
+    typedef CartesianStencil<SiteSpinor, SiteSpinor> StencilImpl;
+
+    ImplParams Params;
+
+    StaggeredVec5dImpl(const ImplParams &p = ImplParams()) : Params(p){};
+
+    template <class ref>
+    inline void loadLinkElement(Simd &reg, ref &memory) {
+      vsplat(reg, memory);
+    }
+
+    inline void multLink(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
+                         const SiteHalfSpinor &chi, int mu) {
+      SiteGaugeLink UU;
+      for (int i = 0; i < Dimension; i++) {
+        for (int j = 0; j < Dimension; j++) {
+          vsplat(UU()()(i, j), U(mu)()(i, j));
+        }
+      }
+      mult(&phi(), &UU(), &chi());
+    }
+    inline void multLinkAdd(SiteHalfSpinor &phi, const SiteDoubledGaugeField &U,
+                            const SiteHalfSpinor &chi, int mu) {
+      SiteGaugeLink UU;
+      for (int i = 0; i < Dimension; i++) {
+        for (int j = 0; j < Dimension; j++) {
+          vsplat(UU()()(i, j), U(mu)()(i, j));
+        }
+      }
+      mac(&phi(), &UU(), &chi());
+    }
+
+    inline void DoubleStore(GridBase *GaugeGrid,
+                            DoubledGaugeField &UUUds, // for Naik term
+                            DoubledGaugeField &Uds,
+                            const GaugeField &Uthin,
+                            const GaugeField &Ufat)
+    {
+
+      GridBase * InputGrid = Uthin._grid;
+      conformable(InputGrid,Ufat._grid);
+
+      GaugeLinkField U(InputGrid);
+      GaugeLinkField UU(InputGrid);
+      GaugeLinkField UUU(InputGrid);
+      GaugeLinkField Udag(InputGrid);
+      GaugeLinkField UUUdag(InputGrid);
+
+      for (int mu = 0; mu < Nd; mu++) {
+
+        // Staggered Phase.
+        Lattice<iScalar<vInteger> > coor(InputGrid);
+        Lattice<iScalar<vInteger> > x(InputGrid); LatticeCoordinate(x,0);
+        Lattice<iScalar<vInteger> > y(InputGrid); LatticeCoordinate(y,1);
+        Lattice<iScalar<vInteger> > z(InputGrid); LatticeCoordinate(z,2);
+        Lattice<iScalar<vInteger> > t(InputGrid); LatticeCoordinate(t,3);
+
+        Lattice<iScalar<vInteger> > lin_z(InputGrid); lin_z=x+y;
+        Lattice<iScalar<vInteger> > lin_t(InputGrid); lin_t=x+y+z;
+
+        ComplexField phases(InputGrid); phases=1.0;
+
+        if ( mu == 1 ) phases = where( mod(x    ,2)==(Integer)0, phases,-phases);
+        if ( mu == 2 ) phases = where( mod(lin_z,2)==(Integer)0, phases,-phases);
+        if ( mu == 3 ) phases = where( mod(lin_t,2)==(Integer)0, phases,-phases);
+
+        // 1 hop based on fat links
+        U      = PeekIndex<LorentzIndex>(Ufat, mu);
+        Udag   = adj( Cshift(U, mu, -1));
+
+        U    = U    *phases;
+        Udag = Udag *phases;
+
+
+        for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
+          SiteScalarGaugeLink   ScalarU;
+          SiteDoubledGaugeField ScalarUds;
+
+          std::vector<int> lcoor;
+          GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
+          peekLocalSite(ScalarUds, Uds, lcoor);
+
+          peekLocalSite(ScalarU, U, lcoor);
+          ScalarUds(mu) = ScalarU();
+
+          peekLocalSite(ScalarU, Udag, lcoor);
+          ScalarUds(mu + 4) = ScalarU();
+
+          pokeLocalSite(ScalarUds, Uds, lcoor);
+        }
+
+        // 3 hop based on thin links. Crazy huh ?
+        U  = PeekIndex<LorentzIndex>(Uthin, mu);
+        UU = Gimpl::CovShiftForward(U,mu,U);
+        UUU= Gimpl::CovShiftForward(U,mu,UU);
+
+        UUUdag = adj( Cshift(UUU, mu, -3));
+
+        UUU    = UUU    *phases;
+        UUUdag = UUUdag *phases;
+
+        for (int lidx = 0; lidx < GaugeGrid->lSites(); lidx++) {
+
+          SiteScalarGaugeLink   ScalarU;
+          SiteDoubledGaugeField ScalarUds;
+
+          std::vector<int> lcoor;
+          GaugeGrid->LocalIndexToLocalCoor(lidx, lcoor);
+
+          peekLocalSite(ScalarUds, UUUds, lcoor);
+
+          peekLocalSite(ScalarU, UUU, lcoor);
+          ScalarUds(mu) = ScalarU();
+
+          peekLocalSite(ScalarU, UUUdag, lcoor);
+          ScalarUds(mu + 4) = ScalarU();
+
+          pokeLocalSite(ScalarUds, UUUds, lcoor);
+        }
+
+      }
+    }
+
+    inline void InsertForce4D(GaugeField &mat, FermionField &Btilde, FermionField &A,int mu){
+      assert(0);
+    }
+
+    inline void InsertForce5D(GaugeField &mat, FermionField &Btilde, FermionField &Atilde,int mu){
+      assert (0);
+    }
+  };
+
+
 typedef WilsonImpl<vComplex,  FundamentalRepresentation > WilsonImplR;  // Real.. whichever prec
 typedef WilsonImpl<vComplexF, FundamentalRepresentation > WilsonImplF;  // Float
 typedef WilsonImpl<vComplexD, FundamentalRepresentation > WilsonImplD;  // Double
@@ -540,6 +853,14 @@ PARALLEL_FOR_LOOP
 typedef GparityWilsonImpl<vComplexF, Nc> GparityWilsonImplF;  // Float
 typedef GparityWilsonImpl<vComplexD, Nc> GparityWilsonImplD;  // Double
 
+typedef StaggeredImpl<vComplex,  FundamentalRepresentation > StaggeredImplR;  // Real.. whichever prec
+typedef StaggeredImpl<vComplexF, FundamentalRepresentation > StaggeredImplF;  // Float
+typedef StaggeredImpl<vComplexD, FundamentalRepresentation > StaggeredImplD;  // Double
+
+typedef StaggeredVec5dImpl<vComplex,  FundamentalRepresentation > StaggeredVec5dImplR;  // Real.. whichever prec
+typedef StaggeredVec5dImpl<vComplexF, FundamentalRepresentation > StaggeredVec5dImplF;  // Float
+typedef StaggeredVec5dImpl<vComplexD, FundamentalRepresentation > StaggeredVec5dImplD;  // Double
+
 }}
 
 #endif
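Both DoubleStore implementations above bake the Kogut-Susskind phases into the stored links: eta_0 = 1, eta_1 = (-1)^x, eta_2 = (-1)^(x+y), eta_3 = (-1)^(x+y+z), which is what the mod(x,2), mod(lin_z,2), mod(lin_t,2) selections compute per direction. A standalone scalar sketch of the same assignment:

#include <array>

// Staggered phase for direction mu at site x: (-1)^(x_0 + ... + x_{mu-1}).
inline int staggered_phase(const std::array<int,4>& x, int mu) {
  int s = 0;                      // sum of coordinates preceding direction mu
  for (int nu = 0; nu < mu; nu++) s += x[nu];
  return (s % 2 == 0) ? +1 : -1;
}

int main() {
  std::array<int,4> site{1, 0, 2, 3};
  int eta1 = staggered_phase(site, 1);  // (-1)^x = -1 at this site
  return (eta1 == -1) ? 0 : 1;
}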
|
403
lib/qcd/action/fermion/ImprovedStaggeredFermion.cc
Normal file
403
lib/qcd/action/fermion/ImprovedStaggeredFermion.cc
Normal file
@ -0,0 +1,403 @@
|
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
#include <Grid.h>

namespace Grid {
namespace QCD {

const std::vector<int>
ImprovedStaggeredFermionStatic::directions({0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3});
const std::vector<int>
ImprovedStaggeredFermionStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});

/////////////////////////////////
// Constructor and gauge import
/////////////////////////////////

template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid,
                                                         RealD _mass,
                                                         const ImplParams &p)
    : Kernels(p),
      _grid(&Fgrid),
      _cbgrid(&Hgrid),
      Stencil(&Fgrid, npoint, Even, directions, displacements),
      StencilEven(&Hgrid, npoint, Even, directions, displacements),  // source is Even
      StencilOdd(&Hgrid, npoint, Odd, directions, displacements),    // source is Odd
      mass(_mass),
      Lebesgue(_grid),
      LebesgueEvenOdd(_cbgrid),
      Umu(&Fgrid),
      UmuEven(&Hgrid),
      UmuOdd(&Hgrid),
      UUUmu(&Fgrid),
      UUUmuEven(&Hgrid),
      UUUmuOdd(&Hgrid),
      _tmp(&Hgrid)
{
}

template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
                                                         GridRedBlackCartesian &Hgrid, RealD _mass,
                                                         RealD _c1, RealD _c2, RealD _u0,
                                                         const ImplParams &p)
    : ImprovedStaggeredFermion(Fgrid, Hgrid, _mass, p)
{
  c1 = _c1;
  c2 = _c2;
  u0 = _u0;
  ImportGauge(_Uthin, _Ufat);
}

template <class Impl>
ImprovedStaggeredFermion<Impl>::ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
                                                         GridRedBlackCartesian &Hgrid, RealD _mass,
                                                         const ImplParams &p)
    : ImprovedStaggeredFermion(Fgrid, Hgrid, _mass, p)
{
  ImportGaugeSimple(_Utriple, _Ufat);
}

////////////////////////////////////////////////////////////
// Momentum space propagator should be
// https://arxiv.org/pdf/hep-lat/9712010.pdf
//
// mom space action:
// gamma_mu i ( c1 sin pmu + c2 sin 3 pmu ) + m
//
// must track through the staggered flavour/spin reduction in the literature
// to turn this into the free propagator for the one-component chi field,
// a la page 4/5 of the above link, to implement a Fourier-based solver.
////////////////////////////////////////////////////////////
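As a hedged worked form of the comment above (conventions taken from the cited paper, not from code in this commit): with the staggered phase signs \(\eta_\mu \in \{\pm 1\}\) produced by the spin/flavour reduction, the free one-component momentum-space operator and its inverse would read

\[
D(p) \;=\; m \;+\; i\sum_\mu \eta_\mu \left( c_1 \sin p_\mu + c_2 \sin 3p_\mu \right),
\]
\[
\tilde G(p) \;=\; \frac{\, m \;-\; i\sum_\mu \eta_\mu \left( c_1 \sin p_\mu + c_2 \sin 3p_\mu \right) }
                       {\, m^2 \;+\; \sum_\mu \left( c_1 \sin p_\mu + c_2 \sin 3p_\mu \right)^2 },
\]

using \(\eta_\mu^2 = 1\); at tree level \(c_1 = 9/8\) and \(c_2 = -1/24\), matching the constructor defaults below. Signs and phase conventions should be checked against hep-lat/9712010 before building a Fourier solver on this.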
template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin)
{
  ImportGauge(_Uthin, _Uthin);
};

template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGaugeSimple(const GaugeField &_Utriple, const GaugeField &_Ufat)
{
  /////////////////////////////////////////////////////////////////
  // Trivial import; phases and fattening and such like preapplied
  /////////////////////////////////////////////////////////////////
  GaugeLinkField U(GaugeGrid());

  for (int mu = 0; mu < Nd; mu++) {

    U = PeekIndex<LorentzIndex>(_Utriple, mu);
    PokeIndex<LorentzIndex>(UUUmu, U, mu);

    U = adj( Cshift(U, mu, -3));
    PokeIndex<LorentzIndex>(UUUmu, -U, mu+4);

    U = PeekIndex<LorentzIndex>(_Ufat, mu);
    PokeIndex<LorentzIndex>(Umu, U, mu);

    U = adj( Cshift(U, mu, -1));
    PokeIndex<LorentzIndex>(Umu, -U, mu+4);

  }
  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd,    Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat)
{
  GaugeLinkField U(GaugeGrid());

  ////////////////////////////////////////////////////////
  // Double Store should take two fields for Naik and one hop separately.
  ////////////////////////////////////////////////////////
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat);

  ////////////////////////////////////////////////////////
  // Apply scale factors to get the right fermion Kinetic term
  // Could pass coeffs into the double store to save work.
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) )
  ////////////////////////////////////////////////////////
  for (int mu = 0; mu < Nd; mu++) {

    U = PeekIndex<LorentzIndex>(Umu, mu);
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu);

    U = PeekIndex<LorentzIndex>(Umu, mu+4);
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);

    U = PeekIndex<LorentzIndex>(UUUmu, mu);
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu);

    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
  }

  std::cout << " Umu "   << Umu._odata[0]   << std::endl;
  std::cout << " UUUmu " << UUUmu._odata[0] << std::endl;
  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd,    Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}
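To pin down what the rescaling loop above produces, here is the resulting kinetic term in hedged, conventional notation (whether the staggered phases sit in the stored links is implementation dependent and not shown in this diff):

\[
(D\,\chi)(x) \;=\; \sum_\mu \Big[
 \tfrac{c_1}{2u_0}\big( U_\mu(x)\,\chi(x{+}\hat\mu) - U_\mu^\dagger(x{-}\hat\mu)\,\chi(x{-}\hat\mu) \big)
 \;+\; \tfrac{c_2}{2u_0^3}\big( U^{(3)}_\mu(x)\,\chi(x{+}3\hat\mu) - U^{(3)\dagger}_\mu(x{-}3\hat\mu)\,\chi(x{-}3\hat\mu) \big)
\Big],
\]

so after the loop, Umu carries \(\pm 0.5\,c_1/u_0\) times the one-hop links and UUUmu carries \(\pm 0.5\,c_2/u_0^3\) times the three-hop (Naik) links, with the minus signs attached to the backward (mu+4) directions.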
/////////////////////////////
// Implement the interface
/////////////////////////////

template <class Impl>
RealD ImprovedStaggeredFermion<Impl>::M(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerNo);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
RealD ImprovedStaggeredFermion<Impl>::Mdag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerYes);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Meooe(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerNo);
  } else {
    DhopOE(in, out, DaggerNo);
  }
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerYes);
  } else {
    DhopOE(in, out, DaggerYes);
  }
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(mass);
  out = scal * in;
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0 / (mass)) * in;
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::MooeeInvDag(const FermionField &in,
                                                 FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in, out);
}

///////////////////////////////////
// Internal
///////////////////////////////////

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DerivInternal(StencilImpl &st, DoubledGaugeField &U, DoubledGaugeField &UUU,
                                                   GaugeField &mat,
                                                   const FermionField &A, const FermionField &B, int dag) {
  assert((dag == DaggerNo) || (dag == DaggerYes));

  Compressor compressor;

  FermionField Btilde(B._grid);
  FermionField Atilde(B._grid);
  Atilde = A;

  st.HaloExchange(B, compressor);

  for (int mu = 0; mu < Nd; mu++) {

    ////////////////////////
    // Call the single hop
    ////////////////////////
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < B._grid->oSites(); sss++) {
      Kernels::DhopDir(st, U, UUU, st.CommBuf(), sss, sss, B, Btilde, mu, 1);
    }

    // Force in three link terms
    //
    //    Impl::InsertForce4D(mat, Btilde, Atilde, mu);
    //
    // dU_ac(x)/dt = i p_ab U_bc(x)
    //
    // => dS_f/dt = dS_f/dU_ac(x) . dU_ac(x)/dt = i p_ab U_bc(x) dS_f/dU_ac(x)
    //
    // One link: form fragments S_f = A U B
    //
    //   write Btilde = U(x) B(x+mu)
    //
    //   mat+= TraceIndex<SpinIndex>(outerProduct(Btilde,A));
    //
    // Three link: form fragments S_f = A UUU B
    //
    //   mat+= outer ( A,   UUUB) <-- Best take DhopDeriv with one link or identity matrix
    //   mat+= outer ( AU,  UUB)  <-- and then use covariant cshift?
    //   mat+= outer ( AUU, UB)   <-- Returned from call to DhopDir

    assert(0); // need to figure out the force interface with a blasted three link term.

  }
}
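To make the bookkeeping in the comments above explicit, here is a hedged worked form (conventions as in the comments, not code from this commit). With conjugate momenta \(\pi_\mu(x)\) driving the molecular-dynamics update,

\[
\dot U_\mu(x) \;=\; i\,\pi_\mu(x)\,U_\mu(x)
\quad\Longrightarrow\quad
\dot S_f \;=\; \sum_{x,\mu} \mathrm{tr}\!\left[\, i\,\pi_\mu(x)\, U_\mu(x)\,
\frac{\partial S_f}{\partial U_\mu(x)} \right].
\]

For a one-link fragment \(S_f = A(x)\,U_\mu(x)\,B(x+\hat\mu)\) the derivative is the outer product

\[
\frac{\partial S_f}{\partial (U_\mu)_{ac}(x)} \;=\; A_a(x)\,B_c(x+\hat\mu),
\]

which is why the one-link force accumulates \(\mathrm{outerProduct}(\tilde B, A)\) with \(\tilde B = U_\mu(x)\,B(x+\hat\mu)\). The three-link (Naik) term needs the three split fragments listed in the comment, one per link of \(U^{(3)}_\mu\), and the assert(0) stays until that interface exists.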
template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _grid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  mat.checkerboard = U.checkerboard;

  DerivInternal(Stencil, Umu, UUUmu, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _cbgrid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  assert(V.checkerboard == Even);
  assert(U.checkerboard == Odd);
  mat.checkerboard = Odd;

  DerivInternal(StencilEven, UmuOdd, UUUmuOdd, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag) {

  conformable(U._grid, _cbgrid);
  conformable(U._grid, V._grid);
  conformable(U._grid, mat._grid);

  assert(V.checkerboard == Odd);
  assert(U.checkerboard == Even);
  mat.checkerboard = Even;

  DerivInternal(StencilOdd, UmuEven, UUUmuEven, mat, U, V, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Dhop(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _grid);  // verifies full grid
  conformable(in._grid, out._grid);

  out.checkerboard = in.checkerboard;

  DhopInternal(Stencil, Lebesgue, Umu, UUUmu, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopOE(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

  assert(in.checkerboard == Even);
  out.checkerboard = Odd;

  DhopInternal(StencilEven, LebesgueEvenOdd, UmuOdd, UUUmuOdd, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopEO(const FermionField &in, FermionField &out, int dag) {
  conformable(in._grid, _cbgrid);    // verifies half grid
  conformable(in._grid, out._grid);  // drops the cb check

  assert(in.checkerboard == Odd);
  out.checkerboard = Even;

  DhopInternal(StencilOdd, LebesgueEvenOdd, UmuEven, UUUmuEven, in, out, dag);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopDir(const FermionField &in, FermionField &out, int dir, int disp) {

  Compressor compressor;
  Stencil.HaloExchange(in, compressor);

  PARALLEL_FOR_LOOP
  for (int sss = 0; sss < in._grid->oSites(); sss++) {
    Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sss, sss, in, out, dir, disp);
  }
};

template <class Impl>
void ImprovedStaggeredFermion<Impl>::DhopInternal(StencilImpl &st, LebesgueOrder &lo,
                                                  DoubledGaugeField &U,
                                                  DoubledGaugeField &UUU,
                                                  const FermionField &in,
                                                  FermionField &out, int dag) {
  assert((dag == DaggerNo) || (dag == DaggerYes));

  Compressor compressor;
  st.HaloExchange(in, compressor);

  if (dag == DaggerYes) {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
    }
  } else {
    PARALLEL_FOR_LOOP
    for (int sss = 0; sss < in._grid->oSites(); sss++) {
      Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), 1, sss, in, out);
    }
  }
};

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion);

//AdjointFermOpTemplateInstantiate(ImprovedStaggeredFermion);
//TwoIndexFermOpTemplateInstantiate(ImprovedStaggeredFermion);

}}
167 lib/qcd/action/fermion/ImprovedStaggeredFermion.h Normal file
@ -0,0 +1,167 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggered.h

    Copyright (C) 2015

    Author: Azusa Yamaguchi, Peter Boyle

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_IMPR_STAG_FERMION_H
#define GRID_QCD_IMPR_STAG_FERMION_H

namespace Grid {

namespace QCD {

class ImprovedStaggeredFermionStatic {
 public:
  static const std::vector<int> directions;
  static const std::vector<int> displacements;
  static const int npoint = 16;
};

template <class Impl>
class ImprovedStaggeredFermion : public StaggeredKernels<Impl>, public ImprovedStaggeredFermionStatic {
 public:
  INHERIT_IMPL_TYPES(Impl);
  typedef StaggeredKernels<Impl> Kernels;

  FermionField _tmp;
  FermionField &tmp(void) { return _tmp; }

  ///////////////////////////////////////////////////////////////
  // Implement the abstract base
  ///////////////////////////////////////////////////////////////
  GridBase *GaugeGrid(void) { return _grid; }
  GridBase *GaugeRedBlackGrid(void) { return _cbgrid; }
  GridBase *FermionGrid(void) { return _grid; }
  GridBase *FermionRedBlackGrid(void) { return _cbgrid; }

  //////////////////////////////////////////////////////////////////
  // override multiply; cut the number of routines by passing a dagger
  // argument, and also make the interface more uniformly consistent
  //////////////////////////////////////////////////////////////////
  RealD M(const FermionField &in, FermionField &out);
  RealD Mdag(const FermionField &in, FermionField &out);

  /////////////////////////////////////////////////////////
  // half checkerboard operations
  /////////////////////////////////////////////////////////
  void Meooe(const FermionField &in, FermionField &out);
  void MeooeDag(const FermionField &in, FermionField &out);
  void Mooee(const FermionField &in, FermionField &out);
  void MooeeDag(const FermionField &in, FermionField &out);
  void MooeeInv(const FermionField &in, FermionField &out);
  void MooeeInvDag(const FermionField &in, FermionField &out);

  ////////////////////////
  // Derivative interface
  ////////////////////////
  // Interface calls an internal routine
  void DhopDeriv  (GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
  void DhopDerivOE(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);
  void DhopDerivEO(GaugeField &mat, const FermionField &U, const FermionField &V, int dag);

  ///////////////////////////////////////////////////////////////
  // non-hermitian hopping term; half cb or both
  ///////////////////////////////////////////////////////////////
  void Dhop  (const FermionField &in, FermionField &out, int dag);
  void DhopOE(const FermionField &in, FermionField &out, int dag);
  void DhopEO(const FermionField &in, FermionField &out, int dag);

  ///////////////////////////////////////////////////////////////
  // Multigrid assistance; force term uses too
  ///////////////////////////////////////////////////////////////
  void Mdir(const FermionField &in, FermionField &out, int dir, int disp);
  void DhopDir(const FermionField &in, FermionField &out, int dir, int disp);

  ///////////////////////////////////////////////////////////////
  // Extra methods added by derived
  ///////////////////////////////////////////////////////////////
  void DerivInternal(StencilImpl &st,
                     DoubledGaugeField &U, DoubledGaugeField &UUU,
                     GaugeField &mat,
                     const FermionField &A, const FermionField &B, int dag);

  void DhopInternal(StencilImpl &st, LebesgueOrder &lo, DoubledGaugeField &U, DoubledGaugeField &UUU,
                    const FermionField &in, FermionField &out, int dag);

  // Constructors
  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Ufat, GridCartesian &Fgrid,
                           GridRedBlackCartesian &Hgrid, RealD _mass,
                           RealD _c1 = 9.0/8.0, RealD _c2 = -1.0/24.0, RealD _u0 = 1.0,
                           const ImplParams &p = ImplParams());

  ImprovedStaggeredFermion(GaugeField &_Uthin, GaugeField &_Utriple, GaugeField &_Ufat, GridCartesian &Fgrid,
                           GridRedBlackCartesian &Hgrid, RealD _mass,
                           const ImplParams &p = ImplParams());

  ImprovedStaggeredFermion(GridCartesian &Fgrid, GridRedBlackCartesian &Hgrid, RealD _mass,
                           const ImplParams &p = ImplParams());

  // DoubleStore impl dependent
  void ImportGaugeSimple(const GaugeField &_Utriple, const GaugeField &_Ufat);
  void ImportGauge(const GaugeField &_Uthin, const GaugeField &_Ufat);
  void ImportGauge(const GaugeField &_Uthin);

  ///////////////////////////////////////////////////////////////
  // Data members required to support the functionality
  ///////////////////////////////////////////////////////////////

  // protected:
 public:
  // any other parameters of action ???

  RealD mass;
  RealD u0;
  RealD c1;
  RealD c2;

  GridBase *_grid;
  GridBase *_cbgrid;

  // Defines the stencils for even and odd
  StencilImpl Stencil;
  StencilImpl StencilEven;
  StencilImpl StencilOdd;

  // Copy of the gauge field, with even and odd subsets
  DoubledGaugeField Umu;
  DoubledGaugeField UmuEven;
  DoubledGaugeField UmuOdd;

  DoubledGaugeField UUUmu;
  DoubledGaugeField UUUmuEven;
  DoubledGaugeField UUUmuOdd;

  LebesgueOrder Lebesgue;
  LebesgueOrder LebesgueEvenOdd;
};

typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;

}
}
#endif
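A minimal usage sketch for the class above (not part of this commit; the grid helpers SpaceTimeGrid::makeFourDimGrid / makeFourDimRedBlackGrid, GridDefaultSimd and GridDefaultMpi are assumed from the wider Grid library, and the gauge field would normally be loaded or generated rather than left uninitialised):

  std::vector<int> latt({8,8,8,8});
  GridCartesian         *UGrid   = SpaceTimeGrid::makeFourDimGrid(latt, GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);

  LatticeGaugeFieldD Umu(UGrid);   // thin links; reused here as the fat links too
  RealD mass = 0.1;

  // c1=9/8, c2=-1/24, u0=1 defaults come from the constructor declaration above
  ImprovedStaggeredFermionD Ds(Umu, Umu, *UGrid, *UrbGrid, mass);

  ImprovedStaggeredFermionD::FermionField src(UGrid), res(UGrid);
  Ds.M(src, res);                  // res = mass*src + Dhop(src); returns its norm2

Passing the same field for thin and fat links skips any smearing; a production HISQ-style setup would supply separately fattened links.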
355 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc Normal file
@ -0,0 +1,355 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.cc

    Copyright (C) 2015

    Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
#include <Grid/perfmon/PerfCount.h>

namespace Grid {
namespace QCD {

// S-direction is INNERMOST and takes no part in the parity.
const std::vector<int>
ImprovedStaggeredFermion5DStatic::directions({1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4});
const std::vector<int>
ImprovedStaggeredFermion5DStatic::displacements({1, 1, 1, 1, -1, -1, -1, -1, 3, 3, 3, 3, -3, -3, -3, -3});

// 5d lattice for DWF.
template<class Impl>
ImprovedStaggeredFermion5D<Impl>::ImprovedStaggeredFermion5D(GaugeField &_Uthin, GaugeField &_Ufat,
                                                             GridCartesian         &FiveDimGrid,
                                                             GridRedBlackCartesian &FiveDimRedBlackGrid,
                                                             GridCartesian         &FourDimGrid,
                                                             GridRedBlackCartesian &FourDimRedBlackGrid,
                                                             RealD _mass,
                                                             RealD _c1, RealD _c2, RealD _u0,
                                                             const ImplParams &p) :
  Kernels(p),
  _FiveDimGrid        (&FiveDimGrid),
  _FiveDimRedBlackGrid(&FiveDimRedBlackGrid),
  _FourDimGrid        (&FourDimGrid),
  _FourDimRedBlackGrid(&FourDimRedBlackGrid),
  Stencil    (&FiveDimGrid,npoint,Even,directions,displacements),
  StencilEven(&FiveDimRedBlackGrid,npoint,Even,directions,displacements), // source is Even
  StencilOdd (&FiveDimRedBlackGrid,npoint,Odd ,directions,displacements), // source is Odd
  mass(_mass),
  c1(_c1),
  c2(_c2),
  u0(_u0),
  Umu(&FourDimGrid),
  UmuEven(&FourDimRedBlackGrid),
  UmuOdd (&FourDimRedBlackGrid),
  UUUmu(&FourDimGrid),
  UUUmuEven(&FourDimRedBlackGrid),
  UUUmuOdd(&FourDimRedBlackGrid),
  Lebesgue(&FourDimGrid),
  LebesgueEvenOdd(&FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
{

  // some assertions
  assert(FiveDimGrid._ndimension==5);
  assert(FourDimGrid._ndimension==4);
  assert(FourDimRedBlackGrid._ndimension==4);
  assert(FiveDimRedBlackGrid._ndimension==5);
  assert(FiveDimRedBlackGrid._checker_dim==1); // Don't checker the s direction

  // extent of fifth dim and not spread out
  Ls=FiveDimGrid._fdimensions[0];
  assert(FiveDimRedBlackGrid._fdimensions[0]==Ls);
  assert(FiveDimGrid._processors[0]         ==1);
  assert(FiveDimRedBlackGrid._processors[0] ==1);

  // Other dimensions must match the decomposition of the four-D fields
  for(int d=0;d<4;d++){
    assert(FiveDimGrid._processors[d+1]         ==FourDimGrid._processors[d]);
    assert(FiveDimRedBlackGrid._processors[d+1] ==FourDimGrid._processors[d]);
    assert(FourDimRedBlackGrid._processors[d]   ==FourDimGrid._processors[d]);

    assert(FiveDimGrid._fdimensions[d+1]        ==FourDimGrid._fdimensions[d]);
    assert(FiveDimRedBlackGrid._fdimensions[d+1]==FourDimGrid._fdimensions[d]);
    assert(FourDimRedBlackGrid._fdimensions[d]  ==FourDimGrid._fdimensions[d]);

    assert(FiveDimGrid._simd_layout[d+1]        ==FourDimGrid._simd_layout[d]);
    assert(FiveDimRedBlackGrid._simd_layout[d+1]==FourDimGrid._simd_layout[d]);
    assert(FourDimRedBlackGrid._simd_layout[d]  ==FourDimGrid._simd_layout[d]);
  }

  if (Impl::LsVectorised) {

    int nsimd = Simd::Nsimd();

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimGrid._simd_layout[0]        ==nsimd);
    assert(FiveDimRedBlackGrid._simd_layout[0]==nsimd);

    for(int d=0;d<4;d++){
      assert(FourDimGrid._simd_layout[d]==1);
      assert(FourDimRedBlackGrid._simd_layout[d]==1);
      assert(FiveDimRedBlackGrid._simd_layout[d+1]==1);
    }

  } else {

    // Dimension zero of the five-d is the Ls direction
    assert(FiveDimRedBlackGrid._simd_layout[0]==1);
    assert(FiveDimGrid._simd_layout[0]        ==1);

  }

  // Allocate the required comms buffer
  ImportGauge(_Uthin,_Ufat);
}
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin)
{
  ImportGauge(_Uthin,_Uthin);
};

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat)
{
  ////////////////////////////////////////////////////////
  // Double Store should take two fields for Naik and one hop separately.
  ////////////////////////////////////////////////////////
  Impl::DoubleStore(GaugeGrid(), UUUmu, Umu, _Uthin, _Ufat );

  ////////////////////////////////////////////////////////
  // Apply scale factors to get the right fermion Kinetic term
  // Could pass coeffs into the double store to save work.
  // 0.5 ( U p(x+mu) - Udag(x-mu) p(x-mu) )
  ////////////////////////////////////////////////////////
  for (int mu = 0; mu < Nd; mu++) {

    auto U = PeekIndex<LorentzIndex>(Umu, mu);
    PokeIndex<LorentzIndex>(Umu, U*( 0.5*c1/u0), mu );

    U = PeekIndex<LorentzIndex>(Umu, mu+4);
    PokeIndex<LorentzIndex>(Umu, U*(-0.5*c1/u0), mu+4);

    U = PeekIndex<LorentzIndex>(UUUmu, mu);
    PokeIndex<LorentzIndex>(UUUmu, U*( 0.5*c2/u0/u0/u0), mu );

    U = PeekIndex<LorentzIndex>(UUUmu, mu+4);
    PokeIndex<LorentzIndex>(UUUmu, U*(-0.5*c2/u0/u0/u0), mu+4);
  }

  pickCheckerboard(Even, UmuEven,   Umu);
  pickCheckerboard(Odd,  UmuOdd,    Umu);
  pickCheckerboard(Even, UUUmuEven, UUUmu);
  pickCheckerboard(Odd,  UUUmuOdd,  UUUmu);
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDir(const FermionField &in, FermionField &out,int dir5,int disp)
{
  int dir = dir5-1; // Maps to the ordering above in "directions" that is passed to stencil
                    // we drop off the innermost fifth dimension

  Compressor compressor;
  Stencil.HaloExchange(in,compressor);

  parallel_for(int ss=0;ss<Umu._grid->oSites();ss++){
    for(int s=0;s<Ls;s++){
      int sU=ss;
      int sF = s+Ls*sU;
      Kernels::DhopDir(Stencil, Umu, UUUmu, Stencil.CommBuf(), sF, sU, in, out, dir, disp);
    }
  }
};
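A hedged illustration of the dir5 convention above (caller and field names are hypothetical): because the stencil was built from the 4d directions {1,2,3,4,...}, a caller on the 5d grid passes the 5d coordinate index, and the same hop is applied on every s-slice at once.

  // Hypothetical call site: hop by +1 in the y direction (5d coordinate 2)
  // simultaneously on all Ls right-hand sides.
  Dstag5.DhopDir(src5d, res5d, /*dir5=*/2, /*disp=*/+1);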
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DerivInternal(StencilImpl & st,
                                                     DoubledGaugeField & U,
                                                     DoubledGaugeField & UUU,
                                                     GaugeField &mat,
                                                     const FermionField &A,
                                                     const FermionField &B,
                                                     int dag)
{
  // No force terms in multi-rhs solver staggered
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDeriv(GaugeField &mat,
                                                 const FermionField &A,
                                                 const FermionField &B,
                                                 int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDerivEO(GaugeField &mat,
                                                   const FermionField &A,
                                                   const FermionField &B,
                                                   int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopDerivOE(GaugeField &mat,
                                                   const FermionField &A,
                                                   const FermionField &B,
                                                   int dag)
{
  assert(0);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOrder &lo,
                                                    DoubledGaugeField & U, DoubledGaugeField & UUU,
                                                    const FermionField &in, FermionField &out,int dag)
{
  Compressor compressor;
  int LLs = in._grid->_rdimensions[0];
  st.HaloExchange(in,compressor);

  // Dhop takes the 4d grid from U, and makes a 5d index for fermion
  if (dag == DaggerYes) {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU=ss;
      Kernels::DhopSiteDag(st, lo, U, UUU, st.CommBuf(), LLs, sU, in, out);
    }
  } else {
    parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
      int sU=ss;
      Kernels::DhopSite(st, lo, U, UUU, st.CommBuf(), LLs, sU, in, out);
    }
  }
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
  conformable(in._grid,out._grid);             // drops the cb check

  assert(in.checkerboard==Even);
  out.checkerboard = Odd;

  DhopInternal(StencilEven,LebesgueEvenOdd,UmuOdd,UUUmuOdd,in,out,dag);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
  conformable(in._grid,out._grid);             // drops the cb check

  assert(in.checkerboard==Odd);
  out.checkerboard = Even;

  DhopInternal(StencilOdd,LebesgueEvenOdd,UmuEven,UUUmuEven,in,out,dag);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
{
  conformable(in._grid,FermionGrid()); // verifies full grid
  conformable(in._grid,out._grid);

  out.checkerboard = in.checkerboard;

  DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag);
}

/////////////////////////////////////////////////////////////////////////
// Implement the general interface. Here we use SAME mass on all slices
/////////////////////////////////////////////////////////////////////////
template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Mdir(const FermionField &in, FermionField &out, int dir, int disp) {
  DhopDir(in, out, dir, disp);
}

template <class Impl>
RealD ImprovedStaggeredFermion5D<Impl>::M(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerNo);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
RealD ImprovedStaggeredFermion5D<Impl>::Mdag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Dhop(in, out, DaggerYes);
  return axpy_norm(out, mass, in, out);
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Meooe(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerNo);
  } else {
    DhopOE(in, out, DaggerNo);
  }
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MeooeDag(const FermionField &in, FermionField &out) {
  if (in.checkerboard == Odd) {
    DhopEO(in, out, DaggerYes);
  } else {
    DhopOE(in, out, DaggerYes);
  }
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::Mooee(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  typename FermionField::scalar_type scal(mass);
  out = scal * in;
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeDag(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  Mooee(in, out);
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeInv(const FermionField &in, FermionField &out) {
  out.checkerboard = in.checkerboard;
  out = (1.0 / (mass)) * in;
}

template <class Impl>
void ImprovedStaggeredFermion5D<Impl>::MooeeInvDag(const FermionField &in,
                                                   FermionField &out) {
  out.checkerboard = in.checkerboard;
  MooeeInv(in, out);
}

FermOpStaggeredTemplateInstantiate(ImprovedStaggeredFermion5D);
FermOpStaggeredVec5dTemplateInstantiate(ImprovedStaggeredFermion5D);

}}
167 lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h Normal file
@ -0,0 +1,167 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/action/fermion/ImprovedStaggeredFermion5D.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>
    Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H
#define GRID_QCD_IMPROVED_STAGGERED_FERMION_5D_H

namespace Grid {
namespace QCD {

////////////////////////////////////////////////////////////////////////////////
// This is the 4d red black case appropriate to support
////////////////////////////////////////////////////////////////////////////////

class ImprovedStaggeredFermion5DStatic {
 public:
  // S-direction is INNERMOST and takes no part in the parity.
  static const std::vector<int> directions;
  static const std::vector<int> displacements;
  const int npoint = 16;
};

template<class Impl>
class ImprovedStaggeredFermion5D : public StaggeredKernels<Impl>, public ImprovedStaggeredFermion5DStatic
{
 public:
  INHERIT_IMPL_TYPES(Impl);
  typedef StaggeredKernels<Impl> Kernels;

  FermionField _tmp;
  FermionField &tmp(void) { return _tmp; }

  ///////////////////////////////////////////////////////////////
  // Implement the abstract base
  ///////////////////////////////////////////////////////////////
  GridBase *GaugeGrid(void)           { return _FourDimGrid; }
  GridBase *GaugeRedBlackGrid(void)   { return _FourDimRedBlackGrid; }
  GridBase *FermionGrid(void)         { return _FiveDimGrid; }
  GridBase *FermionRedBlackGrid(void) { return _FiveDimRedBlackGrid; }

  // full checkerboard operations; leave unimplemented as abstract for now
  RealD M    (const FermionField &in, FermionField &out);
  RealD Mdag (const FermionField &in, FermionField &out);

  // half checkerboard operations
  void Meooe       (const FermionField &in, FermionField &out);
  void Mooee       (const FermionField &in, FermionField &out);
  void MooeeInv    (const FermionField &in, FermionField &out);

  void MeooeDag    (const FermionField &in, FermionField &out);
  void MooeeDag    (const FermionField &in, FermionField &out);
  void MooeeInvDag (const FermionField &in, FermionField &out);

  void Mdir   (const FermionField &in, FermionField &out,int dir,int disp);
  void DhopDir(const FermionField &in, FermionField &out,int dir,int disp);

  // These can be overridden by fancy 5d chiral action
  void DhopDeriv  (GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  void DhopDerivEO(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);
  void DhopDerivOE(GaugeField &mat,const FermionField &U,const FermionField &V,int dag);

  // Implement the non-hermitian hopping term; half cb or both
  void Dhop  (const FermionField &in, FermionField &out,int dag);
  void DhopOE(const FermionField &in, FermionField &out,int dag);
  void DhopEO(const FermionField &in, FermionField &out,int dag);

  ///////////////////////////////////////////////////////////////
  // New methods added
  ///////////////////////////////////////////////////////////////
  void DerivInternal(StencilImpl & st,
                     DoubledGaugeField & U,
                     DoubledGaugeField & UUU,
                     GaugeField &mat,
                     const FermionField &A,
                     const FermionField &B,
                     int dag);

  void DhopInternal(StencilImpl & st,
                    LebesgueOrder &lo,
                    DoubledGaugeField &U,
                    DoubledGaugeField &UUU,
                    const FermionField &in,
                    FermionField &out,
                    int dag);

  // Constructors
  ImprovedStaggeredFermion5D(GaugeField &_Uthin,
                             GaugeField &_Ufat,
                             GridCartesian         &FiveDimGrid,
                             GridRedBlackCartesian &FiveDimRedBlackGrid,
                             GridCartesian         &FourDimGrid,
                             GridRedBlackCartesian &FourDimRedBlackGrid,
                             double _mass,
                             RealD _c1=9.0/8.0, RealD _c2=-1.0/24.0, RealD _u0=1.0,
                             const ImplParams &p= ImplParams());

  // DoubleStore
  void ImportGauge(const GaugeField &_U);
  void ImportGauge(const GaugeField &_Uthin,const GaugeField &_Ufat);

  ///////////////////////////////////////////////////////////////
  // Data members required to support the functionality
  ///////////////////////////////////////////////////////////////
 public:

  GridBase *_FourDimGrid;
  GridBase *_FourDimRedBlackGrid;
  GridBase *_FiveDimGrid;
  GridBase *_FiveDimRedBlackGrid;

  RealD mass;
  RealD c1;
  RealD c2;
  RealD u0;
  int Ls;

  // Defines the stencils for even and odd
  StencilImpl Stencil;
  StencilImpl StencilEven;
  StencilImpl StencilOdd;

  // Copy of the gauge field, with even and odd subsets
  DoubledGaugeField Umu;
  DoubledGaugeField UmuEven;
  DoubledGaugeField UmuOdd;

  DoubledGaugeField UUUmu;
  DoubledGaugeField UUUmuEven;
  DoubledGaugeField UUUmuOdd;

  LebesgueOrder Lebesgue;
  LebesgueOrder LebesgueEvenOdd;

  // Comms buffer
  std::vector<SiteHalfSpinor,alignedAllocator<SiteHalfSpinor> > comm_buf;

};

}}

#endif
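A hedged companion sketch for the multi-RHS use of this class (grid helper names as in the 4d sketch earlier are assumed from the wider library; there is no precision typedef in this header, so the template is spelled out). The fifth dimension here is Ls independent right-hand sides sharing one mass, per the "SAME mass on all slices" comment in the .cc:

  const int Ls = 8;
  std::vector<int> latt({8,8,8,8});
  GridCartesian         *UGrid   = SpaceTimeGrid::makeFourDimGrid(latt, GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian *UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         *FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls, UGrid);
  GridRedBlackCartesian *FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid);

  LatticeGaugeFieldD Umu(UGrid);   // would normally be loaded/generated
  RealD mass = 0.1;

  // One 4d gauge field drives Ls simultaneous solves; force terms assert(0) by design.
  ImprovedStaggeredFermion5D<StaggeredImplD> Ds5(Umu, Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass);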
@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_MOBIUS_FERMION_H
 #define GRID_QCD_MOBIUS_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H
 #define GRID_QCD_MOBIUS_ZOLOTAREV_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CAYLEY_TANH_FERMION_H
 #define OVERLAP_WILSON_CAYLEY_TANH_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_CAYLEY_ZOLOTAREV_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H
 #define OVERLAP_WILSON_CONTFRAC_TANH_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_CONTFRAC_ZOLOTAREV_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H
 #define OVERLAP_WILSON_PARTFRAC_TANH_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #ifndef OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H
 #define OVERLAP_WILSON_PARTFRAC_ZOLOTAREV_FERMION_H

-#include <Grid/Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>

 namespace Grid {

@ -26,7 +26,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 See the full license in the file "LICENSE" in the top level distribution directory
 *************************************************************************************/
 /* END LEGAL */
-#include <Grid.h>
+#include <Grid/qcd/action/fermion/FermionCore.h>
+#include <Grid/qcd/action/fermion/PartialFractionFermion5D.h>

 namespace Grid {
 namespace QCD {
Some files were not shown because too many files have changed in this diff