mirror of https://github.com/paboyle/Grid.git synced 2025-06-19 08:17:05 +01:00

Merge branch 'develop' into feature/scidac-wp1

This commit is contained in:
Peter Boyle
2024-03-06 14:55:21 -05:00
103 changed files with 5678 additions and 589 deletions

View File

@@ -30,27 +30,16 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
using namespace std;
using namespace Grid;
template<class d>
struct scal {
d internal;
};
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
int main (int argc, char ** argv)
{
char hostname[HOST_NAME_MAX+1];
gethostname(hostname, HOST_NAME_MAX+1);
std::string host(hostname);
Grid_init(&argc,&argv);
const int Ls=12;
std::cout << GridLogMessage << "::::: NB: to enable a quick bit reproducibility check use the --checksums flag. " << std::endl;
{
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
@@ -92,7 +81,14 @@ int main (int argc, char ** argv)
SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermionD> HermOpEO(Ddwf);
SchurDiagMooeeOperator<DomainWallFermionF,LatticeFermionF> HermOpEO_f(Ddwf_f);
std::cout << GridLogMessage << "::::::::::::: Starting mixed CG" << std::endl;
int nsecs=600;
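// Default soak time is 600 s; override below with --seconds <N>.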
if( GridCmdOptionExists(argv,argv+argc,"--seconds") ){
std::string arg = GridCmdOptionPayload(argv,argv+argc,"--seconds");
GridCmdOptionInt(arg,nsecs);
}
std::cout << GridLogMessage << "::::::::::::: Starting mixed CG for "<<nsecs <<" seconds" << std::endl;
MixedPrecisionConjugateGradient<LatticeFermionD,LatticeFermionF> mCG(1.0e-8, 10000, 50, FrbGrid_f, HermOpEO_f, HermOpEO);
double t1,t2,flops;
double MdagMsiteflops = 1452; // Mobius (real coeffs)
@@ -101,7 +97,14 @@ int main (int argc, char ** argv)
std:: cout << " MdagM site flops = "<< 4*MdagMsiteflops<<std::endl;
std:: cout << " CG site flops = "<< CGsiteflops <<std::endl;
int iters;
for(int i=0;i<10;i++){
time_t start = time(NULL);
uint32_t csum, csumref;
csumref=0;
int iter=0;
do {
std::cerr << "******************* SINGLE PRECISION SOLVE "<<iter<<std::endl;
result_o = Zero();
t1=usecond();
mCG(src_o,result_o);
@@ -111,10 +114,28 @@ int main (int argc, char ** argv)
flops+= CGsiteflops*FrbGrid->gSites()*iters;
std::cout << " SinglePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl;
std::cout << " SinglePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
}
std::cout << GridLogMessage << "::::::::::::: Starting regular CG" << std::endl;
csum = crc(result_o);
if ( csumref == 0 ) {
csumref = csum;
} else {
if ( csum != csumref ) {
std::cerr << host<<" FAILURE " <<iter <<" csum "<<std::hex<<csum<< " != "<<csumref <<std::dec<<std::endl;
assert(0);
} else {
std::cout << host <<" OK " <<iter <<" csum "<<std::hex<<csum<<std::dec<<" -- OK! "<<std::endl;
}
}
iter ++;
} while (time(NULL) < (start + nsecs/2) );
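// The single-precision soak uses the first half of the time budget; the
// double-precision loop below runs until the full nsecs have elapsed.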
std::cout << GridLogMessage << "::::::::::::: Starting double precision CG" << std::endl;
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
for(int i=0;i<1;i++){
csumref=0;
int i=0;
do {
std::cerr << "******************* DOUBLE PRECISION SOLVE "<<i<<std::endl;
result_o_2 = Zero();
t1=usecond();
CG(HermOpEO,src_o,result_o_2);
@@ -122,46 +143,30 @@ int main (int argc, char ** argv)
iters = CG.IterationsToComplete;
flops = MdagMsiteflops*4*FrbGrid->gSites()*iters;
flops+= CGsiteflops*FrbGrid->gSites()*iters;
std::cout << " DoublePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl;
std::cout << " DoublePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
}
// MemoryManager::Print();
csum = crc(result_o);
if ( csumref == 0 ) {
csumref = csum;
} else {
if ( csum != csumref ) {
std::cerr << i <<" csum "<<std::hex<<csum<< " != "<<csumref <<std::dec<<std::endl;
assert(0);
} else {
std::cout << i <<" csum "<<std::hex<<csum<<std::dec<<" -- OK! "<<std::endl;
}
}
i++;
} while (time(NULL) < (start + nsecs) );
LatticeFermionD diff_o(FrbGrid);
RealD diff = axpy_norm(diff_o, -1.0, result_o, result_o_2);
std::cout << GridLogMessage << "::::::::::::: Diff between mixed and regular CG: " << diff << std::endl;
#ifdef HAVE_LIME
if( GridCmdOptionExists(argv,argv+argc,"--checksums") ){
assert(diff < 1e-4);
std::string file1("./Propagator1");
emptyUserRecord record;
uint32_t nersc_csum;
uint32_t scidac_csuma;
uint32_t scidac_csumb;
typedef SpinColourVectorD FermionD;
typedef vSpinColourVectorD vFermionD;
BinarySimpleMunger<FermionD,FermionD> munge;
std::string format = getFormatString<vFermionD>();
BinaryIO::writeLatticeObject<vFermionD,FermionD>(result_o,file1,munge, 0, format,
nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << " Mixed checksums "<<std::hex << scidac_csuma << " "<<scidac_csumb<<std::endl;
BinaryIO::writeLatticeObject<vFermionD,FermionD>(result_o_2,file1,munge, 0, format,
nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << " CG checksums "<<std::hex << scidac_csuma << " "<<scidac_csumb<<std::endl;
}
#endif
}
MemoryManager::Print();
Grid_finalize();
}

tests/core/Test_sliceSum.cc (new file, +321 lines)
View File

@@ -0,0 +1,321 @@
#include <Grid/Grid.h>
template<class vobj> inline void sliceSumCPU(const Grid::Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
{
using namespace Grid;
///////////////////////////////////////////////////////
// FIXME precision promoted summation
// may be important for correlation functions
// But easily avoided by using double precision fields
///////////////////////////////////////////////////////
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_object::scalar_type scalar_type;
GridBase *grid = Data.Grid();
assert(grid!=NULL);
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
assert(orthogdim >= 0);
assert(orthogdim < Nd);
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
Vector<vobj> lvSum(rd); // will locally sum vectors first
Vector<sobj> lsSum(ld,Zero()); // sum across these down to scalars
ExtractBuffer<sobj> extracted(Nsimd); // splitting the SIMD
result.resize(fd); // And then global sum to return the same vector to every node
for(int r=0;r<rd;r++){
lvSum[r]=Zero();
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
int ostride=grid->_ostride[orthogdim];
//Reduce Data down to lvSum
sliceSumReduction_cpu(Data,lvSum,rd, e1,e2,stride,ostride,Nsimd);
// Sum across simd lanes in the plane, breaking out orthog dir.
Coordinate icoor(Nd);
for(int rt=0;rt<rd;rt++){
extract(lvSum[rt],extracted);
for(int idx=0;idx<Nsimd;idx++){
grid->iCoorFromIindex(icoor,idx);
int ldx =rt+icoor[orthogdim]*rd;
lsSum[ldx]=lsSum[ldx]+extracted[idx];
}
}
// sum over nodes.
for(int t=0;t<fd;t++){
int pt = t/ld; // processor plane
int lt = t%ld;
if ( pt == grid->_processor_coor[orthogdim] ) {
result[t]=lsSum[lt];
} else {
result[t]=Zero();
}
}
scalar_type * ptr = (scalar_type *) &result[0];
int words = fd*sizeof(sobj)/sizeof(scalar_type);
grid->GlobalSumVector(ptr, words);
}
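// The CPU reference above is checked, dimension by dimension, against Grid's
// sliceSum (which may offload the reduction to an accelerator) in main().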
int main (int argc, char ** argv) {
using namespace Grid;
Grid_init(&argc,&argv);
Coordinate latt_size({64,64,64,16});
auto simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size, simd_layout, mpi_layout);
std::vector<int> seeds({1, 2, 3, 4});
GridParallelRNG pRNG(&Grid);
pRNG.SeedFixedIntegers(seeds);
LatticeComplexD test_data(&Grid);
gaussian(pRNG,test_data);
std::vector<TComplexD> reduction_reference;
std::vector<TComplexD> reduction_result;
//warmup
for (int sweeps = 0; sweeps < 5; sweeps++) {
reduction_result = sliceSum(test_data,0);
}
int trace_id = traceStart("sliceSum benchmark - ComplexD");
std::cout << GridLogMessage << "Testing ComplexD" << std::endl;
std::cout << GridLogMessage << "sizeof(ComplexD) = " << sizeof(ComplexD) << std::endl;
std::cout << GridLogMessage << "sizeof(vComplexD) = " << sizeof(vComplexD) << std::endl;
for (int i = 0; i < Nd; i++) {
RealD t=-usecond();
tracePush("sliceSum");
sliceSumCPU(test_data,reduction_reference,i);
tracePop("sliceSum");
t+=usecond();
std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;
RealD tgpu=-usecond();
tracePush("sliceSumGpu");
reduction_result = sliceSum(test_data,i);
tracePop("sliceSumGpu");
tgpu+=usecond();
std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;
for(int t=0;t<reduction_reference.size();t++) {
auto diff = reduction_reference[t]-reduction_result[t];
assert(abs(TensorRemove(diff)) < 1e-8 );
}
}
traceStop(trace_id);
LatticeSpinVectorD test_data_cv(&Grid);
gaussian(pRNG,test_data_cv);
std::vector<SpinVectorD> reduction_reference_cv;
std::vector<SpinVectorD> reduction_result_cv;
//warmup
for (int sweeps = 0; sweeps < 5; sweeps++) {
reduction_result_cv = sliceSum(test_data_cv,0);
}
trace_id = traceStart("sliceSum benchmark - SpinVectorD");
std::cout << GridLogMessage << "Testing SpinVectorD" << std::endl;
std::cout << GridLogMessage << "sizeof(SpinVectorD) = " << sizeof(SpinVectorD) << std::endl;
std::cout << GridLogMessage << "sizeof(vSpinVectorD) = " << sizeof(vSpinVectorD) << std::endl;
for (int i = 0; i < Nd; i++) {
RealD t=-usecond();
tracePush("sliceSum");
sliceSumCPU(test_data_cv,reduction_reference_cv,i);
tracePop("sliceSum");
t+=usecond();
std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;
RealD tgpu=-usecond();
tracePush("sliceSumGpu");
reduction_result_cv = sliceSum(test_data_cv,i);
tracePop("sliceSumGpu");
tgpu+=usecond();
std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;
for(int t=0;t<reduction_reference_cv.size();t++) {
auto diff = reduction_reference_cv[t]-reduction_result_cv[t];
assert(abs(diff()(0)()) < 1e-8 );
assert(abs(diff()(1)()) < 1e-8 );
assert(abs(diff()(2)()) < 1e-8 );
assert(abs(diff()(3)()) < 1e-8 );
}
}
traceStop(trace_id);
LatticeSpinColourVectorD test_data_scv(&Grid);
gaussian(pRNG,test_data_scv);
std::vector<SpinColourVectorD> reduction_reference_scv;
std::vector<SpinColourVectorD> reduction_result_scv;
//warmup
for (int sweeps = 0; sweeps < 5; sweeps++) {
reduction_result_scv = sliceSum(test_data_scv,0);
}
trace_id = traceStart("sliceSum benchmark - SpinColourVectorD");
std::cout << GridLogMessage << "Testing SpinColourVectorD" << std::endl;
std::cout << GridLogMessage << "sizeof(SpinColourVectorD) = " << sizeof(SpinColourVectorD) << std::endl;
std::cout << GridLogMessage << "sizeof(vSpinColourVectorD) = " << sizeof(vSpinColourVectorD) << std::endl;
for (int i = 0; i < Nd; i++) {
RealD t=-usecond();
tracePush("sliceSum");
sliceSumCPU(test_data_scv,reduction_reference_scv,i);
tracePop("sliceSum");
t+=usecond();
std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;
RealD tgpu=-usecond();
tracePush("sliceSumGpu");
reduction_result_scv = sliceSum(test_data_scv,i);
tracePop("sliceSumGpu");
tgpu+=usecond();
std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;
for(int t=0;t<reduction_reference_scv.size();t++) {
auto diff = reduction_reference_scv[t]-reduction_result_scv[t];
// std::cout << diff <<std::endl;
assert(abs(diff()(0)(0)) < 1e-8 );
assert(abs(diff()(0)(1)) < 1e-8 );
assert(abs(diff()(0)(2)) < 1e-8 );
assert(abs(diff()(1)(0)) < 1e-8 );
assert(abs(diff()(1)(1)) < 1e-8 );
assert(abs(diff()(1)(2)) < 1e-8 );
assert(abs(diff()(2)(0)) < 1e-8 );
assert(abs(diff()(2)(1)) < 1e-8 );
assert(abs(diff()(2)(2)) < 1e-8 );
assert(abs(diff()(3)(0)) < 1e-8 );
assert(abs(diff()(3)(1)) < 1e-8 );
assert(abs(diff()(3)(2)) < 1e-8 );
}
}
traceStop(trace_id);
LatticeSpinColourMatrixD test_data_scm(&Grid);
gaussian(pRNG,test_data_scm);
std::vector<SpinColourMatrixD> reduction_reference_scm;
std::vector<SpinColourMatrixD> reduction_result_scm;
//warmup
for (int sweeps = 0; sweeps < 5; sweeps++) {
reduction_result_scm = sliceSum(test_data_scm,0);
}
trace_id = traceStart("sliceSum benchmark - SpinColourMatrixD");
std::cout << GridLogMessage << "Testing SpinColourMatrixD" << std::endl;
std::cout << GridLogMessage << "sizeof(SpinColourMatrixD) = " << sizeof(SpinColourMatrixD) << std::endl;
std::cout << GridLogMessage << "sizeof(vSpinColourMatrixD) = " << sizeof(vSpinColourMatrixD) << std::endl;
for (int i = 0; i < Nd; i++) {
RealD t=-usecond();
tracePush("sliceSum");
sliceSumCPU(test_data_scm,reduction_reference_scm,i);
tracePop("sliceSum");
t+=usecond();
std::cout << GridLogMessage << "Orthog. dir. = " << i << std::endl;
std::cout << GridLogMessage << "CPU sliceSum took "<<t<<" usecs"<<std::endl;
RealD tgpu=-usecond();
tracePush("sliceSumGpu");
reduction_result_scm = sliceSum(test_data_scm,i);
tracePop("sliceSumGpu");
tgpu+=usecond();
std::cout << GridLogMessage <<"GPU sliceSum took "<<tgpu<<" usecs"<<std::endl<<std::endl;;
for(int t=0;t<reduction_reference_scm.size();t++) {
auto diff = reduction_reference_scm[t]-reduction_result_scm[t];
// std::cout << diff <<std::endl;
for (int is = 0; is < Ns; is++) {
for (int js = 0; js < Ns; js++) {
for (int ic = 0; ic < Nc; ic++) {
for (int jc = 0; jc < Nc; jc++) {
assert(abs(diff()(is,js)(ic,jc)) < 1e-8);
}
}
}
}
}
}
traceStop(trace_id);
Grid_finalize();
return 0;
}

View File

@@ -32,6 +32,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
using namespace std;
using namespace Grid;
// gpermute applies the SIMD-lane permutations required when a stencil shift
// crosses a vectorized direction.
template<class vobj> void gpermute(vobj & inout,int perm){
vobj tmp=inout;
if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;}
@@ -39,7 +40,8 @@ template<class vobj> void gpermute(vobj & inout,int perm){
if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;}
if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
@@ -47,20 +49,21 @@ int main (int argc, char ** argv)
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout= GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
std::cout << " mpi "<<mpi_layout<<std::endl;
std::cout << " simd "<<simd_layout<<std::endl;
std::cout << " latt "<<latt_size<<std::endl;
std::cout << GridLogMessage << " mpi "<<mpi_layout<<std::endl;
std::cout << GridLogMessage << " simd "<<simd_layout<<std::endl;
std::cout << GridLogMessage << " latt "<<latt_size<<std::endl;
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
// Initialize configuration as hot start.
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField Umu(&GRID);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
SU<Nc>::HotConfiguration(pRNG,Umu);
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
LatticeComplex trplaq(&GRID);
// Store Umu in U. Peek/Poke mean getElement/setElement, respectively.
std::vector<LatticeColourMatrix> U(Nd, Umu.Grid());
for (int mu = 0; mu < Nd; mu++) {
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
@@ -70,9 +73,7 @@ int main (int argc, char ** argv)
LatticeComplex cplaq(&GRID); cplaq=Zero();
/////////////////////////////////////////////////
// Create a padded cell of extra padding depth=1
/////////////////////////////////////////////////
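// Exchange() embeds Umu in a grid enlarged by `depth` sites in every
// direction, so one-site stencil shifts taken from any site of the original
// cell stay inside the padded volume.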
int depth = 1;
PaddedCell Ghost(depth,&GRID);
LatticeGaugeField Ughost = Ghost.Exchange(Umu);
@@ -114,18 +115,25 @@ int main (int argc, char ** argv)
}
#endif
///// Array for the site plaquette
// Array for the site plaquette
GridBase *GhostGrid = Ughost.Grid();
LatticeComplex gplaq(GhostGrid);
// Now we're going to put together the "stencil" that will be useful to us
// when calculating the plaquette. Our eventual goal is to form the product
// Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x),
// which requires, in order, the sites x, x+mu, x+nu, and x. We reach these
// sites relative to x through "shifts", each represented here by a 4-d
// vector holding a 0 (no movement) or a 1 (shift one unit) in each
// direction. The "stencil" is the set of all these shifts.
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
// Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
// push_back appends each shift to the end of the stencil list.
shifts.push_back(shift_0);
shifts.push_back(shift_mu);
shifts.push_back(shift_nu);
@@ -135,41 +143,51 @@ int main (int argc, char ** argv)
GeneralLocalStencil gStencil(GhostGrid,shifts);
gplaq=Zero();
{
autoView( gp_v , gplaq, CpuWrite);
autoView( t_v , trplaq, CpuRead);
autoView( U_v , Ughost, CpuRead);
for(int ss=0;ss<gp_v.size();ss++){
int s=0;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
auto SE0 = gStencil.GetEntry(s+0,ss);
auto SE1 = gStencil.GetEntry(s+1,ss);
auto SE2 = gStencil.GetEntry(s+2,ss);
auto SE3 = gStencil.GetEntry(s+3,ss);
int o0 = SE0->_offset;
int o1 = SE1->_offset;
int o2 = SE2->_offset;
int o3 = SE3->_offset;
auto U0 = U_v[o0](mu);
auto U1 = U_v[o1](nu);
auto U2 = adj(U_v[o2](mu));
auto U3 = adj(U_v[o3](nu));
// Before doing accelerator work, "Views" of the lattice objects are opened
// (and closed again at end of scope); they are stored in the *_v variables
// below.
autoView( gp_v , gplaq, CpuWrite);
autoView( t_v , trplaq, CpuRead);
autoView( U_v , Ughost, CpuRead);
gpermute(U0,SE0->_permute);
gpermute(U1,SE1->_permute);
gpermute(U2,SE2->_permute);
gpermute(U3,SE3->_permute);
gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
s=s+4;
}
}
// This is now a loop over stencil shift elements: ss runs over the spacetime
// sites, while s advances as we work our way around each plaquette.
for(int ss=0;ss<gp_v.size();ss++){
int s=0;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
auto SE0 = gStencil.GetEntry(s+0,ss);
auto SE1 = gStencil.GetEntry(s+1,ss);
auto SE2 = gStencil.GetEntry(s+2,ss);
auto SE3 = gStencil.GetEntry(s+3,ss);
// Due to our strategy, each offset corresponds to a site.
int o0 = SE0->_offset;
int o1 = SE1->_offset;
int o2 = SE2->_offset;
int o3 = SE3->_offset;
auto U0 = U_v[o0](mu);
auto U1 = U_v[o1](nu);
auto U2 = adj(U_v[o2](mu));
auto U3 = adj(U_v[o3](nu));
gpermute(U0,SE0->_permute);
gpermute(U1,SE1->_permute);
gpermute(U2,SE2->_permute);
gpermute(U3,SE3->_permute);
gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
s=s+4;
}
}
}
// The padded cell has its own periodic BCs, so taking a step to the right at
// the right-most side of the cell lands on the left-most side. This means
// the plaquettes computed in the padding are wrong. Luckily, all we care
// about are the plaquettes inside the cell, which we obtain from Extract.
cplaq = Ghost.Extract(gplaq);
RealD vol = cplaq.Grid()->gSites();
RealD faces = (Nd * (Nd-1))/2;

View File

@@ -0,0 +1,181 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/smearing/Test_fatLinks.cc
Copyright (C) 2023
Author: D. A. Clarke <clarke.davida@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*
@file Test_fatLinks.cc
@brief test of the HISQ smearing
*/
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/qcd/smearing/HISQSmearing.h>
using namespace Grid;
/*! @brief parameter file to easily adjust Nloop */
struct ConfParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(
ConfParameters,
int, benchmark,
int, Nloop);
template <class ReaderClass>
ConfParameters(Reader<ReaderClass>& Reader){
read(Reader, "parameters", *this);
}
};
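// The parameters are read from "fatParams.xml" in main(); a minimal sketch
// of that file, assuming Grid's XmlReader layout with root tag "grid":
//   <grid>
//     <parameters>
//       <benchmark>1</benchmark>
//       <Nloop>300</Nloop>
//     </parameters>
//   </grid>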
bool testSmear(GridCartesian& GRID, LatticeGaugeFieldD Umu, LatticeGaugeFieldD Usmr, LatticeGaugeFieldD Unaik,
LatticeGaugeFieldD Ucontrol, Real c1, Real cnaik, Real c3, Real c5, Real c7, Real clp) {
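// Path coefficients, inferred from the control-file names used in main():
// c1 = 1-link, cnaik = Naik, c3/c5/c7 = 3-, 5-, 7-link staples, clp = Lepage.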
Smear_HISQ<PeriodicGimplD> hisq_fat(&GRID,c1,cnaik,c3,c5,c7,clp);
LatticeGaugeFieldD diff(&GRID), Uproj(&GRID);
hisq_fat.smear(Usmr, Unaik, Umu);
bool result;
if (cnaik < 1e-30) { // Testing anything but Naik term
diff = Ucontrol-Usmr;
auto absDiff = norm2(diff)/norm2(Ucontrol);
if (absDiff < 1e-30) {
Grid_pass(" |Umu-Usmr|/|Umu| = ",absDiff);
result = true;
} else {
Grid_error(" |Umu-Usmr|/|Umu| = ",absDiff);
result = false;
}
} else { // Testing Naik specifically
diff = Ucontrol-Unaik;
auto absDiff = norm2(diff)/norm2(Ucontrol);
if (absDiff < 1e-30) {
Grid_pass(" |Umu-Unaik|/|Umu| = ",absDiff);
result = true;
} else {
Grid_error(" |Umu-Unaik|/|Umu| = ",absDiff);
result = false;
}
hisq_fat.projectU3(Uproj,Ucontrol);
// NerscIO::writeConfiguration(Unaik,"nersc.l8t4b3360.naik");
}
return result;
}
int main (int argc, char** argv) {
// Params for the test.
int Ns = 8;
int Nt = 4;
Coordinate latt_size(Nd,0); latt_size[0]=Ns; latt_size[1]=Ns; latt_size[2]=Ns; latt_size[3]=Nt;
std::string conf_in = "nersc.l8t4b3360";
int threads = GridThread::GetThreads();
typedef LatticeGaugeFieldD LGF;
// Initialize the Grid
Grid_init(&argc,&argv);
Coordinate simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
Grid_log("mpi = ",mpi_layout);
Grid_log("simd = ",simd_layout);
Grid_log("latt = ",latt_size);
Grid_log("threads = ",threads);
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
XmlReader Reader("fatParams.xml",false,"grid");
ConfParameters param(Reader);
if(param.benchmark) Grid_log(" Nloop = ",param.Nloop);
LGF Umu(&GRID), Usmr(&GRID), Unaik(&GRID), Ucontrol(&GRID);
// Read the configuration into Umu
FieldMetaData header;
NerscIO::readConfiguration(Umu, header, conf_in);
bool pass=true;
// Carry out various tests
NerscIO::readConfiguration(Ucontrol, header, "nersc.l8t4b3360.357lplink.control");
pass *= testSmear(GRID,Umu,Usmr,Unaik,Ucontrol,1/8.,0.,1/16.,1/64.,1/384.,-1/8.);
NerscIO::readConfiguration(Ucontrol, header, "nersc.l8t4b3360.357link.control");
pass *= testSmear(GRID,Umu,Usmr,Unaik,Ucontrol,1/8.,0.,1/16.,1/64.,1/384.,0.);
NerscIO::readConfiguration(Ucontrol, header, "nersc.l8t4b3360.35link.control");
pass *= testSmear(GRID,Umu,Usmr,Unaik,Ucontrol,1/8.,0.,1/16.,1/64.,0.,0.);
NerscIO::readConfiguration(Ucontrol, header, "nersc.l8t4b3360.3link.control");
pass *= testSmear(GRID,Umu,Usmr,Unaik,Ucontrol,1/8.,0.,1/16.,0.,0.,0.);
NerscIO::readConfiguration(Ucontrol, header, "nersc.l8t4b3360.naik.control");
pass *= testSmear(GRID,Umu,Usmr,Unaik,Ucontrol,0.,0.8675309,0.,0.,0.,0.);
if(pass){
Grid_pass("All tests passed.");
} else {
Grid_error("At least one test failed.");
}
// Test a C-style instantiation
double path_coeff[6] = {1, 2, 3, 4, 5, 6};
Smear_HISQ<PeriodicGimplD> hisq_fat_Cstyle(&GRID,path_coeff);
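// The six entries plausibly map to (c1, cnaik, c3, c5, c7, clp) in the same
// order as the named-coefficient constructor exercised above.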
if (param.benchmark) {
autoView(U_v, Umu, CpuRead); // Gauge accessor
// Read in lattice sequentially, Nloop times
double lookupTime = 0.;
for(int i=0;i<param.Nloop;i++) {
double start = usecond();
for(int ss=0;ss<U_v.size();ss++)
for(int mu=0;mu<Nd;mu++) {
auto U1 = U_v[ss](mu);
}
double stop = usecond();
lookupTime += stop-start; // microseconds
}
Grid_log("Time to lookup: ",lookupTime,"[ms]");
// Raise a matrix to the power nmat, for each link.
auto U1 = U_v[0](0);
for(int nmat=1;nmat<8;nmat++) {
double multTime = 0.;
for(int i=0;i<param.Nloop;i++) {
double start=usecond();
for(int ss=0;ss<U_v.size();ss++)
for(int mu=0;mu<Nd;mu++) {
auto U2 = U1;
for(int j=1;j<nmat;j++) {
U2 *= U1;
}
}
double stop=usecond();
multTime += stop-start;
}
Grid_log("Time to multiply ",nmat," matrices: ",multTime," [ms]");
}
}
Grid_finalize();
}