
definitely the right merge upstream/develop

This commit is contained in:
Alessandro Lupo
2023-06-16 14:19:46 +01:00
446 changed files with 46860 additions and 16333 deletions

View File

@ -0,0 +1,184 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/IO/Test_field_array_io.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
//This test demonstrates and checks a single-file write of an arbitrary array of fields
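//The file starts with a three-line text header: the number of fields (decimal, width 10),
//the combined checksum (hex, width 10) and the data format string. writeHeader/readHeader
//return the byte offset at which the binary field data begins.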
uint64_t writeHeader(const uint32_t size, const uint32_t checksum, const std::string &format, const std::string &file){
std::ofstream fout(file,std::ios::out|std::ios::in);
fout.seekp(0,std::ios::beg);
fout << std::setw(10) << size << std::endl;
fout << std::hex << std::setw(10) << checksum << std::endl;
fout << format << std::endl;
return fout.tellp();
}
uint64_t readHeader(uint32_t &size, uint32_t &checksum, std::string &format, const std::string &file){
std::ifstream fin(file);
std::string line;
getline(fin,line);
{
std::stringstream ss; ss <<line ; ss >> size;
}
getline(fin,line);
{
std::stringstream ss; ss <<line ; ss >> std::hex >> checksum;
}
getline(fin,format);
removeWhitespace(format);
return fin.tellg();
}
template<typename FieldType>
void writeFieldArray(const std::string &file, const std::vector<FieldType> &data){
typedef typename FieldType::vector_object vobj;
typedef typename FieldType::scalar_object sobj;
GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
BinarySimpleMunger<sobj, sobj> munge; //straight copy
//We need a 2-pass header write, first to establish the size, the second pass writes the checksum
std::string format = getFormatString<typename FieldType::vector_object>();
uint64_t offset; //leave 64 bits for header
if ( grid->IsBoss() ) {
NerscIO::truncate(file);
offset = writeHeader(data.size(), 0, format, file);
}
grid->Broadcast(0,(void *)&offset,sizeof(offset)); //use as a barrier
std::cout << "Data offset write " << offset << std::endl;
std::cout << "Data size write " << data.size() << std::endl;
uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);
std::cout << "Field size = " << field_size << " B" << std::endl;
uint32_t checksum = 0;
for(int i=0;i<data.size();i++){
std::cout << "Data field write " << i << " offset " << offset << std::endl;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::writeLatticeObject<vobj,sobj>(const_cast<FieldType &>(data[i]),file,munge,offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
offset += field_size;
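//Fold this field's NERSC checksum into the running combined checksum
//(boost::hash_combine-style mixing, so the result depends on the field order)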
checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2);
}
std::cout << "Write checksum " << checksum << std::endl;
if ( grid->IsBoss() ) {
writeHeader(data.size(), checksum, format, file);
}
}
template<typename FieldType>
void readFieldArray(std::vector<FieldType> &data, const std::string &file){
typedef typename FieldType::vector_object vobj;
typedef typename FieldType::scalar_object sobj;
assert(data.size() > 0);
GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
BinarySimpleUnmunger<sobj, sobj> munge; //straight copy
uint32_t hdr_checksum, hdr_size;
std::string format;
uint64_t offset = readHeader(hdr_size, hdr_checksum, format, file);
std::cout << "Data offset read " << offset << std::endl;
std::cout << "Data size read " << hdr_size << std::endl;
assert(data.size() == hdr_size);
uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);
uint32_t checksum = 0;
for(int i=0;i<data.size();i++){
std::cout << "Data field read " << i << " offset " << offset << std::endl;
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
BinaryIO::readLatticeObject<vobj,sobj>(data[i],file,munge,offset,format,
nersc_csum,scidac_csuma,scidac_csumb);
offset += field_size;
checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2);
}
std::cout << "Header checksum " << hdr_checksum << std::endl;
std::cout << "Read checksum " << checksum << std::endl;
assert( hdr_checksum == checksum );
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
Coordinate latt = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
const int Ls=8;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(latt, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
typedef DomainWallFermionD::FermionField FermionField;
int nfield = 20;
std::vector<FermionField> data(nfield, FGrid);
for(int i=0;i<data.size();i++)
gaussian(RNG5, data[i]);
std::string file = "test_field_array_io.0";
writeFieldArray(file, data);
std::vector<FermionField> data_r(nfield, FGrid);
readFieldArray(data_r, file);
for(int i=0;i<nfield;i++){
FermionField diff = data_r[i] - data[i];
RealD norm_diff = norm2(diff);
std::cout << "Norm2 of difference between stored and loaded data index " << i << " : " << norm_diff << std::endl;
}
std::cout << "Done" << std::endl;
Grid_finalize();
}

View File

@ -147,7 +147,7 @@ int main (int argc, char ** argv)
Complex p = TensorRemove(Tp);
std::cout<<GridLogMessage << "calculated plaquettes " <<p*PlaqScale<<std::endl;
Complex LinkTraceScale(1.0/vol/4.0/3.0);
Complex LinkTraceScale(1.0/vol/4.0/(Real)Nc);
TComplex Tl = sum(LinkTrace);
Complex l = TensorRemove(Tl);
std::cout<<GridLogMessage << "calculated link trace " <<l*LinkTraceScale<<std::endl;
@ -157,8 +157,10 @@ int main (int argc, char ** argv)
Complex ll= TensorRemove(TcP);
std::cout<<GridLogMessage << "coarsened plaquettes sum to " <<ll*PlaqScale<<std::endl;
std::string clone2x3("./ckpoint_clone2x3.4000");
std::string clone3x3("./ckpoint_clone3x3.4000");
const string stNc = to_string( Nc ) ;
const string stNcM1 = to_string( Nc-1 ) ;
std::string clone2x3("./ckpoint_clone"+stNcM1+"x"+stNc+".4000");
std::string clone3x3("./ckpoint_clone"+stNc+"x"+stNc+".4000");
NerscIO::writeConfiguration(Umu,clone3x3,0,precision32);
NerscIO::writeConfiguration(Umu,clone2x3,1,precision32);

View File

@ -29,19 +29,6 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
using namespace std;
using namespace Grid;
;
template<class d>
struct scal {
d internal;
};
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT
};
template<class What>
void TestWhat(What & Ddwf,
@ -86,10 +73,15 @@ int main (int argc, char ** argv)
RealD M5 =1.8;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion vectorised test"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestWhat<DomainWallFermionR>(Ddwf,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::vector<Complex> boundary = {1,1,1,-1};
DomainWallFermionD::ImplParams Params(boundary);
// Coordinate Dirichlet({0,8,8,16,32});
// Params.dirichlet=Dirichlet;
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,Params);
TestWhat<DomainWallFermionD>(Ddwf,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
@ -97,54 +89,54 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionR>(Dmob,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionD>(Dmob,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"Z-MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::vector<ComplexD> gamma(Ls,std::complex<double>(1.0,0.0));
ZMobiusFermionR zDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionR>(zDmob,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
ZMobiusFermionD zDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionD>(zDmob,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
MobiusZolotarevFermionD Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestWhat<MobiusZolotarevFermionR>(Dzolo,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
TestWhat<MobiusZolotarevFermionD>(Dzolo,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestWhat<ScaledShamirFermionR>(Dsham,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
TestWhat<ScaledShamirFermionD>(Dsham,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
ShamirZolotarevFermionD Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<ShamirZolotarevFermionR>(Dshamz,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
TestWhat<ShamirZolotarevFermionD>(Dshamz,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonCayleyTanhFermionR>(Dov,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
TestWhat<OverlapWilsonCayleyTanhFermionD>(Dov,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage<<"**************************************************************"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
OverlapWilsonCayleyZolotarevFermionD Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<OverlapWilsonCayleyZolotarevFermionR>(Dovz,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
TestWhat<OverlapWilsonCayleyZolotarevFermionD>(Dovz,FGrid,FrbGrid,UGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage<<"=============================================================="<<std::endl;

View File

@ -209,8 +209,8 @@ int main (int argc, char ** argv) {
std::cout << GridLogMessage << "Lattice dimensions: " << latt << " Ls: " << Ls << std::endl;
// ZMobius EO Operator
ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, Params.omega,1.,0.);
SchurDiagTwoOperator<ZMobiusFermionR,LatticeFermion> HermOp(Ddwf);
ZMobiusFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, Params.omega,1.,0.);
SchurDiagTwoOperator<ZMobiusFermionD,LatticeFermion> HermOp(Ddwf);
// Eigenvector storage
LanczosParams fine =Params.FineParams;

View File

@ -46,7 +46,7 @@ int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
const int Ls=8;
const int Ls=12;
std::cout << GridLogMessage << "::::: NB: to enable a quick bit reproducibility check use the --checksums flag. " << std::endl;
@ -94,13 +94,40 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "::::::::::::: Starting mixed CG" << std::endl;
MixedPrecisionConjugateGradient<LatticeFermionD,LatticeFermionF> mCG(1.0e-8, 10000, 50, FrbGrid_f, HermOpEO_f, HermOpEO);
mCG(src_o,result_o);
double t1,t2,flops;
double MdagMsiteflops = 1452; // Mobius (real coeffs)
// CG overhead: 8 inner product, 4+8 axpy_norm, 4+4 linear comb (2 of)
double CGsiteflops = (8+4+8+4+4)*Nc*Ns ;
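// e.g. with Nc=3, Ns=4 this is 28*12 = 336 flops per site per inner CG iteration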
std:: cout << " MdagM site flops = "<< 4*MdagMsiteflops<<std::endl;
std:: cout << " CG site flops = "<< CGsiteflops <<std::endl;
int iters;
for(int i=0;i<10;i++){
result_o = Zero();
t1=usecond();
mCG(src_o,result_o);
t2=usecond();
iters = mCG.TotalInnerIterations; //Number of inner CG iterations
flops = MdagMsiteflops*4*FrbGrid->gSites()*iters;
flops+= CGsiteflops*FrbGrid->gSites()*iters;
std::cout << " SinglePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl;
std::cout << " SinglePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
}
std::cout << GridLogMessage << "::::::::::::: Starting regular CG" << std::endl;
ConjugateGradient<LatticeFermionD> CG(1.0e-8,10000);
CG(HermOpEO,src_o,result_o_2);
MemoryManager::Print();
for(int i=0;i<1;i++){
result_o_2 = Zero();
t1=usecond();
CG(HermOpEO,src_o,result_o_2);
t2=usecond();
iters = CG.IterationsToComplete;
flops = MdagMsiteflops*4*FrbGrid->gSites()*iters;
flops+= CGsiteflops*FrbGrid->gSites()*iters;
std::cout << " DoublePrecision iterations/sec "<< iters/(t2-t1)*1000.*1000.<<std::endl;
std::cout << " DoublePrecision GF/s "<< flops/(t2-t1)/1000.<<std::endl;
}
// MemoryManager::Print();
LatticeFermionD diff_o(FrbGrid);
RealD diff = axpy_norm(diff_o, -1.0, result_o, result_o_2);

View File

@ -115,6 +115,7 @@ int main(int argc, char ** argv)
if (SE->_permute & 0x2 ) { permute(check[i],tmp,1); tmp=check[i];}
if (SE->_permute & 0x4 ) { permute(check[i],tmp,2); tmp=check[i];}
if (SE->_permute & 0x8 ) { permute(check[i],tmp,3); tmp=check[i];}
// std::cout<<GridLogMessage<<"stencil["<<i<<"] "<< check[i]<< " perm "<<(uint32_t)SE->_permute <<std::endl;
}
Real nrmC = norm2(Check);
@ -138,18 +139,17 @@ int main(int argc, char ** argv)
ddiff = check -bar;
diff =norm2(ddiff);
if ( diff > 0){
std::cout <<"Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
<<") " <<check<<" vs "<<bar<<std::endl;
std::cout <<"Diff at Coor (" << coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]
<<") stencil " <<check<<" vs cshift "<<bar<<std::endl;
}
}}}}
if (nrm > 1.0e-4) {
autoView( check , Check, CpuRead);
autoView( bar , Bar, CpuRead);
for(int i=0;i<check.size();i++){
std::cout << i<<" Check "<<check[i]<< "\n"<<i<<" Bar "<<bar[i]<<std::endl;
std::cout << i<<" ERROR Check \n"<<check[i]<< "\n"<<i<<" Bar \n"<<bar[i]<<std::endl;
}
}
if (nrm > 1.0e-4) exit(-1);

tests/Test_gfield_shift.cc (new file, 183 lines)
View File

@ -0,0 +1,183 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_gfield_shift.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
//Test the shifting of the gauge field that respects the boundary conditions
#include <Grid/Grid.h>
using namespace Grid;
;
typedef ConjugateGimplR Gimpl; //can choose periodic / charge conjugate directions at will
typedef Gimpl::GaugeField GaugeField;
typedef Gimpl::GaugeLinkField GaugeLinkField;
typedef Gimpl::SiteGaugeField SiteGaugeField;
typedef Gimpl::SiteGaugeLink SiteGaugeLink;
GaugeField CshiftGaugeField(const GaugeField &U, const int dir, const int shift){
GridBase *Grid = U.Grid();
GaugeField out(Grid);
GaugeLinkField Umu(Grid);
for(int mu=0;mu<Grid->Nd();mu++){
Umu = PeekIndex<LorentzIndex>(U, mu);
Umu = Gimpl::CshiftLink(Umu,dir,shift);
PokeIndex<LorentzIndex>(out,Umu,mu);
}
return out;
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(4,vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
std::vector<int> conj_dirs = {1,1,0,0};
Gimpl::setDirections(conj_dirs);
GridCartesian Fine(latt_size,simd_layout,mpi_layout);
GridParallelRNG FineRNG(&Fine); FineRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
GaugeField U(&Fine);
GaugeField ShiftU(&Fine);
GaugeLinkField link_field(&Fine), link_field_2(&Fine);
//Like Test_cshift we put the lex coordinate index on each site but make it imaginary
//so we can tell when it was complex conjugated
LatticeComplex lex(&Fine);
lex=Zero();
U = Zero();
{
LatticeComplex coor(&Fine);
Integer stride =1;
for(int d=0;d<4;d++){
LatticeCoordinate(coor,d);
lex = lex + coor*stride;
stride=stride*latt_size[d];
}
PokeIndex<ColourIndex>(link_field, lex, 0,0); //place on 0,0 element of link
for(int mu=0;mu<Nd;mu++){
link_field_2 = link_field + mu*stride; //add in lex-mapping of mu
link_field_2 = ComplexD(0,1) * link_field_2; //make imaginary
PokeIndex<LorentzIndex>(U, link_field_2, mu);
}
}
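//After this block U_mu(x)(0,0) = i*(lex(x) + mu*V), where V is the global 4d volume, so a shifted
//link encodes both its source site and (via conjugation of the value) whether it was pulled across
//a charge-conjugate boundary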
std::stringstream ss;
ss<<"error";
for(int d=0;d<Fine._ndimension;d++){
ss<<"."<<Fine._processor_coor[d];
}
ss<<"_wr_"<<Fine._processor;
std::string fname(ss.str());
std::ofstream ferr(fname);
Integer vol4d = latt_size[0]*latt_size[1]*latt_size[2]*latt_size[3];
bool fail = false;
typename SiteGaugeField::scalar_object um;
TComplex cm;
for(int dir=0;dir<4;dir++){
for(int shift=-latt_size[dir]+1;shift<latt_size[dir];shift++){
if ( Fine.IsBoss() )
std::cout<<GridLogMessage<<"Shifting by "<<shift<<" in direction "<<dir
<< " dir is conj ? " << conj_dirs[dir] << std::endl;
ShiftU = CshiftGaugeField(U,dir,shift);
Coordinate coor(4);
for(coor[3]=0;coor[3]<latt_size[3];coor[3]++){
for(coor[2]=0;coor[2]<latt_size[2];coor[2]++){
for(coor[1]=0;coor[1]<latt_size[1];coor[1]++){
for(coor[0]=0;coor[0]<latt_size[0];coor[0]++){
peekSite(um,ShiftU,coor);
Coordinate scoor(coor);
scoor[dir] = (scoor[dir]+shift + latt_size[dir])%latt_size[dir];
Integer slex = scoor[0]
+ latt_size[0]*scoor[1]
+ latt_size[0]*latt_size[1]*scoor[2]
+ latt_size[0]*latt_size[1]*latt_size[2]*scoor[3];
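//slex is the lexicographic index of the source site the shifted data should have come from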
for(int mu = 0 ; mu < 4; mu++){
Integer slex_mu = slex + vol4d*mu;
Complex scm(0,slex_mu); //imaginary
if(
( shift > 0 && coor[dir] >= latt_size[dir]-shift && conj_dirs[dir] )
||
( shift < 0 && coor[dir] <= -shift-1 && conj_dirs[dir] )
)
scm = conjugate(scm); //CC if pulled over boundary
cm = um(mu)()(0,0);
RealD nrm = abs(scm-cm()()());
//std::cout << cm << " " << scm << std::endl;
Coordinate peer(4);
Complex tmp =cm;
Integer index=real(tmp);
Integer cm_mu = index / vol4d;
index = index % vol4d;
Lexicographic::CoorFromIndex(peer,index,latt_size);
if (nrm > 0){
ferr<<"FAIL mu " << mu << " shift "<< shift<<" in dir "<< dir<<" ["<<coor[0]<<","<<coor[1]<<","<<coor[2]<<","<<coor[3]<<"] = "<< cm()()()<<" expect "<<scm<<" "<<nrm<<std::endl;
ferr<<"Got mu "<< cm_mu << " site " <<index<<" : " << peer[0]<<","<<peer[1]<<","<<peer[2]<<","<<peer[3]<<std::endl;
index=real(scm);
Integer scm_mu = index / vol4d;
index = index % vol4d;
Lexicographic::CoorFromIndex(peer,index,latt_size);
ferr<<"Expect mu " << scm_mu << " site " <<index<<": " << peer[0]<<","<<peer[1]<<","<<peer[2]<<","<<peer[3]<<std::endl;
fail = true;
}
}
}}}}
}
}
if(fail) std::cout << "Test FAILED : see " << fname << " for more details" << std::endl;
else std::cout << "Test Passed" << std::endl;
Grid_finalize();
}

View File

@ -33,8 +33,8 @@ using namespace Grid;
const int TSRC = 0; //timeslice where rho is nonzero
const int VDIM = 5; //length of each vector
typedef typename DomainWallFermionR::ComplexField ComplexField;
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionD::ComplexField ComplexField;
typedef typename DomainWallFermionD::FermionField FermionField;
int main(int argc, char *argv[])
{

View File

@ -793,6 +793,7 @@ int main (int argc, char ** argv)
}
std::cout <<" OK ! "<<std::endl;
#ifdef USE_FP16
// Double to Half
std::cout << GridLogMessage<< "Double to half" ;
precisionChange(&H[0],&D[0],Ndp);
@ -822,6 +823,7 @@ int main (int argc, char ** argv)
assert( tmp < 1.0e-3 );
}
std::cout <<" OK ! "<<std::endl;
#endif
}
Grid_finalize();

View File

@ -31,7 +31,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
using namespace std;
using namespace Grid;
;
int main(int argc, char ** argv) {
Grid_init(&argc, &argv);
@ -80,7 +79,8 @@ int main(int argc, char ** argv) {
Foo=lex;
}
typedef CartesianStencil<vobj,vobj,int> Stencil;
typedef CartesianStencil<vobj,vobj,SimpleStencilParams> Stencil;
SimpleStencilParams p;
for(int dir=0;dir<4;dir++){
for(int disp=0;disp<Fine._fdimensions[dir];disp++){
@ -90,7 +90,7 @@ int main(int argc, char ** argv) {
std::vector<int> directions(npoint,dir);
std::vector<int> displacements(npoint,disp);
Stencil myStencil(&Fine,npoint,0,directions,displacements,0);
Stencil myStencil(&Fine,npoint,0,directions,displacements,p);
Coordinate ocoor(4);
for(int o=0;o<Fine.oSites();o++){
Fine.oCoorFromOindex(ocoor,o);
@ -183,8 +183,8 @@ int main(int argc, char ** argv) {
std::vector<int> directions(npoint,dir);
std::vector<int> displacements(npoint,disp);
Stencil EStencil(&rbFine,npoint,Even,directions,displacements,0);
Stencil OStencil(&rbFine,npoint,Odd,directions,displacements,0);
Stencil EStencil(&rbFine,npoint,Even,directions,displacements,p);
Stencil OStencil(&rbFine,npoint,Odd,directions,displacements,p);
Coordinate ocoor(4);
for(int o=0;o<Fine.oSites();o++){

View File

@ -75,8 +75,8 @@ int main (int argc, char ** argv)
RealD M5=1.8;
{
OverlapWilsonContFracTanhFermionR Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonContFracTanhFermionR,LatticeFermion> HermIndefOp(Dcf);
OverlapWilsonContFracTanhFermionD Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonContFracTanhFermionD,LatticeFermion> HermIndefOp(Dcf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);
@ -92,8 +92,8 @@ int main (int argc, char ** argv)
}
{
OverlapWilsonPartialFractionTanhFermionR Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonPartialFractionTanhFermionR,LatticeFermion> HermIndefOp(Dpf);
OverlapWilsonPartialFractionTanhFermionD Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
HermitianLinearOperator<OverlapWilsonPartialFractionTanhFermionD,LatticeFermion> HermIndefOp(Dpf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);

View File

@ -140,14 +140,14 @@ int main (int argc, char ** argv)
// RealD mass=0.1;
// RealD M5=1.8;
// DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
// DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
// LatticeFermion src_o(FrbGrid);
// LatticeFermion result_o(FrbGrid);
// pickCheckerboard(Odd,src_o,src);
// result_o=Zero();
// SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
// SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermion> HermOpEO(Ddwf);
// ConjugateGradient<LatticeFermion> CG(1.0e-8,10000);
// CG(HermOpEO,src_o,result_o);

View File

@ -0,0 +1,226 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/core/Test_compact_wilson_clover_speedup.cc
Copyright (C) 2020 - 2022
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
Author: Nils Meyer <nils.meyer@ur.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
NAMESPACE_BEGIN(CommandlineHelpers);
static bool checkPresent(int* argc, char*** argv, const std::string& option) {
return GridCmdOptionExists(*argv, *argv + *argc, option);
}
static std::string getContent(int* argc, char*** argv, const std::string& option) {
return GridCmdOptionPayload(*argv, *argv + *argc, option);
}
static int readInt(int* argc, char*** argv, std::string&& option, int defaultValue) {
std::string arg;
int ret = defaultValue;
if(checkPresent(argc, argv, option)) {
arg = getContent(argc, argv, option);
GridCmdOptionInt(arg, ret);
}
return ret;
}
static float readFloat(int* argc, char*** argv, std::string&& option, float defaultValue) {
std::string arg;
double ret = defaultValue;
if(checkPresent(argc, argv, option)) {
arg = getContent(argc, argv, option);
GridCmdOptionFloat(arg, ret);
}
return ret;
}
NAMESPACE_END(CommandlineHelpers);
#define _grid_printf(LOGGER, ...) \
{ \
if((LOGGER).isActive()) { /* this makes it safe to put, e.g., norm2 in the calling code w.r.t. performance */ \
char _printf_buf[1024]; \
std::sprintf(_printf_buf, __VA_ARGS__); \
std::cout << (LOGGER) << _printf_buf; \
fflush(stdout); \
} \
}
#define grid_printf_msg(...) _grid_printf(GridLogMessage, __VA_ARGS__)
template<typename Field>
bool resultsAgree(const Field& ref, const Field& res, const std::string& name) {
RealD checkTolerance = (getPrecision<Field>::value == 2) ? 1e-15 : 1e-7;
Field diff(ref.Grid());
diff = ref - res;
auto absDev = norm2(diff);
auto relDev = absDev / norm2(ref);
std::cout << GridLogMessage
<< "norm2(reference), norm2(" << name << "), abs. deviation, rel. deviation: " << norm2(ref) << " "
<< norm2(res) << " " << absDev << " " << relDev << " -> check "
<< ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;
return relDev <= checkTolerance;
}
template<typename vCoeff_t>
void runBenchmark(int* argc, char*** argv) {
// read from command line
const int nIter = CommandlineHelpers::readInt( argc, argv, "--niter", 1000);
const RealD mass = CommandlineHelpers::readFloat( argc, argv, "--mass", 0.5);
const RealD csw = CommandlineHelpers::readFloat( argc, argv, "--csw", 1.0);
const RealD cF = CommandlineHelpers::readFloat( argc, argv, "--cF", 1.0);
const bool antiPeriodic = CommandlineHelpers::checkPresent(argc, argv, "--antiperiodic");
// precision
static_assert(getPrecision<vCoeff_t>::value == 2 || getPrecision<vCoeff_t>::value == 1, "Incorrect precision"); // double or single
std::string precision = (getPrecision<vCoeff_t>::value == 2 ? "double" : "single");
// setup grids
GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vCoeff_t::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
// clang-format on
// setup rng
std::vector<int> seeds({1, 2, 3, 4});
GridParallelRNG pRNG(UGrid);
pRNG.SeedFixedIntegers(seeds);
// type definitions
typedef WilsonImpl<vCoeff_t, FundamentalRepresentation, CoeffReal> WImpl;
typedef WilsonCloverFermion<WImpl, CloverHelpers<WImpl>> WilsonCloverOperator;
typedef CompactWilsonCloverFermion<WImpl, CompactCloverHelpers<WImpl>> CompactWilsonCloverOperator;
typedef typename WilsonCloverOperator::FermionField Fermion;
typedef typename WilsonCloverOperator::GaugeField Gauge;
// setup fields
Fermion src(UGrid); random(pRNG, src);
Fermion ref(UGrid); ref = Zero();
Fermion res(UGrid); res = Zero();
Fermion hop(UGrid); hop = Zero();
Fermion diff(UGrid); diff = Zero();
Gauge Umu(UGrid); SU3::HotConfiguration(pRNG, Umu);
// setup boundary phases
typename WilsonCloverOperator::ImplParams implParams;
std::vector<Complex> boundary_phases(Nd, 1.);
if(antiPeriodic) boundary_phases[Nd-1] = -1.;
implParams.boundary_phases = boundary_phases;
WilsonAnisotropyCoefficients anisParams;
// misc stuff needed for benchmarks
double volume=1.0; for(int mu=0; mu<Nd; mu++) volume*=UGrid->_fdimensions[mu];
// setup fermion operators
WilsonCloverOperator Dwc( Umu, *UGrid, *UrbGrid, mass, csw, csw, anisParams, implParams);
CompactWilsonCloverOperator Dwc_compact(Umu, *UGrid, *UrbGrid, mass, csw, csw, cF, anisParams, implParams);
// now test the conversions
typename CompactWilsonCloverOperator::CloverField tmp_ref(UGrid); tmp_ref = Dwc.CloverTerm;
typename CompactWilsonCloverOperator::CloverField tmp_res(UGrid); tmp_res = Zero();
typename CompactWilsonCloverOperator::CloverField tmp_diff(UGrid); tmp_diff = Zero();
typename CompactWilsonCloverOperator::CloverDiagonalField diagonal(UGrid); diagonal = Zero();
typename CompactWilsonCloverOperator::CloverTriangleField triangle(UGrid); triangle = Zero();
CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(tmp_ref, diagonal, triangle);
CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(diagonal, triangle, tmp_res);
tmp_diff = tmp_ref - tmp_res;
std::cout << GridLogMessage << "conversion: ref, res, diff, eps"
<< " " << norm2(tmp_ref)
<< " " << norm2(tmp_res)
<< " " << norm2(tmp_diff)
<< " " << norm2(tmp_diff) / norm2(tmp_ref)
<< std::endl;
// performance per site (use minimal values necessary)
double hop_flop_per_site = 1320; // Rich's Talk + what Peter uses
double hop_byte_per_site = (8 * 9 + 9 * 12) * 2 * getPrecision<vCoeff_t>::value * 4;
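// i.e. (8*9 + 9*12) complex numbers per site: 8 gauge links of 9 entries plus 9 spinors of
// 12 entries (8 read, 1 written), times 2 * precision-words * 4 bytes per complex number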
double clov_flop_per_site = 504; // Rich's Talk and 1412.2629
double clov_byte_per_site = (2 * 18 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;
double clov_flop_per_site_performed = 1128;
double clov_byte_per_site_performed = (12 * 12 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;
// total performance numbers
double hop_gflop_total = volume * nIter * hop_flop_per_site / 1e9;
double hop_gbyte_total = volume * nIter * hop_byte_per_site / 1e9;
double clov_gflop_total = volume * nIter * clov_flop_per_site / 1e9;
double clov_gbyte_total = volume * nIter * clov_byte_per_site / 1e9;
double clov_gflop_performed_total = volume * nIter * clov_flop_per_site_performed / 1e9;
double clov_gbyte_performed_total = volume * nIter * clov_byte_per_site_performed / 1e9;
// warmup + measure dhop
for(auto n : {1, 2, 3, 4, 5}) Dwc.Dhop(src, hop, 0);
double t0 = usecond();
for(int n = 0; n < nIter; n++) Dwc.Dhop(src, hop, 0);
double t1 = usecond();
double secs_hop = (t1-t0)/1e6;
grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n",
"hop", precision.c_str(), secs_hop, hop_gflop_total/secs_hop, hop_gbyte_total/secs_hop, 0.0, secs_hop/secs_hop);
#define BENCH_CLOVER_KERNEL(KERNEL) { \
/* warmup + measure reference clover */ \
for(auto n : {1, 2, 3, 4, 5}) Dwc.KERNEL(src, ref); \
double t2 = usecond(); \
for(int n = 0; n < nIter; n++) Dwc.KERNEL(src, ref); \
double t3 = usecond(); \
double secs_ref = (t3-t2)/1e6; \
grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
"reference_"#KERNEL, precision.c_str(), secs_ref, clov_gflop_total/secs_ref, clov_gbyte_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", /* to see how well the ET performs */ \
"reference_"#KERNEL"_performed", precision.c_str(), secs_ref, clov_gflop_performed_total/secs_ref, clov_gbyte_performed_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
\
/* warmup + measure compact clover */ \
for(auto n : {1, 2, 3, 4, 5}) Dwc_compact.KERNEL(src, res); \
double t4 = usecond(); \
for(int n = 0; n < nIter; n++) Dwc_compact.KERNEL(src, res); \
double t5 = usecond(); \
double secs_res = (t5-t4)/1e6; \
grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
"compact_"#KERNEL, precision.c_str(), secs_res, clov_gflop_total/secs_res, clov_gbyte_total/secs_res, secs_ref/secs_res, secs_res/secs_hop); \
assert(resultsAgree(ref, res, #KERNEL)); \
}
BENCH_CLOVER_KERNEL(Mooee);
BENCH_CLOVER_KERNEL(MooeeDag);
BENCH_CLOVER_KERNEL(MooeeInv);
BENCH_CLOVER_KERNEL(MooeeInvDag);
grid_printf_msg("finalize %s\n", precision.c_str());
}
int main(int argc, char** argv) {
Grid_init(&argc, &argv);
runBenchmark<vComplexD>(&argc, &argv);
runBenchmark<vComplexF>(&argc, &argv);
Grid_finalize();
}

View File

@ -76,20 +76,20 @@ int main (int argc, char ** argv)
RealD M5 =1.8;
std::cout<<GridLogMessage <<"OverlapWilsonContFracTanhFermion test"<<std::endl;
OverlapWilsonContFracTanhFermionR Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonContFracTanhFermionR>(Dcf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonContFracTanhFermionD Dcf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonContFracTanhFermionD>(Dcf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonContFracZolotarevFermion test"<<std::endl;
OverlapWilsonContFracZolotarevFermionR Dcfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonContFracZolotarevFermionR>(Dcfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonContFracZolotarevFermionD Dcfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonContFracZolotarevFermionD>(Dcfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonPartialFractionTanhFermion test"<<std::endl;
OverlapWilsonPartialFractionTanhFermionR Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonPartialFractionTanhFermionR>(Dpf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonPartialFractionTanhFermionD Dpf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonPartialFractionTanhFermionD>(Dpf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonPartialFractionZolotarevFermion test"<<std::endl;
OverlapWilsonPartialFractionZolotarevFermionR Dpfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonPartialFractionZolotarevFermionR>(Dpfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonPartialFractionZolotarevFermionD Dpfz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,6.0);
TestWhat<OverlapWilsonPartialFractionZolotarevFermionD>(Dpfz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}
@ -235,7 +235,6 @@ void TestWhat(What & Ddwf,
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<What,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);

View File

@ -90,7 +90,7 @@ int main (int argc, char ** argv)
RealD shift = 0.1234;
RealD M5 = 1.8;
int pm = 1;
DomainWallEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5);
DomainWallEOFAFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -215,9 +215,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd , chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd , phi_o, phi);
RealD t1,t2;
SchurDiagMooeeOperator<DomainWallEOFAFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<DomainWallEOFAFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e, dchi_e);
HermOpEO.MpcDagMpc(chi_o, dchi_o);

View File

@ -86,7 +86,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD M5 =1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -212,10 +212,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<DomainWallFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -29,14 +29,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/Grid.h>
using namespace Grid;
;
int main (int argc, char ** argv)
{
template<typename Gimpl>
void run(double alpha, bool do_fft_gfix){
std::vector<int> seeds({1,2,3,4});
Grid_init(&argc,&argv);
int threads = GridThread::GetThreads();
Coordinate latt_size = GridDefaultLatt();
@ -55,10 +51,7 @@ int main (int argc, char ** argv)
FFT theFFT(&GRID);
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
std::cout<< "*****************************************************************" <<std::endl;
std::cout<< "* Testing we can gauge fix steep descent a RGT of Unit gauge *" <<std::endl;
std::cout<< "*****************************************************************" <<std::endl;
std::cout<<GridLogMessage << "Using alpha=" << alpha << std::endl;
// int coulomb_dir = -1;
int coulomb_dir = Nd-1;
@ -72,81 +65,167 @@ int main (int argc, char ** argv)
LatticeColourMatrix xform1(&GRID); // Gauge xform
LatticeColourMatrix xform2(&GRID); // Gauge xform
LatticeColourMatrix xform3(&GRID); // Gauge xform
//#########################################################################################
std::cout<< "*********************************************************************************************************" <<std::endl;
std::cout<< "* Testing steepest descent fixing to Landau gauge with randomly transformed unit gauge configuration *" <<std::endl;
std::cout<< "*********************************************************************************************************" <<std::endl;
SU<Nc>::ColdConfiguration(pRNG,Umu); // Unit gauge
Uorg=Umu;
Real init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<< init_plaq << std::endl;
//Apply a random gauge transformation to the unit gauge config
Urnd=Umu;
SU<Nc>::RandomGaugeTransform<Gimpl>(pRNG,Urnd,g);
SU<Nc>::RandomGaugeTransform(pRNG,Urnd,g); // Unit gauge
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<<plaq << std::endl;
Real alpha=0.1;
//Gauge fix the randomly transformed field
Umu = Urnd;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform1,alpha,10000,1.0e-12, 1.0e-12,false);
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform1,alpha,10000,1.0e-12, 1.0e-12,false);
// Check the gauge xform matrices
Utmp=Urnd;
SU<Nc>::GaugeTransform(Utmp,xform1);
SU<Nc>::GaugeTransform<Gimpl>(Utmp,xform1);
Utmp = Utmp - Umu;
std::cout << " Norm Difference of xformed gauge "<< norm2(Utmp) << std::endl;
std::cout << " Check the output gauge transformation matrices applied to the original field produce the xformed field "<< norm2(Utmp) << " (expect 0)" << std::endl;
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << std::endl;
Real plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
Uorg = Uorg - Umu;
std::cout << " Norm Difference "<< norm2(Uorg) << std::endl;
std::cout << " Norm "<< norm2(Umu) << std::endl;
std::cout << " Norm difference between a unit gauge configuration and the gauge fixed configuration "<< norm2(Uorg) << " (expect 0)" << std::endl;
std::cout << " Norm of gauge fixed configuration "<< norm2(Umu) << std::endl;
//#########################################################################################
if(do_fft_gfix){
std::cout<< "*************************************************************************************" <<std::endl;
std::cout<< "* Testing Fourier accelerated fixing to Landau gauge with unit gauge configuration *" <<std::endl;
std::cout<< "*************************************************************************************" <<std::endl;
Umu=Urnd;
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform2,alpha,10000,1.0e-12, 1.0e-12,true);
Utmp=Urnd;
SU<Nc>::GaugeTransform<Gimpl>(Utmp,xform2);
Utmp = Utmp - Umu;
std::cout << " Check the output gauge transformation matrices applied to the original field produce the xformed field "<< norm2(Utmp) << " (expect 0)" << std::endl;
std::cout<< "*****************************************************************" <<std::endl;
std::cout<< "* Testing Fourier accelerated fixing *" <<std::endl;
std::cout<< "*****************************************************************" <<std::endl;
Umu=Urnd;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform2,alpha,10000,1.0e-12, 1.0e-12,true);
plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
}
//#########################################################################################
Utmp=Urnd;
SU<Nc>::GaugeTransform(Utmp,xform2);
Utmp = Utmp - Umu;
std::cout << " Norm Difference of xformed gauge "<< norm2(Utmp) << std::endl;
std::cout<< "******************************************************************************************" <<std::endl;
std::cout<< "* Testing steepest descent fixing to Landau gauge with random configuration **" <<std::endl;
std::cout<< "******************************************************************************************" <<std::endl;
SU<Nc>::HotConfiguration(pRNG,Umu);
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << std::endl;
init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<< init_plaq << std::endl;
std::cout<< "*****************************************************************" <<std::endl;
std::cout<< "* Testing non-unit configuration *" <<std::endl;
std::cout<< "*****************************************************************" <<std::endl;
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,false);
SU<Nc>::HotConfiguration(pRNG,Umu); // Unit gauge
plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<<plaq << std::endl;
//#########################################################################################
if(do_fft_gfix){
std::cout<< "******************************************************************************************" <<std::endl;
std::cout<< "* Testing Fourier accelerated fixing to Landau gauge with random configuration **" <<std::endl;
std::cout<< "******************************************************************************************" <<std::endl;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,true);
SU<Nc>::HotConfiguration(pRNG,Umu);
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << std::endl;
init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<< init_plaq << std::endl;
std::cout<< "*****************************************************************" <<std::endl;
std::cout<< "* Testing Fourier accelerated fixing to coulomb gauge *" <<std::endl;
std::cout<< "*****************************************************************" <<std::endl;
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,alpha,10000,1.0e-12, 1.0e-12,true);
plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
}
//#########################################################################################
std::cout<< "*******************************************************************************************" <<std::endl;
std::cout<< "* Testing steepest descent fixing to coulomb gauge with random configuration *" <<std::endl;
std::cout<< "*******************************************************************************************" <<std::endl;
Umu=Urnd;
SU<Nc>::HotConfiguration(pRNG,Umu); // Unit gauge
SU<Nc>::HotConfiguration(pRNG,Umu);
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<<plaq << std::endl;
init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<< init_plaq << std::endl;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,true,coulomb_dir);
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,false,coulomb_dir);
std::cout << Umu<<std::endl;
plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << std::endl;
//#########################################################################################
if(do_fft_gfix){
std::cout<< "*******************************************************************************************" <<std::endl;
std::cout<< "* Testing Fourier accelerated fixing to coulomb gauge with random configuration *" <<std::endl;
std::cout<< "*******************************************************************************************" <<std::endl;
Umu=Urnd;
SU<Nc>::HotConfiguration(pRNG,Umu);
init_plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Initial plaquette "<< init_plaq << std::endl;
FourierAcceleratedGaugeFixer<Gimpl>::SteepestDescentGaugeFix(Umu,xform3,alpha,10000,1.0e-12, 1.0e-12,true,coulomb_dir);
plaq=WilsonLoops<Gimpl>::avgPlaquette(Umu);
std::cout << " Final plaquette "<<plaq << " diff " << plaq - init_plaq << " (expect 0)" << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
double alpha=0.1; //step size
std::string gimpl = "periodic";
bool do_fft_gfix = true; //test fourier transformed gfix as well as steepest descent
for(int i=1;i<argc;i++){
std::string sarg(argv[i]);
if(sarg == "--gimpl"){
assert(i<argc-1 && "--gimpl option requires an argument");
gimpl = argv[i+1];
if(gimpl != "periodic" && gimpl != "conjugate")
assert(0 && "Invalid gimpl");
if(gimpl == "conjugate")
alpha = 0.025; //default alpha too large for CCBC
}else if(sarg == "--no-fft-gfix"){
std::cout << "Not doing the Fourier accelerated gauge fixing tests" << std::endl;
do_fft_gfix = false;
}else if(sarg == "--alpha"){
assert(i<argc-1 && "--alpha option requires an argument");
std::istringstream ss(argv[i+1]); ss >> alpha;
}
}
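//Illustrative invocation (binary name assumed): ./Test_fft_gfix --gimpl conjugate --alpha 0.025 --no-fft-gfix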
if(gimpl == "periodic"){
std::cout << GridLogMessage << "Using periodic boundary condition" << std::endl;
run<PeriodicGimplR>(alpha, do_fft_gfix);
}else{
std::vector<int> conjdirs = {1,1,0,0}; //test with 2 conjugate dirs and 2 not
std::cout << GridLogMessage << "Using complex conjugate boundary conditions in dimensions ";
for(int i=0;i<Nd;i++)
if(conjdirs[i])
std::cout << i << " ";
std::cout << std::endl;
ConjugateGimplR::setDirections(conjdirs);
run<ConjugateGimplR>(alpha, do_fft_gfix);
}
Grid_finalize();
}

View File

@ -181,8 +181,8 @@ void checkAdj(const Gamma::Algebra a)
void checkProject(GridSerialRNG &rng)
{
SpinVector rv, recon, full;
HalfSpinVector hsp, hsm;
SpinVector rv, recon;
HalfSpinVector hsm;
random(rng, rv);
@ -228,6 +228,59 @@ void checkGammaL(const Gamma::Algebra a, GridSerialRNG &rng)
std::cout << std::endl;
}
void checkChargeConjMatrix(){
//Check the properties of the charge conjugation matrix
//In the Grid basis C = -\gamma^2 \gamma^4
SpinMatrix C = testAlgebra[Gamma::Algebra::MinusGammaY] * testAlgebra[Gamma::Algebra::GammaT];
SpinMatrix mC = -C;
SpinMatrix one = testAlgebra[Gamma::Algebra::Identity];
std::cout << "Testing properties of charge conjugation matrix C = -\\gamma^2 \\gamma^4 (in Grid's basis)" << std::endl;
//C^T = -C
SpinMatrix Ct = transpose(C);
std::cout << GridLogMessage << "C^T=-C ";
test(Ct, mC);
std::cout << std::endl;
//C^\dagger = -C
SpinMatrix Cdag = adj(C);
std::cout << GridLogMessage << "C^dag=-C ";
test(Cdag, mC);
std::cout << std::endl;
//C^* = C
SpinMatrix Cstar = conjugate(C);
std::cout << GridLogMessage << "C^*=C ";
test(Cstar, C);
std::cout << std::endl;
//C^{-1} = -C
SpinMatrix CinvC = mC * C;
std::cout << GridLogMessage << "C^{-1}=-C ";
test(CinvC, one);
std::cout << std::endl;
// C^{-1} \gamma^\mu C = -[\gamma^\mu]^T
Gamma::Algebra gmu_a[4] = { Gamma::Algebra::GammaX, Gamma::Algebra::GammaY, Gamma::Algebra::GammaZ, Gamma::Algebra::GammaT };
for(int mu=0;mu<4;mu++){
SpinMatrix gmu = testAlgebra[gmu_a[mu]];
SpinMatrix Cinv_gmu_C = mC * gmu * C;
SpinMatrix mgmu_T = -transpose(gmu);
std::cout << GridLogMessage << "C^{-1} \\gamma^" << mu << " C = -[\\gamma^" << mu << "]^T ";
test(Cinv_gmu_C, mgmu_T);
std::cout << std::endl;
}
//[C, \gamma^5] = 0
SpinMatrix Cg5 = C * testAlgebra[Gamma::Algebra::Gamma5];
SpinMatrix g5C = testAlgebra[Gamma::Algebra::Gamma5] * C;
std::cout << GridLogMessage << "C \\gamma^5 = \\gamma^5 C";
test(Cg5, g5C);
std::cout << std::endl;
}
int main(int argc, char *argv[])
{
Grid_init(&argc,&argv);
@ -270,6 +323,13 @@ int main(int argc, char *argv[])
{
checkGammaL(i, sRNG);
}
std::cout << GridLogMessage << "======== Charge conjugation matrix check" << std::endl;
checkChargeConjMatrix();
std::cout << GridLogMessage << std::endl;
Grid_finalize();

View File

@ -55,13 +55,17 @@ static_assert(same_vComplex == 1, "Dirac Operators must have same underlying SIM
int main (int argc, char ** argv)
{
int nu = 0;
int tbc_aprd = 0; //use antiperiodic BCs in the time direction?
Grid_init(&argc,&argv);
for(int i=1;i<argc;i++){
if(std::string(argv[i]) == "--Gparity-dir"){
std::stringstream ss; ss << argv[i+1]; ss >> nu;
std::cout << GridLogMessage << "Set Gparity direction to " << nu << std::endl;
}else if(std::string(argv[i]) == "--Tbc-APRD"){
tbc_aprd = 1;
std::cout << GridLogMessage << "Using antiperiodic BCs in the time direction" << std::endl;
}
}
@ -155,13 +159,18 @@ int main (int argc, char ** argv)
//Coordinate grid for reference
LatticeInteger xcoor_1f5(FGrid_1f);
LatticeCoordinate(xcoor_1f5,1+nu);
LatticeCoordinate(xcoor_1f5,1+nu); //note '1+nu'! This is because for 5D fields the s-direction is direction 0
Replicate(src,src_1f);
src_1f = where( xcoor_1f5 >= Integer(L), 2.0*src_1f,src_1f );
RealD mass=0.0;
RealD M5=1.8;
StandardDiracOp Ddwf(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,mass,M5 DOP_PARAMS);
//Standard Dirac op
AcceleratorVector<Complex,4> bc_std(Nd, 1.0);
if(tbc_aprd) bc_std[Nd-1] = -1.; //antiperiodic time BC
StandardDiracOp::ImplParams std_params(bc_std);
StandardDiracOp Ddwf(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,mass,M5 DOP_PARAMS, std_params);
StandardFermionField src_o_1f(FrbGrid_1f);
StandardFermionField result_o_1f(FrbGrid_1f);
@ -172,9 +181,11 @@ int main (int argc, char ** argv)
ConjugateGradient<StandardFermionField> CG(1.0e-8,10000);
CG(HermOpEO,src_o_1f,result_o_1f);
// const int nu = 3;
//Gparity Dirac op
std::vector<int> twists(Nd,0);
twists[nu] = 1;
if(tbc_aprd) twists[Nd-1] = 1;
GparityDiracOp::ImplParams params;
params.twists = twists;
GparityDiracOp GPDdwf(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f,mass,M5 DOP_PARAMS,params);
@ -271,8 +282,11 @@ int main (int argc, char ** argv)
std::cout << "2f cb "<<result_o_2f.Checkerboard()<<std::endl;
std::cout << "1f cb "<<result_o_1f.Checkerboard()<<std::endl;
std::cout << " result norms " <<norm2(result_o_2f)<<" " <<norm2(result_o_1f)<<std::endl;
//Compare norms
std::cout << " result norms 2f: " <<norm2(result_o_2f)<<" 1f: " <<norm2(result_o_1f)<<std::endl;
//Take the 2f solution and convert into the corresponding 1f solution (odd cb only)
StandardFermionField res0o (FrbGrid_2f);
StandardFermionField res1o (FrbGrid_2f);
StandardFermionField res0 (FGrid_2f);
@ -281,14 +295,15 @@ int main (int argc, char ** argv)
res0=Zero();
res1=Zero();
res0o = PeekIndex<0>(result_o_2f,0);
res1o = PeekIndex<0>(result_o_2f,1);
res0o = PeekIndex<0>(result_o_2f,0); //flavor 0, odd cb
res1o = PeekIndex<0>(result_o_2f,1); //flavor 1, odd cb
std::cout << "res cb "<<res0o.Checkerboard()<<std::endl;
std::cout << "res cb "<<res1o.Checkerboard()<<std::endl;
setCheckerboard(res0,res0o);
setCheckerboard(res1,res1o);
//poke odd onto non-cb field
setCheckerboard(res0,res0o);
setCheckerboard(res1,res1o);
StandardFermionField replica (FGrid_1f);
StandardFermionField replica0(FGrid_1f);
@ -296,12 +311,13 @@ int main (int argc, char ** argv)
Replicate(res0,replica0);
Replicate(res1,replica1);
//2nd half of doubled lattice has f=1
replica = where( xcoor_1f5 >= Integer(L), replica1,replica0 );
replica0 = Zero();
setCheckerboard(replica0,result_o_1f);
std::cout << "Norm2 solutions is " <<norm2(replica)<<" "<< norm2(replica0)<<std::endl;
std::cout << "Norm2 solutions 1f reconstructed from 2f: " <<norm2(replica)<<" Actual 1f: "<< norm2(replica0)<<std::endl;
replica = replica - replica0;

View File

@ -0,0 +1,177 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_gparity_flavour.cc
Copyright (C) 2015-2017
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
static constexpr double tolerance = 1.0e-6;
static std::array<GparityFlavourMatrix, GparityFlavour::nSigma> testAlgebra;
void print(const GparityFlavourMatrix &g)
{
for(int i = 0; i < Ngp; i++)
{
std::cout << GridLogMessage << "(";
for(int j=0;j<Ngp;j++){
if ( abs( g(i,j)()() ) == 0 ) {
std::cout<< " 0";
} else if ( abs(g(i,j)()() - Complex(0,1)) == 0){
std::cout<< " i";
} else if ( abs(g(i,j)()() + Complex(0,1)) == 0){
std::cout<< "-i";
} else if ( abs(g(i,j)()() - Complex(1,0)) == 0){
std::cout<< " 1";
} else if ( abs(g(i,j)()() + Complex(1,0)) == 0){
std::cout<< "-1";
}
std::cout<<((j == Ngp-1) ? ")" : "," );
}
std::cout << std::endl;
}
std::cout << GridLogMessage << std::endl;
}
void createTestAlgebra(void)
{
std::array<GparityFlavourMatrix, 3> testg;
const Complex I(0., 1.), mI(0., -1.);
// 0 1
// 1 0
testg[0] = Zero();
testg[0](0, 1)()() = 1.;
testg[0](1, 0)()() = 1.;
std::cout << GridLogMessage << "test SigmaX= " << std::endl;
print(testg[0]);
// 0 -i
// i 0
testg[1] = Zero();
testg[1](0, 1)()() = mI;
testg[1](1, 0)()() = I;
std::cout << GridLogMessage << "test SigmaY= " << std::endl;
print(testg[1]);
// 1 0
// 0 -1
testg[2] = Zero();
testg[2](0, 0)()() = 1.0;
testg[2](1, 1)()() = -1.0;
std::cout << GridLogMessage << "test SigmaZ= " << std::endl;
print(testg[2]);
#define DEFINE_TEST_G(g, exp)\
testAlgebra[GparityFlavour::Algebra::g] = exp; \
testAlgebra[GparityFlavour::Algebra::Minus##g] = -exp;
DEFINE_TEST_G(SigmaX , testg[0]);
DEFINE_TEST_G(SigmaY , testg[1]);
DEFINE_TEST_G(SigmaZ , testg[2]);
DEFINE_TEST_G(Identity , 1.);
GparityFlavourMatrix pplus;
pplus = 1.0;
pplus = pplus + testg[1];
pplus = pplus * 0.5;
DEFINE_TEST_G(ProjPlus , pplus);
GparityFlavourMatrix pminus;
pminus = 1.0;
pminus = pminus - testg[1];
pminus = pminus * 0.5;
DEFINE_TEST_G(ProjMinus , pminus);
#undef DEFINE_TEST_G
}
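For reference, the algebra assembled above is simply the Pauli algebra acting in G-parity flavour space; summarising the matrices printed by createTestAlgebra in the code's own normalisation (nothing new is defined here):
\[
\sigma_X = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix},\qquad
\sigma_Y = \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix},\qquad
\sigma_Z = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix},\qquad
P_\pm = \tfrac{1}{2}\bigl(\mathbb{1} \pm \sigma_Y\bigr).
\]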
template <typename Expr>
void test(const Expr &a, const Expr &b)
{
if (norm2(a - b) < tolerance)
{
std::cout << "[OK] ";
}
else
{
std::cout << "[fail]" << std::endl;
std::cout << GridLogError << "a= " << a << std::endl;
std::cout << GridLogError << "is different (tolerance= " << tolerance << ") from " << std::endl;
std::cout << GridLogError << "b= " << b << std::endl;
exit(EXIT_FAILURE);
}
}
void checkSigma(const GparityFlavour::Algebra a, GridSerialRNG &rng)
{
GparityFlavourVector v;
GparityFlavourMatrix m, &testg = testAlgebra[a];
GparityFlavour g(a);
random(rng, v);
random(rng, m);
std::cout << GridLogMessage << "Checking " << GparityFlavour::name[a] << ": ";
std::cout << "vecmul ";
test(g*v, testg*v);
std::cout << "matlmul ";
test(g*m, testg*m);
std::cout << "matrmul ";
test(m*g, m*testg);
std::cout << std::endl;
}
int main(int argc, char *argv[])
{
Grid_init(&argc,&argv);
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(4,vComplex::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridSerialRNG sRNG;
sRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
std::cout << GridLogMessage << "======== Test algebra" << std::endl;
createTestAlgebra();
std::cout << GridLogMessage << "======== Multiplication operators check" << std::endl;
for (int i = 0; i < GparityFlavour::nSigma; ++i)
{
checkSigma(i, sRNG);
}
std::cout << GridLogMessage << std::endl;
Grid_finalize();
return EXIT_SUCCESS;
}

View File

@ -52,7 +52,7 @@ int main (int argc, char ** argv)
// pRNG.SeedFixedIntegers(seeds);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
typedef typename GparityWilsonFermionR::FermionField FermionField;
typedef typename GparityWilsonFermionD::FermionField FermionField;
FermionField src (&Grid); random(pRNG,src);
FermionField phi (&Grid); random(pRNG,phi);
@ -80,10 +80,10 @@ int main (int argc, char ** argv)
RealD mass=0.1;
GparityWilsonFermionR::ImplParams params;
GparityWilsonFermionD::ImplParams params;
std::vector<int> twists(Nd,0); twists[1] = 1;
params.twists = twists;
GparityWilsonFermionR Dw(Umu,Grid,RBGrid,mass,params);
GparityWilsonFermionD Dw(Umu,Grid,RBGrid,mass,params);
FermionField src_e (&RBGrid);
FermionField src_o (&RBGrid);
@ -198,9 +198,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<GparityWilsonFermionR,FermionField> HermOpEO(Dw);
SchurDiagMooeeOperator<GparityWilsonFermionD,FermionField> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -9,6 +9,7 @@ Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Author: Jamie Hudspith <renwick.james.hudspth@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -43,14 +44,18 @@ directory
using namespace std;
using namespace Grid;
int main(int argc, char** argv) {
Grid_init(&argc, &argv);
std::vector<int> latt({4, 4, 4, 8});
GridCartesian* grid = SpaceTimeGrid::makeFourDimGrid(
latt, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
latt, GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(grid);
@ -61,10 +66,16 @@ int main(int argc, char** argv) {
<< std::endl;
SU2::printGenerators();
std::cout << "Dimension of adjoint representation: "<< SU2Adjoint::Dimension << std::endl;
// guard as this code fails to compile for Nc != 3
#if 1
std::cout << " Printing Adjoint Generators"<< std::endl;
SU2Adjoint::printGenerators();
SU2::testGenerators();
SU2Adjoint::testGenerators();
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "* Generators for SU(3)" << std::endl;
@ -112,8 +123,6 @@ int main(int argc, char** argv) {
// AdjointRepresentation has the predefined number of colours Nc
// Representations<FundamentalRepresentation, AdjointRepresentation, TwoIndexSymmetricRepresentation> RepresentationTypes(grid);
LatticeGaugeField U(grid), V(grid);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, U);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, V);
@ -128,17 +137,17 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeMatrix Vmu = peekLorentz(V,mu);
pokeLorentz(UV,Umu*Vmu, mu);
}
AdjRep.update_representation(UV);
typename AdjointRep<Nc>::LatticeField UVr = AdjRep.U; // (U_f * V_f)_r
AdjRep.update_representation(U);
typename AdjointRep<Nc>::LatticeField Ur = AdjRep.U; // U_r
AdjRep.update_representation(V);
typename AdjointRep<Nc>::LatticeField Vr = AdjRep.U; // V_r
typename AdjointRep<Nc>::LatticeField UrVr(grid);
UrVr = Zero();
for (int mu = 0; mu < Nd; mu++) {
@ -147,9 +156,32 @@ int main(int argc, char** argv) {
pokeLorentz(UrVr,Urmu*Vrmu, mu);
}
typedef typename SU_Adjoint<Nc>::AMatrix AdjointMatrix;
typename AdjointRep<Nc>::LatticeField Diff_check = UVr - UrVr;
std::cout << GridLogMessage << "Group structure SU("<<Nc<<") check difference (Adjoint representation) : " << norm2(Diff_check) << std::endl;
std::cout << GridLogMessage << "****************************************** " << std::endl;
std::cout << GridLogMessage << " MAP BETWEEN FUNDAMENTAL AND ADJOINT CHECK " << std::endl;
std::cout << GridLogMessage << "****************************************** " << std::endl;
for(int a=0;a<Nc*Nc-1;a++){
for(int b=0;b<Nc*Nc-1;b++){
for(int c=0;c<Nc*Nc-1;c++){
ColourMatrix Ta;
ColourMatrix Tb;
ColourMatrix Tc;
SU3::generator(a, Ta);
SU3::generator(b, Tb);
SU3::generator(c, Tc);
AdjointMatrix TRa;
SU3Adjoint::generator(a,TRa);
Complex tr1 = trace ( Tc * ( Ta*Tb-Tb*Ta)); // i/2 fabc
Complex tr2 = TRa()()(b,c) * Complex(0,1);
std::cout << " 2 Tr( Tc[Ta,Tb]) " << 2.0*tr1<<std::endl;
std::cout << " - TRa_bc " << tr2<<std::endl;
assert(abs( (2.0*tr1-tr2) ) < 1.0e-7);
std::cout << "------------------"<<std::endl;
}}}
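A compact statement of the identity the triple loop above verifies, assuming the conventional normalisation Tr(T^a T^b) = delta^{ab}/2 for the fundamental generators (an assumption consistent with the inline comment "// i/2 fabc", not something checked independently here):
\[
[T^a,T^b] = i f^{abc}\,T^c,\qquad \mathrm{Tr}\,(T^a T^b)=\tfrac{1}{2}\delta^{ab}
\;\;\Longrightarrow\;\;
2\,\mathrm{Tr}\!\bigl(T^c\,[T^a,T^b]\bigr) = i f^{abc},
\]
so the assertion 2*tr1 == tr2 identifies the adjoint generator elements with the structure constants, up to the explicit factor of Complex(0,1) applied to TRa in the code.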
// Check correspondence of algebra and group transformations
// Create a random vector
SU<Nc>::LatticeAlgebraVector h_adj(grid);
@ -157,32 +189,31 @@ int main(int argc, char** argv) {
random(gridRNG,h_adj);
h_adj = real(h_adj);
SU_Adjoint<Nc>::AdjointLieAlgebraMatrix(h_adj,Ar);
// Re-extract h_adj
SU<Nc>::LatticeAlgebraVector h_adj2(grid);
SU_Adjoint<Nc>::projectOnAlgebra(h_adj2, Ar);
SU<Nc>::LatticeAlgebraVector h_diff = h_adj - h_adj2;
std::cout << GridLogMessage << "Projections structure check vector difference (Adjoint representation) : " << norm2(h_diff) << std::endl;
// Exponentiate
typename AdjointRep<Nc>::LatticeMatrix Uadj(grid);
Uadj = expMat(Ar, 1.0, 16);
typename AdjointRep<Nc>::LatticeMatrix uno(grid);
uno = 1.0;
// Check matrix Uadj, must be real orthogonal
typename AdjointRep<Nc>::LatticeMatrix Ucheck = Uadj - conjugate(Uadj);
std::cout << GridLogMessage << "Reality check: " << norm2(Ucheck)
<< std::endl;
<< std::endl;
Ucheck = Uadj * adj(Uadj) - uno;
std::cout << GridLogMessage << "orthogonality check 1: " << norm2(Ucheck)
<< std::endl;
<< std::endl;
Ucheck = adj(Uadj) * Uadj - uno;
std::cout << GridLogMessage << "orthogonality check 2: " << norm2(Ucheck)
<< std::endl;
<< std::endl;
// Construct the fundamental matrix in the group
SU<Nc>::LatticeMatrix Af(grid);
SU<Nc>::FundamentalLieAlgebraMatrix(h_adj,Af);
@ -194,72 +225,65 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeMatrix UnitCheck(grid);
UnitCheck = Ufund * adj(Ufund) - uno_f;
std::cout << GridLogMessage << "unitarity check 1: " << norm2(UnitCheck)
<< std::endl;
<< std::endl;
UnitCheck = adj(Ufund) * Ufund - uno_f;
std::cout << GridLogMessage << "unitarity check 2: " << norm2(UnitCheck)
<< std::endl;
<< std::endl;
// Transform to the adjoint representation
U = Zero(); // fill this with only one direction
pokeLorentz(U,Ufund,0); // the representation transf acts on full gauge fields
AdjRep.update_representation(U);
Ur = AdjRep.U; // U_r
typename AdjointRep<Nc>::LatticeMatrix Ur0 = peekLorentz(Ur,0); // this should be the same as Uadj
typename AdjointRep<Nc>::LatticeMatrix Diff_check_mat = Ur0 - Uadj;
std::cout << GridLogMessage << "Projections structure check group difference : " << norm2(Diff_check_mat) << std::endl;
// TwoIndexRep tests
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "* eS^{ij} base for SU(2)" << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "Dimension of Two Index Symmetric representation: "<< SU2TwoIndexSymm::Dimension << std::endl;
SU2TwoIndexSymm::printBase();
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Generators of Two Index Symmetric representation: "<< SU2TwoIndexSymm::Dimension << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Generators of Two Index Symmetric representation: "<< SU2TwoIndexSymm::Dimension << std::endl;
SU2TwoIndexSymm::printGenerators();
std::cout << GridLogMessage << "Test of Two Index Symmetric Generators: "<< SU2TwoIndexSymm::Dimension << std::endl;
std::cout << GridLogMessage << "Test of Two Index Symmetric Generators: "<< SU2TwoIndexSymm::Dimension << std::endl;
SU2TwoIndexSymm::testGenerators();
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "* eAS^{ij} base for SU(2)" << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "Dimension of Two Index anti-Symmetric representation: "<< SU2TwoIndexAntiSymm::Dimension << std::endl;
SU2TwoIndexAntiSymm::printBase();
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Dimension of Two Index anti-Symmetric representation: "<< SU2TwoIndexAntiSymm::Dimension << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Dimension of Two Index anti-Symmetric representation: "<< SU2TwoIndexAntiSymm::Dimension << std::endl;
SU2TwoIndexAntiSymm::printGenerators();
std::cout << GridLogMessage << "Test of Two Index anti-Symmetric Generators: "<< SU2TwoIndexAntiSymm::Dimension << std::endl;
SU2TwoIndexAntiSymm::testGenerators();
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "Test for the Two Index Symmetric projectors"
<< std::endl;
<< std::endl;
// Projectors
SU_TwoIndex<Nc, Symmetric>::LatticeTwoIndexMatrix Gauss2(grid);
random(gridRNG,Gauss2);
@ -277,13 +301,13 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeAlgebraVector diff2 = ha - hb;
std::cout << GridLogMessage << "Difference: " << norm2(diff) << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Test for the Two index anti-Symmetric projectors"
<< std::endl;
<< std::endl;
// Projectors
SU_TwoIndex<Nc, AntiSymmetric>::LatticeTwoIndexMatrix Gauss2a(grid);
random(gridRNG,Gauss2a);
@ -301,20 +325,21 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeAlgebraVector diff2a = ha - hb;
std::cout << GridLogMessage << "Difference: " << norm2(diff2a) << std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
<< std::endl;
std::cout << GridLogMessage << "Two index Symmetric: Checking Group Structure"
<< std::endl;
<< std::endl;
// Testing HMC representation classes
TwoIndexRep< Nc, Symmetric> TIndexRep(grid);
// Test group structure
// (U_f * V_f)_r = U_r * V_r
LatticeGaugeField U2(grid), V2(grid);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, U2);
SU<Nc>::HotConfiguration<LatticeGaugeField>(gridRNG, V2);
LatticeGaugeField UV2(grid);
UV2 = Zero();
for (int mu = 0; mu < Nd; mu++) {
@ -322,28 +347,31 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeMatrix Vmu2 = peekLorentz(V2,mu);
pokeLorentz(UV2,Umu2*Vmu2, mu);
}
TIndexRep.update_representation(UV2);
typename TwoIndexRep< Nc, Symmetric>::LatticeField UVr2 = TIndexRep.U; // (U_f * V_f)_r
typename TwoIndexRep< Nc, Symmetric >::LatticeField UVr2 = TIndexRep.U; // (U_f * V_f)_r
TIndexRep.update_representation(U2);
typename TwoIndexRep< Nc, Symmetric>::LatticeField Ur2 = TIndexRep.U; // U_r
typename TwoIndexRep< Nc, Symmetric >::LatticeField Ur2 = TIndexRep.U; // U_r
TIndexRep.update_representation(V2);
typename TwoIndexRep< Nc, Symmetric>::LatticeField Vr2 = TIndexRep.U; // V_r
typename TwoIndexRep< Nc, Symmetric>::LatticeField Ur2Vr2(grid);
typename TwoIndexRep< Nc, Symmetric >::LatticeField Vr2 = TIndexRep.U; // V_r
typename TwoIndexRep< Nc, Symmetric >::LatticeField Ur2Vr2(grid);
Ur2Vr2 = Zero();
for (int mu = 0; mu < Nd; mu++) {
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Urmu2 = peekLorentz(Ur2,mu);
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Vrmu2 = peekLorentz(Vr2,mu);
pokeLorentz(Ur2Vr2,Urmu2*Vrmu2, mu);
}
typename TwoIndexRep< Nc, Symmetric>::LatticeField Diff_check2 = UVr2 - Ur2Vr2;
std::cout << GridLogMessage << "Group structure SU("<<Nc<<") check difference (Two Index Symmetric): " << norm2(Diff_check2) << std::endl;
typename TwoIndexRep< Nc, Symmetric >::LatticeField Diff_check2 = UVr2 - Ur2Vr2;
std::cout << GridLogMessage << "Group structure SU("<<Nc<<") check difference (Two Index Symmetric): " << norm2(Diff_check2) << std::endl;
// Check correspondence of algebra and group transformations
// Create a random vector
SU<Nc>::LatticeAlgebraVector h_sym(grid);
@ -351,34 +379,31 @@ int main(int argc, char** argv) {
random(gridRNG,h_sym);
h_sym = real(h_sym);
SU_TwoIndex<Nc,Symmetric>::TwoIndexLieAlgebraMatrix(h_sym,Ar_sym);
// Re-extract h_sym
SU<Nc>::LatticeAlgebraVector h_sym2(grid);
SU_TwoIndex< Nc, Symmetric>::projectOnAlgebra(h_sym2, Ar_sym);
SU<Nc>::LatticeAlgebraVector h_diff_sym = h_sym - h_sym2;
std::cout << GridLogMessage << "Projections structure check vector difference (Two Index Symmetric): " << norm2(h_diff_sym) << std::endl;
// Exponentiate
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix U2iS(grid);
U2iS = expMat(Ar_sym, 1.0, 16);
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix uno2iS(grid);
uno2iS = 1.0;
// Check matrix U2iS, must be real orthogonal
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Ucheck2iS = U2iS - conjugate(U2iS);
std::cout << GridLogMessage << "Reality check: " << norm2(Ucheck2iS)
<< std::endl;
<< std::endl;
Ucheck2iS = U2iS * adj(U2iS) - uno2iS;
std::cout << GridLogMessage << "orthogonality check 1: " << norm2(Ucheck2iS)
<< std::endl;
<< std::endl;
Ucheck2iS = adj(U2iS) * U2iS - uno2iS;
std::cout << GridLogMessage << "orthogonality check 2: " << norm2(Ucheck2iS)
<< std::endl;
<< std::endl;
// Construct the fundamental matrix in the group
SU<Nc>::LatticeMatrix Af_sym(grid);
SU<Nc>::FundamentalLieAlgebraMatrix(h_sym,Af_sym);
@ -387,32 +412,30 @@ int main(int argc, char** argv) {
SU<Nc>::LatticeMatrix UnitCheck2(grid);
UnitCheck2 = Ufund2 * adj(Ufund2) - uno_f;
std::cout << GridLogMessage << "unitarity check 1: " << norm2(UnitCheck2)
<< std::endl;
<< std::endl;
UnitCheck2 = adj(Ufund2) * Ufund2 - uno_f;
std::cout << GridLogMessage << "unitarity check 2: " << norm2(UnitCheck2)
<< std::endl;
<< std::endl;
// Transform to the 2Index Sym representation
U = Zero(); // fill this with only one direction
pokeLorentz(U,Ufund2,0); // the representation transf acts on full gauge fields
TIndexRep.update_representation(U);
Ur2 = TIndexRep.U; // U_r
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Ur02 = peekLorentz(Ur2,0); // this should be the same as U2iS
typename TwoIndexRep< Nc, Symmetric>::LatticeMatrix Diff_check_mat2 = Ur02 - U2iS;
std::cout << GridLogMessage << "Projections structure check group difference (Two Index Symmetric): " << norm2(Diff_check_mat2) << std::endl;
if (TwoIndexRep<Nc, AntiSymmetric >::Dimension != 1){
if (TwoIndexRep<Nc, AntiSymmetric>::Dimension != 1){
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "*********************************************"
<< std::endl;
std::cout << GridLogMessage << "Two Index anti-Symmetric: Check Group Structure"
<< std::endl;
// Testing HMC representation classes
@ -519,14 +542,6 @@ int main(int argc, char** argv) {
"because representation is trivial (dim = 1)"
<< std::endl;
}
Grid_finalize();
}

View File

@ -364,14 +364,12 @@ int main(int argc, char **argv) {
{ // Peek-ology and Poke-ology, with a little app-ology
Complex c;
ColourMatrix c_m;
SpinMatrix s_m;
SpinColourMatrix sc_m;
ColourMatrix c_m = Zero();
SpinMatrix s_m = Zero();
SpinColourMatrix sc_m = Zero();
s_m = TensorIndexRecursion<ColourIndex>::traceIndex(
sc_m); // Map to traceColour
c_m = TensorIndexRecursion<SpinIndex>::traceIndex(
sc_m); // map to traceSpin
s_m = TensorIndexRecursion<ColourIndex>::traceIndex(sc_m); // Map to traceColour
c_m = TensorIndexRecursion<SpinIndex>::traceIndex(sc_m); // map to traceSpin
c = TensorIndexRecursion<SpinIndex>::traceIndex(s_m);
c = TensorIndexRecursion<ColourIndex>::traceIndex(c_m);

View File

@ -0,0 +1,110 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_memory_manager.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
void MemoryTest(GridCartesian * FGrid,int N);
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
int N=100;
for(int i=0;i<N;i++){
std::cout << "============================"<<std::endl;
std::cout << "Epoch "<<i<<"/"<<N<<std::endl;
std::cout << "============================"<<std::endl;
MemoryTest(UGrid,256);
MemoryManager::Print();
AUDIT();
}
Grid_finalize();
}
void MemoryTest(GridCartesian * FGrid, int N)
{
LatticeComplexD zero(FGrid); zero=Zero();
std::vector<LatticeComplexD> A(N,zero);//FGrid);
std::vector<ComplexD> B(N,ComplexD(0.0)); // Update sequentially on host
for(int v=0;v<N;v++) A[v] = Zero();
uint64_t counter = 0;
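// Invariant maintained by the loop below: every site of A[v] carries the same running sum as the host-side scalar B[v]; the read branches re-check this through either a device (AcceleratorRead) or host (CpuRead) view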
for(int epoch = 0;epoch<10000;epoch++){
int v = random() %N; // Which vec
int w = random() %2; // Write or read
int e = random() %3; // expression or for loop
int dev= random() %2; // On device?
// int e=1;
ComplexD zc = counter++;
if ( w ) {
B[v] = B[v] + zc;
if ( e == 0 ) {
A[v] = A[v] + zc - A[v] + A[v];
} else {
if ( dev ) {
autoView(A_v,A[v],AcceleratorWrite);
accelerator_for(ss,FGrid->oSites(),1,{
A_v[ss] = A_v[ss] + zc;
});
} else {
autoView(A_v,A[v],CpuWrite);
thread_for(ss,FGrid->oSites(),{
A_v[ss] = A_v[ss] + zc;
});
}
}
} else {
if ( e == 0 ) {
A[v] = A[v] + A[v] - A[v];
} else {
if ( dev ) {
autoView(A_v,A[v],AcceleratorRead);
accelerator_for(ss,FGrid->oSites(),1,{
assert(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on GPU"<<B[v]<<std::endl;
} else {
autoView(A_v,A[v],CpuRead);
thread_for(ss,FGrid->oSites(),{
assert(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on CPU"<<B[v]<<std::endl;
}
}
}
}
}

View File

@ -92,7 +92,7 @@ int main (int argc, char ** argv)
RealD shift = 0.1234;
RealD M5 = 1.8;
int pm = 1;
MobiusEOFAFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5, b, c);
MobiusEOFAFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mq1, mq2, mq3, shift, pm, M5, b, c);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -217,9 +217,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd , chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd , phi_o, phi);
RealD t1,t2;
SchurDiagMooeeOperator<MobiusEOFAFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<MobiusEOFAFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e, dchi_e);
HermOpEO.MpcDagMpc(chi_o, dchi_o);

View File

@ -108,8 +108,8 @@ int main (int argc, char ** argv)
omegas.push_back( std::complex<double>(0.0686324988446592,-0.0550658530827402) );
#endif
MobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, 0.5,0.5);
// DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
MobiusFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, 0.5,0.5);
// DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -262,10 +262,9 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<MobiusFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<MobiusFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -0,0 +1,124 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/core/Test_prec_change.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
int Ls = 12;
Coordinate latt4 = GridDefaultLatt();
GridCartesian * UGridD = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGridD = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridD);
GridCartesian * FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridD);
GridRedBlackCartesian * FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridD);
GridCartesian * UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian * FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
GridParallelRNG RNG5(FGridD); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG5F(FGridF); RNG5F.SeedFixedIntegers(seeds5);
std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
LatticeFermionD field_d(FGridD), tmp_d(FGridD);
random(RNG5,field_d);
RealD norm2_field_d = norm2(field_d);
LatticeFermionD2 field_d2(FGridF), tmp_d2(FGridF);
random(RNG5F,field_d2);
RealD norm2_field_d2 = norm2(field_d2);
LatticeFermionF field_f(FGridF);
//Test original implementation
{
std::cout << GridLogMessage << "Testing original implementation" << std::endl;
field_f = Zero();
precisionChangeOrig(field_f,field_d);
RealD Ndiff = (norm2_field_d - norm2(field_f))/norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of single and double prec fields differs by " << Ndiff << std::endl;
tmp_d = Zero();
precisionChangeOrig(tmp_d, field_f);
Ndiff = norm2( LatticeFermionD(tmp_d-field_d) ) / norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of back-converted and original double prec fields differs by " << Ndiff << std::endl;
}
//Test new implementation with pregenerated workspace
{
std::cout << GridLogMessage << "Testing new implementation with pregenerated workspace" << std::endl;
precisionChangeWorkspace wk_sp_to_dp(field_d.Grid(),field_f.Grid());
precisionChangeWorkspace wk_dp_to_sp(field_f.Grid(),field_d.Grid());
field_f = Zero();
precisionChange(field_f,field_d,wk_dp_to_sp);
RealD Ndiff = (norm2_field_d - norm2(field_f))/norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of single and double prec fields differs by " << Ndiff << std::endl;
tmp_d = Zero();
precisionChange(tmp_d, field_f,wk_sp_to_dp);
Ndiff = norm2( LatticeFermionD(tmp_d-field_d) ) / norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of back-converted and original double prec fields differs by " << Ndiff << std::endl;
}
//Test new implementation without pregenerated workspace
{
std::cout << GridLogMessage << "Testing new implementation without pregenerated workspace" << std::endl;
field_f = Zero();
precisionChange(field_f,field_d);
RealD Ndiff = (norm2_field_d - norm2(field_f))/norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of single and double prec fields differs by " << Ndiff << std::endl;
tmp_d = Zero();
precisionChange(tmp_d, field_f);
Ndiff = norm2( LatticeFermionD(tmp_d-field_d) ) / norm2_field_d;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of back-converted and original double prec fields differs by " << Ndiff << std::endl;
}
//Test fast implementation
{
std::cout << GridLogMessage << "Testing fast (double2) implementation" << std::endl;
field_f = Zero();
precisionChangeFast(field_f,field_d2);
RealD Ndiff = (norm2_field_d2 - norm2(field_f))/norm2_field_d2;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of single and double prec fields differs by " << Ndiff << std::endl;
tmp_d2 = Zero();
precisionChangeFast(tmp_d2, field_f);
Ndiff = norm2( LatticeFermionD2(tmp_d2-field_d2) ) / norm2_field_d2;
std::cout << GridLogMessage << (fabs(Ndiff) > 1e-05 ? "!!FAIL" : "Pass") << ": relative norm2 of back-converted and original double prec fields differs by " << Ndiff << std::endl;
}
std::cout << "Done" << std::endl;
Grid_finalize();
}
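All four blocks above use the same relative figure of merit; written out purely as a summary of the code (the 1e-05 threshold is the one appearing in the conditionals):
\[
N^{(1)}_{\rm diff} = \frac{\lVert\phi_d\rVert^2-\lVert\phi_f\rVert^2}{\lVert\phi_d\rVert^2},\qquad
N^{(2)}_{\rm diff} = \frac{\lVert\phi_d'-\phi_d\rVert^2}{\lVert\phi_d\rVert^2},\qquad
\text{pass if } |N_{\rm diff}|\le 10^{-5},
\]
with phi_d the original double-precision field, phi_f its single-precision image and phi_d' the field converted back to double precision.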

View File

@ -132,6 +132,7 @@ int main (int argc, char ** argv)
tmp = U*adj(U) - ident;
std::cout << "Unitarity check after projection " << norm2(tmp)<<std::endl;
#endif
ProjectSUn(UU);
detUU= Determinant(UU);
detUU= detUU -1.0;

View File

@ -53,9 +53,9 @@ int main (int argc, char ** argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename ImprovedStaggeredFermionR::FermionField FermionField;
typedef typename ImprovedStaggeredFermionR::ComplexField ComplexField;
typename ImprovedStaggeredFermionR::ImplParams params;
typedef typename ImprovedStaggeredFermionD::FermionField FermionField;
typedef typename ImprovedStaggeredFermionD::ComplexField ComplexField;
typename ImprovedStaggeredFermionD::ImplParams params;
FermionField src (&Grid); random(pRNG,src);
FermionField result(&Grid); result=Zero();
@ -130,7 +130,7 @@ int main (int argc, char ** argv)
// ref = ref + mass * src;
}
ImprovedStaggeredFermionR Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
ImprovedStaggeredFermionD Ds(Umu,Umu,Grid,RBGrid,mass,c1,c2,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
@ -144,7 +144,7 @@ int main (int argc, char ** argv)
Ds.Dhop(src,result,0);
}
double t1=usecond();
double t2;
double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + == 1146
std::cout<<GridLogMessage << "Called Ds"<<std::endl;
@ -269,7 +269,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<ImprovedStaggeredFermionR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<ImprovedStaggeredFermionD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -60,9 +60,9 @@ int main (int argc, char ** argv)
pRNG4.SeedFixedIntegers(seeds);
pRNG5.SeedFixedIntegers(seeds);
typedef typename ImprovedStaggeredFermion5DR::FermionField FermionField;
typedef typename ImprovedStaggeredFermion5DR::ComplexField ComplexField;
typename ImprovedStaggeredFermion5DR::ImplParams params;
typedef typename ImprovedStaggeredFermion5DD::FermionField FermionField;
typedef typename ImprovedStaggeredFermion5DD::ComplexField ComplexField;
typename ImprovedStaggeredFermion5DD::ImplParams params;
FermionField src (FGrid);
@ -148,7 +148,7 @@ int main (int argc, char ** argv)
}
}
ImprovedStaggeredFermion5DR Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params);
ImprovedStaggeredFermion5DD Ds(Umu,Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,c1,c2,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
std::cout<<GridLogMessage<<"= Testing Dhop against cshift implementation "<<std::endl;
@ -162,7 +162,6 @@ int main (int argc, char ** argv)
}
double t1=usecond();
double t2;
double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + == 1146
std::cout<<GridLogMessage << "Called Ds"<<std::endl;
@ -289,7 +288,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<ImprovedStaggeredFermion5DR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<ImprovedStaggeredFermion5DD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -30,7 +30,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
using namespace std;
using namespace Grid;
;
int main (int argc, char ** argv)
{
@ -53,9 +52,9 @@ int main (int argc, char ** argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename NaiveStaggeredFermionR::FermionField FermionField;
typedef typename NaiveStaggeredFermionR::ComplexField ComplexField;
typename NaiveStaggeredFermionR::ImplParams params;
typedef typename NaiveStaggeredFermionD::FermionField FermionField;
typedef typename NaiveStaggeredFermionD::ComplexField ComplexField;
typename NaiveStaggeredFermionD::ImplParams params;
FermionField src (&Grid); random(pRNG,src);
FermionField result(&Grid); result=Zero();
@ -121,7 +120,7 @@ int main (int argc, char ** argv)
// ref = ref + mass * src;
}
NaiveStaggeredFermionR Ds(Umu,Grid,RBGrid,mass,c1,u0,params);
NaiveStaggeredFermionD Ds(Umu,Grid,RBGrid,mass,c1,u0,params);
std::cout<<GridLogMessage<<"=========================================================="<<std::endl;
@ -135,7 +134,6 @@ int main (int argc, char ** argv)
Ds.Dhop(src,result,0);
}
double t1=usecond();
double t2;
double flops=(16*(3*(6+8+8)) + 15*3*2)*volume*ncall; // == 66*16 + == 1146
std::cout<<GridLogMessage << "Called Ds"<<std::endl;
@ -260,7 +258,7 @@ int main (int argc, char ** argv)
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
SchurDiagMooeeOperator<NaiveStaggeredFermionR,FermionField> HermOpEO(Ds);
SchurDiagMooeeOperator<NaiveStaggeredFermionD,FermionField> HermOpEO(Ds);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -2,11 +2,12 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: ./benchmarks/Benchmark_wilson.cc
Source file: ./tests/core/Test_wilson_clover.cc
Copyright (C) 2015
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Fabian Joswig <fabian.joswig@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -51,8 +52,8 @@ int main(int argc, char **argv)
pRNG.SeedFixedIntegers(seeds);
// pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9});
typedef typename WilsonCloverFermionR::FermionField FermionField;
typename WilsonCloverFermionR::ImplParams params;
typedef typename WilsonCloverFermionD::FermionField FermionField;
typename WilsonCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
FermionField src(&Grid);
@ -67,8 +68,6 @@ int main(int argc, char **argv)
tmp = Zero();
FermionField err(&Grid);
err = Zero();
FermionField err2(&Grid);
err2 = Zero();
FermionField phi(&Grid);
random(pRNG, phi);
FermionField chi(&Grid);
@ -77,6 +76,8 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(pRNG, Umu);
std::vector<LatticeColourMatrix> U(4, &Grid);
double tolerance = 1e-4;
double volume = 1;
for (int mu = 0; mu < Nd; mu++)
{
@ -87,8 +88,8 @@ int main(int argc, char **argv)
RealD csw_r = 1.0;
RealD csw_t = 1.0;
WilsonCloverFermionR Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
//Dwc.ImportGauge(Umu); // not necessary, included in the constructor
WilsonCloverFermionD Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionD Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing that Deo + Doe = Dunprec " << std::endl;
@ -112,7 +113,24 @@ int main(int argc, char **argv)
setCheckerboard(r_eo, r_e);
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff " << norm2(err) << " " << norm2(ref) << " " << norm2(r_eo) << std::endl;
std::cout << GridLogMessage << "EO norm diff\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.Meooe(src_e, r_o);
std::cout << GridLogMessage << "Applied Meo" << std::endl;
Dwc_compact.Meooe(src_o, r_e);
std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dwc_compact.Dhop(src, ref, DaggerNo);
setCheckerboard(r_eo, r_o);
setCheckerboard(r_eo, r_e);
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff compact\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
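Both blocks above (standard and compact clover) check the same even-odd identity; schematically, restating what the code does with the parity-blocked hopping term:
\[
D_{\rm hop}=\begin{pmatrix} 0 & M_{eo}\\ M_{oe} & 0\end{pmatrix}
\;\Longrightarrow\;
(D_{\rm hop}\psi)_e = M_{eo}\,\psi_o,\qquad (D_{\rm hop}\psi)_o = M_{oe}\,\psi_e,
\]
so applying Meooe to each checkerboard of the source and recombining the two halves with setCheckerboard must reproduce Dhop on the full field, within the stated tolerance.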
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test Ddagger is the dagger of D by requiring " << std::endl;
@ -152,6 +170,22 @@ int main(int argc, char **argv)
std::cout << GridLogMessage << "pDce - conj(cDpo) " << pDce - conj(cDpo) << std::endl;
std::cout << GridLogMessage << "pDco - conj(cDpe) " << pDco - conj(cDpe) << std::endl;
Dwc_compact.Meooe(chi_e, dchi_o);
Dwc_compact.Meooe(chi_o, dchi_e);
Dwc_compact.MeooeDag(phi_e, dphi_o);
Dwc_compact.MeooeDag(phi_o, dphi_e);
pDce = innerProduct(phi_e, dchi_e);
pDco = innerProduct(phi_o, dchi_o);
cDpe = innerProduct(chi_e, dphi_e);
cDpo = innerProduct(chi_o, dphi_o);
std::cout << GridLogMessage << "e compact " << pDce << " " << cDpe << std::endl;
std::cout << GridLogMessage << "o compact " << pDco << " " << cDpo << std::endl;
std::cout << GridLogMessage << "pDce - conj(cDpo) compact " << pDce - conj(cDpo) << std::endl;
std::cout << GridLogMessage << "pDco - conj(cDpe) compact " << pDco - conj(cDpe) << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv Mee = 1 (if csw!=0) " << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
@ -169,7 +203,21 @@ int main(int argc, char **argv)
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.Mooee(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
Dwc_compact.Mooee(chi_o, src_o);
Dwc_compact.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeDag MeeInvDag = 1 (if csw!=0) " << std::endl;
@ -188,7 +236,21 @@ int main(int argc, char **argv)
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInvDag(src_e, phi_e);
Dwc_compact.MooeeDag(chi_o, src_o);
Dwc_compact.MooeeInvDag(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv MeeDag = 1 (if csw!=0) " << std::endl;
@ -207,7 +269,21 @@ int main(int argc, char **argv)
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
Dwc_compact.MooeeDag(chi_o, src_o);
Dwc_compact.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term with EO preconditioning " << std::endl;
@ -248,8 +324,8 @@ int main(int argc, char **argv)
}
/////////////////
WilsonCloverFermionR Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
Dwc_prime.ImportGauge(U_prime);
WilsonCloverFermionD Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonCloverFermionD Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
@ -262,7 +338,37 @@ int main(int argc, char **argv)
setCheckerboard(phi, phi_o);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
tmp = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
Dwc_compact.Mooee(src_e, chi_e);
Dwc_compact.Mooee(src_o, chi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
pickCheckerboard(Odd, src_o, tmp);
Dwc_compact_prime.Mooee(src_e, phi_e);
Dwc_compact_prime.Mooee(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "=================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term w/o EO preconditioning " << std::endl;
@ -271,22 +377,32 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
WilsonFermionR Dw(Umu, Grid, RBGrid, mass, params);
Dw.ImportGauge(Umu);
WilsonFermionD Dw(Umu, Grid, RBGrid, mass, params);
Dw.M(src, result);
Dwc.M(src, chi);
Dwc_prime.M(Omega * src, phi);
WilsonFermionR Dw_prime(U_prime, Grid, RBGrid, mass, params);
Dw_prime.ImportGauge(U_prime);
WilsonFermionD Dw_prime(U_prime, Grid, RBGrid, mass, params);
Dw_prime.M(Omega * src, result2);
err = result - adj(Omega) * result2;
std::cout << GridLogMessage << "norm diff Wilson " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
err = chi - adj(Omega) * phi;
err2 = result - adj(Omega) * result2;
std::cout << GridLogMessage << "norm diff Wilson " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff WilsonClover " << norm2(err2) << std::endl;
std::cout << GridLogMessage << "norm diff WilsonClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
Dwc_compact.M(src, chi);
Dwc_compact_prime.M(Omega * src, phi);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff CompactWilsonClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing Mooee(csw=0) Clover to reproduce Mooee Wilson " << std::endl;
@ -295,8 +411,7 @@ int main(int argc, char **argv)
chi = Zero();
phi = Zero();
err = Zero();
WilsonCloverFermionR Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
Dwc_csw0.ImportGauge(Umu);
WilsonCloverFermionD Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
@ -316,7 +431,34 @@ int main(int argc, char **argv)
setCheckerboard(src, src_o);
err = chi - phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
err = Zero();
CompactWilsonCloverFermionD Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dw.Mooee(src_e, chi_e);
Dw.Mooee(src_o, chi_o);
Dwc_compact_csw0.Mooee(src_e, phi_e);
Dwc_compact_csw0.Mooee(src_o, phi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
err = chi - phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing EO operator is equal to the unprec " << std::endl;
@ -348,9 +490,41 @@ int main(int argc, char **argv)
setCheckerboard(phi, phi_o);
err = ref - phi;
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff :" << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff :" << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff :" << norm2(err) << std::endl;
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
err = Zero();
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
// M phi = (Mooee src_e + Meooe src_o , Meooe src_e + Mooee src_o)
Dwc_compact.M(src, ref); // Reference result from the unpreconditioned operator
// EO matrix
Dwc_compact.Mooee(src_e, chi_e);
Dwc_compact.Mooee(src_o, chi_o);
Dwc_compact.Meooe(src_o, phi_e);
Dwc_compact.Meooe(src_e, phi_o);
phi_o += chi_o;
phi_e += chi_e;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = ref - phi;
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff compact : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff compact : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff compact : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Grid_finalize();
}

View File

@ -0,0 +1,253 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_cayley_cg.cc
Copyright (C) 2022
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Fabian Joswig <fabian.joswig@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
template<class What>
void TestConserved(What & Dw,
LatticeGaugeField &Umu,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
GridParallelRNG *RNG4);
Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::Gamma5
};
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG4(UGrid);
std::vector<int> seeds4({1,2,3,4}); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
if( argc > 1 && argv[1][0] != '-' )
{
std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
FieldMetaData header;
NerscIO::readConfiguration(Umu, header, argv[1]);
}
else
{
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
SU<Nc>::HotConfiguration(RNG4,Umu);
}
typename WilsonCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
RealD mass = 0.1;
RealD csw_r = 1.0;
RealD csw_t = 1.0;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonFermionD Dw(Umu,*UGrid,*UrbGrid,mass,params);
TestConserved<WilsonFermionD>(Dw,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonCloverFermionD Dwc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonCloverFermionD>(Dwc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"CompactWilsonCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
CompactWilsonCloverFermionD Dwcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonCloverFermionD>(Dwcc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"WilsonExpCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
WilsonExpCloverFermionD Dewc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, anis, params);
TestConserved<WilsonExpCloverFermionD>(Dewc,Umu,UGrid,UrbGrid,&RNG4);
std::cout<<GridLogMessage <<"=================================="<<std::endl;
std::cout<<GridLogMessage <<"CompactWilsonExpCloverFermion test"<<std::endl;
std::cout<<GridLogMessage <<"=================================="<<std::endl;
CompactWilsonExpCloverFermionD Dewcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, 1.0, anis, params);
TestConserved<CompactWilsonExpCloverFermionD>(Dewcc,Umu,UGrid,UrbGrid,&RNG4);
Grid_finalize();
}
template<class Action>
void TestConserved(Action & Dw,
LatticeGaugeField &Umu,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
GridParallelRNG *RNG4)
{
LatticePropagator phys_src(UGrid);
LatticePropagator seqsrc(UGrid);
LatticePropagator prop4(UGrid);
LatticePropagator Vector_mu(UGrid);
LatticeComplex SV (UGrid);
LatticeComplex VV (UGrid);
LatticePropagator seqprop(UGrid);
SpinColourMatrix kronecker; kronecker=1.0;
Coordinate coor({0,0,0,0});
phys_src=Zero();
pokeSite(kronecker,phys_src,coor);
ConjugateGradient<LatticeFermion> CG(1.0e-16,100000);
SchurRedBlackDiagTwoSolve<LatticeFermion> schur(CG);
ZeroGuesser<LatticeFermion> zpg;
for(int s=0;s<Nd;s++){
for(int c=0;c<Nc;c++){
LatticeFermion src4 (UGrid);
PropToFerm<Action>(src4,phys_src,s,c);
LatticeFermion result4(UGrid); result4=Zero();
schur(Dw,src4,result4,zpg);
std::cout<<GridLogMessage<<"spin "<<s<<" color "<<c<<" norm2(source4d) "<<norm2(src4)
<<" norm2(result4d) "<<norm2(result4)<<std::endl;
FermToProp<Action>(prop4,result4,s,c);
}
}
auto curr = Current::Vector;
const int mu_J=0;
const int t_J=0;
LatticeComplex ph (UGrid); ph=1.0;
Dw.SeqConservedCurrent(prop4,
seqsrc,
phys_src,
curr,
mu_J,
t_J,
t_J,// whole lattice
ph);
for(int s=0;s<Nd;s++){
for(int c=0;c<Nc;c++){
LatticeFermion src4 (UGrid);
PropToFerm<Action>(src4,seqsrc,s,c);
LatticeFermion result4(UGrid); result4=Zero();
schur(Dw,src4,result4,zpg);
FermToProp<Action>(seqprop,result4,s,c);
}
}
Gamma g5(Gamma::Algebra::Gamma5);
Gamma gT(Gamma::Algebra::GammaT);
std::vector<TComplex> sumSV;
std::vector<TComplex> sumVV;
Dw.ContractConservedCurrent(prop4,prop4,Vector_mu,phys_src,Current::Vector,Tdir);
SV = trace(Vector_mu); // Scalar-Vector conserved current
VV = trace(gT*Vector_mu); // (local) Vector-Vector conserved current
// Spatial sum
sliceSum(SV,sumSV,Tdir);
sliceSum(VV,sumVV,Tdir);
const int Nt{static_cast<int>(sumSV.size())};
std::cout<<GridLogMessage<<"Vector Ward identity by timeslice (~ 0)"<<std::endl;
for(int t=0;t<Nt;t++){
std::cout<<GridLogMessage <<" t "<<t<<" SV "<<real(TensorRemove(sumSV[t]))<<" VV "<<real(TensorRemove(sumVV[t]))<<std::endl;
assert(abs(real(TensorRemove(sumSV[t]))) < 1e-10);
assert(abs(real(TensorRemove(sumVV[t]))) < 1e-2);
}
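For context, the continuum relation this timeslice check probes is the vector Ward identity for the conserved (point-split) current; the exact lattice form of Grid's point-split current is assumed here rather than reproduced:
\[
\partial_\mu\,\bigl\langle V_\mu(x)\,\mathcal{O}(0)\bigr\rangle = \text{contact terms at } x=0,
\]
and the two assertions check, at the 1e-10 and 1e-2 levels respectively, that the timeslice sums built from this contraction are consistent with zero, as the code's own "(~ 0)" comment indicates.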
///////////////////////////////
// 3pt vs 2pt check
///////////////////////////////
{
Gamma::Algebra gA = Gamma::Algebra::Identity;
Gamma g(gA);
LatticePropagator cur(UGrid);
LatticePropagator tmp(UGrid);
LatticeComplex c(UGrid);
SpinColourMatrix qSite;
peekSite(qSite, seqprop, coor);
Complex test_S, test_V, check_S, check_V;
std::vector<TComplex> check_buf;
test_S = trace(qSite*g);
test_V = trace(qSite*g*Gamma::gmu[mu_J]);
Dw.ContractConservedCurrent(prop4,prop4,cur,phys_src,curr,mu_J);
c = trace(cur*g);
sliceSum(c, check_buf, Tp);
check_S = TensorRemove(check_buf[t_J]);
auto gmu=Gamma::gmu[mu_J];
c = trace(cur*g*gmu);
sliceSum(c, check_buf, Tp);
check_V = TensorRemove(check_buf[t_J]);
std::cout<<GridLogMessage << std::setprecision(14)<<"Test S = " << abs(test_S) << std::endl;
std::cout<<GridLogMessage << "Test V = " << abs(test_V) << std::endl;
std::cout<<GridLogMessage << "Check S = " << abs(check_S) << std::endl;
std::cout<<GridLogMessage << "Check V = " << abs(check_V) << std::endl;
// Check difference = 0
check_S = check_S - test_S;
check_V = check_V - test_V;
std::cout<<GridLogMessage << "Consistency check for sequential conserved " <<std::endl;
std::cout<<GridLogMessage << "Diff S = " << abs(check_S) << std::endl;
assert(abs(check_S) < 1e-8);
std::cout<<GridLogMessage << "Diff V = " << abs(check_V) << std::endl;
assert(abs(check_V) < 1e-8);
}
}
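The final block is the standard consistency check between a sequential-source propagator and a direct current insertion; in the variables used above it compares
\[
\mathrm{Tr}\bigl[S_{\rm seq}(0)\,\Gamma\bigr]
\;\;\text{against}\;\;
\sum_{\vec x}\mathrm{Tr}\bigl[\mathrm{cur}(\vec x,t_J)\,\Gamma\bigr],
\qquad \Gamma\in\{\mathbb{1},\,\gamma_{\mu_J}\},
\]
where S_seq is the sequential solve evaluated back at the point source (qSite) and cur is the output of ContractConservedCurrent; both the scalar and vector projections must agree to 1e-8.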

View File

@ -89,7 +89,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
WilsonFermionR Dw(Umu,Grid,RBGrid,mass);
WilsonFermionD Dw(Umu,Grid,RBGrid,mass);
LatticeFermion src_e (&RBGrid);
LatticeFermion src_o (&RBGrid);
@ -204,9 +204,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<WilsonFermionR,LatticeFermion> HermOpEO(Dw);
SchurDiagMooeeOperator<WilsonFermionD,LatticeFermion> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -0,0 +1,530 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/core/Test_wilson_exp_clover.cc
Copyright (C) 2022
Author: Guido Cossu <guido.cossu@ed.ac.uk>
Fabian Joswig <fabian.joswig@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
int main(int argc, char **argv)
{
Grid_init(&argc, &argv);
auto latt_size = GridDefaultLatt();
auto simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
auto mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size, simd_layout, mpi_layout);
GridRedBlackCartesian RBGrid(&Grid);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
std::cout << GridLogMessage << "Grid floating point word size is REALF" << sizeof(RealF) << std::endl;
std::cout << GridLogMessage << "Grid floating point word size is REALD" << sizeof(RealD) << std::endl;
std::cout << GridLogMessage << "Grid floating point word size is REAL" << sizeof(Real) << std::endl;
std::vector<int> seeds({1, 2, 3, 4});
GridParallelRNG pRNG(&Grid);
pRNG.SeedFixedIntegers(seeds);
  // pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
typedef typename WilsonExpCloverFermionD::FermionField FermionField;
typename WilsonExpCloverFermionD::ImplParams params;
WilsonAnisotropyCoefficients anis;
FermionField src(&Grid);
random(pRNG, src);
FermionField result(&Grid);
result = Zero();
FermionField result2(&Grid);
result2 = Zero();
FermionField ref(&Grid);
ref = Zero();
FermionField tmp(&Grid);
tmp = Zero();
FermionField err(&Grid);
err = Zero();
FermionField phi(&Grid);
random(pRNG, phi);
FermionField chi(&Grid);
random(pRNG, chi);
LatticeGaugeField Umu(&Grid);
SU<Nc>::HotConfiguration(pRNG, Umu);
std::vector<LatticeColourMatrix> U(4, &Grid);
double tolerance = 1e-4;
double volume = 1;
for (int mu = 0; mu < Nd; mu++)
{
volume = volume * latt_size[mu];
}
RealD mass = 0.1;
RealD csw_r = 1.0;
RealD csw_t = 1.0;
WilsonExpCloverFermionD Dwc(Umu, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionD Dwc_compact(Umu, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing that Deo + Doe = Dunprec " << std::endl;
std::cout << GridLogMessage << "==========================================================" << std::endl;
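  // Schematic of the identity being tested: with even/odd (checkerboard) ordering the
  // hopping term is strictly off-diagonal,
  //
  //   Dhop = (  0    Deo )
  //          ( Doe    0  )
  //
  // so applying Meooe to each parity of src and reassembling the two halves with
  // setCheckerboard should reproduce Dhop acting on the full field.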
FermionField src_e(&RBGrid);
FermionField src_o(&RBGrid);
FermionField r_e(&RBGrid);
FermionField r_o(&RBGrid);
FermionField r_eo(&Grid);
pickCheckerboard(Even, src_e, src);
pickCheckerboard(Odd, src_o, src);
Dwc.Meooe(src_e, r_o);
std::cout << GridLogMessage << "Applied Meo" << std::endl;
Dwc.Meooe(src_o, r_e);
std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dwc.Dhop(src, ref, DaggerNo);
setCheckerboard(r_eo, r_o);
setCheckerboard(r_eo, r_e);
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.Meooe(src_e, r_o);
std::cout << GridLogMessage << "Applied Meo" << std::endl;
Dwc_compact.Meooe(src_o, r_e);
std::cout << GridLogMessage << "Applied Moe" << std::endl;
Dwc_compact.Dhop(src, ref, DaggerNo);
setCheckerboard(r_eo, r_o);
setCheckerboard(r_eo, r_e);
err = ref - r_eo;
std::cout << GridLogMessage << "EO norm diff compact\t" << norm2(err) << " (" << norm2(ref) << " - " << norm2(r_eo) << ")" << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test Ddagger is the dagger of D by requiring " << std::endl;
std::cout << GridLogMessage << "= < phi | Deo | chi > * = < chi | Deo^dag| phi> " << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
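  // This realises the defining relation of the adjoint,
  //   < phi | Deo | chi >* = < chi | Deo^dag | phi >,
  // parity by parity: since Meooe maps even <-> odd, pDce pairs with cDpo and pDco
  // pairs with cDpe, which is why the printed differences take conjugates across
  // opposite checkerboards.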
FermionField chi_e(&RBGrid);
FermionField chi_o(&RBGrid);
FermionField dchi_e(&RBGrid);
FermionField dchi_o(&RBGrid);
FermionField phi_e(&RBGrid);
FermionField phi_o(&RBGrid);
FermionField dphi_e(&RBGrid);
FermionField dphi_o(&RBGrid);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
Dwc.Meooe(chi_e, dchi_o);
Dwc.Meooe(chi_o, dchi_e);
Dwc.MeooeDag(phi_e, dphi_o);
Dwc.MeooeDag(phi_o, dphi_e);
ComplexD pDce = innerProduct(phi_e, dchi_e);
ComplexD pDco = innerProduct(phi_o, dchi_o);
ComplexD cDpe = innerProduct(chi_e, dphi_e);
ComplexD cDpo = innerProduct(chi_o, dphi_o);
std::cout << GridLogMessage << "e " << pDce << " " << cDpe << std::endl;
std::cout << GridLogMessage << "o " << pDco << " " << cDpo << std::endl;
std::cout << GridLogMessage << "pDce - conj(cDpo) " << pDce - conj(cDpo) << std::endl;
std::cout << GridLogMessage << "pDco - conj(cDpe) " << pDco - conj(cDpe) << std::endl;
Dwc_compact.Meooe(chi_e, dchi_o);
Dwc_compact.Meooe(chi_o, dchi_e);
Dwc_compact.MeooeDag(phi_e, dphi_o);
Dwc_compact.MeooeDag(phi_o, dphi_e);
pDce = innerProduct(phi_e, dchi_e);
pDco = innerProduct(phi_o, dchi_o);
cDpe = innerProduct(chi_e, dphi_e);
cDpo = innerProduct(chi_o, dphi_o);
std::cout << GridLogMessage << "e compact " << pDce << " " << cDpe << std::endl;
std::cout << GridLogMessage << "o compact " << pDco << " " << cDpo << std::endl;
std::cout << GridLogMessage << "pDce - conj(cDpo) compact " << pDce - conj(cDpo) << std::endl;
std::cout << GridLogMessage << "pDco - conj(cDpe) compact " << pDco - conj(cDpe) << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv Mee = 1 (if csw!=0) " << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dwc.Mooee(chi_e, src_e);
Dwc.MooeeInv(src_e, phi_e);
Dwc.Mooee(chi_o, src_o);
Dwc.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.Mooee(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
Dwc_compact.Mooee(chi_o, src_o);
Dwc_compact.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeDag MeeInvDag = 1 (if csw!=0) " << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dwc.MooeeDag(chi_e, src_e);
Dwc.MooeeInvDag(src_e, phi_e);
Dwc.MooeeDag(chi_o, src_o);
Dwc.MooeeInvDag(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInvDag(src_e, phi_e);
Dwc_compact.MooeeDag(chi_o, src_o);
Dwc_compact.MooeeInvDag(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==============================================================" << std::endl;
std::cout << GridLogMessage << "= Test MeeInv MeeDag = 1 (if csw!=0) " << std::endl;
std::cout << GridLogMessage << "==============================================================" << std::endl;
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dwc.MooeeDag(chi_e, src_e);
Dwc.MooeeInv(src_e, phi_e);
Dwc.MooeeDag(chi_o, src_o);
Dwc.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Dwc_compact.MooeeDag(chi_e, src_e);
Dwc_compact.MooeeInv(src_e, phi_e);
Dwc_compact.MooeeDag(chi_o, src_o);
Dwc_compact.MooeeInv(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = phi - chi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term with EO preconditioning " << std::endl;
std::cout << GridLogMessage << "================================================================" << std::endl;
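  // Gauge covariance sketch: under a random gauge rotation Omega the links transform
  // as U'_mu(x) = Omega(x) U_mu(x) Omega^dag(x+mu), and a covariant operator built
  // from U' must satisfy M[U'] (Omega psi) = Omega (M[U] psi). Below this is checked
  // for the even/odd clover term by comparing Dwc_prime.Mooee acting on Omega*src
  // against Omega times Dwc.Mooee acting on src.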
chi = Zero();
phi = Zero();
tmp = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
Dwc.Mooee(src_e, chi_e);
Dwc.Mooee(src_o, chi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
////////////////////// Gauge Transformation
std::vector<int> seeds2({5, 6, 7, 8});
GridParallelRNG pRNG2(&Grid);
pRNG2.SeedFixedIntegers(seeds2);
LatticeColourMatrix Omega(&Grid);
LatticeColourMatrix ShiftedOmega(&Grid);
LatticeGaugeField U_prime(&Grid);
U_prime = Zero();
LatticeColourMatrix U_prime_mu(&Grid);
U_prime_mu = Zero();
SU<Nc>::LieRandomize(pRNG2, Omega, 1.0);
for (int mu = 0; mu < Nd; mu++)
{
U[mu] = peekLorentz(Umu, mu);
ShiftedOmega = Cshift(Omega, mu, 1);
U_prime_mu = Omega * U[mu] * adj(ShiftedOmega);
pokeLorentz(U_prime, U_prime_mu, mu);
}
/////////////////
WilsonExpCloverFermionD Dwc_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, anis, params);
CompactWilsonExpCloverFermionD Dwc_compact_prime(U_prime, Grid, RBGrid, mass, csw_r, csw_t, 1.0, anis, params);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
pickCheckerboard(Odd, src_o, tmp);
Dwc_prime.Mooee(src_e, phi_e);
Dwc_prime.Mooee(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
tmp = Zero();
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
Dwc_compact.Mooee(src_e, chi_e);
Dwc_compact.Mooee(src_o, chi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
tmp = Omega * src;
pickCheckerboard(Even, src_e, tmp);
pickCheckerboard(Odd, src_o, tmp);
Dwc_compact_prime.Mooee(src_e, phi_e);
Dwc_compact_prime.Mooee(src_o, phi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "=================================================================" << std::endl;
std::cout << GridLogMessage << "= Testing gauge covariance Clover term w/o EO preconditioning " << std::endl;
std::cout << GridLogMessage << "================================================================" << std::endl;
chi = Zero();
phi = Zero();
WilsonFermionD Dw(Umu, Grid, RBGrid, mass, params);
Dw.M(src, result);
Dwc.M(src, chi);
Dwc_prime.M(Omega * src, phi);
WilsonFermionD Dw_prime(U_prime, Grid, RBGrid, mass, params);
Dw_prime.M(Omega * src, result2);
err = result - adj(Omega) * result2;
std::cout << GridLogMessage << "norm diff Wilson " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff WilsonExpClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
Dwc_compact.M(src, chi);
Dwc_compact_prime.M(Omega * src, phi);
err = chi - adj(Omega) * phi;
std::cout << GridLogMessage << "norm diff CompactWilsonExpClover " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing Mooee(csw=0) Clover to reproduce Mooee Wilson " << std::endl;
std::cout << GridLogMessage << "==========================================================" << std::endl;
chi = Zero();
phi = Zero();
err = Zero();
WilsonExpCloverFermionD Dwc_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dw.Mooee(src_e, chi_e);
Dw.Mooee(src_o, chi_o);
Dwc_csw0.Mooee(src_e, phi_e);
Dwc_csw0.Mooee(src_o, phi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
err = chi - phi;
std::cout << GridLogMessage << "norm diff " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
err = Zero();
CompactWilsonExpCloverFermionD Dwc_compact_csw0(Umu, Grid, RBGrid, mass, 0.0, 0.0, 1.0, anis, params); // <-- Notice: csw=0
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
Dw.Mooee(src_e, chi_e);
Dw.Mooee(src_o, chi_o);
Dwc_compact_csw0.Mooee(src_e, phi_e);
Dwc_compact_csw0.Mooee(src_o, phi_o);
setCheckerboard(chi, chi_e);
setCheckerboard(chi, chi_o);
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
setCheckerboard(src, src_e);
setCheckerboard(src, src_o);
err = chi - phi;
std::cout << GridLogMessage << "norm diff compact " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
std::cout << GridLogMessage << "==========================================================" << std::endl;
std::cout << GridLogMessage << "= Testing EO operator is equal to the unprec " << std::endl;
std::cout << GridLogMessage << "==========================================================" << std::endl;
chi = Zero();
phi = Zero();
err = Zero();
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
// M phi = (Mooee src_e + Meooe src_o , Meooe src_e + Mooee src_o)
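  // Expanding the comment above: in even/odd block form
  //   M = ( Mee  Meo )
  //       ( Moe  Moo )
  // so (M src)_e = Mooee src_e + Meooe src_o and (M src)_o = Mooee src_o + Meooe src_e;
  // phi is assembled exactly this way below and compared against the unpreconditioned
  // Dwc.M(src, ref).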
Dwc.M(src, ref); // Reference result from the unpreconditioned operator
// EO matrix
Dwc.Mooee(src_e, chi_e);
Dwc.Mooee(src_o, chi_o);
Dwc.Meooe(src_o, phi_e);
Dwc.Meooe(src_e, phi_o);
phi_o += chi_o;
phi_e += chi_e;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = ref - phi;
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
chi = Zero();
phi = Zero();
err = Zero();
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd, phi_o, phi);
pickCheckerboard(Even, chi_e, chi);
pickCheckerboard(Odd, chi_o, chi);
// M phi = (Mooee src_e + Meooe src_o , Meooe src_e + Mooee src_o)
Dwc_compact.M(src, ref); // Reference result from the unpreconditioned operator
// EO matrix
Dwc_compact.Mooee(src_e, chi_e);
Dwc_compact.Mooee(src_o, chi_o);
Dwc_compact.Meooe(src_o, phi_e);
Dwc_compact.Meooe(src_e, phi_o);
phi_o += chi_o;
phi_e += chi_e;
setCheckerboard(phi, phi_e);
setCheckerboard(phi, phi_o);
err = ref - phi;
std::cout << GridLogMessage << "ref (unpreconditioned operator) diff compact : " << norm2(ref) << std::endl;
std::cout << GridLogMessage << "phi (EO decomposition) diff compact : " << norm2(phi) << std::endl;
std::cout << GridLogMessage << "norm diff compact : " << norm2(err) << std::endl;
assert(fabs(norm2(err)) < tolerance);
Grid_finalize();
}

View File

@ -90,7 +90,7 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD mu = 0.1;
WilsonTMFermionR Dw(Umu,Grid,RBGrid,mass,mu);
WilsonTMFermionD Dw(Umu,Grid,RBGrid,mass,mu);
LatticeFermion src_e (&RBGrid);
LatticeFermion src_o (&RBGrid);
@ -205,9 +205,8 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<WilsonTMFermionR,LatticeFermion> HermOpEO(Dw);
SchurDiagMooeeOperator<WilsonTMFermionD,LatticeFermion> HermOpEO(Dw);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -123,7 +123,7 @@ int main (int argc, char ** argv)
RealD _mass,RealD _M5,
std::vector<ComplexD> &gamma, RealD b,RealD c,const ImplParams &p= ImplParams()) :
*/
ZMobiusFermionR Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,RealD(1.),RealD(0.));
ZMobiusFermionD Ddwf(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,RealD(1.),RealD(0.));
LatticeFermion src_e (FrbGrid);
LatticeFermion src_o (FrbGrid);
@ -276,10 +276,9 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;
SchurDiagMooeeOperator<ZMobiusFermionR,LatticeFermion> HermOpEO(Ddwf);
SchurDiagMooeeOperator<ZMobiusFermionD,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
HermOpEO.MpcDagMpc(chi_o,dchi_o);

View File

@ -125,10 +125,10 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5);
TestCGinversions<DomainWallFermionR>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<DomainWallFermionR,DomainWallFermionF>(Ddwf,DdwfF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<DomainWallFermionD>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<DomainWallFermionD,DomainWallFermionF>(Ddwf,DdwfF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
@ -137,54 +137,54 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
MobiusFermionF DmobF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,b,c);
TestCGinversions<MobiusFermionR>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<MobiusFermionR,MobiusFermionF>(Dmob,DmobF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<MobiusFermionD>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<MobiusFermionD,MobiusFermionF>(Dmob,DmobF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestCGinversions<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestCGinversions<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestCGinversions<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusZolotarevFermionD Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestCGinversions<MobiusZolotarevFermionD>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<MobiusZolotarevFermionD>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionF DshamF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,2.0);
TestCGinversions<ScaledShamirFermionR>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<ScaledShamirFermionR,ScaledShamirFermionF>(Dsham,DshamF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<ScaledShamirFermionD>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<ScaledShamirFermionD,ScaledShamirFermionF>(Dsham,DshamF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ShamirZolotarevFermionD Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<ShamirZolotarevFermionD>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ShamirZolotarevFermionD>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionF DovF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,1.0);
TestCGinversions<OverlapWilsonCayleyTanhFermionR>(Dov,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<OverlapWilsonCayleyTanhFermionR,OverlapWilsonCayleyTanhFermionF>(Dov,DovF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<OverlapWilsonCayleyTanhFermionD>(Dov,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<OverlapWilsonCayleyTanhFermionD,OverlapWilsonCayleyTanhFermionF>(Dov,DovF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyZolotarevFermionD Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionD>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<OverlapWilsonCayleyZolotarevFermionD>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}

View File

@ -95,8 +95,8 @@ int main (int argc, char ** argv)
RealD mass=0.5;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionD,LatticeFermion> HermIndefOp(Ddwf);
HermIndefOp.Op(src,ref);
HermIndefOp.OpDiag(src,result);
@ -118,7 +118,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage<<"Calling Aggregation class" <<std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermDefOp(Ddwf);
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
Subspace Aggregates(Coarse5d,FGrid,cb);

View File

@ -76,41 +76,41 @@ int main (int argc, char ** argv)
RealD mass=0.1;
RealD M5 =1.8;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestWhat<DomainWallFermionR>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestWhat<DomainWallFermionD>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
std::vector<ComplexD> gamma(Ls,ComplexD(1.0,0.1));
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionR>(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestWhat<MobiusFermionD>(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionR>(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestWhat<ZMobiusFermionD>(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestWhat<MobiusZolotarevFermionR>(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusZolotarevFermionD Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestWhat<MobiusZolotarevFermionD>(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestWhat<ScaledShamirFermionR>(Dsham,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestWhat<ScaledShamirFermionD>(Dsham,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<ShamirZolotarevFermionR>(Dshamz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ShamirZolotarevFermionD Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<ShamirZolotarevFermionD>(Dshamz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonCayleyTanhFermionR>(Dov,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyTanhFermionD Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestWhat<OverlapWilsonCayleyTanhFermionD>(Dov,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<OverlapWilsonCayleyZolotarevFermionR>(Dovz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyZolotarevFermionD Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestWhat<OverlapWilsonCayleyZolotarevFermionD>(Dovz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}

View File

@ -83,8 +83,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Gamma5R5HermitianLinearOperator<DomainWallFermionD,LatticeFermion> HermIndefOp(Ddwf);
const int nbasis = 8;
@ -95,7 +95,7 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
MdagMLinearOperator<DomainWallFermionD,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid,cb);
Aggregates.CreateSubspace(RNG5,HermDefOp);

View File

@ -128,8 +128,8 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestConserved<DomainWallFermionR>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
TestConserved<DomainWallFermionD>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
@ -138,23 +138,23 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestConserved<MobiusFermionR>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
MobiusFermionD Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
TestConserved<MobiusFermionD>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestConserved<ScaledShamirFermionR>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
ScaledShamirFermionD Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
TestConserved<ScaledShamirFermionD>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
for(int s=0;s<Ls;s++) omegasrev[s]=conjugate(omegas[Ls-1-s]);
// for(int s=0;s<Ls;s++) omegasrev[s]=omegas[Ls-1-s];
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegas,b,c);
ZMobiusFermionR ZDmobrev(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegasrev,b,c);
TestConserved<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5,&ZDmobrev);
ZMobiusFermionD ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegas,b,c);
ZMobiusFermionD ZDmobrev(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,omegasrev,b,c);
TestConserved<ZMobiusFermionD>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5,&ZDmobrev);
Grid_finalize();
}
@ -290,7 +290,7 @@ void TestConserved(Action & Ddwf,
const RealD DmuPAmu{real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt]))};
std::cout<<GridLogMessage<<" t "<<t<<" DmuPAmu "<<DmuPAmu
<<" PP "<<real(TensorRemove(sumPP[t]))<<" PJ5q "<<real(TensorRemove(sumPJ5q[t]))
<<" Ward Identity defect " <<(DmuPAmu - 2.*real(TensorRemove(Ddwf.mass*sumPP[t] + sumPJ5q[t])))<<std::endl;
<<" Ward Identity defect " <<(DmuPAmu - 2.*real(TensorRemove(Ddwf.Mass()*sumPP[t] + sumPJ5q[t])))<<std::endl;
}
///////////////////////////////
@ -539,7 +539,7 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
PA = trace(g5*Axial_mu);
PP = trace(adj(prop4)*prop4);
Defect = Defect - 2.0*Ddwf.mass* PP;
Defect = Defect - 2.0*Ddwf.Mass()* PP;
Defect = Defect - 2.0*PJ5q;
std::vector<TComplex> sumPAref;
@ -565,8 +565,8 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
std::cout <<" PAc action "<<real(TensorRemove(sumPA[t]));
std::cout <<" PJ5q ref "<<real(TensorRemove(sumPJ5qref[t]));
std::cout <<" PJ5q action "<<real(TensorRemove(sumPJ5q[t]));
std::cout <<"WTI defects "<<real(TensorRemove(sumPAref[t]-sumPAref[(t-1+Nt)%Nt] - 2.0*(Ddwf.mass*sumPP[t] + sumPJ5q[t]) ))<<",";
std::cout <<real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt] - 2.0*(Ddwf.mass*sumPP[t] + sumPJ5q[t]) ))<<"\n";
std::cout <<"WTI defects "<<real(TensorRemove(sumPAref[t]-sumPAref[(t-1+Nt)%Nt] - 2.0*(Ddwf.Mass()*sumPP[t] + sumPJ5q[t]) ))<<",";
std::cout <<real(TensorRemove(sumPA[t]-sumPA[(t-1+Nt)%Nt] - 2.0*(Ddwf.Mass()*sumPP[t] + sumPJ5q[t]) ))<<"\n";
}
}
#endif
@ -600,7 +600,7 @@ void TestConserved1(Action & Ddwf, Action & Ddwfrev,
// Dperp
{
RealD diag = 5.0 - Ddwf.M5;
mass = Ddwf.mass;
mass = Ddwf.Mass();
autoView( psi,result5,CpuRead);
autoView( chi,tmp, CpuWrite);
thread_for(sss,UGrid->oSites(),{

View File

@ -77,8 +77,8 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
DomainWallEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5);
DomainWallEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5);
DomainWallEOFAFermionD Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5);
DomainWallEOFAFermionD Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5);
// Construct the action and test the heatbath (zero initial guess)
{

View File

@ -41,7 +41,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallEOFAFermionR FermionAction;
typedef GparityDomainWallEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Parameters for test
@ -82,7 +82,7 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
// GparityDomainWallFermionR::ImplParams params;
// GparityDomainWallFermionD::ImplParams params;
FermionAction::ImplParams params;
FermionAction Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, params);
FermionAction Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, params);

View File

@ -79,8 +79,8 @@ int main(int argc, char** argv)
LatticeGaugeField Umu(UGrid);
SU<Nc>::HotConfiguration(RNG4, Umu);
MobiusEOFAFermionR Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Lop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mpv, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Rop(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mpv, mf, mpv, -1.0, 1, M5, b, c);
// Construct the action and test the heatbath (zero initial guess)
{

View File

@ -41,7 +41,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityMobiusEOFAFermionR FermionAction;
typedef GparityMobiusEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Parameters for test

View File

@ -0,0 +1,184 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_padded_cell.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
using namespace std;
using namespace Grid;
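// Helper used in the plaquette loop below: a GeneralLocalStencil entry carries a
// permutation mask when the neighbouring site sits in a different SIMD lane, and
// gpermute applies the corresponding permute() in each flagged direction so the
// fetched object matches the lane ordering of the local site.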
template<class vobj> void gpermute(vobj & inout,int perm){
vobj tmp=inout;
if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;}
if (perm & 0x2 ) { permute(inout,tmp,1); tmp=inout;}
if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;}
if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout= GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
std::cout << " mpi "<<mpi_layout<<std::endl;
std::cout << " simd "<<simd_layout<<std::endl;
std::cout << " latt "<<latt_size<<std::endl;
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
LatticeGaugeField Umu(&GRID);
SU<Nc>::HotConfiguration(pRNG,Umu);
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Umu);
LatticeComplex trplaq(&GRID);
std::vector<LatticeColourMatrix> U(Nd, Umu.Grid());
for (int mu = 0; mu < Nd; mu++) {
U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
}
std::cout << GridLogMessage << " Average plaquette "<<plaq<<std::endl;
LatticeComplex cplaq(&GRID); cplaq=Zero();
/////////////////////////////////////////////////
// Create a padded cell of extra padding depth=1
/////////////////////////////////////////////////
int depth = 1;
PaddedCell Ghost(depth,&GRID);
LatticeGaugeField Ughost = Ghost.Exchange(Umu);
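  // Exchange is assumed to return a copy of Umu on an enlarged local grid with a halo
  // of width `depth` gathered from neighbouring ranks, so the plaquette stencil below
  // can be evaluated without further communication.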
///////////////////////////////////////////////////////////////////
// Temporary debug Hack for single rank sim:
  // Check the contents of the cell are periodically replicated
// In future ONLY pad those dimensions that are not local to node
///////////////////////////////////////////////////////////////////
#if 0
{
double diff=0;
double n=0;
{
autoView( Ug_v , Ughost, CpuRead);
autoView( Ul_v , Umu , CpuRead);
for(int x=0;x<latt_size[0]+2;x++){
for(int y=0;y<latt_size[1]+2;y++){
for(int z=0;z<latt_size[2]+2;z++){
for(int t=0;t<latt_size[3]+2;t++){
int lx=(x-1+latt_size[0])%latt_size[0];
int ly=(y-1+latt_size[1])%latt_size[1];
int lz=(z-1+latt_size[2])%latt_size[2];
int lt=(t-1+latt_size[3])%latt_size[3];
Coordinate gcoor({x,y,z,t});
Coordinate lcoor({lx,ly,lz,lt});
LorentzColourMatrix g;
LorentzColourMatrix l;
peekLocalSite(g,Ug_v,gcoor);
peekLocalSite(l,Ul_v,lcoor);
g=g-l;
assert(norm2(g)==0);
diff = diff + norm2(g);
n = n + norm2(l);
}}}}
}
std::cout << "padded field check diff "<< diff <<" / "<< n<<std::endl;
std::cout << norm2(Ughost)<< " " << norm2(Umu)<<std::endl;
}
#endif
///// Array for the site plaquette
GridBase *GhostGrid = Ughost.Grid();
LatticeComplex gplaq(GhostGrid);
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
// Umu(x) Unu(x+mu) Umu^dag(x+nu) Unu^dag(x)
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
shifts.push_back(shift_0);
shifts.push_back(shift_mu);
shifts.push_back(shift_nu);
shifts.push_back(shift_0);
}
}
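  // Four shifts are registered per (mu,nu) plane, in the order x, x+mu, x+nu, x,
  // matching the four links of the plaquette
  //   U_mu(x) U_nu(x+mu) U_mu^dag(x+nu) U_nu^dag(x)
  // fetched via GetEntry(s+0..s+3,ss) in the loop below.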
GeneralLocalStencil gStencil(GhostGrid,shifts);
gplaq=Zero();
{
autoView( gp_v , gplaq, CpuWrite);
autoView( t_v , trplaq, CpuRead);
autoView( U_v , Ughost, CpuRead);
for(int ss=0;ss<gp_v.size();ss++){
int s=0;
for(int mu=0;mu<Nd;mu++){
for(int nu=mu+1;nu<Nd;nu++){
auto SE0 = gStencil.GetEntry(s+0,ss);
auto SE1 = gStencil.GetEntry(s+1,ss);
auto SE2 = gStencil.GetEntry(s+2,ss);
auto SE3 = gStencil.GetEntry(s+3,ss);
int o0 = SE0->_offset;
int o1 = SE1->_offset;
int o2 = SE2->_offset;
int o3 = SE3->_offset;
auto U0 = U_v[o0](mu);
auto U1 = U_v[o1](nu);
auto U2 = adj(U_v[o2](mu));
auto U3 = adj(U_v[o3](nu));
gpermute(U0,SE0->_permute);
gpermute(U1,SE1->_permute);
gpermute(U2,SE2->_permute);
gpermute(U3,SE3->_permute);
gp_v[ss]() =gp_v[ss]() + trace( U0*U1*U2*U3 );
s=s+4;
}
}
}
}
cplaq = Ghost.Extract(gplaq);
RealD vol = cplaq.Grid()->gSites();
RealD faces = (Nd * (Nd-1))/2;
auto p = TensorRemove(sum(cplaq));
auto result = p.real()/vol/faces/Nc;
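  // Normalisation sketch: sum(cplaq) accumulates tr(plaquette) over all sites and all
  // Nd(Nd-1)/2 planes, so dividing the real part by the volume, the number of planes
  // and Nc should reproduce the WilsonLoops::avgPlaquette convention checked below.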
std::cout << GridLogMessage << " Average plaquette via padded cell "<<result<<std::endl;
std::cout << GridLogMessage << " Diff "<<result-plaq<<std::endl;
assert(fabs(result-plaq)<1.0e-8);
Grid_finalize();
}

View File

@ -105,10 +105,10 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
DomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5);
DomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5);
SchurDiagMooeeOperator<DomainWallFermionR, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<DomainWallFermionR, LatticeFermion> VdagV(Ddwf_b);
DomainWallFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5);
DomainWallFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5);
SchurDiagMooeeOperator<DomainWallFermionD, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<DomainWallFermionD, LatticeFermion> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -153,10 +153,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
DomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
DomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
MdagMLinearOperator<DomainWallEOFAFermionR, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<DomainWallEOFAFermionR, LatticeFermion> RdagR(Deofa_R);
DomainWallEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5);
DomainWallEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5);
MdagMLinearOperator<DomainWallEOFAFermionD, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<DomainWallEOFAFermionD, LatticeFermion> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
// parameters for test
const std::vector<int> grid_dim = { 8, 8, 8, 8 };
@ -107,11 +107,11 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
GparityDomainWallFermionR::ImplParams params;
GparityDomainWallFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, params);
GparityDomainWallFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, params);
SchurDiagMooeeOperator<GparityDomainWallFermionR, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityDomainWallFermionR, FermionField> VdagV(Ddwf_b);
GparityDomainWallFermionD::ImplParams params;
GparityDomainWallFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, params);
GparityDomainWallFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, params);
SchurDiagMooeeOperator<GparityDomainWallFermionD, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityDomainWallFermionD, FermionField> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -156,10 +156,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
GparityDomainWallEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, params);
GparityDomainWallEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, params);
MdagMLinearOperator<GparityDomainWallEOFAFermionR, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityDomainWallEOFAFermionR, FermionField> RdagR(Deofa_R);
GparityDomainWallEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, params);
GparityDomainWallEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, params);
MdagMLinearOperator<GparityDomainWallEOFAFermionD, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityDomainWallEOFAFermionD, FermionField> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -107,10 +107,10 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
MobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c);
MobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c);
SchurDiagMooeeOperator<MobiusFermionR, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<MobiusFermionR, LatticeFermion> VdagV(Ddwf_b);
MobiusFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c);
MobiusFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c);
SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> MdagM(Ddwf_f);
SchurDiagMooeeOperator<MobiusFermionD, LatticeFermion> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -155,10 +155,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
MobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c);
MobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c);
MdagMLinearOperator<MobiusEOFAFermionR, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<MobiusEOFAFermionR, LatticeFermion> RdagR(Deofa_R);
MobiusEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c);
MobiusEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c);
MdagMLinearOperator<MobiusEOFAFermionD, LatticeFermion> LdagL(Deofa_L);
MdagMLinearOperator<MobiusEOFAFermionD, LatticeFermion> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

View File

@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
// parameters for test
const std::vector<int> grid_dim = { 8, 8, 8, 8 };
@ -109,11 +109,11 @@ int main(int argc, char **argv)
SU<Nc>::HotConfiguration(RNG4, Umu);
// Initialize RHMC fermion operators
GparityDomainWallFermionR::ImplParams params;
GparityMobiusFermionR Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c, params);
GparityMobiusFermionR Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c, params);
SchurDiagMooeeOperator<GparityMobiusFermionR, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityMobiusFermionR, FermionField> VdagV(Ddwf_b);
GparityDomainWallFermionD::ImplParams params;
GparityMobiusFermionD Ddwf_f(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, M5, b, c, params);
GparityMobiusFermionD Ddwf_b(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, M5, b, c, params);
SchurDiagMooeeOperator<GparityMobiusFermionD, FermionField> MdagM(Ddwf_f);
SchurDiagMooeeOperator<GparityMobiusFermionD, FermionField> VdagV(Ddwf_b);
// Degree 12 rational approximations to x^(1/4) and x^(-1/4)
double lo = 0.0001;
@ -158,10 +158,10 @@ int main(int argc, char **argv)
RealD shift_L = 0.0;
RealD shift_R = -1.0;
int pm = 1;
GparityMobiusEOFAFermionR Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c, params);
GparityMobiusEOFAFermionR Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c, params);
MdagMLinearOperator<GparityMobiusEOFAFermionR, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityMobiusEOFAFermionR, FermionField> RdagR(Deofa_R);
GparityMobiusEOFAFermionD Deofa_L(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, shift_L, pm, M5, b, c, params);
GparityMobiusEOFAFermionD Deofa_R(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, shift_R, pm, M5, b, c, params);
MdagMLinearOperator<GparityMobiusEOFAFermionD, FermionField> LdagL(Deofa_L);
MdagMLinearOperator<GparityMobiusEOFAFermionD, FermionField> RdagR(Deofa_R);
// Stochastically estimate reweighting factor via EOFA
RealD k = Deofa_L.k;

tests/forces/Test_bdy.cc (new file, 305 lines)
View File

@ -0,0 +1,305 @@
/*
2f Full det MdagM 10^6 force ~ 1.3e7
Grid : Message : 1767.283471 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1767.283476 s : S1 : 1.52885e+09
Grid : Message : 1767.283480 s : S2 : 1.52886e+09
Grid : Message : 1767.283482 s : dS : 8877.34
Grid : Message : 1767.283483 s : dSpred : 8877.7
Grid : Message : 1767.283484 s : diff : -0.360484
Grid : Message : 1767.283485 s : *********************************************************
2f Full det MpcdagMpc 10^6 force ~ 1.8e6
Grid : Message : 2399.576962 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2399.576968 s : S1 : 1.52885e+09
Grid : Message : 2399.576972 s : S2 : 1.52886e+09
Grid : Message : 2399.576974 s : dS : 9728.49
Grid : Message : 2399.576975 s : dSpred : 9726.58
Grid : Message : 2399.576976 s : diff : 1.90683
Grid : Message : 2399.576977 s : *********************************************************
2f bdy MdagM 1500 force Force ~ 2800
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
2f bdy MpcdagMpc 10^6 force Force ~ 2200
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
1f Bdy Det
Optimisation log: looser rational AND MD tolerances (sloppy)
MobiusForce.221179 -- same as HMC; dS is mispredicted. Force ~ 2.8
Grid : Message : 6582.258991 s : dS : 0.024478
Grid : Message : 6582.258992 s : dSpred : 0.00791876
Grid : Message : 6582.258994 s : diff : 0.0165592
MobiusForce.221193 -- tight rational AND MD tolerances to 1e-8 ~ 2.8 same
Grid : Message : 1964.939209 s : S1 : 7.64404e+08
Grid : Message : 1964.939213 s : S2 : 7.64404e+08
Grid : Message : 1964.939215 s : dS : -0.00775838 <--- too loose even on action
Grid : Message : 1964.939216 s : dSpred : -0.00416793
Grid : Message : 1964.939217 s : diff : -0.00359045
MobiusForce.221394 -- looser rational, MD tol 1e-8 ~ 2.8 same
Grid : Message : 1198.346720 s : S1 : 764404649.48886
Grid : Message : 1198.346760 s : S2 : 764404649.5133
Grid : Message : 1198.346780 s : dS : 0.024440884590149
Grid : Message : 1198.346800 s : dSpred : 0.0079145154465184
Grid : Message : 1198.346810 s : diff : 0.016526369143631
MobiusForce.221394 -- tight rational, MD tol sloppy Force ~ 2.8
Grid : Message : 2376.921950 s : S1 : 764404436.44069
Grid : Message : 2376.921954 s : S2 : 764404436.43299
Grid : Message : 2376.921956 s : dS : -0.0076971054077148
Grid : Message : 2376.921958 s : dSpred : -0.0041610472282526
Grid : Message : 2376.921959 s : diff : -0.0035360581794623
*/
//
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_double_ratio.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
typedef MobiusFermionD FermionAction;
typedef WilsonImplD FimplD;
typedef WilsonImplD FermionImplPolicy;
template<class Gimpl>
void ForceTest(Action<LatticeGaugeField> &action,LatticeGaugeField & U,MomentumFilterBase<LatticeGaugeField> &Filter)
{
GridBase *UGrid = U.Grid();
std::vector<int> seeds({1,2,3,5});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
LatticeColourMatrix Pmu(UGrid);
LatticeGaugeField P(UGrid);
LatticeGaugeField UdSdU(UGrid);
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
std::cout << GridLogMessage << " Force test for "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
RealD eps=0.005;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Refresh "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
Gimpl::generate_momenta(P,sRNG,RNG4);
Filter.applyFilter(P);
#if 0
FieldMetaData header;
std::string file("./ckpoint_lat.2000");
NerscIO::readConfiguration(U,header,file);
#else
U = 1.0;
#endif
action.refresh(U,sRNG,RNG4);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S1 = action.S(U);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Derivative "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
action.deriv(U,UdSdU);
UdSdU = Ta(UdSdU);
Filter.applyFilter(UdSdU);
DumpSliceNorm("Force",UdSdU,Nd-1);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S2 = action.S(U);
// Use the derivative
LatticeComplex dS(UGrid); dS = Zero();
for(int mu=0;mu<Nd;mu++){
auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu);
Pmu= PeekIndex<LorentzIndex>(P,mu);
dS = dS - trace(Pmu*UdSdUmu)*eps*2.0*2.0;
}
ComplexD dSpred = sum(dS);
RealD diff = S2-S1-dSpred.real();
std::cout<< GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout<< GridLogMessage << "S1 : "<< S1 <<std::endl;
std::cout<< GridLogMessage << "S2 : "<< S2 <<std::endl;
std::cout<< GridLogMessage << "dS : "<< S2-S1 <<std::endl;
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
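// A minimal sketch (hypothetical helper, not called by the test above) of the prediction
// that ForceTest assembles inline: after two update_field(P,U,eps) steps the leading-order
// change of the action is
//   dSpred = - 2 * (2*eps) * sum_{x,mu} Re tr[ P_mu(x) * (U dS/dU)_mu(x) ]
// which is where the eps*2.0*2.0 normalisation in the loop above comes from.
inline RealD PredictDeltaS(LatticeGaugeField &P, LatticeGaugeField &UdSdU, RealD eps)
{
  GridBase *grid = P.Grid();
  LatticeComplex dS(grid); dS = Zero();
  LatticeColourMatrix Pmu(grid);
  for(int mu=0;mu<Nd;mu++){
    auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu); // force component for direction mu
    Pmu = PeekIndex<LorentzIndex>(P,mu);              // conjugate momentum for direction mu
    dS  = dS - trace(Pmu*UdSdUmu)*eps*2.0*2.0;        // same sign and normalisation as ForceTest
  }
  ComplexD dSpred = sum(dS);
  return dSpred.real();
}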
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::cout << std::setprecision(14);
Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate shm;
GlobalSharedMemory::GetShmDims(mpi_layout,shm);
const int Ls=12;
const int Nt = latt_size[3];
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
////////////////////////////////////////////////////////////////
// Domain decomposed operator
////////////////////////////////////////////////////////////////
Coordinate CommDim(Nd);
for(int d=0;d<Nd;d++) CommDim[d]= (mpi_layout[d]/shm[d])>1 ? 1 : 0;
Coordinate NonDirichlet(Nd+1,0);
Coordinate Dirichlet(Nd+1,0);
Dirichlet[1] = CommDim[0]*latt_size[0]/mpi_layout[0] * shm[0];
Dirichlet[2] = CommDim[1]*latt_size[1]/mpi_layout[1] * shm[1];
Dirichlet[3] = CommDim[2]*latt_size[2]/mpi_layout[2] * shm[2];
Dirichlet[4] = CommDim[3]*latt_size[3]/mpi_layout[3] * shm[3];
Coordinate Block4(Nd);
Block4[0] = Dirichlet[1];
Block4[1] = Dirichlet[2];
Block4[2] = Dirichlet[3];
Block4[3] = Dirichlet[4];
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionAction::ImplParams ParamsDir(boundary);
Params.dirichlet=NonDirichlet;
ParamsDir.dirichlet=Dirichlet;
ParamsDir.partialDirichlet=1;
///////////////////// Gauge Field and Gauge Forces ////////////////////////////
LatticeGaugeField U(UGrid);
RealD beta=6.0;
WilsonGaugeActionR PlaqAction(beta);
IwasakiGaugeActionR RectAction(beta);
MomentumFilterNone<LatticeGaugeField> FilterNone;
ForceTest<GimplTypesR>(PlaqAction,U,FilterNone);
ForceTest<GimplTypesR>(RectAction,U,FilterNone);
////////////////////////////////////
// Action
////////////////////////////////////
RealD mass=0.00078;
RealD pvmass=1.0;
RealD M5=1.8;
RealD b=1.5;
RealD c=0.5;
// Double versions
FermionAction DdwfPeriodic(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,Params);
FermionAction PVPeriodic (U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,pvmass,M5,b,c,Params);
FermionAction DdwfDirichlet(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,ParamsDir);
double StoppingCondition = 1.0e-8;
double MaxCGIterations = 50000;
ConjugateGradient<LatticeFermion> CG(StoppingCondition,MaxCGIterations);
//////////////////// Two Flavour Determinant Ratio ///////////////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> Nf2(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2,U,FilterNone);
//////////////////// Two Flavour Determinant force test Even Odd ///////////////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> Nf2eo(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2eo,U,FilterNone);
//////////////////// Domain forces ////////////////////
int Width=4;
DDHMCFilter<WilsonImplD::Field> DDHMCFilter(Block4,Width);
//////////////////// Two flavour boundary det ////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> BdyNf2(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2,U,DDHMCFilter);
//////////////////// Two flavour eo boundary det ////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> BdyNf2eo(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2eo,U,DDHMCFilter);
//////////////////// One flavour boundary det ////////////////////
OneFlavourRationalParams OFRp; // Up/down
OFRp.lo = 4.0e-5;
OFRp.hi = 90.0;
OFRp.MaxIter = 60000;
OFRp.tolerance= 1.0e-8;
OFRp.mdtolerance= 1.0e-6;
OFRp.degree = 18;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
std::vector<RealD> ActionTolByPole({
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8
});
std::vector<RealD> MDTolByPole({
1.0e-6,3.0e-7,1.0e-7,1.0e-7, // Orig sloppy
// 1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8
});
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> BdySqrt(DdwfDirichlet,DdwfPeriodic,OFRp);
ForceTest<GimplTypesR>(BdySqrt,U,DDHMCFilter);
Grid_finalize();
}

View File

@ -66,7 +66,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
OverlapWilsonContFracTanhFermionR Dcf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonContFracTanhFermionD Dcf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
Dcf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -0,0 +1,542 @@
/*
2f Full det MdagM 10^6 force ~ 1.3e7
Grid : Message : 1767.283471 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1767.283476 s : S1 : 1.52885e+09
Grid : Message : 1767.283480 s : S2 : 1.52886e+09
Grid : Message : 1767.283482 s : dS : 8877.34
Grid : Message : 1767.283483 s : dSpred : 8877.7
Grid : Message : 1767.283484 s : diff : -0.360484
Grid : Message : 1767.283485 s : *********************************************************
2f Full det MpcdagMpc 10^6 force ~ 1.8e6
Grid : Message : 2399.576962 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2399.576968 s : S1 : 1.52885e+09
Grid : Message : 2399.576972 s : S2 : 1.52886e+09
Grid : Message : 2399.576974 s : dS : 9728.49
Grid : Message : 2399.576975 s : dSpred : 9726.58
Grid : Message : 2399.576976 s : diff : 1.90683
Grid : Message : 2399.576977 s : *********************************************************
2f bdy MdagM 1500 Force ~ 2800
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
2f bdy MpcdagMpc 10^6 Force ~ 2200
Grid : Message : 4622.385061 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 4622.385067 s : S1 : 1.52885e+09
Grid : Message : 4622.385071 s : S2 : 1.52885e+09
Grid : Message : 4622.385072 s : dS : 25.4944
Grid : Message : 4622.385073 s : dSpred : 25.4672
Grid : Message : 4622.385074 s : diff : 0.0271414
Grid : Message : 4622.385075 s : *********************************************************
1f Bdy Det
//
// These all had tol set by OFRp, not through MDpoles
// So the assumption that it was Remez might be wrong.
//
Optimisation log: looser rational AND MD tolerances sloppy
MobiusForce.221179 -- same as HMC. dS is mispredicted. Force ~ 2.8
Grid : Message : 6582.258991 s : dS : 0.024478
Grid : Message : 6582.258992 s : dSpred : 0.00791876
Grid : Message : 6582.258994 s : diff : 0.0165592
MobiusForce.221193 -- tight rational AND MD tolerances to 1e-8 -- Force ~ 2.8, same
Grid : Message : 1964.939209 s : S1 : 7.64404e+08
Grid : Message : 1964.939213 s : S2 : 7.64404e+08
Grid : Message : 1964.939215 s : dS : -0.00775838 <--- too loose even on action
Grid : Message : 1964.939216 s : dSpred : -0.00416793
Grid : Message : 1964.939217 s : diff : -0.00359045
MobiusForce.221394 -- tight rational, MD tol sloppy Force ~ 2.8
Grid : Message : 2376.921950 s : S1 : 764404436.44069
Grid : Message : 2376.921954 s : S2 : 764404436.43299
Grid : Message : 2376.921956 s : dS : -0.0076971054077148
Grid : Message : 2376.921958 s : dSpred : -0.0041610472282526
Grid : Message : 2376.921959 s : diff : -0.0035360581794623
MobiusForce.221587 -- slightly sloppier action, coming from tol array
-- much sloppier force
-- degree 18
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-6,3.0e-7,1.0e-7,1.0e-7, // Orig sloppy
Grid : Message : 2438.875507 s : S1 : 764404436.42251
Grid : Message : 2438.875512 s : S2 : 764404436.4148
Grid : Message : 2438.875514 s : dS : -0.0077102184295654
Grid : Message : 2438.875516 s : dSpred : -0.0075684496959103
Grid : Message : 2438.875517 s : diff : -0.00014176873365508
MobiusForce.221639 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence more
Grid : Message : 2373.927550 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2373.927600 s : S1 : 764404436.42251
Grid : Message : 2373.927640 s : S2 : 764404436.4148
Grid : Message : 2373.927660 s : dS : -0.0077102184295654
Grid : Message : 2373.927680 s : dSpred : -0.0075993463919849
Grid : Message : 2373.927690 s : diff : -0.00011087203758051
Grid : Message : 2373.927700 s : *********************************************************
Grid : Message : 69.269319 s : ApproxPowerMD shift[0] pole 9.5166866092503e-06 residue -2.0047722631555e-08 tol 3e-06
Grid : Message : 69.269321 s : ApproxPowerMD shift[1] pole 4.7123486192778e-05 residue -1.316766030683e-07 tol 1e-06
Grid : Message : 69.269323 s : ApproxPowerMD shift[2] pole 0.00014860967743736 residue -6.109883117444e-07 tol 1e-07
Grid : Message : 69.269325 s : ApproxPowerMD shift[3] pole 0.00041055696132763 residue -2.6088717433891e-06 tol 1e-07
Grid : Message : 69.269327 s : ApproxPowerMD shift[4] pole 0.0010822555692906 residue -1.0853799412802e-05 tol 1e-08
Grid : Message : 69.269329 s : ApproxPowerMD shift[5] pole 0.0028029613512087 residue -4.4741734470158e-05 tol 1e-08
Grid : Message : 69.269331 s : ApproxPowerMD shift[6] pole 0.0072103567378527 residue -0.00018380499193253 tol 1e-08
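(The per-shift "tol" values above are the MDTolByPole entries being handed to the multishift
 solver -- loosest for the smallest shifts, tightening towards 1e-8 for the rest.)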
MobiusForce.221887
1.0e-5,3.0e-6,3.0e-7,1.0e-7, // soften convergence more more
// <-- this is the Dirichlet solve, why poorer conditioned???
Grid : Message : 1627.226206 s : ConjugateGradientMultiShift k=3643 Shift 3 has converged
Grid : Message : 1667.373045 s : ConjugateGradientMultiShift k=5381 Shift 2 has converged
Grid : Message : 1705.236992 s : ConjugateGradientMultiShift k=7063 Shift 1 has converged
Grid : Message : 1752.493182 s : ConjugateGradientMultiShift k=9220 Shift 0 has converged
//
//Grid : Message : 1414.837250 s : OneFlavourEvenOddRatioRationalPseudoFermionAction deriv: doing (M^dag M)^{-1/2} ( (V^dag V)^{1/4} Phi)
Grid : Message : 1523.416680 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1530.798503 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1536.153421 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged <-- this is the non-Dirichlet solve
Grid : Message : 2339.927565 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2339.927571 s : S1 : 764404436.42251
Grid : Message : 2339.927575 s : S2 : 764404436.4148
Grid : Message : 2339.927577 s : dS : -0.0077102184295654
Grid : Message : 2339.927579 s : dSpred : -0.0068752425267964
Grid : Message : 2339.927580 s : diff : -0.00083497590276901
Grid : Message : 2339.927581 s : *********************************************************
Grid : Message : 2339.927582 s : Done
Grid : Message : 2339.927582 s : *********************************************************
Force 76 S {S {S {(9.0175185326468,-3.5764415623768e-36)}}}
Force 77 S {S {S {(4.1289977678493,-4.3364721285803e-37)}}}
Force 78 S {S {S {(3.2299269465841,6.0391022273495e-37)}}}
Force 79 S {S {S {(3.0051199649288,-9.6243599973575e-37)}}}
Force 80 S {S {S {(2.8924316727872,-1.3371248240604e-37)}}}
Force 81 S {S {S {(2.8270868791781,1.792628885004e-37)}}}
Force 82 S {S {S {(2.8676819960087,-1.3518185034456e-36)}}}
Force 83 S {S {S {(2.7724152154523,1.4950818774521e-37)}}}
Force 84 S {S {S {(3.0204624534964,-9.6475025423893e-36)}}}
Force 85 S {S {S {(2.8631304063459,2.2426228161781e-37)}}}
Force 86 S {S {S {(2.9025673908905,-1.3942465026706e-36)}}}
Force 87 S {S {S {(2.8553405232646,-2.0938493124022e-38)}}}
Force 88 S {S {S {(3.2820184381375,-1.422348164495e-36)}}}
Force 89 S {S {S {(3.8974980085791,1.1682209795266e-35)}}}
Force 90 S {S {S {(4.660053618223,-1.4399805797573e-37)}}}
Force 91 S {S {S {(6.7993872372366,1.4524702072348e-36)}}}
Full
Grid : Message : 1523.416680 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1530.798503 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1536.153421 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged
PV solve depth 3
Grid : Message : 1667.373045 s : ConjugateGradientMultiShift k=5381 Shift 2 has converged
Grid : Message : 1705.236992 s : ConjugateGradientMultiShift k=7063 Shift 1 has converged
Grid : Message : 1752.493182 s : ConjugateGradientMultiShift k=9220 Shift 0 has converged
MobiusForce.222490 depth 1
Grid : Message : 2155.595070 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 2155.595076 s : S1 : 764404436.37475
Grid : Message : 2155.595080 s : S2 : 764404436.21131
Grid : Message : 2155.595082 s : dS : -0.16344606876373
Grid : Message : 2155.595084 s : dSpred : -0.16235663327375
Grid : Message : 2155.595085 s : diff : -0.0010894354899788
Force 4 S {S {S {(24.512489110423,-7.4203080895657e-36)}}}
Force 5 S {S {S {(14.442663101577,7.3909207307951e-37)}}}
Force 6 S {S {S {(12.298567945213,2.1989091200069e-36)}}}
Force 7 S {S {S {(11.582362859271,-2.2540104177017e-36)}}}
Force 8 S {S {S {(11.465725500906,-2.9512255045332e-36)}}}
Force 9 S {S {S {(10.869067954412,-2.8388188572358e-36)}}}
Force 10 S {S {S {(10.937111429576,-3.3530976357206e-36)}}}
Force 11 S {S {S {(11.23500117508,-1.4487967873885e-36)}}}
Force 12 S {S {S {(10.900736551834,5.1427877848475e-36)}}} Force is bigger
Force 13 S {S {S {(10.951921323651,-1.2098775605838e-35)}}}
Force 14 S {S {S {(10.676529230575,-2.50527233519e-36)}}}
Force 15 S {S {S {(10.98568474467,3.2193851533145e-36)}}}
Force 16 S {S {S {(11.931707726568,-8.5223340434616e-37)}}}
Force 17 S {S {S {(13.751904678482,7.6337337826369e-36)}}}
Force 18 S {S {S {(17.518955473833,1.8073225643893e-36)}}}
Force 19 S {S {S {(20.36519304598,-2.5184966466368e-36)}}}
Full solve
Grid : Message : 1441.297575 s : ConjugateGradientMultiShift k=3846 Shift 2 has converged
Grid : Message : 1449.206520 s : ConjugateGradientMultiShift k=4143 Shift 1 has converged
Grid : Message : 1454.352909 s : ConjugateGradientMultiShift k=4353 Shift 0 has converged
Dirichlet solve -- why so expensive??
Spectral radius worse?
Grid : Message : 1571.887003 s : ConjugateGradientMultiShift k=5195 Shift 2 has converged
Grid : Message : 1599.543760 s : ConjugateGradientMultiShift k=6508 Shift 1 has converged
Grid : Message : 1625.368198 s : ConjugateGradientMultiShift k=7819 Shift 0 has converged
dS is much bigger.
MobiusForce.223606
Grid : Message : 1123.276405 s : ConjugateGradientMultiShift k=3273 Shift 0 has converged
Grid : Message : 1125.945359 s : ConjugateGradientMultiShift k=3407 Shift 1 has converged
Grid : Message : 1127.896580 s : ConjugateGradientMultiShift k=3508 Shift 2 has converged <-- 2 takes longer
First (bdy) Hasenbusch mass raised to 0.005 -- reduces Dirichlet solve cost
Force looks ok still
Grid : Message : 1510.884960 s : OneFlavourEvenOddRatioRationalPseudoFermionAction compute action: complete
Grid : Message : 1510.969380 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1510.969440 s : S1 : 764404436.37475
Grid : Message : 1510.969480 s : S2 : 764404436.17379
Grid : Message : 1510.969500 s : dS : -0.20095825195312
Grid : Message : 1510.969520 s : dSpred : -0.20025674631954
Grid : Message : 1510.969530 s : diff : -0.00070150563358654
Force 76 S {S {S {(24.161229317675,2.0147973173094e-35)}}}
Force 77 S {S {S {(15.841085162729,3.983456481349e-36)}}}
Force 78 S {S {S {(11.031761776856,9.0394046210295e-35)}}}
Force 79 S {S {S {(12.177830066719,1.583978637733e-36)}}}
Force 80 S {S {S {(9.8372072482222,6.4284847310594e-37)}}}
Force 81 S {S {S {(9.6588863493149,1.0501572656659e-35)}}}
Force 82 S {S {S {(10.623076227724,-4.4161853392455e-35)}}}
Force 83 S {S {S {(8.9477003784221,-7.067659784319e-37)}}}
Force 84 S {S {S {(9.7663166497594,-2.1014900256825e-35)}}}
Force 85 S {S {S {(8.9992648919057,-4.7107936109203e-36)}}}
Force 86 S {S {S {(9.0399987268337,6.4652189295226e-37)}}}
Force 87 S {S {S {(9.1319052497073,7.9566273871284e-37)}}}
Force 88 S {S {S {(10.094569606113,-1.263656427134e-37)}}}
Force 89 S {S {S {(11.563679905523,-1.2777623593438e-35)}}}
Force 90 S {S {S {(13.653150474463,2.9093485182852e-37)}}}
Force 91 S {S {S {(16.303719912019,2.9857556510886e-36)}}}
MobiusForce.223749
First (bdy) Hasenbusch mass raised to 0.01 -- reduces Dirichlet solve cost
Grid : Message : 1374.472462 s : OneFlavourEvenOddRatioRationalPseudoFermionAction compute action: complete
Grid : Message : 1374.479206 s : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Grid : Message : 1374.479211 s : S1 : 764404436.37428
Grid : Message : 1374.479215 s : S2 : 764404436.20009
Grid : Message : 1374.479217 s : dS : -0.17418932914734
Grid : Message : 1374.479219 s : dSpred : -0.17358090105485
Grid : Message : 1374.479220 s : diff : -0.00060842809248995
Force 76 S {S {S {(27.006858541753,4.2141472476979e-36)}}}
Force 77 S {S {S {(19.388701462694,-5.1620365048422e-35)}}}
Force 78 S {S {S {(13.502424539662,-2.4038859474316e-35)}}}
Force 79 S {S {S {(15.555776987064,6.0567346426118e-36)}}}
Force 80 S {S {S {(12.752116522904,-2.3720006631655e-35)}}}
Force 81 S {S {S {(12.656857824233,1.6912424972456e-35)}}}
Force 82 S {S {S {(15.159284452724,5.0898905390605e-36)}}}
Force 83 S {S {S {(12.222695136014,-2.2061824913027e-35)}}}
Force 84 S {S {S {(12.92077598466,9.6287681011731e-36)}}}
Force 85 S {S {S {(11.884630495484,2.822655809912e-36)}}}
Force 86 S {S {S {(11.896353116174,1.0926219990893e-35)}}}
Force 87 S {S {S {(11.557019282287,2.1532117771187e-35)}}}
Force 88 S {S {S {(11.945108384613,-3.0210204816133e-36)}}}
Force 89 S {S {S {(13.295373801078,7.3115748621146e-36)}}}
Force 90 S {S {S {(15.373728471417,-7.4923071185536e-36)}}}
Force 91 S {S {S {(17.348173714234,1.0344350287236e-36)}}}
MobiusForce.223829
1.0e-5,5.0e-6,1.0e-6,1.0e-7, // soften convergence more more
Grid : Message : 1000.951387 s : ConjugateGradientMultiShift k=1881 Shift 0 has converged
Grid : Message : 1002.619542 s : ConjugateGradientMultiShift k=1960 Shift 1 has converged
Grid : Message : 1003.726982 s : ConjugateGradientMultiShift k=2014 Shift 4 has converged
Grid : Message : 1005.698741 s : ConjugateGradientMultiShift k=2113 Shift 2 has converged
Grid : Message : 1007.320875 s : ConjugateGradientMultiShift k=2197 Shift 3 has converged
Grid : Message : 1351.171259 s : S1 : 764404436.37428
Grid : Message : 1351.171263 s : S2 : 764404436.20009
Grid : Message : 1351.171265 s : dS : -0.17418932914734
Grid : Message : 1351.171266 s : dSpred : -0.1743248065338
Grid : Message : 1351.171267 s : diff : 0.00013547738646566
Force 76 S {S {S {(27.004288088317,6.035575744297e-35)}}}
Force 77 S {S {S {(19.388023720604,-6.9736202362532e-36)}}}
Force 78 S {S {S {(13.502663916173,6.4067380855692e-35)}}}
Force 79 S {S {S {(15.55135748152,1.7219522871608e-35)}}}
Force 80 S {S {S {(12.75135802213,-1.1303847551095e-35)}}}
Force 81 S {S {S {(12.655732786276,1.689773129307e-36)}}}
Force 82 S {S {S {(15.158469055699,-6.7205950772387e-35)}}}
Force 83 S {S {S {(12.222907191126,-1.6775773754173e-35)}}}
Force 84 S {S {S {(12.916025368247,-1.9641041234302e-35)}}}
Force 85 S {S {S {(11.881879452577,-2.3054382955502e-36)}}}
Force 86 S {S {S {(11.897253557199,-3.3617669065579e-35)}}}
Force 87 S {S {S {(11.55717723524,-1.8690360178074e-36)}}}
Force 88 S {S {S {(11.945590605851,-6.7208889508264e-36)}}}
Force 89 S {S {S {(13.298173932749,-1.0322309768158e-35)}}}
Force 90 S {S {S {(15.373845416836,7.4158999857501e-36)}}}
Force 91 S {S {S {(17.348058307158,-1.8514036025451e-36)}}}
-- could make the stopping condition mandatory once shift 0 has converged.
-- Saves 20% of iterations and leaves a single tunable
*/
//
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_double_ratio.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
typedef MobiusFermionD FermionAction;
typedef WilsonImplD FimplD;
typedef WilsonImplD FermionImplPolicy;
template<class Gimpl>
void ForceTest(Action<LatticeGaugeField> &action,LatticeGaugeField & U,MomentumFilterBase<LatticeGaugeField> &Filter)
{
GridBase *UGrid = U.Grid();
std::vector<int> seeds({1,2,3,5});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
LatticeColourMatrix Pmu(UGrid);
LatticeGaugeField P(UGrid);
LatticeGaugeField UdSdU(UGrid);
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
std::cout << GridLogMessage << " Force test for "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
RealD eps=0.005;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Refresh "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
Gimpl::generate_momenta(P,sRNG,RNG4);
Filter.applyFilter(P);
FieldMetaData header;
std::string file("./ckpoint_lat.2000");
NerscIO::readConfiguration(U,header,file);
action.refresh(U,sRNG,RNG4);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S1 = action.S(U);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Derivative "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
action.deriv(U,UdSdU);
UdSdU = Ta(UdSdU);
Filter.applyFilter(UdSdU);
DumpSliceNorm("Force",UdSdU,Nd-1);
Gimpl::update_field(P,U,eps);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S2 = action.S(U);
// Use the derivative
LatticeComplex dS(UGrid); dS = Zero();
for(int mu=0;mu<Nd;mu++){
auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu);
Pmu= PeekIndex<LorentzIndex>(P,mu);
dS = dS - trace(Pmu*UdSdUmu)*eps*2.0*2.0;
}
ComplexD dSpred = sum(dS);
RealD diff = S2-S1-dSpred.real();
std::cout<< GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout<< GridLogMessage << "S1 : "<< S1 <<std::endl;
std::cout<< GridLogMessage << "S2 : "<< S2 <<std::endl;
std::cout<< GridLogMessage << "dS : "<< S2-S1 <<std::endl;
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::cout << std::setprecision(14);
Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate shm;
GlobalSharedMemory::GetShmDims(mpi_layout,shm);
const int Ls=12;
const int Nt = latt_size[3];
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
////////////////////////////////////////////////////////////////
// Domain decomposed operator
////////////////////////////////////////////////////////////////
Coordinate CommDim(Nd);
for(int d=0;d<Nd;d++) CommDim[d]= (mpi_layout[d]/shm[d])>1 ? 1 : 0;
Coordinate NonDirichlet(Nd+1,0);
Coordinate Dirichlet(Nd+1,0);
Dirichlet[1] = CommDim[0]*latt_size[0]/mpi_layout[0] * shm[0];
Dirichlet[2] = CommDim[1]*latt_size[1]/mpi_layout[1] * shm[1];
Dirichlet[3] = CommDim[2]*latt_size[2]/mpi_layout[2] * shm[2];
Dirichlet[4] = CommDim[3]*latt_size[3]/mpi_layout[3] * shm[3];
Coordinate Block4(Nd);
Block4[0] = Dirichlet[1];
Block4[1] = Dirichlet[2];
Block4[2] = Dirichlet[3];
Block4[3] = Dirichlet[4];
std::vector<Complex> boundary = {1,1,1,-1};
FermionAction::ImplParams Params(boundary);
FermionAction::ImplParams ParamsDir(boundary);
Params.dirichlet=NonDirichlet;
ParamsDir.dirichlet=Dirichlet;
ParamsDir.partialDirichlet=1;
///////////////////// Gauge Field and Gauge Forces ////////////////////////////
LatticeGaugeField U(UGrid);
RealD beta=6.0;
WilsonGaugeActionR PlaqAction(beta);
IwasakiGaugeActionR RectAction(beta);
MomentumFilterNone<LatticeGaugeField> FilterNone;
ForceTest<GimplTypesR>(PlaqAction,U,FilterNone);
ForceTest<GimplTypesR>(RectAction,U,FilterNone);
////////////////////////////////////
// Action
////////////////////////////////////
RealD mass=0.00078;
RealD dmass=0.01;
RealD pvmass=1.0;
RealD M5=1.8;
RealD b=1.5;
RealD c=0.5;
// Double versions
FermionAction DdwfPeriodic(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,Params);
FermionAction PVPeriodic (U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,pvmass,M5,b,c,Params);
FermionAction DdwfDirichlet(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,dmass,M5,b,c,ParamsDir);
double StoppingCondition = 1.0e-8;
double MaxCGIterations = 50000;
ConjugateGradient<LatticeFermion> CG(StoppingCondition,MaxCGIterations);
//////////////////// Two Flavour Determinant Ratio ///////////////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> Nf2(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2,U,FilterNone);
//////////////////// Two Flavour Determinant force test Even Odd ///////////////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> Nf2eo(PVPeriodic, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(Nf2eo,U,FilterNone);
//////////////////// Domain forces ////////////////////
int Width=4;
DDHMCFilter<WilsonImplD::Field> DDHMCFilter(Block4,Width);
//////////////////// Two flavour boundary det ////////////////////
TwoFlavourRatioPseudoFermionAction<FimplD> BdyNf2(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2,U,DDHMCFilter);
//////////////////// Two flavour eo boundary det ////////////////////
TwoFlavourEvenOddRatioPseudoFermionAction<FimplD> BdyNf2eo(DdwfDirichlet, DdwfPeriodic,CG,CG);
// ForceTest<GimplTypesR>(BdyNf2eo,U,DDHMCFilter);
//////////////////// One flavour boundary det ////////////////////
/*
RationalActionParams OFRp; // Up/down
int SP_iters = 3000;
OFRp.lo = 6.0e-5;
OFRp.hi = 90.0;
OFRp.inv_pow = 2;
OFRp.MaxIter = SP_iters; // get most shifts by 2000, stop sharing space
OFRp.action_tolerance= 1.0e-8;
OFRp.action_degree = 18;
OFRp.md_tolerance= 1.0e-5;
OFRp.md_degree = 14;
// OFRp.degree = 20; converges
// OFRp.degree = 16;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
*/
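// The commented-out RationalActionParams block above is an alternative parameterisation with
// separate action/MD degrees and tolerances; the path exercised below uses
// OneFlavourRationalParams with a single degree, and the per-pole tolerances are installed
// via BdySqrt.SetTolerances(ActionTolByPole,MDTolByPole) further down.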
OneFlavourRationalParams OFRp; // Up/down
OFRp.lo = 4.0e-5;
OFRp.hi = 90.0;
OFRp.MaxIter = 60000;
OFRp.tolerance= 1.0e-9;
OFRp.mdtolerance= 1.0e-8;
OFRp.degree = 18;
OFRp.precision= 80;
OFRp.BoundsCheckFreq=0;
std::vector<RealD> ActionTolByPole({
1.0e-7,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
std::vector<RealD> MDTolByPole({
1.6e-5,5.0e-6,1.0e-6,3.0e-7, // soften convergence more more
// 1.0e-6,3.0e-7,1.0e-7,1.0e-7,
// 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
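// The leading MDTolByPole entries are deliberately looser than the corresponding
// ActionTolByPole entries ("soften convergence"), following the tuning recorded in the log
// at the top of this file; the remaining poles stay at 1e-8.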
/*
std::vector<RealD> ActionTolByPole({
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
std::vector<RealD> MDTolByPole({
1.0e-5,5.0e-6,1.0e-6,1.0e-7, // soften convergence more more
// 3.0e-6,1.0e-6,1.0e-7,1.0e-7, // soften convergence more
// 1.0e-6,3.0e-7,1.0e-7,1.0e-7, // Orig sloppy
// 1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8,1.0e-8,1.0e-8,
1.0e-8,1.0e-8
});
*/
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> BdySqrt(DdwfDirichlet,DdwfPeriodic,OFRp);
BdySqrt.SetTolerances(ActionTolByPole,MDTolByPole);
ForceTest<GimplTypesR>(BdySqrt,U,DDHMCFilter);
Grid_finalize();
}

View File

@ -67,7 +67,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -80,8 +80,8 @@ int main (int argc, char** argv)
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
DomainWallEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5);
DomainWallEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5);
DomainWallEOFAFermionD Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5);
DomainWallEOFAFermionD Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5);
OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, true);

View File

@ -47,7 +47,7 @@ int main (int argc, char ** argv)
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
@ -71,27 +71,15 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.2; //kills the diagonal term
RealD M5=1.8;
// const int nu = 3;
// std::vector<int> twists(Nd,0); // twists[nu] = 1;
// GparityDomainWallFermionR::ImplParams params; params.twists = twists;
// GparityDomainWallFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
// DomainWallFermionR Dw (U, Grid,RBGrid,mass,M5);
const int nu = 3;
const int nu = 0; //gparity direction
std::vector<int> twists(Nd,0);
twists[nu] = 1;
GparityDomainWallFermionR::ImplParams params;
twists[Nd-1] = 1; //antiperiodic in time
GparityDomainWallFermionD::ImplParams params;
params.twists = twists;
/*
params.boundary_phases[0] = 1.0;
params.boundary_phases[1] = 1.0;
params.boundary_phases[2] = 1.0;
params.boundary_phases[3] =- 1.0;
*/
GparityDomainWallFermionR Dw(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
GparityDomainWallFermionD Dw(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
Dw.M (phi,Mphi);

View File

@ -33,7 +33,7 @@ using namespace std;
using namespace Grid;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallEOFAFermionR FermionAction;
typedef GparityDomainWallEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
int main (int argc, char** argv)

170
tests/forces/Test_fthmc.cc Normal file
View File

@ -0,0 +1,170 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_fthmc.cc
Copyright (C) 2022
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/qcd/smearing/GaugeConfigurationMasked.h>
#include <Grid/qcd/smearing/JacobianAction.h>
using namespace std;
using namespace Grid;
template<class Gimpl>
void ForceTest(Action<LatticeGaugeField> &action,SmearedConfigurationMasked<PeriodicGimplR> & smU,MomentumFilterBase<LatticeGaugeField> &Filter)
{
LatticeGaugeField U = smU.get_U(false); // unsmeared config
GridBase *UGrid = U.Grid();
std::vector<int> seeds({1,2,3,5});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
LatticeColourMatrix Pmu(UGrid);
LatticeGaugeField P(UGrid);
LatticeGaugeField UdSdU(UGrid);
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
std::cout << GridLogMessage << " Force test for "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
RealD eps=0.005;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Refresh "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
Gimpl::generate_momenta(P,sRNG,RNG4);
Filter.applyFilter(P);
action.refresh(smU,sRNG,RNG4);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S1 = action.S(smU);
Gimpl::update_field(P,U,eps);
smU.set_Field(U);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Derivative "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
action.deriv(smU,UdSdU);
UdSdU = Ta(UdSdU);
Filter.applyFilter(UdSdU);
DumpSliceNorm("Force",UdSdU,Nd-1);
Gimpl::update_field(P,U,eps);
smU.set_Field(U);
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout << GridLogMessage << " Action "<<action.action_name()<<std::endl;
std::cout << GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
RealD S2 = action.S(smU);
// Use the derivative
LatticeComplex dS(UGrid); dS = Zero();
for(int mu=0;mu<Nd;mu++){
auto UdSdUmu = PeekIndex<LorentzIndex>(UdSdU,mu);
Pmu= PeekIndex<LorentzIndex>(P,mu);
dS = dS - trace(Pmu*UdSdUmu)*eps*2.0*2.0;
}
ComplexD dSpred = sum(dS);
RealD diff = S2-S1-dSpred.real();
std::cout<< GridLogMessage << "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++"<<std::endl;
std::cout<< GridLogMessage << "S1 : "<< S1 <<std::endl;
std::cout<< GridLogMessage << "S2 : "<< S2 <<std::endl;
std::cout<< GridLogMessage << "dS : "<< S2-S1 <<std::endl;
std::cout<< GridLogMessage << "dSpred : "<< dSpred.real() <<std::endl;
std::cout<< GridLogMessage << "diff : "<< diff<<std::endl;
std::cout<< GridLogMessage << "*********************************************************"<<std::endl;
// assert(diff<1.0);
std::cout<< GridLogMessage << "Done" <<std::endl;
std::cout << GridLogMessage << "*********************************************************"<<std::endl;
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
std::cout << std::setprecision(14);
Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate shm;
GlobalSharedMemory::GetShmDims(mpi_layout,shm);
const int Ls=12;
const int Nt = latt_size[3];
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
///////////////////// Gauge Field and Gauge Forces ////////////////////////////
LatticeGaugeField U(UGrid);
#if 0
FieldMetaData header;
std::string file("./ckpoint_lat.2000");
NerscIO::readConfiguration(U,header,file);
#else
std::vector<int> seeds({1,2,3,4,5,6,7,8});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds);
SU<Nc>::HotConfiguration(RNG4,U);
#endif
RealD beta=6.0;
WilsonGaugeActionR PlaqAction(beta);
IwasakiGaugeActionR RectAction(beta);
////////////////////////////////////////////////
// Plaquette only FTHMC smearer
////////////////////////////////////////////////
double rho = 0.1;
Smear_Stout<PeriodicGimplR> Smearer(rho);
SmearedConfigurationMasked<PeriodicGimplR> SmartConfig(UGrid,2*Nd,Smearer,true);
JacobianAction<PeriodicGimplR> Jacobian(&SmartConfig);
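// The JacobianAction supplies the extra term introduced by the change of variables
// (the log-determinant of the masked stout smearing map's Jacobian, as in FTHMC);
// it is exercised below with the same ForceTest harness as the plain gauge actions.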
////////////////////////////////////////////////
// Run some tests
////////////////////////////////////////////////
MomentumFilterNone<LatticeGaugeField> FilterNone;
SmartConfig.set_Field(U);
ForceTest<GimplTypesR>(PlaqAction,SmartConfig,FilterNone);
SmartConfig.set_Field(U);
ForceTest<GimplTypesR>(RectAction,SmartConfig,FilterNone);
SmartConfig.set_Field(U);
ForceTest<GimplTypesR>(Jacobian,SmartConfig,FilterNone);
Grid_finalize();
}

View File

@ -56,7 +56,7 @@ int main (int argc, char ** argv)
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
FermionField phi (FGrid); gaussian(RNG5,phi);
FermionField Mphi (FGrid);
FermionField MphiPrime (FGrid);
@ -71,10 +71,12 @@ int main (int argc, char ** argv)
RealD mass=0.01;
RealD M5=1.8;
const int nu = 3;
std::vector<int> twists(Nd,0); twists[nu] = 1;
GparityDomainWallFermionR::ImplParams params; params.twists = twists;
GparityDomainWallFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
const int nu = 1;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
twists[3] = 1;
GparityDomainWallFermionD::ImplParams params; params.twists = twists;
GparityDomainWallFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,params);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -0,0 +1,446 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./forces/Test_gpdwf_force_1f_2f.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
//Here we test the G-parity action and force between the 1f (doubled-lattice) and 2f approaches
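//In the 2f formulation both G-parity flavours live on the original lattice; in the 1f
//formulation the lattice is doubled in direction nu, with the second half carrying the
//complex-conjugated gauge field (copyConjGauge) and the second flavour of the fermion
//field (convertFermion1f_from_2f). Actions and forces of the two formulations should then
//agree once the 1f force is symmetrised under complex conjugation at the end of the test.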
void copyConjGauge(LatticeGaugeFieldD &Umu_1f, const LatticeGaugeFieldD &Umu_2f, const int nu){
GridBase* UGrid_2f = Umu_2f.Grid();
GridBase* UGrid_1f = Umu_1f.Grid();
Replicate(Umu_2f,Umu_1f);
int L_2f = UGrid_2f->FullDimensions()[nu];
int L_1f = UGrid_1f->FullDimensions()[nu];
assert(L_1f == 2 * L_2f);
//Coordinate grid for reference
LatticeInteger xcoor_1f(UGrid_1f);
LatticeCoordinate(xcoor_1f,nu);
//Copy-conjugate the gauge field
//First C-shift the lattice by Lx/2
{
LatticeGaugeField Umu_shift = conjugate( Cshift(Umu_1f,nu,L_2f) );
Umu_1f = where( xcoor_1f >= Integer(L_2f), Umu_shift, Umu_1f );
//We use the in built APBC
//Make the gauge field antiperiodic in nu-direction
//decltype(PeekIndex<LorentzIndex>(Umu_1f,nu)) Unu(UGrid_1f);
//Unu = PeekIndex<LorentzIndex>(Umu_1f,nu);
//Unu = where(xcoor_1f == Integer(2*L_2f-1), -Unu, Unu);
//PokeIndex<LorentzIndex>(Umu_1f,Unu,nu);
}
}
template<typename FermionField2f, typename FermionField1f>
void convertFermion1f_from_2f(FermionField1f &out_1f, const FermionField2f &in_2f, const int nu, bool is_4d){
GridBase* FGrid_1f = out_1f.Grid();
GridBase* FGrid_2f = in_2f.Grid();
int nuoff = is_4d ? 0 : 1; //s in 0 direction
Integer L_2f = FGrid_2f->FullDimensions()[nu+nuoff];
Integer L_1f = FGrid_1f->FullDimensions()[nu+nuoff];
assert(L_1f == 2 * L_2f);
auto in_f0_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,0); //flavor 0 on 2f Grid
FermionField1f in_f0_1fgrid(FGrid_1f);
Replicate(in_f0_2fgrid, in_f0_1fgrid); //has flavor 0 on both halves
auto in_f1_2fgrid = PeekIndex<GparityFlavourIndex>(in_2f,1); //flavor 1 on 2f Grid
FermionField1f in_f1_1fgrid(FGrid_1f);
Replicate(in_f1_2fgrid, in_f1_1fgrid); //has flavor 1 on both halves
LatticeInteger xcoor_1f(FGrid_1f);
LatticeCoordinate(xcoor_1f,nu+nuoff);
out_1f = where(xcoor_1f < L_2f, in_f0_1fgrid, in_f1_1fgrid);
}
template<typename GparityAction, typename StandardAction>
class RatioActionSetupBase{
protected:
TwoFlavourEvenOddRatioPseudoFermionAction<WilsonImplD> *pf_1f;
TwoFlavourEvenOddRatioPseudoFermionAction<GparityWilsonImplD> *pf_2f;
GparityAction* action_2f;
GparityAction* action_PV_2f;
StandardAction* action_1f;
StandardAction* action_PV_1f;
ConjugateGradient<typename StandardAction::FermionField> CG_1f;
ConjugateGradient<typename GparityAction::FermionField> CG_2f;
RatioActionSetupBase(): CG_1f(1.0e-8,10000), CG_2f(1.0e-8,10000){}
void setupPseudofermion(){
pf_1f = new TwoFlavourEvenOddRatioPseudoFermionAction<WilsonImplD>(*action_PV_1f, *action_1f, CG_1f, CG_1f);
pf_2f = new TwoFlavourEvenOddRatioPseudoFermionAction<GparityWilsonImplD>(*action_PV_2f, *action_2f, CG_2f, CG_2f);
}
public:
GparityAction & action2f(){ return *action_2f; }
StandardAction & action1f(){ return *action_1f; }
void refreshAction(LatticeGaugeField &Umu_2f, typename GparityAction::FermionField &eta_2f,
LatticeGaugeField &Umu_1f, typename StandardAction::FermionField &eta_1f){
pf_1f->refresh(Umu_1f, eta_1f);
pf_2f->refresh(Umu_2f, eta_2f);
//Compare PhiOdd
RealD norm_1f = norm2(pf_1f->getPhiOdd());
RealD norm_2f = norm2(pf_2f->getPhiOdd());
std::cout << "Test PhiOdd 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
}
void computeAction(RealD &S_2f, RealD &S_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
S_1f = pf_1f->S(Umu_1f);
S_2f = pf_2f->S(Umu_2f);
}
void computeDeriv(LatticeGaugeField &deriv_2f, LatticeGaugeField &deriv_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
pf_1f->deriv(Umu_1f, deriv_1f);
pf_2f->deriv(Umu_2f, deriv_2f);
}
};
template<typename GparityAction, typename StandardAction>
struct setupAction{};
template<>
struct setupAction<GparityWilsonTMFermionD, WilsonTMFermionD>: public RatioActionSetupBase<GparityWilsonTMFermionD, WilsonTMFermionD>{
typedef GparityWilsonTMFermionD GparityAction;
typedef WilsonTMFermionD StandardAction;
setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): RatioActionSetupBase(){
RealD mass=-1.8;
//Use same DSDR twists as https://arxiv.org/pdf/1208.4412.pdf
RealD epsilon_f = 0.02; //numerator (in determinant)
RealD epsilon_b = 0.5;
std::vector<int> twists(Nd,0);
twists[nu] = 1; //GPBC in y
twists[3] = 1; //APBC
GparityAction::ImplParams params_2f; params_2f.twists = twists;
action_2f = new GparityWilsonTMFermionD(Umu_2f,*UGrid_2f,*UrbGrid_2f, mass, epsilon_f, params_2f);
action_PV_2f = new GparityWilsonTMFermionD(Umu_2f,*UGrid_2f,*UrbGrid_2f, mass, epsilon_b, params_2f);
DomainWallFermionD::ImplParams params_1f;
params_1f.boundary_phases[nu] = -1;
params_1f.boundary_phases[3] = -1;
action_1f = new WilsonTMFermionD(Umu_1f,*UGrid_1f,*UrbGrid_1f, mass, epsilon_f, params_1f);
action_PV_1f = new WilsonTMFermionD(Umu_1f,*UGrid_1f,*UrbGrid_1f, mass, epsilon_b, params_1f);
setupPseudofermion();
}
static bool is4d(){ return true; }
};
template<>
struct setupAction<GparityDomainWallFermionD, DomainWallFermionD>: public RatioActionSetupBase<GparityDomainWallFermionD, DomainWallFermionD>{
typedef GparityDomainWallFermionD GparityAction;
typedef DomainWallFermionD StandardAction;
setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): RatioActionSetupBase(){
RealD mass=0.01;
RealD M5=1.8;
std::vector<int> twists(Nd,0);
twists[nu] = 1; //GPBC in y
twists[3] = 1; //APBC
GparityDomainWallFermionD::ImplParams params_2f; params_2f.twists = twists;
action_2f = new GparityDomainWallFermionD(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f,mass,M5,params_2f);
action_PV_2f = new GparityDomainWallFermionD(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f,1.0,M5,params_2f);
DomainWallFermionD::ImplParams params_1f;
params_1f.boundary_phases[nu] = -1;
params_1f.boundary_phases[3] = -1;
action_1f = new DomainWallFermionD(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,mass,M5,params_1f);
action_PV_1f = new DomainWallFermionD(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f,1.0,M5,params_1f);
setupPseudofermion();
}
static bool is4d(){ return false; }
};
//For EOFA we need a different pseudofermion type
template<>
struct setupAction<GparityDomainWallEOFAFermionD, DomainWallEOFAFermionD>{
typedef GparityDomainWallEOFAFermionD GparityAction;
typedef DomainWallEOFAFermionD StandardAction;
ExactOneFlavourRatioPseudoFermionAction<WilsonImplD> *pf_1f;
ExactOneFlavourRatioPseudoFermionAction<GparityWilsonImplD> *pf_2f;
GparityAction* action_2f;
GparityAction* action_PV_2f;
StandardAction* action_1f;
StandardAction* action_PV_1f;
ConjugateGradient<typename StandardAction::FermionField> CG_1f;
ConjugateGradient<typename GparityAction::FermionField> CG_2f;
public:
GparityAction & action2f(){ return *action_2f; }
StandardAction & action1f(){ return *action_1f; }
void refreshAction(LatticeGaugeField &Umu_2f, typename GparityAction::FermionField &eta_2f,
LatticeGaugeField &Umu_1f, typename StandardAction::FermionField &eta_1f){
pf_1f->refresh(Umu_1f, eta_1f);
pf_2f->refresh(Umu_2f, eta_2f);
//Compare PhiOdd
RealD norm_1f = norm2(pf_1f->getPhi());
RealD norm_2f = norm2(pf_2f->getPhi());
std::cout << "Test Phi 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
}
void computeAction(RealD &S_2f, RealD &S_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
S_1f = pf_1f->S(Umu_1f);
S_2f = pf_2f->S(Umu_2f);
}
void computeDeriv(LatticeGaugeField &deriv_2f, LatticeGaugeField &deriv_1f, LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f){
pf_1f->deriv(Umu_1f, deriv_1f);
pf_2f->deriv(Umu_2f, deriv_2f);
}
setupAction(GridCartesian* UGrid_2f, GridRedBlackCartesian* UrbGrid_2f, GridCartesian* FGrid_2f, GridRedBlackCartesian* FrbGrid_2f,
GridCartesian* UGrid_1f, GridRedBlackCartesian* UrbGrid_1f, GridCartesian* FGrid_1f, GridRedBlackCartesian* FrbGrid_1f,
LatticeGaugeField &Umu_2f, LatticeGaugeField &Umu_1f, int nu): CG_1f(1.0e-8,10000), CG_2f(1.0e-8,10000){
RealD mass=0.01;
RealD M5=1.8;
std::vector<int> twists(Nd,0);
twists[nu] = 1; //GPBC in y
twists[3] = 1; //APBC
GparityAction::ImplParams params_2f; params_2f.twists = twists;
action_2f = new GparityAction(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f, mass, mass, 1.0, 0.0, -1, M5, params_2f);
action_PV_2f = new GparityAction(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f,*UrbGrid_2f, 1.0, mass, 1.0, -1.0, 1, M5, params_2f); //cf Test_dwf_gpforce_eofa.cc
StandardAction::ImplParams params_1f;
params_1f.boundary_phases[nu] = -1;
params_1f.boundary_phases[3] = -1;
action_1f = new StandardAction(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f, mass, mass, 1.0, 0.0, -1, M5, params_1f);
action_PV_1f = new StandardAction(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f,*UrbGrid_1f, 1.0, mass, 1.0, -1.0, 1, M5, params_1f);
OneFlavourRationalParams RationalParams(0.95, 100.0, 5000, 1.0e-12, 12);
pf_1f = new ExactOneFlavourRatioPseudoFermionAction<WilsonImplD>(*action_1f, *action_PV_1f, CG_1f, CG_1f, CG_1f, CG_1f, CG_1f, RationalParams, true);
pf_2f = new ExactOneFlavourRatioPseudoFermionAction<GparityWilsonImplD>(*action_2f, *action_PV_2f, CG_2f, CG_2f, CG_2f, CG_2f, CG_2f, RationalParams, true);
}
static bool is4d(){ return false; }
};
template<typename GparityAction, typename StandardAction>
void runTest(int argc, char** argv){
Grid_init(&argc,&argv);
const int nu = 1;
Coordinate latt_2f = GridDefaultLatt();
Coordinate latt_1f = latt_2f;
latt_1f[nu] *= 2;
Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
const int Ls=8;
GridCartesian * UGrid_1f = SpaceTimeGrid::makeFourDimGrid(latt_1f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_1f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_1f);
GridCartesian * FGrid_1f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_1f);
GridRedBlackCartesian * FrbGrid_1f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_1f);
GridCartesian * UGrid_2f = SpaceTimeGrid::makeFourDimGrid(latt_2f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_2f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_2f);
GridCartesian * FGrid_2f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_2f);
GridRedBlackCartesian * FrbGrid_2f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_2f);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5_2f(FGrid_2f); RNG5_2f.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4_2f(UGrid_2f); RNG4_2f.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu_2f(UGrid_2f);
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
LatticeGaugeField Umu_1f(UGrid_1f);
copyConjGauge(Umu_1f, Umu_2f, nu);
typedef typename GparityAction::FermionField GparityFermionField;
typedef typename StandardAction::FermionField StandardFermionField;
setupAction<GparityAction, StandardAction> setup(UGrid_2f, UrbGrid_2f, FGrid_2f, FrbGrid_2f,
UGrid_1f, UrbGrid_1f, FGrid_1f, FrbGrid_1f,
Umu_2f, Umu_1f, nu);
GridBase* FGrid_2f_a = setup.action2f().FermionGrid();
GridBase* FGrid_1f_a = setup.action1f().FermionGrid();
GridBase* FrbGrid_2f_a = setup.action2f().FermionRedBlackGrid();
GridBase* FrbGrid_1f_a = setup.action1f().FermionRedBlackGrid();
bool is_4d = setup.is4d();
//Check components by doing an inversion
{
setup.action2f().ImportGauge(Umu_2f);
setup.action1f().ImportGauge(Umu_1f);
GparityFermionField src_2f(FGrid_2f_a);
gaussian(is_4d ? RNG4_2f : RNG5_2f, src_2f);
StandardFermionField src_1f(FGrid_1f_a);
convertFermion1f_from_2f(src_1f, src_2f, nu, is_4d);
StandardFermionField src_o_1f(FrbGrid_1f_a);
StandardFermionField result_o_1f(FrbGrid_1f_a);
pickCheckerboard(Odd,src_o_1f,src_1f);
result_o_1f=Zero();
SchurDiagMooeeOperator<StandardAction,StandardFermionField> HermOpEO_1f(setup.action1f());
ConjugateGradient<StandardFermionField> CG_1f(1.0e-8,10000);
CG_1f(HermOpEO_1f,src_o_1f,result_o_1f);
GparityFermionField src_o_2f(FrbGrid_2f_a);
GparityFermionField result_o_2f(FrbGrid_2f_a);
pickCheckerboard(Odd,src_o_2f,src_2f);
result_o_2f=Zero();
SchurDiagMooeeOperator<GparityAction,GparityFermionField> HermOpEO_2f(setup.action2f());
ConjugateGradient<GparityFermionField> CG_2f(1.0e-8,10000);
CG_2f(HermOpEO_2f,src_o_2f,result_o_2f);
RealD norm_1f = norm2(result_o_1f);
RealD norm_2f = norm2(result_o_2f);
std::cout << "Test fermion inversion 2f: " << norm_2f << " 1f: " << norm_1f << std::endl;
}
//Generate eta
RealD scale = std::sqrt(0.5);
GparityFermionField eta_2f(FGrid_2f_a);
gaussian(is_4d ? RNG4_2f : RNG5_2f,eta_2f); eta_2f = eta_2f * scale;
StandardFermionField eta_1f(FGrid_1f_a);
convertFermion1f_from_2f(eta_1f, eta_2f, nu, is_4d);
setup.refreshAction(Umu_2f, eta_2f, Umu_1f, eta_1f);
//Initial action is just |eta^2|
RealD S_1f, S_2f;
setup.computeAction(S_2f, S_1f, Umu_2f, Umu_1f);
std::cout << "Test Initial action 2f: " << S_2f << " 1f: " << S_1f << " diff: " << S_2f - S_1f << std::endl;
//Do a random gauge field refresh
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
copyConjGauge(Umu_1f, Umu_2f, nu);
//Compute the action again
setup.computeAction(S_2f, S_1f, Umu_2f, Umu_1f);
std::cout << "Test Action after gauge field randomize 2f: " << S_2f << " 1f: " << S_1f << " diff: " << S_2f - S_1f << std::endl;
//Compute the derivative and test the conjugate relation
LatticeGaugeField deriv_2f(UGrid_2f);
LatticeGaugeField deriv_1f(UGrid_1f);
setup.computeDeriv(deriv_2f, deriv_1f, Umu_2f, Umu_1f);
//Have to combine the two forces on the 1f by symmetrizing under the complex conjugate
{
RealD norm2_pre = norm2(deriv_1f);
LatticeGaugeField deriv_1f_shift = conjugate( Cshift(deriv_1f, nu, latt_2f[nu]) );
deriv_1f = deriv_1f + deriv_1f_shift;
std::cout << "Test combine/symmetrize forces on 1f lattice, dS/dU : " << norm2_pre << " -> " << norm2(deriv_1f) << std::endl;
}
LatticeGaugeField deriv_1f_from_2f(UGrid_1f);
copyConjGauge(deriv_1f_from_2f, deriv_2f, nu);
std::cout << "Test copy-conj 2f dS/dU to obtain equivalent 1f force : " << norm2(deriv_2f) << " -> " << norm2(deriv_1f_from_2f) << std::endl;
LatticeGaugeField diff_deriv_1f = deriv_1f - deriv_1f_from_2f;
std::cout << "Test dS/dU 1f constructed from 2f derivative: " << norm2(deriv_1f_from_2f) << " dS/dU 1f actual: " << norm2(deriv_1f) << " Norm of difference: " << norm2(diff_deriv_1f) << std::endl;
std::cout<< GridLogMessage << "Done" <<std::endl;
Grid_finalize();
}
int main (int argc, char ** argv)
{
std::string action = "DWF";
for(int i=1;i<argc;i++){
if(std::string(argv[i]) == "--action"){
action = argv[i+1];
}
}
if(action == "DWF"){
runTest<GparityDomainWallFermionD, DomainWallFermionD>(argc, argv);
}else if(action == "EOFA"){
runTest<GparityDomainWallEOFAFermionD, DomainWallEOFAFermionD>(argc, argv);
}else if(action == "DSDR"){
runTest<GparityWilsonTMFermionD, WilsonTMFermionD>(argc,argv);
}else{
assert(0);
}
}

View File

@ -50,7 +50,7 @@ int main (int argc, char ** argv)
int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename GparityDomainWallFermionD::FermionField FermionField;
FermionField phi (FGrid); gaussian(RNG4,phi);
FermionField Mphi (FGrid);
FermionField MphiPrime (FGrid);
@ -64,10 +64,14 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
const int nu = 3;
std::vector<int> twists(Nd,0); twists[nu] = 1;
GparityWilsonFermionR::ImplParams params; params.twists = twists;
GparityWilsonFermionR Wil(U,*UGrid,*UrbGrid,mass,params);
const int nu = 1;
const int Lnu=latt_size[nu];
std::vector<int> twists(Nd,0);
twists[nu] = 1;
twists[3]=1;
GparityWilsonFermionD::ImplParams params; params.twists = twists;
GparityWilsonFermionD Wil(U,*UGrid,*UrbGrid,mass,params);
Wil.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -76,7 +76,7 @@ int main (int argc, char ** argv)
p.boundary_phases[2] = 1.0;
p.boundary_phases[3] = -1.0;
MobiusFermionR Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,p);
MobiusFermionD Ddwf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,p);
Ddwf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -82,14 +82,56 @@ int main (int argc, char** argv)
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
MobiusEOFAFermionR Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Lop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mf, mf, mb, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Rop(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mb, mf, mb, -1.0, 1, M5, b, c);
OneFlavourRationalParams Params(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<LatticeFermion> CG(1.0e-12, 5000);
ExactOneFlavourRatioPseudoFermionAction<WilsonImplR> Meofa(Lop, Rop, CG, CG, CG, CG, CG, Params, false);
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds4);
//Check the rational approximation
{
RealD scale = std::sqrt(0.5);
LatticeFermion eta (Lop.FermionGrid());
gaussian(RNG5,eta); eta = eta * scale;
Meofa.refresh(U, eta);
//Phi = M^{-1/2} eta
//M is Hermitian
//(Phi, M Phi) = eta^\dagger M^{-1/2} M M^{-1/2} eta = eta^\dagger eta
LatticeFermion phi = Meofa.getPhi();
LatticeFermion Mphi(FGrid);
Meofa.Meofa(U, phi, Mphi);
std::cout << "Computing inner product" << std::endl;
ComplexD inner = innerProduct(phi, Mphi);
ComplexD test = inner - norm2(eta);
std::cout << "(phi, Mphi) - (eta,eta): " << test << " expect 0" << std::endl;
assert(test.real() < 1e-8);
assert(test.imag() < 1e-8);
//Another test is to use heatbath twice to apply M^{-1/2} to Phi then apply M
// M Phi'
//= M M^{-1/2} Phi
//= M M^{-1/2} M^{-1/2} eta
//= eta
Meofa.refresh(U, phi);
LatticeFermion phi2 = Meofa.getPhi();
LatticeFermion test2(FGrid);
Meofa.Meofa(U, phi2, test2);
test2 = test2 - eta;
RealD test2_norm = norm2(test2);
std::cout << "|M M^{-1/2} M^{-1/2} eta - eta|^2 = " << test2_norm << " expect 0" << std::endl;
assert( test2_norm < 1e-8 );
}
Meofa.refresh(U, sRNG, RNG5 );
RealD S = Meofa.S(U); // pdag M p
// get the deriv of phidag M phi with respect to "U"

View File

@ -0,0 +1,233 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/forces/Test_mobius_gparity_eofa_mixed.cc
Copyright (C) 2017
Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace std;
using namespace Grid;
;
typedef GparityWilsonImplD FermionImplPolicyD;
typedef GparityMobiusEOFAFermionD FermionActionD;
typedef typename FermionActionD::FermionField FermionFieldD;
typedef GparityWilsonImplF FermionImplPolicyF;
typedef GparityMobiusEOFAFermionF FermionActionF;
typedef typename FermionActionF::FermionField FermionFieldF;
NAMESPACE_BEGIN(Grid);
template<class FermionOperatorD, class FermionOperatorF, class SchurOperatorD, class SchurOperatorF>
class MixedPrecisionConjugateGradientOperatorFunction : public OperatorFunction<typename FermionOperatorD::FermionField> {
public:
typedef typename FermionOperatorD::FermionField FieldD;
typedef typename FermionOperatorF::FermionField FieldF;
using OperatorFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;
Integer MaxOuterIterations;
GridBase* SinglePrecGrid4; //4d grid for single-precision fields
GridBase* SinglePrecGrid5; //5d grid for single-precision fields
RealD OuterLoopNormMult; //Stop the outer loop and move to a final double prec solve when the residual is OuterLoopNormMult * Tolerance
FermionOperatorF &FermOpF;
FermionOperatorD &FermOpD;
SchurOperatorF &LinOpF;
SchurOperatorD &LinOpD;
Integer TotalInnerIterations; //Number of inner CG iterations
Integer TotalOuterIterations; //Number of restarts
Integer TotalFinalStepIterations; //Number of CG iterations in final patch-up step
MixedPrecisionConjugateGradientOperatorFunction(RealD tol,
Integer maxinnerit,
Integer maxouterit,
GridBase* _sp_grid4,
GridBase* _sp_grid5,
FermionOperatorF &_FermOpF,
FermionOperatorD &_FermOpD,
SchurOperatorF &_LinOpF,
SchurOperatorD &_LinOpD):
LinOpF(_LinOpF),
LinOpD(_LinOpD),
FermOpF(_FermOpF),
FermOpD(_FermOpD),
Tolerance(tol),
InnerTolerance(tol),
MaxInnerIterations(maxinnerit),
MaxOuterIterations(maxouterit),
SinglePrecGrid4(_sp_grid4),
SinglePrecGrid5(_sp_grid5),
OuterLoopNormMult(100.)
{
};
void operator()(LinearOperatorBase<FieldD> &LinOpU, const FieldD &src, FieldD &psi) {
std::cout << GridLogMessage << " Mixed precision CG wrapper operator() "<<std::endl;
SchurOperatorD * SchurOpU = static_cast<SchurOperatorD *>(&LinOpU);
assert(&(SchurOpU->_Mat)==&(LinOpD._Mat));
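//Refresh the single-precision gauge field (and its even/odd checkerboards) from the current double-precision links so both operators act on the same configuration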
precisionChange(FermOpF.Umu, FermOpD.Umu);
pickCheckerboard(Even,FermOpF.UmuEven,FermOpF.Umu);
pickCheckerboard(Odd ,FermOpF.UmuOdd ,FermOpF.Umu);
////////////////////////////////////////////////////////////////////////////////////
// Make a mixed precision conjugate gradient
////////////////////////////////////////////////////////////////////////////////////
MixedPrecisionConjugateGradient<FieldD,FieldF> MPCG(Tolerance,MaxInnerIterations,MaxOuterIterations,SinglePrecGrid5,LinOpF,LinOpD);
MPCG.InnerTolerance = InnerTolerance;
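//Inner CG iterations run in single precision; the outer loop recomputes the true residual in double precision and restarts until Tolerance is reached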
std::cout << GridLogMessage << "Calling mixed precision Conjugate Gradient" <<std::endl;
MPCG(src,psi);
}
};
NAMESPACE_END(Grid);
int main (int argc, char** argv)
{
Grid_init(&argc, &argv);
Coordinate latt_size = GridDefaultLatt();
Coordinate mpi_layout = GridDefaultMpi();
const int Ls = 8;
GridCartesian *UGridD = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexD::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGridD = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridD);
GridCartesian *FGridD = SpaceTimeGrid::makeFiveDimGrid(Ls, UGridD);
GridRedBlackCartesian *FrbGridD = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGridD);
GridCartesian *UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian *UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian *FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls, UGridF);
GridRedBlackCartesian *FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGridF);
std::vector<int> seeds4({1,2,3,5});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGridD); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGridD); RNG4.SeedFixedIntegers(seeds4);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
LatticeGaugeFieldD Ud(UGridD);
SU<Nc>::HotConfiguration(RNG4,Ud);
LatticeGaugeFieldF Uf(UGridF);
precisionChange(Uf, Ud);
RealD b = 2.5;
RealD c = 1.5;
RealD mf = 0.01;
RealD mb = 1.0;
RealD M5 = 1.8;
FermionActionD::ImplParams params;
params.twists[0] = 1; //GPBC in X
params.twists[Nd-1] = 1; //antiperiodic BC in T
std::vector<int> gtwists(4,0);
gtwists[0] = 1;
ConjugateGimplD::setDirections(gtwists);
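//The conjugate gauge implementation must also be told which directions carry charge-conjugation (G-parity) boundaries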
FermionActionD LopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, mf, mf, mb, 0.0, -1, M5, b, c, params);
FermionActionD RopD(Ud, *FGridD, *FrbGridD, *UGridD, *UrbGridD, mb, mf, mb, -1.0, 1, M5, b, c, params);
FermionActionF LopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, mf, mf, mb, 0.0, -1, M5, b, c, params);
FermionActionF RopF(Uf, *FGridF, *FrbGridF, *UGridF, *UrbGridF, mb, mf, mb, -1.0, 1, M5, b, c, params);
OneFlavourRationalParams OFRp(0.95, 100.0, 5000, 1.0e-12, 12);
ConjugateGradient<FermionFieldD> CG(1.0e-10, 10000);
typedef SchurDiagMooeeOperator<FermionActionD,FermionFieldD> EOFAschuropD;
typedef SchurDiagMooeeOperator<FermionActionF,FermionFieldF> EOFAschuropF;
EOFAschuropD linopL_D(LopD);
EOFAschuropD linopR_D(RopD);
EOFAschuropF linopL_F(LopF);
EOFAschuropF linopR_F(RopF);
typedef MixedPrecisionConjugateGradientOperatorFunction<FermionActionD, FermionActionF, EOFAschuropD, EOFAschuropF> EOFA_mxCG;
EOFA_mxCG MCG_L(1e-10, 10000, 1000, UGridF, FrbGridF, LopF, LopD, linopL_F, linopL_D);
MCG_L.InnerTolerance = 1e-5;
EOFA_mxCG MCG_R(1e-10, 10000, 1000, UGridF, FrbGridF, RopF, RopD, linopR_F, linopR_D);
MCG_R.InnerTolerance = 1e-5;
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicyD> MeofaD(LopD, RopD, CG, CG, CG, CG, CG, OFRp, true);
ExactOneFlavourRatioMixedPrecHeatbathPseudoFermionAction<FermionImplPolicyD, FermionImplPolicyF> MeofaMx(LopF, RopF, LopD, RopD, MCG_L, MCG_R, MCG_L, MCG_R, MCG_L, MCG_R, OFRp, true);
FermionFieldD eta(FGridD);
gaussian(RNG5, eta);
MeofaD.refresh(Ud, eta);
MeofaMx.refresh(Ud, eta);
FermionFieldD diff_phi(FGridD);
diff_phi = MeofaD.getPhi() - MeofaMx.getPhi();
RealD n = norm2(diff_phi);
std::cout << GridLogMessage << "Phi(double)=" << norm2(MeofaD.getPhi()) << " Phi(mixed)=" << norm2(MeofaMx.getPhi()) << " diff=" << n << std::endl;
assert(n < 1e-8);
RealD Sd = MeofaD.S(Ud);
RealD Smx = MeofaMx.S(Ud);
std::cout << GridLogMessage << "Initial action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;
assert(fabs(Sd-Smx) < 1e-6);
SU<Nc>::HotConfiguration(RNG4,Ud);
precisionChange(Uf, Ud);
Sd = MeofaD.S(Ud);
Smx = MeofaMx.S(Ud);
std::cout << GridLogMessage << "After randomizing U, action double=" << Sd << " mixed=" << Smx << " diff=" << Sd-Smx << std::endl;
assert(fabs(Sd-Smx) < 1e-6);
std::cout << GridLogMessage << "Done" << std::endl;
Grid_finalize();
}

View File

@ -34,7 +34,7 @@ using namespace Grid;
;
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityMobiusEOFAFermionR FermionAction;
typedef GparityMobiusEOFAFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
int main (int argc, char** argv)

View File

@ -69,7 +69,7 @@ int main (int argc, char ** argv)
////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
OverlapWilsonPartialFractionTanhFermionR Dpf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonPartialFractionTanhFermionD Dpf(U,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
Dpf.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -57,7 +57,6 @@ int main (int argc, char ** argv)
SU<Nc>::HotConfiguration(pRNG,U);
double beta = 1.0;
double c1 = -0.331;
IwasakiGaugeActionR Action(beta);
// PlaqPlusRectangleActionR Action(beta,c1);

View File

@ -67,7 +67,7 @@ int main (int argc, char ** argv)
// Unmodified matrix element
////////////////////////////////////
RealD mass=-4.0; //kills the diagonal term
WilsonFermionR Dw (U, Grid,RBGrid,mass);
WilsonFermionD Dw (U, Grid,RBGrid,mass);
Dw.M (phi,Mphi);
ComplexD S = innerProduct(Mphi,Mphi); // pdag MdagM p

View File

@ -70,7 +70,7 @@ int main(int argc, char **argv)
////////////////////////////////////
RealD mass = 0.1;
Real csw = 1.0;
WilsonCloverFermionR Dw(U, Grid, RBGrid, mass, csw, csw);
WilsonCloverFermionD Dw(U, Grid, RBGrid, mass, csw, csw);
Dw.ImportGauge(U);
Dw.M(phi, Mphi);
ComplexD S = innerProduct(Mphi, Mphi); // Action : pdag MdagM p

View File

@ -81,7 +81,7 @@ int main (int argc, char ** argv)
omegas.push_back( std::complex<double>(0.0686324988446592,0.0550658530827402) );
omegas.push_back( std::complex<double>(0.0686324988446592,-0.0550658530827402) );
ZMobiusFermionR Ddwf(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,b,c);
ZMobiusFermionD Ddwf(U, *FGrid, *FrbGrid, *UGrid, *UrbGrid, mass, M5, omegas,b,c);
Ddwf.M (phi,Mphi);

View File

@ -0,0 +1,257 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: tests/hmc/Test_action_dwf_gparity2fvs1f.cc
Copyright (C) 2015
Author: Christopher Kelly <ckelly@bnl.gov>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
using namespace Grid;
template<typename FermionField2f, typename FermionField1f>
void copy2fTo1fFermionField(FermionField1f &out, const FermionField2f &in, int gpdir){
auto f0_halfgrid = PeekIndex<GparityFlavourIndex>(in,0); //on 2f Grid
FermionField1f f0_fullgrid_dbl(out.Grid());
Replicate(f0_halfgrid, f0_fullgrid_dbl); //double it up to live on the 1f Grid
auto f1_halfgrid = PeekIndex<GparityFlavourIndex>(in,1);
FermionField1f f1_fullgrid_dbl(out.Grid());
Replicate(f1_halfgrid, f1_fullgrid_dbl);
const Coordinate &dim_2f = in.Grid()->GlobalDimensions();
const Coordinate &dim_1f = out.Grid()->GlobalDimensions();
//We have to be careful for 5d fields; the s-direction is placed before the x,y,z,t and so we need to shift gpdir by 1
std::cout << "gpdir " << gpdir << std::endl;
gpdir+=1;
std::cout << "gpdir for 5D fields " << gpdir << std::endl;
std::cout << "dim_2f " << dim_2f << std::endl;
std::cout << "dim_1f " << dim_1f << std::endl;
assert(dim_1f[gpdir] == 2*dim_2f[gpdir]);
LatticeInteger xcoor_1f(out.Grid()); //5d lattice integer
LatticeCoordinate(xcoor_1f,gpdir);
Integer L = dim_2f[gpdir];
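//The first L slices in the doubled direction hold flavour 0; the remaining L hold flavour 1 (its charge-conjugate partner)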
out = where(xcoor_1f < L, f0_fullgrid_dbl, f1_fullgrid_dbl);
}
//Both have the same field type
void copy2fTo1fGaugeField(LatticeGaugeField &out, const LatticeGaugeField &in, int gpdir){
LatticeGaugeField U_dbl(out.Grid());
Replicate(in, U_dbl);
LatticeGaugeField Uconj_dbl = conjugate( U_dbl );
const Coordinate &dim_2f = in.Grid()->GlobalDimensions();
LatticeInteger xcoor_1f(out.Grid());
LatticeCoordinate(xcoor_1f,gpdir);
Integer L = dim_2f[gpdir];
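//Physical links on the first half of the doubled direction, complex-conjugated links on the second half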
out = where(xcoor_1f < L, U_dbl, Uconj_dbl);
}
std::ostream & operator<<(std::ostream &os, const Coordinate &x){
os << "(";
for(int i=0;i<x.size();i++) os << x[i] << (i<x.size()-1 ? " " : "");
os << ")";
return os;
}
int main(int argc, char **argv) {
using namespace Grid;
Grid_init(&argc, &argv);
int threads = GridThread::GetThreads();
std::cout << GridLogMessage << "Grid is setup to use " << threads << " threads" << std::endl;
int Ls = 16;
Coordinate latt_2f = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd, vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
int mu = 0; //Gparity direction
Coordinate latt_1f = latt_2f;
latt_1f[mu] *= 2;
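//The single-flavour ('1f') description doubles the lattice in the G-parity direction, trading the flavour index for the extra volume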
GridCartesian * UGrid_1f = SpaceTimeGrid::makeFourDimGrid(latt_1f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_1f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_1f);
GridCartesian * FGrid_1f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_1f);
GridRedBlackCartesian * FrbGrid_1f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_1f);
GridCartesian * UGrid_2f = SpaceTimeGrid::makeFourDimGrid(latt_2f, simd_layout, mpi_layout);
GridRedBlackCartesian * UrbGrid_2f = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid_2f);
GridCartesian * FGrid_2f = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid_2f);
GridRedBlackCartesian * FrbGrid_2f = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid_2f);
std::cout << "SIMD layout " << simd_layout << std::endl;
std::cout << "MPI layout " << mpi_layout << std::endl;
std::cout << "2f dimensions " << latt_2f << std::endl;
std::cout << "1f dimensions " << latt_1f << std::endl;
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5_2f(FGrid_2f); RNG5_2f.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4_2f(UGrid_2f); RNG4_2f.SeedFixedIntegers(seeds4);
std::cout << "Generating hot 2f gauge configuration" << std::endl;
LatticeGaugeField Umu_2f(UGrid_2f);
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
std::cout << "Copying 2f->1f gauge field" << std::endl;
LatticeGaugeField Umu_1f(UGrid_1f);
copy2fTo1fGaugeField(Umu_1f, Umu_2f, mu);
typedef GparityWilsonImplR FermionImplPolicy2f;
typedef GparityDomainWallFermionD FermionAction2f;
typedef typename FermionAction2f::FermionField FermionField2f;
typedef WilsonImplR FermionImplPolicy1f;
typedef DomainWallFermionD FermionAction1f;
typedef typename FermionAction1f::FermionField FermionField1f;
std::cout << "Generating eta 2f" << std::endl;
FermionField2f eta_2f(FGrid_2f);
gaussian(RNG5_2f, eta_2f);
RealD scale = std::sqrt(0.5);
eta_2f=eta_2f*scale;
std::cout << "Copying 2f->1f eta" << std::endl;
FermionField1f eta_1f(FGrid_1f);
copy2fTo1fFermionField(eta_1f, eta_2f, mu);
Real beta = 2.13;
Real light_mass = 0.01;
Real strange_mass = 0.032;
Real pv_mass = 1.0;
RealD M5 = 1.8;
//Setup the Dirac operators
std::cout << "Initializing Dirac operators" << std::endl;
FermionAction2f::ImplParams Params_2f;
Params_2f.twists[mu] = 1;
Params_2f.twists[Nd-1] = 1; //APBC in time direction
//Note: 'Num' and 'Den' here refer to the determinant ratio, not the operator ratio in the pseudofermion action, where the two are inverted.
//To my mind the Pauli-Villars operator and the 'denominator' are synonymous, but the Grid RHMC convention labels it the 'Numerator' operator.
FermionAction2f NumOp_2f(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f, *UrbGrid_2f, light_mass,M5,Params_2f);
FermionAction2f DenOp_2f(Umu_2f,*FGrid_2f,*FrbGrid_2f,*UGrid_2f, *UrbGrid_2f, pv_mass, M5,Params_2f);
FermionAction1f::ImplParams Params_1f;
Params_1f.boundary_phases[mu] = -1; //antiperiodic in doubled lattice in GP direction
Params_1f.boundary_phases[Nd-1] = -1;
FermionAction1f NumOp_1f(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f, *UrbGrid_1f, light_mass,M5,Params_1f);
FermionAction1f DenOp_1f(Umu_1f,*FGrid_1f,*FrbGrid_1f,*UGrid_1f, *UrbGrid_1f, pv_mass, M5,Params_1f);
//Test the replication routines by running a CG on eta
double StoppingCondition = 1e-10;
double MaxCGIterations = 30000;
ConjugateGradient<FermionField2f> CG_2f(StoppingCondition,MaxCGIterations);
ConjugateGradient<FermionField1f> CG_1f(StoppingCondition,MaxCGIterations);
NumOp_1f.ImportGauge(Umu_1f);
NumOp_2f.ImportGauge(Umu_2f);
FermionField1f test_1f(FGrid_1f);
FermionField2f test_2f(FGrid_2f);
MdagMLinearOperator<FermionAction1f, FermionField1f> Linop_1f(NumOp_1f);
MdagMLinearOperator<FermionAction2f, FermionField2f> Linop_2f(NumOp_2f);
CG_1f(Linop_1f, eta_1f, test_1f);
CG_2f(Linop_2f, eta_2f, test_2f);
RealD test_1f_norm = norm2(test_1f);
RealD test_2f_norm = norm2(test_2f);
std::cout << "Verification of replication routines: " << test_1f_norm << " " << test_2f_norm << " " << test_1f_norm - test_2f_norm << std::endl;
#if 1
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy2f> Action2f;
typedef GeneralEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy1f> Action1f;
RationalActionParams rational_params;
rational_params.inv_pow = 2;
rational_params.lo = 1e-5;
rational_params.hi = 32;
rational_params.md_degree = 16;
rational_params.action_degree = 16;
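//inv_pow=2 requests the (inverse) square-root power, approximated by degree-16 rationals over the spectral window [1e-5, 32] for both the MD force and the action/heatbath evaluation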
Action2f action_2f(DenOp_2f, NumOp_2f, rational_params);
Action1f action_1f(DenOp_1f, NumOp_1f, rational_params);
#else
typedef TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy2f> Action2f;
typedef TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy1f> Action1f;
Action2f action_2f(DenOp_2f, NumOp_2f, CG_2f, CG_2f);
Action1f action_1f(DenOp_1f, NumOp_1f, CG_1f, CG_1f);
#endif
std::cout << "Action refresh" << std::endl;
action_2f.refresh(Umu_2f, eta_2f);
action_1f.refresh(Umu_1f, eta_1f);
std::cout << "Action compute post heatbath" << std::endl;
RealD S_2f = action_2f.S(Umu_2f);
RealD S_1f = action_1f.S(Umu_1f);
std::cout << "Action comparison post heatbath" << std::endl;
std::cout << S_2f << " " << S_1f << " " << S_2f-S_1f << std::endl;
//Change the gauge field between refresh and action eval else the matrix and inverse matrices all cancel and we just get |eta|^2
SU<Nc>::HotConfiguration(RNG4_2f,Umu_2f);
copy2fTo1fGaugeField(Umu_1f, Umu_2f, mu);
//Now compute the action with the new gauge field
std::cout << "Action compute post gauge field update" << std::endl;
S_2f = action_2f.S(Umu_2f);
S_1f = action_1f.S(Umu_1f);
std::cout << "Action comparison post gauge field update" << std::endl;
std::cout << S_2f << " " << S_1f << " " << S_2f-S_1f << std::endl;
Grid_finalize();
} // main

View File

@ -43,7 +43,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef DomainWallFermionR FermionAction;
typedef DomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
@ -136,16 +136,9 @@ int main(int argc, char **argv) {
TheHMC.ReadCommandLine(argc, argv); // these can be parameters from file
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
TheHMC.Run(); // no smearing
// TheHMC.Run(SmearingPolicy); // for smearing
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
@ -132,15 +132,9 @@ int main(int argc, char **argv) {
TheHMC.ReadCommandLine(argc, argv); // these can be parameters from file
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
TheHMC.Run(); // no smearing
// TheHMC.Run(SmearingPolicy); // for smearing
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();

View File

@ -83,7 +83,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Serialiser
typedef Grid::XmlReader Serialiser;
@ -211,8 +211,6 @@ int main(int argc, char **argv) {
*/
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
if (ApplySmearing){
SmearingParameters SmPar(Reader);
@ -225,11 +223,6 @@ int main(int argc, char **argv) {
TheHMC.Run(); // no smearing
}
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main

View File

@ -89,7 +89,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
// Serialiser
typedef Grid::XmlReader Serialiser;
@ -226,8 +226,6 @@ int main(int argc, char **argv) {
*/
// Reset performance counters
NumOp.ZeroCounters();
DenOp.ZeroCounters();
if (ApplySmearing){
SmearingParameters SmPar(Reader);
@ -240,10 +238,6 @@ int main(int argc, char **argv) {
TheHMC.Run(); // no smearing
}
std::cout << GridLogMessage << "Numerator report, Pauli-Villars term : " << std::endl;
NumOp.Report();
std::cout << GridLogMessage << "Denominator report, Dw(m) term (includes CG) : " << std::endl;
DenOp.Report();
Grid_finalize();
} // main

View File

@ -39,7 +39,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -40,7 +40,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -41,7 +41,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
@ -58,7 +58,7 @@ int main(int argc, char **argv) {
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix = "ckpoint_EODWF_rng";
CPparams.saveInterval = 5;
CPparams.saveInterval = 1;
CPparams.format = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
@ -79,7 +79,7 @@ int main(int argc, char **argv) {
// that have a complex construction
// standard
RealD beta = 2.6 ;
const int nu = 3;
const int nu = 1;
std::vector<int> twists(Nd,0);
twists[nu] = 1;
ConjugateGimplD::setDirections(twists);

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
typedef ConjugateHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef GparityWilsonImplR FermionImplPolicy;
typedef GparityDomainWallFermionR FermionAction;
typedef GparityDomainWallFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -39,7 +39,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef WilsonImplR FermionImplPolicy;
typedef MobiusFermionR FermionAction;
typedef MobiusFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
@ -148,14 +148,14 @@ int main(int argc, char **argv) {
// Level1.push_back(&StrangePseudoFermion);
// DJM: setup for EOFA ratio (Shamir)
// DomainWallEOFAFermionR Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5);
// DomainWallEOFAFermionR Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5);
// DomainWallEOFAFermionD Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5);
// DomainWallEOFAFermionD Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5);
// ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> EOFA(Strange_Op_L, Strange_Op_R, CG, OFRp, true);
// Level1.push_back(&EOFA);
// DJM: setup for EOFA ratio (Mobius)
MobiusEOFAFermionR Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionR Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
MobiusEOFAFermionD Strange_Op_L(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, strange_mass, strange_mass, pv_mass, 0.0, -1, M5, b, c);
MobiusEOFAFermionD Strange_Op_R(U, *FGrid, *FrbGrid, *GridPtr, *GridRBPtr, pv_mass, strange_mass, pv_mass, -1.0, 1, M5, b, c);
ExactOneFlavourRatioPseudoFermionAction<FermionImplPolicy> EOFA(Strange_Op_L, Strange_Op_R, CG, OFRp, true);
Level1.push_back(&EOFA);

View File

@ -34,7 +34,7 @@ class ScalarActionParameters : Serializable {
double, lambda,
double, g);
ScalarActionParameters() = default;
ScalarActionParameters() {};
template <class ReaderClass >
ScalarActionParameters(Reader<ReaderClass>& Reader){
@ -45,7 +45,6 @@ class ScalarActionParameters : Serializable {
}
using namespace Grid;
;
template <class Impl>
class MagMeas : public HmcObservable<typename Impl::Field> {
@ -132,8 +131,8 @@ int main(int argc, char **argv) {
// Checkpointer definition
CheckpointerParameters CPparams(Reader);
//TheHMC.Resources.LoadBinaryCheckpointer(CPparams);
TheHMC.Resources.LoadScidacCheckpointer(CPparams, SPar);
TheHMC.Resources.LoadBinaryCheckpointer(CPparams);
//TheHMC.Resources.LoadScidacCheckpointer(CPparams, SPar); this breaks for compilation without lime
RNGModuleParameters RNGpar(Reader);
TheHMC.Resources.SetRNGSeeds(RNGpar);

View File

@ -28,7 +28,7 @@ directory
/* END LEGAL */
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -80,7 +80,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonTwoIndexAntiSymmetricImplR FermionImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonCloverTwoIndexAntiSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverTwoIndexAntiSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//typedef Grid::JSONReader Serialiser;
typedef Grid::XmlReader Serialiser;
@ -210,4 +210,6 @@ int main(int argc, char **argv)
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -29,6 +29,7 @@ directory
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -81,7 +82,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonTwoIndexSymmetricImplR FermionImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonCloverTwoIndexSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverTwoIndexSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//typedef Grid::JSONReader Serialiser;
typedef Grid::XmlReader Serialiser;
@ -211,3 +212,6 @@ int main(int argc, char **argv)
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -79,7 +79,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;

View File

@ -32,6 +32,7 @@ directory
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -84,11 +85,11 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonImplR FundImplPolicy;
typedef WilsonCloverFermionR FundFermionAction;
typedef WilsonCloverFermionD FundFermionAction;
typedef typename FundFermionAction::FermionField FundFermionField;
typedef WilsonTwoIndexAntiSymmetricImplR ASymmImplPolicy;
typedef WilsonCloverTwoIndexAntiSymmetricFermionR ASymmFermionAction;
typedef WilsonCloverTwoIndexAntiSymmetricFermionD ASymmFermionAction;
typedef typename ASymmFermionAction::FermionField ASymmFermionField;
typedef Grid::XmlReader Serialiser;
@ -222,3 +223,6 @@ int main(int argc, char **argv) {
Grid_finalize();
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -29,6 +29,7 @@ directory
#include <Grid/Grid.h>
#ifdef ENABLE_FERMION_REPS
namespace Grid{
struct FermionParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(FermionParameters,
@ -81,7 +82,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonAdjImplR FermionImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonCloverAdjFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonCloverAdjFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
typedef Grid::XmlReader Serialiser;
@ -211,3 +212,6 @@ int main(int argc, char **argv)
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -74,10 +74,10 @@ int main(int argc, char **argv) {
// Checkpointer definition
CheckpointerParameters CPparams(Reader);
//TheHMC.Resources.LoadNerscCheckpointer(CPparams);
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
// Store metadata in the Scidac checkpointer
TheHMC.Resources.LoadScidacCheckpointer(CPparams, WilsonPar);
// Store metadata in the Scidac checkpointer - obviously breaks without LIME
//TheHMC.Resources.LoadScidacCheckpointer(CPparams, WilsonPar);
RNGModuleParameters RNGpar(Reader);
TheHMC.Resources.SetRNGSeeds(RNGpar);

View File

@ -31,9 +31,10 @@ directory
/* END LEGAL */
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
using namespace Grid;
;
// Here change the allowed (higher) representations
typedef Representations< FundamentalRepresentation, AdjointRepresentation > TheRepresentations;
@ -46,7 +47,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonAdjImplR FermionImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonAdjFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonAdjFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@ -127,3 +128,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -41,7 +41,7 @@ int main(int argc, char **argv)
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonCloverFermionR FermionAction;
typedef WilsonCloverFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -69,8 +69,10 @@ int main(int argc, char **argv)
TopologyObsParameters TopParams;
TopParams.interval = 5;
TopParams.do_smearing = true;
TopParams.Smearing.steps = 200;
TopParams.Smearing.step_size = 0.01;
TopParams.Smearing.init_step_size = 0.01;
TopParams.Smearing.tolerance = 1e-5;
// TopParams.Smearing.steps = 200;
// TopParams.Smearing.step_size = 0.01;
TopParams.Smearing.meas_interval = 50;
TopParams.Smearing.maxTau = 2.0;
TheHMC.Resources.AddObservable<QObs>(TopParams);

View File

@ -33,6 +33,7 @@ directory
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
#ifndef GRID_CUDA
@ -51,9 +52,9 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonAdjImplR AdjImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonAdjFermionR AdjFermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonAdjFermionD AdjFermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonTwoIndexSymmetricImplR SymmImplPolicy;
typedef WilsonTwoIndexSymmetricFermionR SymmFermionAction;
typedef WilsonTwoIndexSymmetricFermionD SymmFermionAction;
typedef typename AdjFermionAction::FermionField AdjFermionField;
@ -138,3 +139,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -41,7 +41,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonTMFermionR FermionAction;
typedef WilsonTMFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -29,6 +29,7 @@ directory
/* END LEGAL */
#include "Grid/Grid.h"
#ifdef ENABLE_FERMION_REPS
int main(int argc, char **argv) {
using namespace Grid;
;
@ -45,7 +46,7 @@ int main(int argc, char **argv) {
typedef GenericHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;
typedef WilsonTwoIndexSymmetricImplR FermionImplPolicy; // gauge field implementation for the pseudofermions
typedef WilsonTwoIndexSymmetricFermionR FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef WilsonTwoIndexSymmetricFermionD FermionAction; // type of lattice fermions (Wilson, DW, ...)
typedef typename FermionAction::FermionField FermionField;
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
@ -127,3 +128,6 @@ int main(int argc, char **argv) {
} // main
#else
int main(int argc, char **argv){}
#endif

View File

@ -41,7 +41,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

View File

@ -42,7 +42,7 @@ int main(int argc, char **argv) {
// Typedefs to simplify notation
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper; // Uses the default minimum norm
typedef WilsonImplR FermionImplPolicy;
typedef WilsonFermionR FermionAction;
typedef WilsonFermionD FermionAction;
typedef typename FermionAction::FermionField FermionField;

Some files were not shown because too many files have changed in this diff.