Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-14 13:57:07 +01:00)
Imported changes from feature/gparity_HMC branch:

Rework of the WilsonFlow class:
- Fixed a logic error in the smear method, where the step index was initialized to 1 rather than 0, resulting in the logged value of tau being too large by epsilon.
- Previously smear_adaptive maintained the current value of tau as a class member variable, whereas smear computed it separately; now both methods maintain the current value internally, updated by the evolve_step routines, and both evolve methods are now const.
- smear_adaptive now also maintains the current value of epsilon internally, allowing it to be a const method and allowing the same class instance to be reused without needing to be reset.
- Replaced the fixed evaluation of the plaquette energy density and plaquette topological charge during smearing with a flexible general strategy whereby the user can add arbitrary measurements as functional objects, evaluated at an arbitrary frequency. By default the same plaquette-based measurements are performed; additional example functions are provided in which the smearing is performed with different choices of measurement, returned as an array for further processing (a hedged sketch follows below).
- Added a method to compute the energy density using the Cloverleaf approach, which has smaller discretization errors.

Other changes:
- Added a new tensor utility operation, copyLane, which copies a single SIMD lane between two instances of the same tensor type but potentially different precisions.
- To LocalCoherenceLanczos, added the option to compute the high/low eigenvalue of the fine operator on every restart, to aid in tuning the Chebyshev.
- Added Test_field_array_io, which demonstrates and tests a single-file write of an arbitrary array of fields.
- Added Test_evec_compression, which generates evecs using Lanczos and attempts to compress them using the local coherence technique.
- Added Test_compressed_lanczos_gparity, which demonstrates the local coherence Lanczos for G-parity BCs.
- Added HMC main programs for the 40ID and 48ID G-parity lattices.
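As a sketch of the measurement hook described above — the member name addMeasurement, its callback signature, the constructor argument order, and the energyDensityCloverleaf helper are all our reading of the commit description, not confirmed by this diff:

//Illustrative sketch only: names and signatures inferred from the commit
//description above, not shown in this diff.
WilsonFlow<PeriodicGimplR> wflow(/*Nstep (assumed order)*/ 200, /*epsilon*/ 0.01);
wflow.addMeasurement(5 /*every 5 steps*/, [](int step, RealD t, const LatticeGaugeField &U){
  std::cout << "Cloverleaf energy density at tau=" << t << " : "
            << WilsonFlow<PeriodicGimplR>::energyDensityCloverleaf(t, U) << std::endl;
});
wflow.smear(V, U); //V receives the flowed field; per the fix above, tau now starts from step index 0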
tests/IO/Test_field_array_io.cc (new file, 184 lines)
@@ -0,0 +1,184 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/IO/Test_field_array_io.cc

    Copyright (C) 2015

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

//This test demonstrates and checks a single-file write of an arbitrary array of fields
uint64_t writeHeader(const uint32_t size, const uint32_t checksum, const std::string &format, const std::string &file){
  //Fixed-width fields so the header can be overwritten in-place on the second pass.
  //Note: opening with in|out (no trunc) requires the file to already exist.
  std::ofstream fout(file,std::ios::out|std::ios::in);
  fout.seekp(0,std::ios::beg);
  fout << std::setw(10) << size << std::endl;
  fout << std::hex << std::setw(10) << checksum << std::endl;
  fout << format << std::endl;
  return fout.tellp();
}

uint64_t readHeader(uint32_t &size, uint32_t &checksum, std::string &format, const std::string &file){
  std::ifstream fin(file);
  std::string line;
  getline(fin,line);
  {
    std::stringstream ss; ss << line; ss >> size;
  }
  getline(fin,line);
  {
    std::stringstream ss; ss << line; ss >> std::hex >> checksum;
  }
  getline(fin,format);
  removeWhitespace(format);

  return fin.tellg();
}
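For reference, the pair of functions above produce and parse a three-line plain-text header ahead of the binary payload, along these lines (values illustrative; the format string is whatever getFormatString returns for the vector object, e.g. an IEEE precision/endianness tag):

        20
  1a2b3c4d
IEEE64BIG

The binary field data then begins at the offset returned by tellp()/tellg().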
template<typename FieldType>
void writeFieldArray(const std::string &file, const std::vector<FieldType> &data){
  typedef typename FieldType::vector_object vobj;
  typedef typename FieldType::scalar_object sobj;
  GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
  BinarySimpleMunger<sobj, sobj> munge; //straight copy

  //We need a 2-pass header write: the first pass establishes the size, the second pass writes the checksum
  std::string format = getFormatString<typename FieldType::vector_object>();

  uint64_t offset; //data offset; set by the boss rank when it writes the placeholder header
  if ( grid->IsBoss() ) {
    NerscIO::truncate(file);
    offset = writeHeader(data.size(), 0, format, file);
  }
  grid->Broadcast(0,(void *)&offset,sizeof(offset)); //use as a barrier

  std::cout << "Data offset write " << offset << std::endl;
  std::cout << "Data size write " << data.size() << std::endl;
  uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);
  std::cout << "Field size = " << field_size << " B" << std::endl;

  uint32_t checksum = 0;
  for(int i=0;i<data.size();i++){
    std::cout << "Data field write " << i << " offset " << offset << std::endl;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    BinaryIO::writeLatticeObject<vobj,sobj>(const_cast<FieldType &>(data[i]),file,munge,offset,format,
					    nersc_csum,scidac_csuma,scidac_csumb);
    offset += field_size;
    checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2); //hash_combine-style, order-sensitive mixing of the per-field checksums
  }
  std::cout << "Write checksum " << checksum << std::endl;

  if ( grid->IsBoss() ) {
    writeHeader(data.size(), checksum, format, file); //second pass: fill in the true checksum
  }
}
template<typename FieldType>
void readFieldArray(std::vector<FieldType> &data, const std::string &file){
  typedef typename FieldType::vector_object vobj;
  typedef typename FieldType::scalar_object sobj;
  assert(data.size() > 0);
  GridBase* grid = data[0].Grid(); //assume all fields have the same Grid
  BinarySimpleUnmunger<sobj, sobj> munge; //straight copy

  uint32_t hdr_checksum, hdr_size;
  std::string format;
  uint64_t offset = readHeader(hdr_size, hdr_checksum, format, file);

  std::cout << "Data offset read " << offset << std::endl;
  std::cout << "Data size read " << hdr_size << std::endl;
  assert(data.size() == hdr_size);

  uint64_t field_size = uint64_t(grid->gSites()) * sizeof(sobj);

  uint32_t checksum = 0;

  for(int i=0;i<data.size();i++){
    std::cout << "Data field read " << i << " offset " << offset << std::endl;
    uint32_t nersc_csum,scidac_csuma,scidac_csumb;
    BinaryIO::readLatticeObject<vobj,sobj>(data[i],file,munge,offset,format,
					   nersc_csum,scidac_csuma,scidac_csumb);
    offset += field_size;
    checksum ^= nersc_csum + 0x9e3779b9 + (checksum<<6) + (checksum>>2);
  }

  std::cout << "Header checksum " << hdr_checksum << std::endl;
  std::cout << "Read checksum " << checksum << std::endl;

  assert( hdr_checksum == checksum );
}
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  Coordinate latt = GridDefaultLatt();
  Coordinate simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
  Coordinate mpi_layout  = GridDefaultMpi();

  const int Ls=8;

  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(latt, simd_layout, mpi_layout);
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  GridParallelRNG RNG5(FGrid);  RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);  RNG4.SeedFixedIntegers(seeds4);

  typedef DomainWallFermionD::FermionField FermionField;

  int nfield = 20;
  std::vector<FermionField> data(nfield, FGrid);

  for(int i=0;i<data.size();i++)
    gaussian(RNG5, data[i]);

  std::string file = "test_field_array_io.0";
  writeFieldArray(file, data);

  std::vector<FermionField> data_r(nfield, FGrid);
  readFieldArray(data_r, file);

  for(int i=0;i<nfield;i++){
    FermionField diff = data_r[i] - data[i];
    RealD norm_diff = norm2(diff);
    std::cout << "Norm2 of difference between stored and loaded data index " << i << " : " << norm_diff << std::endl;
  }

  std::cout << "Done" << std::endl;

  Grid_finalize();
}
tests/lanczos/Test_compressed_lanczos_gparity.cc (new file, 485 lines)
@@ -0,0 +1,485 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_compressed_lanczos_gparity.cc

    Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Leans heavily on Christoph Lehner's code
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
/*
 * Reimplement the badly named "multigrid" lanczos as compressed Lanczos, using the features
 * in Grid that were intended to support blocked Aggregates.
 */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>

using namespace std;
using namespace Grid;
//For the CPS configurations we have to manually seed the RNG and deal with an incorrect factor of 2 in the plaquette metadata
void readConfiguration(LatticeGaugeFieldD &U,
		       const std::string &config,
		       bool is_cps_cfg = false){

  if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = false;

  typedef GaugeStatistics<ConjugateGimplD> GaugeStats;

  FieldMetaData header;
  NerscIO::readConfiguration<GaugeStats>(U, header, config);

  if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = true;
}

//Lanczos parameters in CPS conventions
struct CPSLanczosParams : Serializable {
public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(CPSLanczosParams,
				  RealD, alpha,
				  RealD, beta,
				  int, ch_ord,
				  int, N_use,
				  int, N_get,
				  int, N_true_get,
				  RealD, stop_rsd,
				  int, maxits);

  //Translations
  ChebyParams getChebyParams() const{
    ChebyParams out;
    out.alpha = beta*beta; //aka lo
    out.beta = alpha*alpha; //aka hi
    out.Npoly = ch_ord+1;
    return out;
  }
  int Nstop() const{ return N_true_get; }
  int Nm() const{ return N_use; }
  int Nk() const{ return N_get; }
};
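Spelled out, the translation above maps the CPS-convention window onto Grid's Chebyshev bounds as

\[
\mathrm{lo} = \beta_{\mathrm{CPS}}^{2}, \qquad
\mathrm{hi} = \alpha_{\mathrm{CPS}}^{2}, \qquad
N_{\mathrm{poly}} = \mathtt{ch\_ord} + 1,
\]

with the counts mapping as \(N_{\mathrm{stop}} = \) N_true_get, \(N_k = \) N_get, \(N_m = \) N_use. The squaring presumably reflects that the Chebyshev filter is applied to the squared (normal) operator, so a window \([\beta,\alpha]\) on the Hermitian operator becomes \([\beta^2,\alpha^2]\); this is our inference from the code above, not stated in the source.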
//Maybe this class should be in the main library?
template<class Fobj,class CComplex,int nbasis>
class LocalCoherenceLanczosScidac : public LocalCoherenceLanczos<Fobj,CComplex,nbasis>
{
public:
  typedef iVector<CComplex,nbasis > CoarseSiteVector;
  typedef Lattice<CoarseSiteVector> CoarseField;
  typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
  typedef Lattice<Fobj> FineField;

  LocalCoherenceLanczosScidac(GridBase *FineGrid,GridBase *CoarseGrid,
			      LinearOperatorBase<FineField> &FineOp,
			      int checkerboard)
    // Base constructor
    : LocalCoherenceLanczos<Fobj,CComplex,nbasis>(FineGrid,CoarseGrid,FineOp,checkerboard)
  {};

  void checkpointFine(std::string evecs_file,std::string evals_file)
  {
    assert(this->subspace.size()==nbasis);
    emptyUserRecord record;
    Grid::ScidacWriter WR(this->_FineGrid->IsBoss());
    WR.open(evecs_file);
    for(int k=0;k<nbasis;k++) {
      WR.writeScidacFieldRecord(this->subspace[k],record);
    }
    WR.close();

    XmlWriter WRx(evals_file);
    write(WRx,"evals",this->evals_fine);
  }

  void checkpointFineRestore(std::string evecs_file,std::string evals_file)
  {
    this->evals_fine.resize(nbasis);
    this->subspace.resize(nbasis,this->_FineGrid);

    std::cout << GridLogIRL<< "checkpointFineRestore: Reading evals from "<<evals_file<<std::endl;
    XmlReader RDx(evals_file);
    read(RDx,"evals",this->evals_fine);

    if(this->evals_fine.size() < nbasis) assert(0 && "Not enough fine evals to complete basis");
    if(this->evals_fine.size() > nbasis){ //allow the use of precomputed evecs with a larger #evecs
      std::cout << GridLogMessage << "Truncating " << this->evals_fine.size() << " evals to basis size " << nbasis << std::endl;
      this->evals_fine.resize(nbasis);
    }

    std::cout << GridLogIRL<< "checkpointFineRestore: Reading evecs from "<<evecs_file<<std::endl;
    emptyUserRecord record;
    Grid::ScidacReader RD;
    RD.open(evecs_file);
    for(int k=0;k<nbasis;k++) {
      this->subspace[k].Checkerboard()=this->_checkerboard;
      RD.readScidacFieldRecord(this->subspace[k],record);
    }
    RD.close();
  }

  void checkpointCoarse(std::string evecs_file,std::string evals_file)
  {
    int n = this->evec_coarse.size();
    emptyUserRecord record;
    Grid::ScidacWriter WR(this->_CoarseGrid->IsBoss());
    WR.open(evecs_file);
    for(int k=0;k<n;k++) {
      WR.writeScidacFieldRecord(this->evec_coarse[k],record);
    }
    WR.close();

    XmlWriter WRx(evals_file);
    write(WRx,"evals",this->evals_coarse);
  }

  void checkpointCoarseRestore(std::string evecs_file,std::string evals_file,int nvec)
  {
    std::cout << "resizing coarse vecs to " << nvec<< std::endl;
    this->evals_coarse.resize(nvec);
    this->evec_coarse.resize(nvec,this->_CoarseGrid);
    std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evals from "<<evals_file<<std::endl;
    XmlReader RDx(evals_file);
    read(RDx,"evals",this->evals_coarse);

    assert(this->evals_coarse.size()==nvec);
    emptyUserRecord record;
    std::cout << GridLogIRL<< "checkpointCoarseRestore: Reading evecs from "<<evecs_file<<std::endl;
    Grid::ScidacReader RD;
    RD.open(evecs_file);
    for(int k=0;k<nvec;k++) {
      RD.readScidacFieldRecord(this->evec_coarse[k],record);
    }
    RD.close();
  }
};
struct Options{
  std::vector<int> blockSize;
  std::vector<int> GparityDirs;
  int Ls;
  RealD mass;
  RealD M5;
  RealD mobius_scale;
  std::string config;
  bool is_cps_cfg;

  double coarse_relax_tol;
  int smoother_ord;

  CPSLanczosParams fine;
  CPSLanczosParams coarse;

  bool write_fine = false;
  std::string write_fine_file;

  bool read_fine = false;
  std::string read_fine_file;

  bool write_coarse = false;
  std::string write_coarse_file;

  bool read_coarse = false;
  std::string read_coarse_file;

  Options(){
    blockSize = std::vector<int> ({2,2,2,2,2});
    GparityDirs = std::vector<int> ({1,1,1}); //1 for each GP direction

    Ls = 12;
    mass = 0.01;
    M5 = 1.8;
    is_cps_cfg = false;
    mobius_scale = 2.0;

    fine.alpha = 2;
    fine.beta = 0.1;
    fine.ch_ord = 100;
    fine.N_use = 70;
    fine.N_get = 60;
    fine.N_true_get = 60;
    fine.stop_rsd = 1e-8;
    fine.maxits = 10000;

    coarse.alpha = 2;
    coarse.beta = 0.1;
    coarse.ch_ord = 100;
    coarse.N_use = 200;
    coarse.N_get = 190;
    coarse.N_true_get = 190;
    coarse.stop_rsd = 1e-8;
    coarse.maxits = 10000;

    coarse_relax_tol = 1e5;
    smoother_ord = 20;

    write_fine = false;
    read_fine = false;
    write_coarse = false;
    read_coarse = false;
  }
};
template<int nbasis>
void runTest(const Options &opt){
  //Fine grids
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(opt.Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(opt.Ls,UGrid);

  //Setup G-parity BCs
  assert(Nd == 4);
  std::vector<int> dirs4(4);
  for(int i=0;i<3;i++) dirs4[i] = opt.GparityDirs[i];
  dirs4[3] = 0; //periodic gauge BC in time

  std::cout << GridLogMessage << "Gauge BCs: " << dirs4 << std::endl;
  ConjugateGimplD::setDirections(dirs4); //gauge BC

  GparityWilsonImplD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = opt.GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction
  std::cout << GridLogMessage << "Fermion BCs: " << Params.twists << std::endl;

  //Read the gauge field
  LatticeGaugeField Umu(UGrid);
  readConfiguration(Umu, opt.config, opt.is_cps_cfg);

  //Setup the coarse grids
  auto fineLatt = GridDefaultLatt();
  Coordinate coarseLatt(4);
  for (int d=0;d<4;d++){
    coarseLatt[d] = fineLatt[d]/opt.blockSize[d];    assert(coarseLatt[d]*opt.blockSize[d]==fineLatt[d]);
  }

  std::cout << GridLogMessage<< " 5d coarse lattice is ";
  for (int i=0;i<4;i++){
    std::cout << coarseLatt[i]<<"x";
  }
  int cLs = opt.Ls/opt.blockSize[4]; assert(cLs*opt.blockSize[4]==opt.Ls);
  std::cout << cLs<<std::endl;

  GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * CoarseGrid4rb = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseGrid4);
  GridCartesian * CoarseGrid5 = SpaceTimeGrid::makeFiveDimGrid(cLs,CoarseGrid4);

  //Dirac operator
  double bmc = 1.;
  double b = (opt.mobius_scale + bmc)/2.;  // b = 1/2 [ (b+c) + (b-c) ]
  double c = (opt.mobius_scale - bmc)/2.;  // c = 1/2 [ (b+c) - (b-c) ]
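  // In other words (assuming the conventions implied by the two lines above): with the
  // Mobius scale s = b+c taken as input and the kernel difference b-c fixed to 1,
  //   b = (s+1)/2,   c = (s-1)/2;
  // e.g. the default s=2 gives b=1.5, c=0.5, and s=1 reduces to plain DWF (b=1, c=0).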
  GparityMobiusFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, opt.mass, opt.M5, b,c,Params);
  typedef GparityMobiusFermionD::FermionField FermionField;

  SchurDiagTwoOperator<GparityMobiusFermionD, FermionField> SchurOp(action);

  typedef GparityWilsonImplD::SiteSpinor SiteSpinor;

  const CPSLanczosParams &fine = opt.fine;
  const CPSLanczosParams &coarse = opt.coarse;

  std::cout << GridLogMessage << "Keep " << fine.N_true_get << " fine vectors" << std::endl;
  std::cout << GridLogMessage << "Keep " << coarse.N_true_get << " coarse vectors" << std::endl;
  assert(coarse.N_true_get >= fine.N_true_get);

  assert(nbasis<=fine.N_true_get);
  LocalCoherenceLanczosScidac<SiteSpinor,vTComplex,nbasis> _LocalCoherenceLanczos(FrbGrid,CoarseGrid5,SchurOp,Odd);
  std::cout << GridLogMessage << "Constructed LocalCoherenceLanczos" << std::endl;

  //Compute and/or read fine evecs
  if(opt.read_fine){
    _LocalCoherenceLanczos.checkpointFineRestore(opt.read_fine_file + "_evecs.scidac", opt.read_fine_file + "_evals.xml");
  }else{
    std::cout << GridLogMessage << "Performing fine grid IRL" << std::endl;
    std::cout << GridLogMessage << "Using Chebyshev alpha=" << fine.alpha << " beta=" << fine.beta << " ord=" << fine.ch_ord << std::endl;
    _LocalCoherenceLanczos.calcFine(fine.getChebyParams(),
				    fine.Nstop(),fine.Nk(),fine.Nm(),
				    fine.stop_rsd,fine.maxits,0,0);
    if(opt.write_fine){
      std::cout << GridLogIRL<<"Checkpointing Fine evecs"<<std::endl;
      _LocalCoherenceLanczos.checkpointFine(opt.write_fine_file + "_evecs.scidac", opt.write_fine_file + "_evals.xml");
    }
  }

  //Block orthonormalise (this should be part of calcFine?)
  std::cout << GridLogIRL<<"Orthogonalising"<<std::endl;
  _LocalCoherenceLanczos.Orthogonalise();
  std::cout << GridLogIRL<<"Orthogonalised"<<std::endl;

  ChebyParams smoother = fine.getChebyParams();
  smoother.Npoly = opt.smoother_ord+1;

  if(opt.read_coarse){
    _LocalCoherenceLanczos.checkpointCoarseRestore(opt.read_coarse_file + "_evecs.scidac", opt.read_coarse_file + "_evals.xml",coarse.Nstop());
  }else{
    std::cout << GridLogMessage << "Performing coarse grid IRL" << std::endl;
    std::cout << GridLogMessage << "Using Chebyshev alpha=" << coarse.alpha << " beta=" << coarse.beta << " ord=" << coarse.ch_ord << std::endl;
    _LocalCoherenceLanczos.calcCoarse(coarse.getChebyParams(), smoother, opt.coarse_relax_tol,
				      coarse.Nstop(), coarse.Nk(), coarse.Nm(),
				      coarse.stop_rsd, coarse.maxits,
				      0,0);

    if(opt.write_coarse){
      std::cout << GridLogIRL<<"Checkpointing Coarse evecs"<<std::endl;
      _LocalCoherenceLanczos.checkpointCoarse(opt.write_coarse_file + "_evecs.scidac", opt.write_coarse_file + "_evals.xml");
    }
  }

  //Test the eigenvectors
  //To remove high-frequency noise we apply Chebyshev smoothing
  Chebyshev<FermionField> cheb_smoother(smoother);

  FermionField evec(FrbGrid);
  FermionField evec_sm(FrbGrid); //smoothed
  FermionField tmp(FrbGrid);
  RealD eval;

  for(int i=0;i<coarse.N_true_get;i++){
    _LocalCoherenceLanczos.getFineEvecEval(evec, eval, i);

    //Check the unsmoothed evec
    SchurOp.HermOp(evec, tmp);
    tmp = tmp - eval*evec;
    RealD norm_unsmoothed = sqrt(norm2(tmp));

    //Check the smoothed evec
    cheb_smoother(SchurOp, evec, evec_sm);
    SchurOp.HermOp(evec_sm, tmp);
    tmp = tmp - eval*evec_sm;
    RealD norm_smoothed = sqrt(norm2(tmp));

    std::cout << GridLogMessage << "Eval " << eval << " unsmoothed resid " << norm_unsmoothed << " smoothed resid " << norm_smoothed << std::endl;
  }
}
//Note: because we rely upon physical properties we must use a "real" gauge configuration
int main (int argc, char ** argv) {
  Grid_init(&argc,&argv);
  GridLogIRL.TimingMode(1);

  Options opt;
  int basis_size = 100;

  if(argc < 3){
    std::cout << GridLogMessage << "Usage: <exe> <config> <gparity dirs> <options>" << std::endl;
    std::cout << GridLogMessage << "<gparity dirs> should have the format a.b.c where a,b,c are 0,1 depending on whether there are G-parity BCs in that direction" << std::endl;
    std::cout << GridLogMessage << "Options:" << std::endl;
    std::cout << GridLogMessage << "--Ls <value> : Set Ls (default 12)" << std::endl;
    std::cout << GridLogMessage << "--mass <value> : Set the mass (default 0.01)" << std::endl;
    std::cout << GridLogMessage << "--block <value> : Set the block size. Format should be a.b.c.d.e where a-e are the block extents (default 2.2.2.2.2)" << std::endl;
    std::cout << GridLogMessage << "--is_cps_cfg : Indicate that the configuration was generated with CPS where until recently the stored plaquette was wrong by a factor of 2" << std::endl;
    std::cout << GridLogMessage << "--write_irl_templ: Write a template for the parameters file of the Lanczos to \"irl_templ.xml\"" << std::endl;
    std::cout << GridLogMessage << "--read_irl_fine <filename>: Read the parameters file for the fine Lanczos" << std::endl;
    std::cout << GridLogMessage << "--read_irl_coarse <filename>: Read the parameters file for the coarse Lanczos" << std::endl;
    std::cout << GridLogMessage << "--write_fine <filename stub>: Write fine evecs/evals to filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--read_fine <filename stub>: Read fine evecs/evals from filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--write_coarse <filename stub>: Write coarse evecs/evals to filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--read_coarse <filename stub>: Read coarse evecs/evals from filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--smoother_ord : Set the Chebyshev order of the smoother (default 20)" << std::endl;
    std::cout << GridLogMessage << "--coarse_relax_tol : Set the relaxation parameter for evaluating the residual of the reconstructed eigenvectors outside of the basis (default 1e5)" << std::endl;
    std::cout << GridLogMessage << "--mobius_scale : Set the Mobius scale b+c (default 2)" << std::endl;
    std::cout << GridLogMessage << "--basis_size : Select the basis size from 100,200,300,350 (default 100)" << std::endl;
    Grid_finalize();
    return 1;
  }
  opt.config = argv[1];
  GridCmdOptionIntVector(argv[2], opt.GparityDirs);
  assert(opt.GparityDirs.size() == 3);

  for(int i=3;i<argc;i++){
    std::string sarg = argv[i];
    if(sarg == "--Ls"){
      opt.Ls = std::stoi(argv[i+1]);
      std::cout << GridLogMessage << "Set Ls to " << opt.Ls << std::endl;
    }else if(sarg == "--mass"){
      std::istringstream ss(argv[i+1]); ss >> opt.mass;
      std::cout << GridLogMessage << "Set quark mass to " << opt.mass << std::endl;
    }else if(sarg == "--block"){
      GridCmdOptionIntVector(argv[i+1], opt.blockSize);
      assert(opt.blockSize.size() == 5);
      std::cout << GridLogMessage << "Set block size to ";
      for(int q=0;q<5;q++) std::cout << opt.blockSize[q] << " ";
      std::cout << std::endl;
    }else if(sarg == "--is_cps_cfg"){
      opt.is_cps_cfg = true;
    }else if(sarg == "--write_irl_templ"){
      XmlWriter writer("irl_templ.xml");
      write(writer,"Params", opt.fine);
      Grid_finalize();
      return 0;
    }else if(sarg == "--read_irl_fine"){
      std::cout << GridLogMessage << "Reading fine IRL params from " << argv[i+1] << std::endl;
      XmlReader reader(argv[i+1]);
      read(reader, "Params", opt.fine);
    }else if(sarg == "--read_irl_coarse"){
      std::cout << GridLogMessage << "Reading coarse IRL params from " << argv[i+1] << std::endl;
      XmlReader reader(argv[i+1]);
      read(reader, "Params", opt.coarse);
    }else if(sarg == "--write_fine"){
      opt.write_fine = true;
      opt.write_fine_file = argv[i+1];
    }else if(sarg == "--read_fine"){
      opt.read_fine = true;
      opt.read_fine_file = argv[i+1];
    }else if(sarg == "--write_coarse"){
      opt.write_coarse = true;
      opt.write_coarse_file = argv[i+1];
    }else if(sarg == "--read_coarse"){
      opt.read_coarse = true;
      opt.read_coarse_file = argv[i+1];
    }else if(sarg == "--smoother_ord"){
      std::istringstream ss(argv[i+1]); ss >> opt.smoother_ord;
      std::cout << GridLogMessage << "Set smoother order to " << opt.smoother_ord << std::endl;
    }else if(sarg == "--coarse_relax_tol"){
      std::istringstream ss(argv[i+1]); ss >> opt.coarse_relax_tol;
      std::cout << GridLogMessage << "Set coarse IRL relaxation parameter to " << opt.coarse_relax_tol << std::endl;
    }else if(sarg == "--mobius_scale"){
      std::istringstream ss(argv[i+1]); ss >> opt.mobius_scale;
      std::cout << GridLogMessage << "Set Mobius scale to " << opt.mobius_scale << std::endl;
    }else if(sarg == "--basis_size"){
      basis_size = std::stoi(argv[i+1]);
      std::cout << GridLogMessage << "Set basis size to " << basis_size << std::endl;
    }
  }

  switch(basis_size){
  case 100:
    runTest<100>(opt); break;
  case 200:
    runTest<200>(opt); break;
  case 300:
    runTest<300>(opt); break;
  case 350:
    runTest<350>(opt); break;
  default:
    std::cout << GridLogMessage << "Unsupported basis size " << basis_size << std::endl;
    assert(0);
  }

  Grid_finalize();
}
tests/lanczos/Test_evec_compression.cc (new file, 582 lines)
@@ -0,0 +1,582 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_evec_compression.cc

    Copyright (C) 2017

Author: Christopher Kelly <ckelly@bnl.gov>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */
/*
 * This test generates eigenvectors using the Lanczos algorithm, then attempts to use local coherence compression
 * to express those vectors in terms of a basis formed from a subset. This test is useful for finding the optimal
 * blocking and basis size for performing a Local Coherence Lanczos.
 */
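Schematically (our notation, inferred from the blockProject/blockPromote calls below rather than stated in the source): given the block-orthonormalised basis \(\{\phi_b\}_{b=1}^{N_{\rm basis}}\) built from the first \(N_{\rm basis}\) eigenvectors, each eigenvector is stored as one coarse coefficient field per basis vector,

\[
C_i^{\,b}(x_B) \;=\; \sum_{x \in B(x_B)} \phi_b(x)^\dagger\, v_i(x),
\qquad
v_i(x) \;\approx\; \sum_{b=1}^{N_{\rm basis}} C_i^{\,b}(B(x))\, \phi_b(x),
\]

where \(B(x)\) is the block containing fine site \(x\). Compression succeeds to the extent that the low modes are "locally coherent", i.e. well captured within each block by the span of the basis.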
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>

using namespace std;
using namespace Grid;

//For the CPS configurations we have to manually seed the RNG and deal with an incorrect factor of 2 in the plaquette metadata
template<typename Gimpl>
void readConfiguration(LatticeGaugeFieldD &U,
		       const std::string &config,
		       bool is_cps_cfg = false){

  if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = false;

  typedef GaugeStatistics<Gimpl> GaugeStats;

  FieldMetaData header;
  NerscIO::readConfiguration<GaugeStats>(U, header, config);

  if(is_cps_cfg) NerscIO::exitOnReadPlaquetteMismatch() = true;
}

//Lanczos parameters in CPS conventions
struct CPSLanczosParams : Serializable {
public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(CPSLanczosParams,
				  RealD, alpha,
				  RealD, beta,
				  int, ch_ord,
				  int, N_use,
				  int, N_get,
				  int, N_true_get,
				  RealD, stop_rsd,
				  int, maxits);

  //Translations
  ChebyParams getChebyParams() const{
    ChebyParams out;
    out.alpha = beta*beta; //aka lo
    out.beta = alpha*alpha; //aka hi
    out.Npoly = ch_ord+1;
    return out;
  }
  int Nstop() const{ return N_true_get; }
  int Nm() const{ return N_use; }
  int Nk() const{ return N_get; }
};
template<class Fobj,class CComplex,int nbasis>
class LocalCoherenceCompressor{
public:
  typedef iVector<CComplex,nbasis > CoarseSiteVector;
  typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field
  typedef Lattice<CoarseSiteVector> CoarseField;
  typedef Lattice<Fobj> FineField;

  void compress(std::vector<FineField> &basis,
		std::vector<CoarseField> &compressed_evecs,
		const std::vector<FineField> &evecs_in,
		GridBase *FineGrid,
		GridBase *CoarseGrid){
    int nevecs = evecs_in.size();
    assert(nevecs > nbasis);

    //Construct the basis
    basis.resize(nbasis, FineGrid);
    for(int b=0;b<nbasis;b++) basis[b] = evecs_in[b];

    //Block orthonormalise the basis
    CoarseScalar InnerProd(CoarseGrid);
    std::cout << GridLogMessage <<" Gram-Schmidt pass 1"<<std::endl;
    blockOrthogonalise(InnerProd,basis);
    std::cout << GridLogMessage <<" Gram-Schmidt pass 2"<<std::endl;
    blockOrthogonalise(InnerProd,basis);

    //The coarse grid representation is the field of vectors of block inner products
    std::cout << GridLogMessage << "Compressing eigenvectors" << std::endl;
    compressed_evecs.resize(nevecs, CoarseGrid);
    for(int i=0;i<nevecs;i++) blockProject(compressed_evecs[i], evecs_in[i], basis);
    std::cout << GridLogMessage << "Compression complete" << std::endl;
  }

  void uncompress(FineField &evec, const int i, const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs) const{
    blockPromote(compressed_evecs[i],evec,basis);
  }

  //Test uncompressed eigenvectors of Linop.HermOp to precision 'base_tolerance' for i<nbasis and 'base_tolerance*relax' for i>=nbasis
  //Because the uncompressed evec has a lot of high mode noise (unimportant for deflation) we apply a smoother before testing.
  //The Chebyshev used by the Lanczos should be sufficient as a smoother
  //Returns true if all residuals are within tolerance
  bool testCompression(LinearOperatorBase<FineField> &Linop, OperatorFunction<FineField> &smoother,
		       const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs, const std::vector<RealD> &evals,
		       const RealD base_tolerance, const RealD relax){
    std::cout << GridLogMessage << "Testing quality of uncompressed evecs (after smoothing)" << std::endl;

    GridBase* FineGrid = basis[0].Grid();
    GridBase* CoarseGrid = compressed_evecs[0].Grid();

    bool fail = false;
    FineField evec(FineGrid), Mevec(FineGrid), evec_sm(FineGrid);
    for(int i=0;i<compressed_evecs.size();i++){
      std::cout << GridLogMessage << "Uncompressing evec " << i << std::endl;
      uncompress(evec, i, basis, compressed_evecs);

      std::cout << GridLogMessage << "Smoothing evec " << i << std::endl;
      smoother(Linop, evec, evec_sm);

      std::cout << GridLogMessage << "Computing residual for evec " << i << std::endl;
      std::cout << GridLogMessage << "Linop" << std::endl;
      Linop.HermOp(evec_sm, Mevec);
      std::cout << GridLogMessage << "Linalg" << std::endl;
      Mevec = Mevec - evals[i]*evec_sm;

      std::cout << GridLogMessage << "Resid" << std::endl;
      RealD tol = base_tolerance * (i<nbasis ? 1. : relax);
      RealD res = sqrt(norm2(Mevec));
      std::cout << GridLogMessage << "Evec idx " << i << " res " << res << " tol " << tol << std::endl;
      if(res > tol) fail = true;
    }
    return !fail; //true on success, so the assert at the call site checks that the test passed
  }

  //Compare uncompressed evecs to original evecs
  void compareEvecs(const std::vector<FineField> &basis, const std::vector<CoarseField> &compressed_evecs, const std::vector<FineField> &orig_evecs){
    std::cout << GridLogMessage << "Comparing uncompressed evecs to original evecs" << std::endl;

    GridBase* FineGrid = basis[0].Grid();
    GridBase* CoarseGrid = compressed_evecs[0].Grid();

    FineField evec(FineGrid), diff(FineGrid);
    for(int i=0;i<compressed_evecs.size();i++){
      std::cout << GridLogMessage << "Uncompressing evec " << i << std::endl;
      uncompress(evec, i, basis, compressed_evecs);
      diff = orig_evecs[i] - evec;
      RealD res = sqrt(norm2(diff));
      std::cout << GridLogMessage << "Evec idx " << i << " res " << res << std::endl;
    }
  }

};
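A minimal usage sketch of the class above (illustrative: the grid pointers, the SiteSpinor/FermionField/CoarseField typedefs and the vector of fine eigenvectors `evecs` are assumed to be set up as in run_b() below):

  //Illustrative sketch only; see run_b() below for the real setup of these objects.
  LocalCoherenceCompressor<SiteSpinor,vTComplex,100> comp;
  std::vector<FermionField> basis;   //filled (and block-orthonormalised) by compress
  std::vector<CoarseField> c_evecs;  //one coarse coefficient field per eigenvector
  comp.compress(basis, c_evecs, evecs, FrbGrid, CoarseGrid5);

  FermionField reconstructed(FrbGrid);
  comp.uncompress(reconstructed, 0, basis, c_evecs); //rebuild eigenvector 0 from the compressed data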
template<class Fobj,class CComplex,int nbasis>
void compareBlockPromoteTimings(const std::vector<Lattice<Fobj> > &basis, const std::vector<Lattice<iVector<CComplex,nbasis > > > &compressed_evecs){
  typedef iVector<CComplex,nbasis > CoarseSiteVector;
  typedef Lattice<CComplex> CoarseScalar;
  typedef Lattice<CoarseSiteVector> CoarseField;
  typedef Lattice<Fobj> FineField;

  GridStopWatch timer;

  GridBase* FineGrid = basis[0].Grid();
  GridBase* CoarseGrid = compressed_evecs[0].Grid();

  FineField v1(FineGrid), v2(FineGrid);

  //Start with a cold start: touch all fields on the host so the device copies are stale
  for(int i=0;i<basis.size();i++){
    autoView( b_ , basis[i], CpuWrite);
  }
  for(int i=0;i<compressed_evecs.size();i++){
    autoView( b_ , compressed_evecs[i], CpuWrite);
  }
  {
    autoView( b_, v1, CpuWrite );
  }

  timer.Start();
  blockPromote(compressed_evecs[0],v1,basis);
  timer.Stop();
  std::cout << GridLogMessage << "Time for cold blockPromote v1 " << timer.Elapsed() << std::endl;

  //Test to ensure it is actually doing a cold start by repeating
  for(int i=0;i<basis.size();i++){
    autoView( b_ , basis[i], CpuWrite);
  }
  for(int i=0;i<compressed_evecs.size();i++){
    autoView( b_ , compressed_evecs[i], CpuWrite);
  }
  {
    autoView( b_, v1, CpuWrite );
  }

  timer.Reset();
  timer.Start();
  blockPromote(compressed_evecs[0],v1,basis);
  timer.Stop();
  std::cout << GridLogMessage << "Time for cold blockPromote v1 repeat (should be the same as above) " << timer.Elapsed() << std::endl;
}
struct Args{
  int Ls;
  RealD mass;
  RealD M5;
  bool is_cps_cfg;
  RealD mobius_scale; //b+c

  CPSLanczosParams fine;
  double coarse_relax_tol;

  std::vector<int> blockSize;
  std::vector<int> GparityDirs;

  bool write_fine;
  std::string write_fine_file;
  bool read_fine;
  std::string read_fine_file;

  int basis_size;

  Args(){
    blockSize = {2,2,2,2,2};
    GparityDirs = {1,1,1}; //1 for each GP direction

    Ls = 12;
    mass = 0.01;
    M5 = 1.8;
    is_cps_cfg = false;
    mobius_scale = 2;

    fine.alpha = 2;
    fine.beta = 0.1;
    fine.ch_ord = 100;
    fine.N_use = 70;
    fine.N_get = 60;
    fine.N_true_get = 60;
    fine.stop_rsd = 1e-8;
    fine.maxits = 10000;

    coarse_relax_tol = 1e5;

    write_fine = false;
    read_fine = false;

    basis_size = 100;
  }
};


GparityWilsonImplD::ImplParams setupGparityParams(const std::vector<int> &GparityDirs){
  //Setup G-parity BCs
  assert(Nd == 4);
  std::vector<int> dirs4(4);
  for(int i=0;i<3;i++) dirs4[i] = GparityDirs[i];
  dirs4[3] = 0; //periodic gauge BC in time

  std::cout << GridLogMessage << "Gauge BCs: " << dirs4 << std::endl;
  ConjugateGimplD::setDirections(dirs4); //gauge BC

  GparityWilsonImplD::ImplParams Params;
  for(int i=0;i<Nd-1;i++) Params.twists[i] = GparityDirs[i]; //G-parity directions
  Params.twists[Nd-1] = 1; //APBC in time direction
  std::cout << GridLogMessage << "Fermion BCs: " << Params.twists << std::endl;
  return Params;
}

WilsonImplD::ImplParams setupParams(){
  WilsonImplD::ImplParams Params;
  Complex one(1.0);
  Complex mone(-1.0);
  for(int i=0;i<Nd-1;i++) Params.boundary_phases[i] = one;
  Params.boundary_phases[Nd-1] = mone;
  return Params;
}
template<int nbasis, typename ActionType>
void run_b(ActionType &action, const std::string &config, const Args &args){
  //Fine grids
  GridCartesian         * UGrid   = (GridCartesian*)action.GaugeGrid();
  GridRedBlackCartesian * UrbGrid = (GridRedBlackCartesian*)action.GaugeRedBlackGrid();
  GridCartesian         * FGrid   = (GridCartesian*)action.FermionGrid();
  GridRedBlackCartesian * FrbGrid = (GridRedBlackCartesian*)action.FermionRedBlackGrid();

  //Setup the coarse grids
  auto fineLatt = GridDefaultLatt();
  Coordinate coarseLatt(4);
  for (int d=0;d<4;d++){
    coarseLatt[d] = fineLatt[d]/args.blockSize[d];    assert(coarseLatt[d]*args.blockSize[d]==fineLatt[d]);
  }

  std::cout << GridLogMessage<< " 5d coarse lattice is ";
  for (int i=0;i<4;i++){
    std::cout << coarseLatt[i]<<"x";
  }
  int cLs = args.Ls/args.blockSize[4]; assert(cLs*args.blockSize[4]==args.Ls);
  std::cout << cLs<<std::endl;

  GridCartesian * CoarseGrid4 = SpaceTimeGrid::makeFourDimGrid(coarseLatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * CoarseGrid4rb = SpaceTimeGrid::makeFourDimRedBlackGrid(CoarseGrid4);
  GridCartesian * CoarseGrid5 = SpaceTimeGrid::makeFiveDimGrid(cLs,CoarseGrid4);

  typedef vTComplex CComplex;
  typedef iVector<CComplex,nbasis > CoarseSiteVector;
  typedef Lattice<CComplex> CoarseScalar;
  typedef Lattice<CoarseSiteVector> CoarseField;

  typedef typename ActionType::FermionField FermionField;

  SchurDiagTwoOperator<ActionType,FermionField> SchurOp(action);

  typedef typename ActionType::SiteSpinor SiteSpinor;

  const CPSLanczosParams &fine = args.fine;

  //Do the fine Lanczos
  std::vector<RealD> evals;
  std::vector<FermionField> evecs;

  if(args.read_fine){
    evals.resize(fine.N_true_get);
    evecs.resize(fine.N_true_get, FrbGrid);

    std::string evals_file = args.read_fine_file + "_evals.xml";
    std::string evecs_file = args.read_fine_file + "_evecs.scidac";

    std::cout << GridLogIRL<< "Reading evals from "<<evals_file<<std::endl;
    XmlReader RDx(evals_file);
    read(RDx,"evals",evals);

    assert(evals.size()==fine.N_true_get);

    std::cout << GridLogIRL<< "Reading evecs from "<<evecs_file<<std::endl;
    emptyUserRecord record;
    Grid::ScidacReader RD;
    RD.open(evecs_file);
    for(int k=0;k<fine.N_true_get;k++) {
      evecs[k].Checkerboard()=Odd;
      RD.readScidacFieldRecord(evecs[k],record);
    }
    RD.close();
  }else{
    int Nstop = fine.Nstop(); //==N_true_get
    int Nm = fine.Nm();
    int Nk = fine.Nk();
    RealD resid = fine.stop_rsd;
    int MaxIt = fine.maxits;

    assert(nbasis<=Nm);
    Chebyshev<FermionField> Cheby(fine.getChebyParams());
    FunctionHermOp<FermionField> ChebyOp(Cheby,SchurOp);
    PlainHermOp<FermionField> Op(SchurOp);

    evals.resize(Nm);
    evecs.resize(Nm,FrbGrid);

    ImplicitlyRestartedLanczos<FermionField> IRL(ChebyOp,Op,Nstop,Nk,Nm,resid,MaxIt,0,0);

    FermionField src(FrbGrid);
    typedef typename FermionField::scalar_type Scalar;
    src=Scalar(1.0);
    src.Checkerboard() = Odd;

    int Nconv;
    IRL.calc(evals, evecs,src,Nconv,false);
    if(Nconv < Nstop) assert(0 && "Fine lanczos failed to converge the required number of evecs"); //algorithm doesn't consider this a failure
    if(Nconv > Nstop){
      //Yes, this potentially throws away some evecs, but it is better than having a random number of evecs between Nstop and Nm!
      evals.resize(Nstop);
      evecs.resize(Nstop, FrbGrid);
    }

    if(args.write_fine){
      std::string evals_file = args.write_fine_file + "_evals.xml";
      std::string evecs_file = args.write_fine_file + "_evecs.scidac";

      std::cout << GridLogIRL<< "Writing evecs to "<<evecs_file<<std::endl;

      emptyUserRecord record;
      Grid::ScidacWriter WR(FrbGrid->IsBoss());
      WR.open(evecs_file);
      for(int k=0;k<evecs.size();k++) {
	WR.writeScidacFieldRecord(evecs[k],record);
      }
      WR.close();

      std::cout << GridLogIRL<< "Writing evals to "<<evals_file<<std::endl;

      XmlWriter WRx(evals_file);
      write(WRx,"evals",evals);
    }
  }

  //Do the compression
  LocalCoherenceCompressor<SiteSpinor,vTComplex,nbasis> compressor;
  std::vector<FermionField> basis(nbasis,FrbGrid);
  std::vector<CoarseField> compressed_evecs(evecs.size(),CoarseGrid5);

  compressor.compress(basis, compressed_evecs, evecs, FrbGrid, CoarseGrid5);

  compareBlockPromoteTimings(basis, compressed_evecs);

  //Compare uncompressed and original evecs
  compressor.compareEvecs(basis, compressed_evecs, evecs);

  //Create the smoother
  Chebyshev<FermionField> smoother(fine.getChebyParams());

  //Test the quality of the uncompressed evecs (testCompression returns true on success)
  assert( compressor.testCompression(SchurOp, smoother, basis, compressed_evecs, evals, fine.stop_rsd, args.coarse_relax_tol) );
}
template<typename ActionType>
void run(ActionType &action, const std::string &config, const Args &args){
  switch(args.basis_size){
  case 50:
    return run_b<50>(action,config,args);
  case 100:
    return run_b<100>(action,config,args);
  case 150:
    return run_b<150>(action,config,args);
  case 200:
    return run_b<200>(action,config,args);
  case 250:
    return run_b<250>(action,config,args);
  case 300:
    return run_b<300>(action,config,args);
  case 350:
    return run_b<350>(action,config,args);
  case 400:
    return run_b<400>(action,config,args);
  default:
    assert(0 && "Unsupported basis size: allowed values are 50,100,150,200,250,300,350,400");
  }
}
//Note: because we rely upon physical properties we must use a "real" gauge configuration
int main (int argc, char ** argv) {
  Grid_init(&argc,&argv);
  GridLogIRL.TimingMode(1);

  if(argc < 3){
    std::cout << GridLogMessage << "Usage: <exe> <config file> <gparity dirs> <options>" << std::endl;
    std::cout << GridLogMessage << "<gparity dirs> should have the format a.b.c where a,b,c are 0,1 depending on whether there are G-parity BCs in that direction" << std::endl;
    std::cout << GridLogMessage << "Options:" << std::endl;
    std::cout << GridLogMessage << "--Ls <value> : Set Ls (default 12)" << std::endl;
    std::cout << GridLogMessage << "--mass <value> : Set the mass (default 0.01)" << std::endl;
    std::cout << GridLogMessage << "--block <value> : Set the block size. Format should be a.b.c.d.e where a-e are the block extents (default 2.2.2.2.2)" << std::endl;
    std::cout << GridLogMessage << "--is_cps_cfg : Indicate that the configuration was generated with CPS where until recently the stored plaquette was wrong by a factor of 2" << std::endl;
    std::cout << GridLogMessage << "--write_irl_templ: Write a template for the parameters file of the Lanczos to \"irl_templ.xml\"" << std::endl;
    std::cout << GridLogMessage << "--read_irl_fine <filename>: Read the parameters file for the fine Lanczos" << std::endl;
    std::cout << GridLogMessage << "--write_fine <filename stub>: Write fine evecs/evals to filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--read_fine <filename stub>: Read fine evecs/evals from filename starting with the stub" << std::endl;
    std::cout << GridLogMessage << "--coarse_relax_tol : Set the relaxation parameter for evaluating the residual of the reconstructed eigenvectors outside of the basis (default 1e5)" << std::endl;
    std::cout << GridLogMessage << "--action : Set the action from 'DWF', 'Mobius' (default Mobius)" << std::endl;
    std::cout << GridLogMessage << "--mobius_scale : Set the Mobius scale b+c (default 2)" << std::endl;
    std::cout << GridLogMessage << "--basis_size : Set the basis size from 50,100,150,200,250,300,350,400 (default 100)" << std::endl;

    Grid_finalize();
    return 1;
  }
  std::string config = argv[1];

  Args args;
  GridCmdOptionIntVector(argv[2], args.GparityDirs);
  assert(args.GparityDirs.size() == 3);

  std::string action_s = "Mobius";

  for(int i=3;i<argc;i++){
    std::string sarg = argv[i];
    if(sarg == "--Ls"){
      args.Ls = std::stoi(argv[i+1]);
      std::cout << GridLogMessage << "Set Ls to " << args.Ls << std::endl;
    }else if(sarg == "--mass"){
      std::istringstream ss(argv[i+1]); ss >> args.mass;
      std::cout << GridLogMessage << "Set quark mass to " << args.mass << std::endl;
    }else if(sarg == "--block"){
      GridCmdOptionIntVector(argv[i+1], args.blockSize);
      assert(args.blockSize.size() == 5);
      std::cout << GridLogMessage << "Set block size to ";
      for(int q=0;q<5;q++) std::cout << args.blockSize[q] << " ";
      std::cout << std::endl;
    }else if(sarg == "--is_cps_cfg"){
      args.is_cps_cfg = true;
    }else if(sarg == "--write_irl_templ"){
      XmlWriter writer("irl_templ.xml");
      write(writer,"Params",args.fine);
      Grid_finalize();
      return 0;
    }else if(sarg == "--read_irl_fine"){
      std::cout << GridLogMessage << "Reading fine IRL params from " << argv[i+1] << std::endl;
      XmlReader reader(argv[i+1]);
      read(reader, "Params", args.fine);
    }else if(sarg == "--write_fine"){
      args.write_fine = true;
      args.write_fine_file = argv[i+1];
    }else if(sarg == "--read_fine"){
      args.read_fine = true;
      args.read_fine_file = argv[i+1];
    }else if(sarg == "--coarse_relax_tol"){
      std::istringstream ss(argv[i+1]); ss >> args.coarse_relax_tol;
      std::cout << GridLogMessage << "Set coarse IRL relaxation parameter to " << args.coarse_relax_tol << std::endl;
    }else if(sarg == "--action"){
      action_s = argv[i+1];
      std::cout << "Action set to " << action_s << std::endl;
    }else if(sarg == "--mobius_scale"){
      std::istringstream ss(argv[i+1]); ss >> args.mobius_scale;
      std::cout << GridLogMessage << "Set Mobius scale to " << args.mobius_scale << std::endl;
    }else if(sarg == "--basis_size"){
      args.basis_size = std::stoi(argv[i+1]);
      std::cout << GridLogMessage << "Set basis size to " << args.basis_size << std::endl;
    }
  }

  //Fine grids
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(args.Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(args.Ls,UGrid);

  LatticeGaugeField Umu(UGrid);

  bool is_gparity = false;
  for(auto g : args.GparityDirs) if(g) is_gparity = true;

  double bmc = 1.;
  double b = (args.mobius_scale + bmc)/2.;  // b = 1/2 [ (b+c) + (b-c) ]
  double c = (args.mobius_scale - bmc)/2.;  // c = 1/2 [ (b+c) - (b-c) ]

  if(is_gparity){
    GparityWilsonImplD::ImplParams Params = setupGparityParams(args.GparityDirs);
    readConfiguration<ConjugateGimplD>(Umu, config, args.is_cps_cfg); //Read the gauge field

    if(action_s == "DWF"){
      GparityDomainWallFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, Params);
      run(action, config, args);
    }else if(action_s == "Mobius"){
      GparityMobiusFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, b, c, Params);
      run(action, config, args);
    }
  }else{
    WilsonImplD::ImplParams Params = setupParams();
    readConfiguration<PeriodicGimplD>(Umu, config, args.is_cps_cfg); //Read the gauge field

    if(action_s == "DWF"){
      DomainWallFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, Params);
      run(action, config, args);
    }else if(action_s == "Mobius"){
      MobiusFermionD action(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid, args.mass, args.M5, b, c, Params);
      run(action, config, args);
    }
  }

  Grid_finalize();
}