From 2a9cfeb9ea99eaf7575029494a14b6a33ee3bf3d Mon Sep 17 00:00:00 2001
From: Peter Boyle
Date: Thu, 26 Sep 2024 14:23:29 -0400
Subject: [PATCH] New files

---
 ...Test_general_coarse_hdcg_phys48_blockcg.cc | 781 ++++++++++++++++++
 ...ral_coarse_hdcg_phys48_lanczos_subspace.cc | 355 ++++++++
 2 files changed, 1136 insertions(+)
 create mode 100644 tests/debug/Test_general_coarse_hdcg_phys48_blockcg.cc
 create mode 100644 tests/debug/Test_general_coarse_hdcg_phys48_lanczos_subspace.cc

diff --git a/tests/debug/Test_general_coarse_hdcg_phys48_blockcg.cc b/tests/debug/Test_general_coarse_hdcg_phys48_blockcg.cc
new file mode 100644
index 00000000..4a7890f4
--- /dev/null
+++ b/tests/debug/Test_general_coarse_hdcg_phys48_blockcg.cc
@@ -0,0 +1,781 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./tests/Test_general_coarse_hdcg.cc
+
+    Copyright (C) 2023
+
+Author: Peter Boyle
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+#include <Grid/Grid.h>
+#include <Grid/lattice/PaddedCell.h>
+#include <Grid/stencil/GeneralLocalStencil.h>
+#include <Grid/algorithms/iterative/AdefMrhs.h>
+#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
+#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
+
+using namespace std;
+using namespace Grid;
+
+template<class aggregation>
+void SaveFineEvecs(aggregation &Agg,std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(Agg[0].Grid()->IsBoss());
+  WR.open(file);
+  for(int b=0;b<Agg.size();b++){
+    WR.writeScidacFieldRecord(Agg[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  WR.close();
+#endif
+}
+template<class aggregation>
+void SaveBasis(aggregation &Agg,std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(Agg.FineGrid->IsBoss());
+  WR.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    WR.writeScidacFieldRecord(Agg.subspace[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  WR.close();
+#endif
+}
+template<class aggregation>
+void LoadBasis(aggregation &Agg, std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacReader RD ;
+  RD.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    RD.readScidacFieldRecord(Agg.subspace[b],record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  RD.close();
+#endif
+}
+template<class aggregation>
+void LoadBasisSkip(aggregation &Agg, std::string file,int N,LatticeFermionF & tmp)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacReader RD ;
+
+  RD.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    for(int n=0;n<N;n++){
+      RD.readScidacFieldRecord(tmp,record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+      if(n==0) precisionChange(Agg.subspace[b],tmp);
+    }
+  }
+  RD.close();
+#endif
+}
+template<class aggregation>
+void LoadBasisSum(aggregation &Agg, std::string file,int N,LatticeFermionF & tmp)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacReader RD ;
+
+  LatticeFermionF sum(tmp.Grid());
+  RD.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    sum = Zero();
+    for(int n=0;n<N;n++){
+      RD.readScidacFieldRecord(tmp,record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+      sum = sum + tmp;
+    }
+    precisionChange(Agg.subspace[b],sum);
+  }
+  RD.close();
+#endif
+}
+template<class CoarseVector>
+void SaveEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(evec[0].Grid()->IsBoss());
+  WR.open(evec_file);
+  for(int b=0;b<evec.size();b++){
+    WR.writeScidacFieldRecord(evec[b],record,0,0);
+  }
+  WR.close();
+  XmlWriter WRx(eval_file);
+  write(WRx,"evals",eval);
+#endif
+}
+template<class CoarseVector>
+void LoadEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  XmlReader RDx(eval_file);
+  read(RDx,"evals",eval);
+  emptyUserRecord record;
+
+  Grid::ScidacReader RD ;
+  RD.open(evec_file);
+  assert(evec.size()==eval.size());
+  for(int k=0;k<eval.size();k++) {
+    RD.readScidacFieldRecord(evec[k],record);
+  }
+  RD.close();
+#endif
+}
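+////////////////////////////////////////////////////////////////////////////////
+// Adaptor: presents a Hermitian operator (HermOp) through every entry point
+// of the LinearOperatorBase interface, so generic Krylov code that calls
+// Op/AdjOp can drive the Schur red-black preconditioned matrix directly.
+// Stencil-style entry points (OpDiag/OpDir/OpDirAll) are not needed here and
+// simply assert.
+////////////////////////////////////////////////////////////////////////////////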
+template<class Field>
+class HermOpAdaptor : public LinearOperatorBase<Field>
+{
+  LinearOperatorBase<Field> & wrapped;
+public:
+  HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {};
+  void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  }
+  void HermOp (const Field &in, Field &out)   { wrapped.HermOp(in,out); }
+  void AdjOp  (const Field &in, Field &out)   { wrapped.HermOp(in,out);  }
+  void OpDiag (const Field &in, Field &out)                  { assert(0); }
+  void OpDir  (const Field &in, Field &out,int dir,int disp) { assert(0); }
+  void OpDirAll (const Field &in, std::vector<Field> &out)   { assert(0); };
+  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
+};
+
+template<class Field> class FixedCGPolynomial : public LinearFunction<Field>
+{
+public:
+  using LinearFunction<Field>::operator();
+  typedef LinearOperatorBase<Field> FineOperator;
+  FineOperator & _SmootherOperator;
+  ConjugateGradientPolynomial<Field> CG;
+  int iters;
+  bool record;
+  int replay_count;
+  FixedCGPolynomial(int _iters, FineOperator &SmootherOperator) :
+    _SmootherOperator(SmootherOperator),
+    iters(_iters),
+    record(true),
+    CG(0.0,_iters,false)
+  {
+    std::cout << GridLogMessage<<" FixedCGPolynomial order "<<iters<<std::endl;
+    replay_count = 0;
+  };
+  void operator() (const Field &in, Field &out)
+  {
+    // Record the CG coefficient sequence on the first call; replay the
+    // recorded sequence thereafter so the smoother is a fixed (linear)
+    // polynomial in the operator.
+    out = Zero();
+    if ( record ) {
+      CG.Solve(_SmootherOperator,in,out);
+      record = false;
+    } else {
+      CG.CGsequenceHermOp(_SmootherOperator,in,out);
+      replay_count++;
+    }
+  }
+  void operator() (const std::vector<Field> &in, std::vector<Field> &out)
+  {
+    for(int i=0;i<out.size();i++){
+      out[i]=Zero();
+    }
+    int blockDim = 0;//not used for BlockCGVec
+    BlockConjugateGradient<Field>    BCGV  (BlockCGrQVec,blockDim,0.0,iters,false);
+    BCGV(_SmootherOperator,in,out);
+  }
+
+};
+template<class Field> class CGSmoother : public LinearFunction<Field>
+{
+public:
+  using LinearFunction<Field>::operator();
+  typedef LinearOperatorBase<Field> FineOperator;
+  FineOperator & _SmootherOperator;
+  int iters;
+  CGSmoother(int _iters, FineOperator &SmootherOperator) :
+    _SmootherOperator(SmootherOperator),
+    iters(_iters)
+  {
+    std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl;
+  };
+  void operator() (const Field &in, Field &out)
+  {
+    ConjugateGradient<Field>  CG(0.0,iters,false); // non-converge is just fine in a smoother
+
+    out=Zero();
+
+    CG(_SmootherOperator,in,out);
+  }
+};
+
+
+RealD InverseApproximation(RealD x){
+  return 1.0/x;
+}
+template<class Field> class ChebyshevSmoother : public LinearFunction<Field>
+{
+public:
+  using LinearFunction<Field>::operator();
+  typedef LinearOperatorBase<Field> FineOperator;
+  FineOperator & _SmootherOperator;
+  Chebyshev<Field> Cheby;
+  ChebyshevSmoother(RealD _lo,RealD _hi,int _ord, FineOperator &SmootherOperator) :
+    _SmootherOperator(SmootherOperator),
+    Cheby(_lo,_hi,_ord,InverseApproximation)
+  {
+    std::cout << GridLogMessage<<" Chebyshev smoother order "<<_ord<<" ["<<_lo<<","<<_hi<<"]"<<std::endl;
+  };
+  void operator() (const Field &in, Field &out)
+  {
+    out=Zero();
+    Cheby(_SmootherOperator,in,out);
+  }
+};
+
+template<class Field> class ChebyshevInverter : public LinearFunction<Field>
+{
+public:
+  using LinearFunction<Field>::operator();
+  typedef LinearOperatorBase<Field> FineOperator;
+  FineOperator & _Operator;
+  Chebyshev<Field> Cheby;
+  ChebyshevInverter(RealD _lo,RealD _hi,int _ord, FineOperator &Operator) :
+    _Operator(Operator),
+    Cheby(_lo,_hi,_ord,InverseApproximation)
+  {
+    std::cout << GridLogMessage<<" Chebyshev Inverter order "<<_ord<<" ["<<_lo<<","<<_hi<<"]"<<std::endl;
+  };
+  void operator() (const Field &in, Field &out)
+  {
+    Cheby(_Operator,in,out);
+  }
+};
+
+int main (int argc, char ** argv)
+{
+  Grid_init(&argc,&argv);
+
+  const int Ls=24;
+  const int nbasis = 62;
+  const int cb = 0 ;
+  RealD mass=0.00078;
+  RealD M5=1.8;
+  RealD b=1.5;
+  RealD c=0.5;
+
+  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
+                                                                   GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                                                   GridDefaultMpi());
+  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+
+  // Construct a coarsened grid with 4^4 cell
+  Coordinate Block({4,4,6,4});
+  Coordinate clatt = GridDefaultLatt();
+  for(int d=0;d<clatt.size();d++){
+    clatt[d] = clatt[d]/Block[d];
+  }
+  GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt,
+                                                            GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                                            GridDefaultMpi());
+  GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
+
+  ///////////////////////// Configuration /////////////////////////////////
+  LatticeGaugeField Umu(UGrid);
+
+  FieldMetaData header;
+  std::string file("ckpoint_lat.1000");
+  NerscIO::readConfiguration(Umu,header,file);
+
+  //////////////////////// Fermion action //////////////////////////////////
+  MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
+
+  SchurDiagMooeeOperator<MobiusFermionD,LatticeFermionD> HermOpEO(Ddwf);
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Fine Power method                    "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  {
+    LatticeFermionD pm_src(FrbGrid);
+    pm_src = ComplexD(1.0);
+    PowerMethod<LatticeFermionD> fPM;
+    fPM(HermOpEO,pm_src);
+  }
+
+  if(0)
+  {
+
+    std::cout << "**************************************"<<std::endl;
+    std::cout << " Fine Lanczos (single precision)      "<<std::endl;
+    std::cout << "**************************************"<<std::endl;
+
+    GridCartesian         * UGridF   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
+                                                                      GridDefaultSimd(Nd,vComplexF::Nsimd()),
+                                                                      GridDefaultMpi());
+    GridRedBlackCartesian * UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
+    GridCartesian         * FGridF   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
+    GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
+
+    LatticeGaugeFieldF UmuF(UGridF);
+    precisionChange(UmuF,Umu);
+    MobiusFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,b,c);
+    SchurDiagMooeeOperator<MobiusFermionF,LatticeFermionF> HermOpEOF(DdwfF);
+
+    const int Fine_Nstop = 200;
+    const int Fine_Nk = 200;
+    const int Fine_Np = 200;
+    const int Fine_Nm = Fine_Nk+Fine_Np;
+    const int Fine_MaxIt= 10;
+
+    RealD Fine_resid = 1.0e-4;
+    std::cout << GridLogMessage << "Fine Lanczos "<<std::endl;
+    typedef LatticeFermionF FermionField;
+    Chebyshev<FermionField> Cheby(0.002,92.0,401);
+    //  Chebyshev<FermionField> Cheby(0.1,92.0,401);
+    FunctionHermOp<FermionField> OpCheby(Cheby,HermOpEOF);
+    PlainHermOp<FermionField> Op (HermOpEOF);
+    ImplicitlyRestartedLanczos<FermionField> IRL(OpCheby,Op,Fine_Nstop,Fine_Nk,Fine_Nm,Fine_resid,Fine_MaxIt);
+    std::vector<RealD>        Fine_eval(Fine_Nm);
+    FermionField              Fine_src(FrbGridF);
+    Fine_src = ComplexF(1.0);
+    std::vector<FermionField> Fine_evec(Fine_Nm,FrbGridF);
+
+    int Fine_Nconv;
+    std::cout << GridLogMessage <<" Calling IRL.calc single prec"<<std::endl;
+    IRL.calc(Fine_eval,Fine_evec,Fine_src,Fine_Nconv);
+
+    std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.evecF");
+    SaveFineEvecs(Fine_evec,evec_file);
+  }
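+  //////////////////////////////////////////////////////////////////////////
+  // HDCG pipeline from here on, in outline:
+  //  1) load (or build) a near-null-space basis, blocked over 4.4.6.4 cells;
+  //  2) coarsen the Schur operator to a multi-RHS "little" Dirac operator;
+  //  3) deflate the coarse operator with block-Lanczos eigenvectors;
+  //  4) combine the fine smoother and coarse solve in a two-level ADEF2
+  //     preconditioner, solving nrhs right-hand sides at once.
+  //////////////////////////////////////////////////////////////////////////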
+  std::vector<int> seeds4({1,2,3,4});
+  std::vector<int> seeds5({5,6,7,8});
+  std::vector<int> cseeds({5,6,7,8});
+
+  GridParallelRNG RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5);
+  GridParallelRNG RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4);
+  GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
+
+
+  typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix;
+  HermFineMatrix FineHermOp(HermOpEO);
+
+  ////////////////////////////////////////////////////////////
+  ///////////// Coarse basis and Little Dirac Operator ///////
+  ////////////////////////////////////////////////////////////
+  typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
+  typedef LittleDiracOperator::CoarseVector CoarseVector;
+
+  NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d);
+
+  typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
+  Subspace Aggregates(Coarse5d,FrbGrid,cb);
+
+  ////////////////////////////////////////////////////////////
+  // Need to check about red-black grid coarsening
+  ////////////////////////////////////////////////////////////
+  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.mixed.2500.60");
+  //  //  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.new.62");
+  //  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.evecF");
+  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.mixed.2500.60");
+  std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.mixed.60");
+  std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac");
+  std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml");
+  bool load_agg=true;
+  bool load_refine=true;
+  bool load_mat=false;
+  bool load_evec=false;
+
+  int refine=1;
+  if ( load_agg ) {
+    if ( !(refine) || (!load_refine) ) {
+      LoadBasis(Aggregates,subspace_file);
+    }
+  } else {
+    //    Aggregates.CreateSubspaceMultishift(RNG5,HermOpEO,
+    //                                        0.0003,1.0e-5,2000); // Lo, tol, maxit
+    //    Aggregates.CreateSubspaceChebyshev(RNG5,HermOpEO,nbasis,95.,0.01,1500);// <== last run
+    Aggregates.CreateSubspaceChebyshevNew(RNG5,HermOpEO,95.);
+    SaveBasis(Aggregates,subspace_file);
+  }
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Coarse grid solver                   "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  ConjugateGradient<CoarseVector> coarseCG(4.0e-2,20000,true);
+
+  const int nrhs=24;
+
+  Coordinate mpi=GridDefaultMpi();
+  Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]});
+  Coordinate rhLatt({nrhs,1,clatt[0],clatt[1],clatt[2],clatt[3]});
+  Coordinate rhSimd({vComplex::Nsimd(),1, 1,1,1,1});
+
+  GridCartesian *CoarseMrhs = new GridCartesian(rhLatt,rhSimd,rhMpi);
+  typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MultiGeneralCoarsenedMatrix_t;
+  MultiGeneralCoarsenedMatrix_t mrhs(geom,CoarseMrhs);
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Coarse Lanczos                       "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+
+  typedef HermitianLinearOperator<MultiGeneralCoarsenedMatrix_t,CoarseVector> MrhsHermMatrix;
+  Chebyshev<CoarseVector> IRLCheby(0.005,42.0,301);  // 1 iter
+  MrhsHermMatrix MrhsCoarseOp (mrhs);
+
+  //  CoarseVector pm_src(CoarseMrhs);
+  //  pm_src = ComplexD(1.0);
+  //  PowerMethod<CoarseVector> cPM;   cPM(MrhsCoarseOp,pm_src);
+
+  int Nk=192;
+  int Nm=384;
+  int Nstop=Nk;
+  int Nconv_test_interval=1;
+
+  ImplicitlyRestartedBlockLanczosCoarse<CoarseVector> IRL(MrhsCoarseOp,
+                                                          Coarse5d,
+                                                          CoarseMrhs,
+                                                          nrhs,
+                                                          IRLCheby,
+                                                          Nstop,
+                                                          Nconv_test_interval,
+                                                          nrhs,
+                                                          Nk,
+                                                          Nm,
+                                                          1e-5,10);
+
+  int Nconv;
+  std::vector<RealD>        eval(Nm);
+  std::vector<CoarseVector> evec(Nm,Coarse5d);
+  std::vector<CoarseVector> c_src(nrhs,Coarse5d);
+
+  ///////////////////////
+  // Deflation guesser object
+  ///////////////////////
+  MultiRHSDeflation<CoarseVector> MrhsGuesser;
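+  // In outline: once eigenpairs (lambda_k, v_k) of the coarse operator are
+  // imported, the guesser forms, for each right-hand side b, the start vector
+  //    x0 = sum_k v_k <v_k|b> / lambda_k
+  // i.e. the exact inverse restricted to the captured low-mode subspace.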
+  //////////////////////////////////////////
+  // Block projector for coarse/fine
+  //////////////////////////////////////////
+  MultiRHSBlockProject<LatticeFermionD> MrhsProjector;
+
+  //////////////////////////
+  // Extra HDCG parameters
+  //////////////////////////
+  int maxit=300;
+  ConjugateGradient<CoarseVector> CG(5.0e-2,maxit,false);
+  ConjugateGradient<CoarseVector> CGstart(5.0e-2,maxit,false);
+  RealD lo=2.0;
+  int ord = 7;
+  //  int ord = 11;
+
+  int blockDim = 0;//not used for BlockCG
+  BlockConjugateGradient<CoarseVector> BCG (BlockCGrQ,blockDim,5.0e-5,maxit,true);
+
+  DoNothingGuesser<CoarseVector> DoNothing;
+  //  HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,CG,DoNothing);
+  //  HPDSolver<CoarseVector> HPDSolveMrhsStart(MrhsCoarseOp,CGstart,DoNothing);
+  //  HPDSolver<CoarseVector> HPDSolveMrhs(MrhsCoarseOp,BCG,DoNothing);
+  //  HPDSolver<CoarseVector> HPDSolveMrhsRefine(MrhsCoarseOp,BCG,DoNothing);
+  //  FixedCGPolynomial<CoarseVector> HPDSolveMrhs(maxit,MrhsCoarseOp);
+
+  ChebyshevInverter<CoarseVector> HPDSolveMrhs(1.0e-2,40.0,120,MrhsCoarseOp); //
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(1.0e-2,40.0,110,MrhsCoarseOp); // 114 iter with Chebysmooth and BlockCG
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(1.0e-2,40.0,120,MrhsCoarseOp); // 138 iter with Chebysmooth
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(1.0e-2,40.0,200,MrhsCoarseOp); // 139 iter
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(3.0e-3,40.0,200,MrhsCoarseOp); // 137 iter, CG smooth, flex
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(1.0e-3,40.0,200,MrhsCoarseOp); // 146 iter, CG smooth, flex
+  //  ChebyshevInverter<CoarseVector> HPDSolveMrhs(3.0e-4,40.0,200,MrhsCoarseOp); // 156 iter, CG smooth, flex
+
+  /////////////////////////////////////////////////
+  // Mirs smoother
+  /////////////////////////////////////////////////
+  ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,lo);
+  //  FixedCGPolynomial<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ;
+  //  CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ;
+  ChebyshevSmoother<LatticeFermionD> CGsmooth(2.0,92.0,8,HermOpEO) ;
+
+  if ( load_refine ) {
+    LoadBasis(Aggregates,refine_file);
+    //    LatticeFermionF conv_tmp(FrbGridF);
+    //    LoadBasisSum(Aggregates,refine_file,sample,conv_tmp);
+  } else {
+    Aggregates.RefineSubspace(HermOpEO,0.001,1.0e-3,3000); // 172 iters
+    SaveBasis(Aggregates,refine_file);
+  }
+  Aggregates.Orthogonalise();
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Coarsen operator and coarse Lanczos  "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  mrhs.CoarsenOperator(FineHermOp,Aggregates,Coarse5d);
+
+  for(int r=0;r<nrhs;r++) random(CRNG,c_src[r]);
+  IRL.calc(eval,evec,c_src,Nconv);
+
+  MrhsGuesser.ImportEigenBasis(evec,eval);
+
+  MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d);
+  MrhsProjector.ImportBasis(Aggregates.subspace);
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Building two level ADEF2 mrhs solver "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  TwoLevelADEF2mrhs<LatticeFermionD,CoarseVector>
+    HDCGmrhs(1.0e-8, 300,
+             FineHermOp,
+             CGsmooth,
+             HPDSolveMrhs,    // Used in M1
+             HPDSolveMrhs,    // Used in Vstart
+             MrhsProjector,
+             MrhsGuesser,
+             CoarseMrhs);
+
+  std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid);
+  std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid);
+  LatticeFermionD result_accurate(FrbGrid);
+  LatticeFermionD result_sloppy(FrbGrid);
+  LatticeFermionD error(FrbGrid);
+  LatticeFermionD residual(FrbGrid);
+
+  for(int r=0;r<nrhs;r++){
+    random(RNG5,src_mrhs[r]);
+    res_mrhs[r]=Zero();
+  }
+  HDCGmrhs(src_mrhs,res_mrhs);
+  result_accurate = res_mrhs[0];
+
+  std::vector<RealD> bins({1.0e-3,1.0e-2,1.0e-1,1.0,10.0,100.0});
+  std::vector<int>   orders({6000 ,4000 ,1000 ,500,500 ,500});
+  PowerSpectrum GraphicEqualizer(bins,orders);
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Sloppy solves and error spectra      "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  std::vector<RealD> tols({1.0e-3,1.0e-4,1.0e-5});
+
+
+  for(auto tol : tols) {
+
+    TwoLevelADEF2mrhs<LatticeFermionD,CoarseVector>
+      HDCGmrhsSloppy(tol, 500,
+                     FineHermOp,
+                     CGsmooth,
+                     HPDSolveMrhs,    // Used in M1
+                     HPDSolveMrhs,    // Used in Vstart
+                     MrhsProjector,
+                     MrhsGuesser,
+                     CoarseMrhs);
+
+    // Solve again to 10^-5
+    for(int r=0;r<nrhs;r++){
+      res_mrhs[r]=Zero();
+    }
+    HDCGmrhsSloppy(src_mrhs,res_mrhs);
+
+    result_sloppy = res_mrhs[0];
+    error = result_sloppy - result_accurate;
+    std::cout << " Sloppy tolerance "<<tol<<" error norm2 "<<norm2(error)<<std::endl;
+    GraphicEqualizer(HermOpEO,error);
+  }
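+  // The guarded block below is a plain red-black CG baseline on the fine
+  // operator, kept for iteration-count and timing comparison against the
+  // HDCG solves above; enable it by flipping the preprocessor condition.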
+#if 0
+  {
+    std::cout << "**************************************"<<std::endl;
+    std::cout << " Fine CG baseline                     "<<std::endl;
+    std::cout << "**************************************"<<std::endl;
+    LatticeFermionD src(FrbGrid);    random(RNG5,src);
+    LatticeFermionD result(FrbGrid); result=Zero();
+    ConjugateGradient<LatticeFermionD> CGfine(1.0e-8,30000,false);
+    CGfine(HermOpEO, src, result);
+  }
+#endif
+  Grid_finalize();
+  return 0;
+}
diff --git a/tests/debug/Test_general_coarse_hdcg_phys48_lanczos_subspace.cc b/tests/debug/Test_general_coarse_hdcg_phys48_lanczos_subspace.cc
new file mode 100644
index 00000000..6efe53bd
--- /dev/null
+++ b/tests/debug/Test_general_coarse_hdcg_phys48_lanczos_subspace.cc
@@ -0,0 +1,355 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./tests/Test_general_coarse_hdcg.cc
+
+    Copyright (C) 2023
+
+Author: Peter Boyle
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+#include <Grid/Grid.h>
+#include <Grid/lattice/PaddedCell.h>
+#include <Grid/stencil/GeneralLocalStencil.h>
+#include <Grid/algorithms/iterative/AdefMrhs.h>
+
+using namespace std;
+using namespace Grid;
+
+template<class aggregation>
+void SaveFineEvecs(aggregation &Agg,std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(Agg[0].Grid()->IsBoss());
+  WR.open(file);
+  for(int b=0;b<Agg.size();b++){
+    WR.writeScidacFieldRecord(Agg[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  WR.close();
+#endif
+}
+template<class aggregation>
+void SaveBasis(aggregation &Agg,std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(Agg.FineGrid->IsBoss());
+  WR.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    WR.writeScidacFieldRecord(Agg.subspace[b],record,0,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  WR.close();
+#endif
+}
+template<class aggregation>
+void LoadBasis(aggregation &Agg, std::string file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacReader RD ;
+  RD.open(file);
+  for(int b=0;b<Agg.subspace.size();b++){
+    RD.readScidacFieldRecord(Agg.subspace[b],record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+  }
+  RD.close();
+#endif
+}
+template<class aggregation>
+void LoadFineEvecs(aggregation &Agg, std::string file,LatticeFermionF & conv_tmp)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacReader RD ;
+  RD.open(file);
+  for(int b=0;b<Agg.size();b++){
+    RD.readScidacFieldRecord(conv_tmp,record,Grid::BinaryIO::BINARYIO_LEXICOGRAPHIC);
+    precisionChange(Agg[b],conv_tmp);
+  }
+  RD.close();
+#endif
+}
+template<class CoarseVector>
+void SaveEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(evec[0].Grid()->IsBoss());
+  WR.open(evec_file);
+  for(int b=0;b<evec.size();b++){
+    WR.writeScidacFieldRecord(evec[b],record,0,0);
+  }
+  WR.close();
+  XmlWriter WRx(eval_file);
+  write(WRx,"evals",eval);
+#endif
+}
+template<class CoarseVector>
+void LoadEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  XmlReader RDx(eval_file);
+  read(RDx,"evals",eval);
+  emptyUserRecord record;
+
+  Grid::ScidacReader RD ;
+  RD.open(evec_file);
+  assert(evec.size()==eval.size());
+  for(int k=0;k<eval.size();k++) {
+    RD.readScidacFieldRecord(evec[k],record);
+  }
+  RD.close();
+#endif
+}
+
+// Want Op in CoarsenOp to call MatPcDagMatPc
+template<class Field>
+class HermOpAdaptor : public LinearOperatorBase<Field>
+{
+  LinearOperatorBase<Field> & wrapped;
+public:
+  HermOpAdaptor(LinearOperatorBase<Field> &wrapme) : wrapped(wrapme)  {};
+  void Op     (const Field &in, Field &out)   { wrapped.HermOp(in,out);  }
+  void HermOp (const Field &in, Field &out)   { wrapped.HermOp(in,out); }
+  void AdjOp  (const Field &in, Field &out)   { wrapped.HermOp(in,out);  }
+  void OpDiag (const Field &in, Field &out)                  { assert(0); }
+  void OpDir  (const Field &in, Field &out,int dir,int disp) { assert(0); }
+  void OpDirAll (const Field &in, std::vector<Field> &out)   { assert(0); };
+  void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){ assert(0); }
+};
+
+template<class Field> class CGSmoother : public LinearFunction<Field>
+{
+public:
+  using LinearFunction<Field>::operator();
+  typedef LinearOperatorBase<Field> FineOperator;
+  FineOperator & _SmootherOperator;
+  int iters;
+  CGSmoother(int _iters, FineOperator &SmootherOperator) :
+    _SmootherOperator(SmootherOperator),
+    iters(_iters)
+  {
+    std::cout << GridLogMessage<<" Mirs smoother order "<<iters<<std::endl;
+  };
+  void operator() (const Field &in, Field &out)
+  {
+    ConjugateGradient<Field>  CG(0.0,iters,false); // non-converge is just fine in a smoother
+
+    out=Zero();
+
+    CG(_SmootherOperator,in,out);
+  }
+};
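+////////////////////////////////////////////////////////////////////////////////
+// This companion test rebuilds the HDCG coarse subspace from fine Lanczos
+// eigenvectors saved (in single precision) by the test above, rather than
+// regenerating it with a Chebyshev setup, and then measures how well the
+// blocked basis compresses those eigenvectors.
+////////////////////////////////////////////////////////////////////////////////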
+int main (int argc, char ** argv)
+{
+  Grid_init(&argc,&argv);
+
+  const int Ls=24;
+  const int nbasis = 62;
+  const int cb = 0 ;
+  RealD mass=0.00078;
+  RealD M5=1.8;
+  RealD b=1.5;
+  RealD c=0.5;
+
+  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
+                                                                   GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                                                   GridDefaultMpi());
+  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+
+  // Construct a coarsened grid with 4^4 cell
+  Coordinate Block({4,4,6,4});
+  Coordinate clatt = GridDefaultLatt();
+  for(int d=0;d<clatt.size();d++){
+    clatt[d] = clatt[d]/Block[d];
+  }
+  GridCartesian *Coarse4d =  SpaceTimeGrid::makeFourDimGrid(clatt,
+                                                            GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                                            GridDefaultMpi());
+  GridCartesian *Coarse5d =  SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
+
+  // Single precision grids, needed to read back the saved fine eigenvectors
+  GridCartesian         * UGridF   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
+                                                                    GridDefaultSimd(Nd,vComplexF::Nsimd()),
+                                                                    GridDefaultMpi());
+  GridCartesian         * FGridF   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
+  GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
+
+  std::vector<int> seeds4({1,2,3,4});
+  std::vector<int> seeds5({5,6,7,8});
+  std::vector<int> cseeds({5,6,7,8});
+
+  GridParallelRNG RNG5(FGrid);   RNG5.SeedFixedIntegers(seeds5);
+  GridParallelRNG RNG4(UGrid);   RNG4.SeedFixedIntegers(seeds4);
+  GridParallelRNG CRNG(Coarse5d);CRNG.SeedFixedIntegers(cseeds);
+
+  ///////////////////////// Configuration /////////////////////////////////
+  LatticeGaugeField Umu(UGrid);
+
+  FieldMetaData header;
+  std::string file("ckpoint_lat.1000");
+  NerscIO::readConfiguration(Umu,header,file);
+
+  //////////////////////// Fermion action //////////////////////////////////
+  MobiusFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
+
+  SchurDiagMooeeOperator<MobiusFermionD,LatticeFermionD> HermOpEO(Ddwf);
+
+  const int Fine_Nstop = 200;
+  const int Fine_Nk = 100;
+  const int Fine_Np = 100;
+  const int Fine_Nm = Fine_Nk+Fine_Np;
+
+  typedef LatticeFermion FermionField;
+  std::vector<RealD>        Fine_eval;
+  std::vector<FermionField> Fine_evec;
+
+  LatticeFermionF conv_tmp(FrbGridF);
+  Fine_eval.resize(Fine_Nstop);
+  Fine_evec.resize(Fine_Nstop,FrbGrid);
+  std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.evecF");
+  LoadFineEvecs(Fine_evec,evec_file,conv_tmp);
+
+  typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix;
+  HermFineMatrix FineHermOp(HermOpEO);
+
+  ////////////////////////////////////////////////////////////
+  ///////////// Coarse basis and Little Dirac Operator ///////
+  ////////////////////////////////////////////////////////////
+  typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
+  typedef LittleDiracOperator::CoarseVector CoarseVector;
+
+  NextToNextToNextToNearestStencilGeometry5D geom(Coarse5d);
+
+  typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
+  Subspace Aggregates(Coarse5d,FrbGrid,cb);
+
+  ////////////////////////////////////////////////////////////
+  // Need to check about red-black grid coarsening
+  ////////////////////////////////////////////////////////////
+  //  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.mixed.2500.60");
+  //  //  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.new.62");
+  //  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.evec");
+  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.mixed.2500.60");
+  //  std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.mixed.60");
+  //  std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac");
+  //  std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml");
+  bool load_agg=true;
+  bool load_refine=true;
+
+  //////////////////////////////////////////
+  // Block projector for coarse/fine
+  //////////////////////////////////////////
+  MultiRHSBlockProject<LatticeFermionD> MrhsProjector;
+
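+  // Shifting the Hermitian operator by MirsShift = lo below damps the low
+  // modes (which the coarse space captures anyway), so a short, deliberately
+  // unconverged CG on the shifted operator acts as a high-mode smoother:
+  // it approximates (H + lo)^{-1} rather than H^{-1}.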
+  /////////////////////////////////////////////////
+  // Mirs smoother
+  /////////////////////////////////////////////////
+  int ord=8;
+  RealD lo=2.0;
+  RealD MirsShift = lo;
+  ShiftedHermOpLinearOperator<LatticeFermionD> ShiftedFineHermOp(HermOpEO,MirsShift);
+  CGSmoother<LatticeFermionD> CGsmooth(ord,ShiftedFineHermOp) ;
+
+  LoadBasis(Aggregates,refine_file);
+  Aggregates.Orthogonalise();
+
+  std::cout << "**************************************"<<std::endl;
+  std::cout << " Using filtered subspace              "<<std::endl;
+  std::cout << "**************************************"<<std::endl;
+  MrhsProjector.Allocate(nbasis,FrbGrid,Coarse5d);
+  MrhsProjector.ImportBasis(Aggregates.subspace);
+
+  std::vector<LatticeFermionD> Fine_ev(1,FrbGrid);
+  std::vector<LatticeFermionD> Fine_ev_compressed(1,FrbGrid);
+  std::vector<CoarseVector>    c_evec(1,Coarse5d);
+  for(int ev=0;ev<Fine_evec.size();ev++){
+    // Project each loaded fine eigenvector to the coarse space, promote it
+    // back, and report the relative compression error of the blocked basis.
+    Fine_ev[0] = Fine_evec[ev];
+    MrhsProjector.blockProject(Fine_ev,c_evec);
+    MrhsProjector.blockPromote(Fine_ev_compressed,c_evec);
+    Fine_ev_compressed[0] = Fine_ev_compressed[0] - Fine_ev[0];
+    RealD compression_error = norm2(Fine_ev_compressed[0])/norm2(Fine_ev[0]);
+    std::cout << GridLogMessage<<" Fine evec "<<ev<<" compression error "<<compression_error<<std::endl;
+  }
+
+  Grid_finalize();
+  return 0;
+}