
Running 12 rhs on 18 nodes of Frontier

Peter Boyle 2024-01-22 17:44:15 -05:00
parent 3d13fd56c5
commit eb702f581b
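
In brief: this change adds SaveEigenvectors/LoadEigenvectors helpers (guarded by HAVE_LIME) so the coarse-space Lanczos eigenpairs can be checkpointed to SciDAC/XML files and reused via a new load_evec flag; repoints the subspace, refine, and little Dirac operator checkpoints from the .repro.62 files to .18node.62 files; zeroes the smoother output before each CG application; raises the multi-RHS count from vComplex::Nsimd()*2 to Nsimd()*3; and retires the single-RHS HDCG call, leaving the plain fine-grid CG test in its own scope at the end.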


@@ -112,6 +112,44 @@ void LoadBasis(aggregation &Agg, std::string file)
   RD.close();
 #endif
 }
+template<class CoarseVector>
+void SaveEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  emptyUserRecord record;
+  ScidacWriter WR(evec[0].Grid()->IsBoss());
+  WR.open(evec_file);
+  for(int b=0;b<evec.size();b++){
+    WR.writeScidacFieldRecord(evec[b],record,0,0);
+  }
+  WR.close();
+  XmlWriter WRx(eval_file);
+  write(WRx,"evals",eval);
+#endif
+}
+template<class CoarseVector>
+void LoadEigenvectors(std::vector<RealD>        &eval,
+                      std::vector<CoarseVector> &evec,
+                      std::string evec_file,
+                      std::string eval_file)
+{
+#ifdef HAVE_LIME
+  XmlReader RDx(eval_file);
+  read(RDx,"evals",eval);
+  emptyUserRecord record;
+  Grid::ScidacReader RD;
+  RD.open(evec_file);
+  assert(evec.size()==eval.size());
+  for(int k=0;k<eval.size();k++) {
+    RD.readScidacFieldRecord(evec[k],record);
+  }
+  RD.close();
+#endif
+}
 RealD InverseApproximation(RealD x){
   return 1.0/x;
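
The intended use of the two helpers above is the checkpoint/restart pattern wired up later in this diff: compute the coarse eigenpairs once with the IRL, save them, and on later runs set load_evec to skip the Lanczos. A minimal sketch, assuming the surrounding test's names (Nstop, Coarse5d, IRL, c_src, Nconv, and the evec_file/eval_file paths):

    std::vector<RealD>        eval(Nstop);
    std::vector<CoarseVector> evec(Nstop,Coarse5d);
    if ( load_evec ) {
      // Restart: read eigenvalues (XML) and eigenvectors (SciDAC records)
      // written by a previous run; sizes must match what was saved.
      LoadEigenvectors(eval,evec,evec_file,eval_file);
    } else {
      // First run: implicitly restarted Lanczos, then checkpoint the result.
      IRL.calc(eval,evec,c_src,Nconv);
      SaveEigenvectors(eval,evec,evec_file,eval_file);
    }
    DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval); // deflate with the saved pairs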
@@ -169,6 +207,7 @@ public:
   void operator() (const Field &in, Field &out)
   {
     ConjugateGradient<Field>  CG(0.0,iters,false); // non-convergence is just fine in a smoother
+    out=Zero();
     CG(_SmootherOperator,in,out);
   }
 };
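
Why the added out=Zero() matters: Grid's ConjugateGradient treats out as the initial guess, and this smoother deliberately runs a fixed, possibly non-converged number of iterations. Without zeroing, each application would start from whatever the output field last held, so the smoother would not be a history-independent linear operation on in.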
@@ -237,12 +276,8 @@ int main (int argc, char ** argv)
   typedef HermOpAdaptor<LatticeFermionD> HermFineMatrix;
   HermFineMatrix FineHermOp(HermOpEO);
-  LatticeFermion result(FrbGrid); result=Zero();
-  LatticeFermion src(FrbGrid); random(RNG5,src);
   // Run power method on FineHermOp
-  PowerMethod<LatticeFermion> PM;   PM(HermOpEO,src);
+  //  PowerMethod<LatticeFermion> PM;   PM(HermOpEO,src);

   ////////////////////////////////////////////////////////////
   ///////////// Coarse basis and Little Dirac Operator ///////
@@ -262,12 +297,15 @@ int main (int argc, char ** argv)
   ////////////////////////////////////////////////////////////
   LittleDiracOperator LittleDiracOp(geom,FrbGrid,Coarse5d);
-  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.rat.repro.62");
-  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.rat.repro.62");
-  std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.rat.repro.62");
+  std::string subspace_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Subspace.phys48.rat.18node.62");
+  std::string refine_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/Refine.phys48.rat.18node.62");
+  std::string ldop_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/LittleDiracOp.phys48.rat.18node.62");
+  std::string evec_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/evecs.scidac");
+  std::string eval_file("/lustre/orion/phy157/proj-shared/phy157_dwf/paboyle/eval.xml");
   bool load_agg=true;
   bool load_refine=true;
   bool load_mat=true;
+  bool load_evec=false;
   MemoryManager::Print();
   int refine=1;
@@ -369,7 +407,7 @@ slurm-1482367.out:Grid : Message : 6169.469330 s : HDCG: Pcg converged in 487 it
   CoarseVector c_res(Coarse5d);
   CoarseVector c_ref(Coarse5d);
-  if (1){
+  if (0){
     ///////////////////////////////////////////////////
     // Test the operator
     ///////////////////////////////////////////////////
@@ -436,7 +474,16 @@ slurm-1482367.out:Grid : Message : 6169.469330 s : HDCG: Pcg converged in 487 it
     PowerMethod<CoarseVector>  cPM;   cPM(CoarseOp,c_src);
-    IRL.calc(eval,evec,c_src,Nconv);
+    if ( load_evec ) {
+      eval.resize(Nstop);
+      evec.resize(Nstop,Coarse5d);
+      LoadEigenvectors(eval,evec,evec_file,eval_file);
+    } else {
+      IRL.calc(eval,evec,c_src,Nconv);
+      assert(Nstop==eval.size());
+      SaveEigenvectors(eval,evec,evec_file,eval_file);
+    }
     DeflatedGuesser<CoarseVector> DeflCoarseGuesser(evec,eval);

     //////////////////////////////////////////
@@ -549,7 +596,7 @@ slurm-1482367.out:Grid : Message : 6169.469330 s : HDCG: Pcg converged in 487 it
     //////////////////////////////////////////////////////////////////////////////////////
     ConjugateGradient<CoarseVector>  coarseCG(4.0e-2,20000,true);
-    const int nrhs=vComplex::Nsimd()*2;
+    const int nrhs=vComplex::Nsimd()*3;

     Coordinate mpi=GridDefaultMpi();
     Coordinate rhMpi ({1,1,mpi[0],mpi[1],mpi[2],mpi[3]});
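
This is the "12 rhs" of the commit message: assuming a build where vComplex::Nsimd()==4 (e.g. 512-bit SIMD in double precision), nrhs rises from 2*4=8 to 3*4=12 simultaneous right-hand sides, which are carried in the two extra leading dimensions of rhMpi above.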
@@ -721,9 +768,9 @@ Conclusion: higher order smoother is doing better. Much better. Use a Krylov smo
                HPDSolveSloppy,
                HPDSolve,
                Aggregates);
-      result=Zero();
-      std::cout << "Calling HDCG single RHS"<<std::endl;
-      HDCG(src,result);
+      //      result=Zero();
+      //      std::cout << "Calling HDCG single RHS"<<std::endl;
+      //      HDCG(src,result);

       //////////////////////////////////////////
       // Build a HDCG mrhs solver
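
The single-RHS HDCG solve is retired together with the outer-scope src/result fields removed in the -237,12 hunk above; the multi-RHS solver below becomes the sole HDCG path, and the closing fine-grid CG test now declares local src/result fields of its own.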
@@ -749,26 +796,33 @@ Conclusion: higher order smoother is doing better. Much better. Use a Krylov smo
       MemoryManager::Print();
       std::vector<LatticeFermionD> src_mrhs(nrhs,FrbGrid);
+      std::cout << " mRHS source"<<std::endl;
       std::vector<LatticeFermionD> res_mrhs(nrhs,FrbGrid);
+      std::cout << " mRHS result"<<std::endl;
       MemoryManager::Print();
       random(RNG5,src_mrhs[0]);
       for(int r=0;r<nrhs;r++){
         if(r>0)src_mrhs[r]=src_mrhs[0];
         res_mrhs[r]=Zero();
         std::cout << "Setup mrhs source "<<r<<std::endl;
       }
       std::cout << "Calling the mRHS HDCG"<<std::endl;
       MemoryManager::Print();
       HDCGmrhs(src_mrhs,res_mrhs);
       MemoryManager::Print();
 #endif
     }
   }

   // Standard CG
-  result=Zero();
-  CGfine(HermOpEO, src, result);
+#if 1
+  {
+    LatticeFermion result(FrbGrid); result=Zero();
+    LatticeFermion src(FrbGrid); random(RNG5,src);
+    result=Zero();
+    CGfine(HermOpEO, src, result);
+  }
+#endif

   Grid_finalize();
   return 0;
 }