
Merge remote-tracking branch 'upstream/develop' into feature/new-solver-algorithms

Commit by Daniel Richtmann, 2017-11-06 13:09:36 +01:00
39 changed files with 1899 additions and 1452 deletions


@@ -555,13 +555,13 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
std::cout<<GridLogMessage << "**************************************************"<< std::endl;
MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
Subspace Aggregates(Coarse5d,FGrid);
Subspace Aggregates(Coarse5d,FGrid,0);
// Aggregates.CreateSubspace(RNG5,HermDefOp,nbasis);
assert ( (nbasis & 0x1)==0);
int nb=nbasis/2;
std::cout<<GridLogMessage << " nbasis/2 = "<<nb<<std::endl;
// Aggregates.CreateSubspace(RNG5,HermDefOp,nb);
Aggregates.CreateSubspaceLanczos(RNG5,HermDefOp,nb);
Aggregates.CreateSubspace(RNG5,HermDefOp,nb);
// Aggregates.CreateSubspaceLanczos(RNG5,HermDefOp,nb);
for(int n=0;n<nb;n++){
G5R5(Aggregates.subspace[n+nb],Aggregates.subspace[n]);
std::cout<<GridLogMessage<<n<<" subspace "<<norm2(Aggregates.subspace[n+nb])<<" "<<norm2(Aggregates.subspace[n]) <<std::endl;
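(A note on the doubling step above: only nb = nbasis/2 vectors are produced by CreateSubspace, and G5R5 — Γ5 applied in the five-dimensional, domain-wall sense — generates a partner for each, so the doubled basis is closed under Γ5R5. A minimal sketch of that step, restating the calls from the hunk with the intent spelled out in comments; the surrounding Aggregation setup is assumed.)

    // nbasis must be even (asserted above); build half the basis, then double it.
    assert((nbasis & 0x1) == 0);
    int nb = nbasis / 2;
    Aggregates.CreateSubspace(RNG5, HermDefOp, nb);        // fills vectors 0 .. nb-1
    for (int n = 0; n < nb; n++) {
      // G5R5 applies gamma_5 in the 5d (domain-wall) sense; storing the result as
      // the partner vector makes the 2*nb-dimensional span closed under Gamma_5 R_5.
      G5R5(Aggregates.subspace[n + nb], Aggregates.subspace[n]);
    }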


@@ -52,15 +52,28 @@ int main (int argc, char ** argv)
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
int nrhs = UGrid->RankCount() ;
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
for(int i=0;i<argc;i++){
if(std::string(argv[i]) == "--split"){
for(int k=0;k<mpi_layout.size();k++){
std::stringstream ss;
ss << argv[i+1+k];
ss >> mpi_split[k];
}
break;
}
}
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++) nrhs *= (mpi_layout[i]/mpi_split[i]);
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid);
*UGrid,me);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
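(The --split parsing above determines how many independent right-hand sides the job handles: each dimension of the global MPI layout is divided by the requested split, and the product of the quotients is the number of sub-communicators, each of which solves one system. A small standalone sketch of that arithmetic, with made-up example layouts; it is illustrative only and not part of the commit.)

    #include <cstdio>
    #include <vector>

    int count_rhs(const std::vector<int> &mpi_layout, const std::vector<int> &mpi_split) {
      int nrhs = 1;
      for (std::size_t i = 0; i < mpi_layout.size(); i++)
        nrhs *= mpi_layout[i] / mpi_split[i];   // sub-grids stacked along dimension i
      return nrhs;
    }

    int main() {
      // a 2.2.2.2 job split into 1^4 sub-communicators: 16 single-rank solves
      std::printf("%d\n", count_rhs({2, 2, 2, 2}, {1, 1, 1, 1}));  // 16
      // the same 16-rank job split into 2.2.1.1 sub-grids: 4 solves on 4 ranks each
      std::printf("%d\n", count_rhs({2, 2, 2, 2}, {2, 2, 1, 1}));  // 4
      return 0;
    }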
@@ -70,7 +83,6 @@ int main (int argc, char ** argv)
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds);
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
std::vector<FermionField> src(nrhs,FGrid);
@@ -93,7 +105,7 @@ int main (int argc, char ** argv)
emptyUserRecord record;
std::string file("./scratch.scidac");
std::string filef("./scratch.scidac.ferm");
int me = UGrid->ThisRank();
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_src_split(SFGrid);
@@ -169,7 +181,7 @@ int main (int argc, char ** argv)
for(int n=0;n<nrhs;n++){
FGrid->Barrier();
if ( n==me ) {
std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<<std::endl;
std::cout << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<<std::endl;
}
FGrid->Barrier();
}
@@ -190,7 +202,7 @@ int main (int argc, char ** argv)
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
ConjugateGradient<FermionField> CG((1.0e-5/(me+1)),10000);
s_res = zero;
CG(HermOp,s_src,s_res);
@@ -218,7 +230,6 @@ int main (int argc, char ** argv)
std::cout << " diff " <<tmp<<std::endl;
}
*/
std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
for(int n=0;n<nrhs;n++){
HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];


@@ -1,4 +1,4 @@
/*************************************************************************************
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
@@ -47,20 +47,36 @@ int main (int argc, char ** argv)
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (mpi_layout.size(),1);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
int nrhs = UGrid->RankCount() ;
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
for(int i=0;i<argc;i++){
if(std::string(argv[i]) == "--split"){
for(int k=0;k<mpi_layout.size();k++){
std::stringstream ss;
ss << argv[i+1+k];
ss >> mpi_split[k];
}
break;
}
}
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++) nrhs *= (mpi_layout[i]/mpi_split[i]);
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid);
*UGrid,me);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
@@ -78,16 +94,46 @@ int main (int argc, char ** argv)
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
for(int s=0;s<nrhs;s++) result[s]=zero;
#undef LEXICO_TEST
#ifdef LEXICO_TEST
{
LatticeFermion lex(FGrid); lex = zero;
LatticeFermion ftmp(FGrid);
Integer stride =10000;
double nrm;
LatticeComplex coor(FGrid);
for(int d=0;d<5;d++){
LatticeCoordinate(coor,d);
ftmp = stride;
ftmp = ftmp * coor;
lex = lex + ftmp;
stride=stride/10;
}
for(int s=0;s<nrhs;s++) {
src[s]=lex;
ftmp = 1000*1000*s;
src[s] = src[s] + ftmp;
}
}
#else
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
tmp = 100.0*s;
src[s] = (src[s] * 0.1) + tmp;
std::cout << " src ]"<<s<<"] "<<norm2(src[s])<<std::endl;
}
#endif
for(int n =0 ; n< nrhs ; n++) {
std::cout << " src"<<n<<"\n"<< src[n] <<std::endl;
}
LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
/////////////////
// MPI only sends
/////////////////
int me = UGrid->ThisRank();
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_tmp(SFGrid);
@@ -98,6 +144,36 @@ int main (int argc, char ** argv)
///////////////////////////////////////////////////////////////
Grid_split (Umu,s_Umu);
Grid_split (src,s_src);
std::cout << " split rank " <<me << " s_src "<<norm2(s_src)<<std::endl;
std::cout << " s_src\n "<< s_src <<std::endl;
#ifdef LEXICO_TEST
FermionField s_src_tmp(SFGrid);
FermionField s_src_diff(SFGrid);
{
LatticeFermion lex(SFGrid); lex = zero;
LatticeFermion ftmp(SFGrid);
Integer stride =10000;
double nrm;
LatticeComplex coor(SFGrid);
for(int d=0;d<5;d++){
LatticeCoordinate(coor,d);
ftmp = stride;
ftmp = ftmp * coor;
lex = lex + ftmp;
stride=stride/10;
}
s_src_tmp=lex;
ftmp = 1000*1000*me;
s_src_tmp = s_src_tmp + ftmp;
}
s_src_diff = s_src_tmp - s_src;
std::cout << " s_src_diff " << norm2(s_src_diff)<<std::endl;
std::cout << " s_src \n" << s_src << std::endl;
std::cout << " s_src_tmp \n" << s_src_tmp << std::endl;
std::cout << " s_src_diff \n" << s_src_diff << std::endl;
#endif
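(The LEXICO_TEST blocks above generate the same coordinate-keyed pattern twice: once on the full grid, offset by 1000*1000*s per right-hand side, and once on the split grid, offset by 1000*1000*me, so that the s_src_diff subtraction reveals whether Grid_split routed every site to the right sub-grid. One way the repeated fill could be factored out, kept as a sketch: the helper name and signature are invented here, while the body reuses only calls that appear in the hunk.)

    // Hypothetical helper, not part of the commit.
    void LexicographicFill(GridBase *grid, LatticeFermion &lex, Integer offset) {
      LatticeFermion ftmp(grid);
      LatticeComplex coor(grid);
      lex = zero;
      Integer stride = 10000;               // 10000,1000,100,10,1 across the 5 dims
      for (int d = 0; d < 5; d++) {
        LatticeCoordinate(coor, d);         // global coordinate in direction d
        ftmp = stride;
        ftmp = ftmp * coor;                 // stride * coordinate at every site
        lex  = lex + ftmp;
        stride = stride / 10;
      }
      ftmp = offset;                        // per-rhs or per-split-rank tag
      lex  = lex + ftmp;
    }

With that helper, the test body would reduce to LexicographicFill(FGrid, src[s], 1000*1000*s) on the full grid and LexicographicFill(SFGrid, s_src_tmp, 1000*1000*me) on the split grid, followed by the s_src_diff check already shown above.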
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
@@ -113,10 +189,11 @@ int main (int argc, char ** argv)
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
ConjugateGradient<FermionField> CG((1.0e-5),10000);
s_res = zero;
CG(HermOp,s_src,s_res);
std::cout << " s_res norm "<<norm2(s_res)<<std::endl;
/////////////////////////////////////////////////////////////
// Report how long they all took
/////////////////////////////////////////////////////////////
@@ -134,10 +211,12 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
Grid_unsplit(result,s_res);
std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
for(int n=0;n<nrhs;n++){
std::cout << " res["<<n<<"] norm "<<norm2(result[n])<<std::endl;
HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)<<std::endl;
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)/norm2(src[n])<<std::endl;
}
Grid_finalize();
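(The residual check above now prints norm2(tmp)/norm2(src[n]) instead of the bare norm2(tmp). Since norm2 in Grid returns the squared L2 norm, the printed quantity is the squared relative residual ||MdagM x_n - b_n||^2 / ||b_n||^2, which is easier to compare across right-hand sides now that the sources are scaled differently (src[s] is offset by 100*s above). Spelled out with the same objects as in the hunk:)

    HermOpCk.HermOp(result[n], tmp);          // tmp = MdagM * x_n on the full grid
    tmp = tmp - src[n];                       // tmp = MdagM x_n - b_n
    RealD rel = norm2(tmp) / norm2(src[n]);   // squared relative residual
    std::cout << GridLogMessage << " resid[" << n << "] " << rel << std::endl;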


@@ -47,7 +47,9 @@ int main (int argc, char ** argv)
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (mpi_layout.size(),1);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
@@ -57,10 +59,11 @@ int main (int argc, char ** argv)
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
int me;
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid);
*UGrid,me);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
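(The split-grid constructor now takes a trailing int: me is declared uninitialised and, on this reading of the change, is filled in by GridCartesian with the index of the sub-communicator this rank belongs to; the removal of int me = UGrid->ThisRank() just below is consistent with that. A sketch of the changed call, with the assumption spelled out in the comments:)

    int me;                                            // which split grid this rank joined
    GridCartesian *SGrid = new GridCartesian(GridDefaultLatt(),
                                             GridDefaultSimd(Nd, vComplex::Nsimd()),
                                             mpi_split,
                                             *UGrid, me);  // assumed: constructor sets 'me'
    // previously: int me = UGrid->ThisRank();  -- the rank in the parent communicator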
@@ -89,8 +92,6 @@ int main (int argc, char ** argv)
/////////////////
// MPI only sends
/////////////////
int me = UGrid->ThisRank();
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_src_e(SFrbGrid);


@@ -0,0 +1,157 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
const int Ls=4;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (mpi_layout.size(),1);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
for(int i=0;i<argc;i++){
if(std::string(argv[i]) == "--split"){
for(int k=0;k<mpi_layout.size();k++){
std::stringstream ss;
ss << argv[i+1+k];
ss >> mpi_split[k];
}
break;
}
}
int nrhs = 1;
for(int i=0;i<mpi_layout.size();i++) nrhs *= (mpi_layout[i]/mpi_split[i]);
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds);
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
for(int s=0;s<nrhs;s++) result[s]=zero;
LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
/////////////////
// MPI only sends
/////////////////
int me = UGrid->ThisRank();
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_tmp(SFGrid);
FermionField s_res(SFGrid);
///////////////////////////////////////////////////////////////
// split the source out using MPI instead of I/O
///////////////////////////////////////////////////////////////
Grid_split (Umu,s_Umu);
Grid_split (src,s_src);
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
s_res = zero;
CG(HermOp,s_src,s_res);
/////////////////////////////////////////////////////////////
// Report how long they all took
/////////////////////////////////////////////////////////////
std::vector<uint32_t> iterations(nrhs,0);
iterations[me] = CG.IterationsToComplete;
for(int n=0;n<nrhs;n++){
UGrid->GlobalSum(iterations[n]);
std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
}
/////////////////////////////////////////////////////////////
// Gather and residual check on the results
/////////////////////////////////////////////////////////////
std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
Grid_unsplit(result,s_res);
std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
for(int n=0;n<nrhs;n++){
HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)<<std::endl;
}
Grid_finalize();
}