Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-13 01:05:36 +00:00)

Commit 153672d8ec (parent 08ca338875): Split CG testing
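The commit extends the multi-right-hand-side CG test to verify MPI-based grid splitting directly (scatter the sources with Grid_split, solve one right-hand side per rank on its own sub-communicator, gather with Grid_unsplit, and residual-check against the unsplit operator), and adds two standalone variants as new files. Condensed from the test sources, the workflow under test looks like this (a sketch, not a standalone program; it assumes the grids, fields and operators constructed in main() below):

    Grid_split  (Umu, s_Umu);     // scatter the gauge field to each 1^4 sub-communicator
    Grid_split  (src, s_src);     // scatter nrhs sources, one right-hand side per rank
    CG(HermOp, s_src, s_res);     // every rank runs an independent CG on its sub-grid
    Grid_unsplit(result, s_res);  // gather the per-rank solutions back to the world grid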
@@ -38,7 +38,7 @@ int main (int argc, char ** argv)
   typedef typename DomainWallFermionR::ComplexField ComplexField;
   typename DomainWallFermionR::ImplParams params;
 
-  const int Ls=8;
+  const int Ls=4;
 
   Grid_init(&argc,&argv);
 
@@ -47,29 +47,24 @@ int main (int argc, char ** argv)
   std::vector<int> mpi_layout  = GridDefaultMpi();
   std::vector<int> mpi_split (mpi_layout.size(),1);
 
-  std::cout << "UGrid (world root)"<<std::endl;
   GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 
-  std::cout << "FGrid (child of UGrid)"<<std::endl;
   GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 
   int nrhs = UGrid->RankCount() ;
 
   /////////////////////////////////////////////
   // Split into 1^4 mpi communicators
   /////////////////////////////////////////////
-  std::cout << "SGrid (world root)"<<std::endl;
   GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
                                             GridDefaultSimd(Nd,vComplex::Nsimd()),
                                             mpi_split,
                                             *UGrid);
 
   GridCartesian * SFGrid           = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
-  std::cout << "SFGrid"<<std::endl;
   GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
-  std::cout << "SrbGrid"<<std::endl;
   GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
-  std::cout << "SFrbGrid"<<std::endl;
 
   ///////////////////////////////////////////////
   // Set up the problem as a 4d spreadout job
@@ -79,10 +74,12 @@ int main (int argc, char ** argv)
   GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
   GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
   std::vector<FermionField>    src(nrhs,FGrid);
+  std::vector<FermionField>    src_chk(nrhs,FGrid);
   std::vector<FermionField> result(nrhs,FGrid);
+  FermionField tmp(FGrid);
 
   for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
-  for(int s=0;s<nrhs;s++) result[s] = zero;
+  for(int s=0;s<nrhs;s++) result[s]=zero;
 
   LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
 
@@ -99,6 +96,8 @@ int main (int argc, char ** argv)
   int me = UGrid->ThisRank();
   LatticeGaugeField s_Umu(SGrid);
   FermionField s_src(SFGrid);
+  FermionField s_src_split(SFGrid);
+  FermionField s_tmp(SFGrid);
   FermionField s_res(SFGrid);
 
   {
@@ -157,6 +156,24 @@ int main (int argc, char ** argv)
     FGrid->Barrier();
   }
 
+  ///////////////////////////////////////////////////////////////
+  // split the source out using MPI instead of I/O
+  ///////////////////////////////////////////////////////////////
+  std::cout << GridLogMessage << " Splitting the grid data "<<std::endl;
+  Grid_split  (src,s_src_split);
+  std::cout << GridLogMessage << " Finished splitting the grid data "<<std::endl;
+  for(int n=0;n<nrhs;n++){
+    std::cout <<GridLogMessage<<"Full "<< n <<" "<< norm2(src[n])<<std::endl;
+  }
+  s_tmp = s_src_split - s_src;
+  for(int n=0;n<nrhs;n++){
+    FGrid->Barrier();
+    if ( n==me ) {
+      std::cerr << GridLogMessage<<"Split "<< me << " " << norm2(s_src_split) << " " << norm2(s_src)<< " diff " << norm2(s_tmp)<<std::endl;
+    }
+    FGrid->Barrier();
+  }
+
 
   ///////////////////////////////////////////////
   // Set up N-solvers as trivially parallel
@@ -164,6 +181,7 @@ int main (int argc, char ** argv)
 
   RealD mass=0.01;
   RealD M5=1.8;
+  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
   DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
 
   std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
@@ -171,25 +189,41 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
 
   MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
+  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
   ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
   s_res = zero;
   CG(HermOp,s_src,s_res);
 
-  ///////////////////////////////////////
-  // Share the information
-  ///////////////////////////////////////
+  /////////////////////////////////////////////////////////////
+  // Report how long they all took
+  /////////////////////////////////////////////////////////////
   std::vector<uint32_t> iterations(nrhs,0);
   iterations[me] = CG.IterationsToComplete;
 
   for(int n=0;n<nrhs;n++){
     UGrid->GlobalSum(iterations[n]);
+    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
   }
 
   /////////////////////////////////////////////////////////////
-  // Report how long they all took
+  // Gather and residual check on the results
   /////////////////////////////////////////////////////////////
-  for(int r=0;r<nrhs;r++){
-    std::cout << GridLogMessage<<" Rank "<<r<<" "<< iterations[r]<<" CG iterations"<<std::endl;
+  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
+  Grid_unsplit(result,s_res);
+  /*
+  Grid_unsplit(src_chk,s_src);
+  for(int n=0;n<nrhs;n++){
+    tmp = src[n]-src_chk[n];
+    std::cout << " src_chk "<<n<<" "<<norm2(src_chk[n])<<" " <<norm2(src[n])<<" " <<norm2(tmp)<< std::endl;
+    std::cout << " diff " <<tmp<<std::endl;
   }
+  */
+
+  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
+  for(int n=0;n<nrhs;n++){
+    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
+    std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)<<std::endl;
+  }
 
   Grid_finalize();
 }
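A note on the hunk above: each rank deliberately solves to a different tolerance, 1.0e-8/(me+1), so the per-rank iteration counts should all differ if the sub-communicators are genuinely independent. The counts are shared by a sum trick: iterations[] is zero everywhere except at a rank's own index, so summing each entry across ranks leaves every rank holding the full table. Sketched with explanatory comments (names as in the test):

    std::vector<uint32_t> iterations(nrhs, 0);
    iterations[me] = CG.IterationsToComplete;   // only my own slot is filled in
    for(int n=0; n<nrhs; n++){
      UGrid->GlobalSum(iterations[n]);          // nonzero only on rank n, so the
                                                // sum broadcasts rank n's count
      std::cout << GridLogMessage << " Rank " << n << " "
                << iterations[n] << " CG iterations" << std::endl;
    }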
tests/solver/Test_dwf_mrhs_cg_mpi.cc (new file, 144 lines)
@@ -0,0 +1,144 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./tests/Test_dwf_mrhs_cg.cc
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+#include <Grid/Grid.h>
+#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+int main (int argc, char ** argv)
+{
+  typedef typename DomainWallFermionR::FermionField FermionField;
+  typedef typename DomainWallFermionR::ComplexField ComplexField;
+  typename DomainWallFermionR::ImplParams params;
+
+  const int Ls=4;
+
+  Grid_init(&argc,&argv);
+
+  std::vector<int> latt_size   = GridDefaultLatt();
+  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+  std::vector<int> mpi_layout  = GridDefaultMpi();
+  std::vector<int> mpi_split (mpi_layout.size(),1);
+
+  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
+  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+
+  int nrhs = UGrid->RankCount() ;
+
+  /////////////////////////////////////////////
+  // Split into 1^4 mpi communicators
+  /////////////////////////////////////////////
+  GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
+                                            GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                            mpi_split,
+                                            *UGrid);
+
+  GridCartesian * SFGrid           = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
+  GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
+  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
+
+  ///////////////////////////////////////////////
+  // Set up the problem as a 4d spreadout job
+  ///////////////////////////////////////////////
+  std::vector<int> seeds({1,2,3,4});
+
+  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
+  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
+  std::vector<FermionField>    src(nrhs,FGrid);
+  std::vector<FermionField>    src_chk(nrhs,FGrid);
+  std::vector<FermionField> result(nrhs,FGrid);
+  FermionField tmp(FGrid);
+
+  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
+  for(int s=0;s<nrhs;s++) result[s]=zero;
+
+  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
+
+  /////////////////
+  // MPI only sends
+  /////////////////
+  int me = UGrid->ThisRank();
+
+  LatticeGaugeField s_Umu(SGrid);
+  FermionField s_src(SFGrid);
+  FermionField s_tmp(SFGrid);
+  FermionField s_res(SFGrid);
+
+  ///////////////////////////////////////////////////////////////
+  // split the source out using MPI instead of I/O
+  ///////////////////////////////////////////////////////////////
+  Grid_split  (Umu,s_Umu);
+  Grid_split  (src,s_src);
+
+  ///////////////////////////////////////////////////////////////
+  // Set up N-solvers as trivially parallel
+  ///////////////////////////////////////////////////////////////
+  RealD mass=0.01;
+  RealD M5=1.8;
+  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
+  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
+
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+  std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+
+  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
+  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
+  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
+  s_res = zero;
+  CG(HermOp,s_src,s_res);
+
+  /////////////////////////////////////////////////////////////
+  // Report how long they all took
+  /////////////////////////////////////////////////////////////
+  std::vector<uint32_t> iterations(nrhs,0);
+  iterations[me] = CG.IterationsToComplete;
+
+  for(int n=0;n<nrhs;n++){
+    UGrid->GlobalSum(iterations[n]);
+    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
+  }
+
+  /////////////////////////////////////////////////////////////
+  // Gather and residual check on the results
+  /////////////////////////////////////////////////////////////
+  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
+  Grid_unsplit(result,s_res);
+
+  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
+  for(int n=0;n<nrhs;n++){
+    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
+    std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)<<std::endl;
+  }
+
+  Grid_finalize();
+}
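Test_dwf_mrhs_cg_mpi.cc is the pure-MPI variant: unlike the test patched above, it has no split-versus-original source comparison; it simply scatters with Grid_split, solves, and gathers. The correctness test is the residual check against the unsplit operator Dchk on the full FGrid; with explanatory comments added (names as in the file):

    // After Grid_unsplit, result[n] should solve MdagM x = src[n] on the full grid.
    for(int n=0; n<nrhs; n++){
      HermOpCk.HermOp(result[n], tmp);   // apply MdagM with the unsplit operator
      tmp = tmp - src[n];                // residual: small iff rank n's CG converged
      std::cout << GridLogMessage << " resid[" << n << "] " << norm2(tmp) << std::endl;
    }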
tests/solver/Test_dwf_mrhs_cg_mpieo.cc (new file, 163 lines)
@@ -0,0 +1,163 @@
+/*************************************************************************************
+
+    Grid physics library, www.github.com/paboyle/Grid
+
+    Source file: ./tests/Test_dwf_mrhs_cg.cc
+
+    Copyright (C) 2015
+
+Author: Peter Boyle <paboyle@ph.ed.ac.uk>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+    See the full license in the file "LICENSE" in the top level distribution directory
+*************************************************************************************/
+/*  END LEGAL */
+#include <Grid/Grid.h>
+#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
+
+using namespace std;
+using namespace Grid;
+using namespace Grid::QCD;
+
+int main (int argc, char ** argv)
+{
+  typedef typename DomainWallFermionR::FermionField FermionField;
+  typedef typename DomainWallFermionR::ComplexField ComplexField;
+  typename DomainWallFermionR::ImplParams params;
+
+  const int Ls=4;
+
+  Grid_init(&argc,&argv);
+
+  std::vector<int> latt_size   = GridDefaultLatt();
+  std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
+  std::vector<int> mpi_layout  = GridDefaultMpi();
+  std::vector<int> mpi_split (mpi_layout.size(),1);
+
+  GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
+  GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
+  GridRedBlackCartesian * rbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
+  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
+
+  int nrhs = UGrid->RankCount() ;
+
+  /////////////////////////////////////////////
+  // Split into 1^4 mpi communicators
+  /////////////////////////////////////////////
+  GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
+                                            GridDefaultSimd(Nd,vComplex::Nsimd()),
+                                            mpi_split,
+                                            *UGrid);
+
+  GridCartesian * SFGrid           = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
+  GridRedBlackCartesian * SrbGrid  = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
+  GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
+
+  ///////////////////////////////////////////////
+  // Set up the problem as a 4d spreadout job
+  ///////////////////////////////////////////////
+  std::vector<int> seeds({1,2,3,4});
+
+  GridParallelRNG pRNG(UGrid );  pRNG.SeedFixedIntegers(seeds);
+  GridParallelRNG pRNG5(FGrid);  pRNG5.SeedFixedIntegers(seeds);
+  std::vector<FermionField>    src(nrhs,FGrid);
+  std::vector<FermionField>    src_chk(nrhs,FGrid);
+  std::vector<FermionField> result(nrhs,FGrid);
+  FermionField tmp(FGrid);
+
+  std::vector<FermionField> src_e(nrhs,FrbGrid);
+  std::vector<FermionField> src_o(nrhs,FrbGrid);
+
+  for(int s=0;s<nrhs;s++) random(pRNG5,src[s]);
+  for(int s=0;s<nrhs;s++) result[s]=zero;
+
+  LatticeGaugeField Umu(UGrid); SU3::HotConfiguration(pRNG,Umu);
+
+  /////////////////
+  // MPI only sends
+  /////////////////
+  int me = UGrid->ThisRank();
+
+  LatticeGaugeField s_Umu(SGrid);
+  FermionField s_src(SFGrid);
+  FermionField s_src_e(SFrbGrid);
+  FermionField s_src_o(SFrbGrid);
+  FermionField s_tmp(SFGrid);
+  FermionField s_res(SFGrid);
+
+  ///////////////////////////////////////////////////////////////
+  // split the source out using MPI instead of I/O
+  ///////////////////////////////////////////////////////////////
+  Grid_split  (Umu,s_Umu);
+  Grid_split  (src,s_src);
+
+  ///////////////////////////////////////////////////////////////
+  // Check even odd cases
+  ///////////////////////////////////////////////////////////////
+  for(int s=0;s<nrhs;s++){
+    pickCheckerboard(Odd , src_o[s], src[s]);
+    pickCheckerboard(Even, src_e[s], src[s]);
+  }
+  Grid_split  (src_e,s_src_e);
+  Grid_split  (src_o,s_src_o);
+  setCheckerboard(s_tmp, s_src_o);
+  setCheckerboard(s_tmp, s_src_e);
+  s_tmp = s_tmp - s_src;
+  std::cout << GridLogMessage<<" EvenOdd Difference " <<norm2(s_tmp)<<std::endl;
+
+  ///////////////////////////////////////////////////////////////
+  // Set up N-solvers as trivially parallel
+  ///////////////////////////////////////////////////////////////
+  RealD mass=0.01;
+  RealD M5=1.8;
+  DomainWallFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5);
+  DomainWallFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5);
+
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+  std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
+  std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
+
+  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
+  MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
+  ConjugateGradient<FermionField> CG((1.0e-8/(me+1)),10000);
+  s_res = zero;
+  CG(HermOp,s_src,s_res);
+
+  /////////////////////////////////////////////////////////////
+  // Report how long they all took
+  /////////////////////////////////////////////////////////////
+  std::vector<uint32_t> iterations(nrhs,0);
+  iterations[me] = CG.IterationsToComplete;
+
+  for(int n=0;n<nrhs;n++){
+    UGrid->GlobalSum(iterations[n]);
+    std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
+  }
+
+  /////////////////////////////////////////////////////////////
+  // Gather and residual check on the results
+  /////////////////////////////////////////////////////////////
+  std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
+  Grid_unsplit(result,s_res);
+
+  std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
+  for(int n=0;n<nrhs;n++){
+    HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
+    std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)<<std::endl;
+  }
+
+  Grid_finalize();
+}
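The mpieo variant adds one further consistency check before solving: red-black (even/odd) checkerboarded fields must survive the split. Condensed from the file above with comments added; pickCheckerboard and setCheckerboard are the checkerboard projection and reassembly calls the test itself uses:

    for(int s=0; s<nrhs; s++){
      pickCheckerboard(Odd , src_o[s], src[s]);  // odd-site half of source s
      pickCheckerboard(Even, src_e[s], src[s]);  // even-site half of source s
    }
    Grid_split(src_e, s_src_e);                  // split each half separately
    Grid_split(src_o, s_src_o);
    setCheckerboard(s_tmp, s_src_o);             // reassemble on the sub-grid
    setCheckerboard(s_tmp, s_src_e);
    s_tmp = s_tmp - s_src;                       // compare with the directly split source
    std::cout << GridLogMessage << " EvenOdd Difference " << norm2(s_tmp) << std::endl;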