mirror of https://github.com/paboyle/Grid.git synced 2025-06-12 20:27:06 +01:00

Compare commits


6 Commits

Author SHA1 Message Date
751fae9f0d Changing boundary phase to be always double 2018-07-10 12:18:12 -07:00
118746b1e9 Adding Mobius BlockCG test 2018-05-28 18:29:50 +00:00
8f6039646b Added Hermitian check in BlockCG 2018-05-24 02:56:53 -04:00
95e9fd1889 More diag output 2018-05-23 04:08:21 -04:00
66da4a38f9 Added Lattice I/O 2018-05-22 00:21:25 -04:00
236868d2e9 Checking in vectorized Blocked CG 2018-05-21 18:51:59 -04:00
9 changed files with 596 additions and 71 deletions

View File

@@ -117,7 +117,7 @@ void TDWF<FImpl>::setup(void)
auto &grb4 = *env().getRbGrid();
auto &g5 = *env().getGrid(par().Ls);
auto &grb5 = *env().getRbGrid(par().Ls);
-std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
+std::vector<ComplexD> boundary = strToVec<ComplexD>(par().boundary);
typename DomainWallFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, DomainWallFermion<FImpl>, getName(), par().Ls, U, g5,
grb5, g4, grb4, par().mass, par().M5, implParams);
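For context on the changed line above: strToVec parses the module's boundary string into a vector of phases, now explicitly double precision. A Grid-free sketch of that kind of parsing, where parseBoundary is a hypothetical stand-in for the Hadrons helper, not its actual implementation:

#include <complex>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical stand-in: turn a whitespace-separated boundary string
// such as "1 1 1 -1" into a vector of double-precision phases.
std::vector<std::complex<double>> parseBoundary(const std::string &s) {
  std::vector<std::complex<double>> out;
  std::istringstream iss(s);
  double re; // the common case: real phases +/-1
  while (iss >> re) out.emplace_back(re, 0.0);
  return out;
}

int main() {
  auto phases = parseBoundary("1 1 1 -1"); // antiperiodic in time
  for (auto &p : phases) std::cout << p << " ";
  std::cout << "\n";
  return 0;
}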

View File

@@ -110,7 +110,7 @@ void TWilson<FImpl>::setup(void)
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &grid = *env().getGrid();
auto &gridRb = *env().getRbGrid();
-std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
+std::vector<ComplexD> boundary = strToVec<ComplexD>(par().boundary);
typename WilsonFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, WilsonFermion<FImpl>, getName(), 1, U, grid, gridRb,
par().mass, implParams);

View File

@@ -121,7 +121,7 @@ void TWilsonClover<FImpl>::setup(void)
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &grid = *env().getGrid();
auto &gridRb = *env().getRbGrid();
-std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
+std::vector<ComplexD> boundary = strToVec<ComplexD>(par().boundary);
typename WilsonCloverFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, WilsonCloverFermion<FImpl>, getName(), 1, U, grid, gridRb, par().mass,
par().csw_r,

View File

@@ -33,7 +33,7 @@ directory
namespace Grid {
-enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS };
+enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS, BlockCGVec };
//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient. Dimension zero should be the block direction
@@ -54,9 +54,10 @@ class BlockConjugateGradient : public OperatorFunction<Field> {
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
+Integer PrintInterval; //GridLogMessages or Iterative
BlockConjugateGradient(BlockCGtype cgtype,int _Orthog,RealD tol, Integer maxit, bool err_on_no_conv = true)
-: Tolerance(tol), CGtype(cgtype), blockDim(_Orthog), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv)
+: Tolerance(tol), CGtype(cgtype), blockDim(_Orthog), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv),PrintInterval(100)
{};
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -127,6 +128,14 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
assert(0);
}
}
void operator()(LinearOperatorBase<Field> &Linop, const std::vector<Field> &Src, std::vector<Field> &Psi)
{
if ( CGtype == BlockCGVec ) {
BlockCGVecsolve(Linop,Src,Psi);
} else {
assert(0);
}
}
////////////////////////////////////////////////////////////////////////////
// BlockCGrQ implementation:
@@ -600,6 +609,272 @@ void CGmultiRHSsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &
IterationsToComplete = k;
}
void InnerProductMatrix(Eigen::MatrixXcd &m , const std::vector<Field> &X, std::vector<Field> &Y){
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<Nblock;bp++) {
m(b,bp) = innerProduct(X[b],Y[bp]);
}
}
void HermCheck( Eigen::MatrixXcd &m, const std::string &str, int ForceHerm=1 , int Print = 0) { // void: nothing is returned
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<=b;bp++) {
if(Print)
std::cout<<GridLogMessage << "HermCheck "<<str<<" "<<b<<" "<<bp<<" : "<< m(b,bp) <<" "<<conj(m(bp,b))<<" " <<m(b,bp)-conj(m(bp,b)) <<std::endl;
if(ForceHerm){
if(b==bp) m(b,b) = real(m(b,b));
else{
auto temp = 0.5*(m(b,bp)+conj(m(bp,b)));
m(b,bp) = temp;
m(bp,b) = conj(temp);
}
}
}
}
void BlockCGVecsolve(LinearOperatorBase<Field> &Linop, const std::vector<Field> &Src, std::vector<Field> &Psi)
{
// int Orthog = blockDim; // First dimension is block dim; this is an assumption
// Nblock = Src._grid->_fdimensions[Orthog];
Nblock = Src.size();
assert(Nblock == Psi.size());
std::cout<<GridLogMessage<<" Block Conjugate Gradient : Nblock "<<Nblock<<std::endl;
for(int b=0;b<Nblock;b++){
Psi[b].checkerboard = Src[0].checkerboard;
conformable(Psi[b], Src[b]);
}
Field Fake(Src[0]);
std::vector<Field> P(Nblock,Fake);
// P.resize(Nblock);
std::vector<Field> AP(Nblock,Fake);
//AP.resize(Nblock);
std::vector<Field> R(Nblock,Fake);
std::vector<Field> TMP(Nblock,Fake);
//R.resize(Nblock);
Eigen::MatrixXcd m_pAp = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_pAp_inv= Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_rr = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_rr_inv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_alpha = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_beta = Eigen::MatrixXcd::Zero(Nblock,Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
// sliceNorm(ssq,Src,Orthog);
for(int b=0;b<Nblock;b++){ ssq[b] = norm2(Src[b]);}
RealD sssum=0;
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
// sliceNorm(residuals,Src,Orthog);
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(Src[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
// sliceNorm(residuals,Psi,Orthog);
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(Psi[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
// Initial search dir is guess
for(int b=0;b<Nblock;b++) Linop.HermOp(Psi[b], AP[b]);
for(int b=0;b<Nblock;b++)
std::cout << b << " Psi " << norm2(Psi[b]) <<" AP "<<norm2(AP[b])<<std::endl;
/************************************************************************
* Block conjugate gradient (Stephen Pickles, thesis 1995, p. 71; O'Leary 1980)
************************************************************************
* O'Leary : R = B - A X
* O'Leary : P = M R ; preconditioner M = 1
* O'Leary : alpha = PAP^{-1} RMR
* O'Leary : beta = RMR^{-1}_old RMR_new
* O'Leary : X=X+Palpha
* O'Leary : R_new=R_old-AP alpha
* O'Leary : P=MR_new+P beta
*/
for(int b=0;b<Nblock;b++){
R[b] = Src[b] - AP[b]; //R_0
P[b] = R[b]; // P_1
}
// sliceInnerProductMatrix(m_rr,R,R,Orthog);
InnerProductMatrix(m_rr,R,R);
HermCheck(m_rr, "R_0 R_0",1,1);
HermCheck(m_rr, "R_0 R_0",0,1);
#if 0
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<Nblock;bp++) {
m_rr(b,bp) = innerProduct(R[b],R[bp]);
std::cout << 0 <<" : R_0 R_0 "<< b <<" "<<bp<<" "<<innerProduct(R[b],R[bp]) <<std::endl;
}
#endif
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
int if_print =0;
for (k = 1; k <= MaxIterations; k++) {
RealD rrsum=0;
for(int b=0;b<Nblock;b++) rrsum+=real(m_rr(b,b));
if(PrintInterval && (k%PrintInterval)==0){
if_print=1;
std::cout << GridLogMessage << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
<<" / "<<std::sqrt(rrsum/sssum) <<std::endl;
} else {
if_print=0;
std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
<<" / "<<std::sqrt(rrsum/sssum) <<std::endl;
}
MatrixTimer.Start();
for(int b=0;b<Nblock;b++) Linop.HermOp(P[b], AP[b]);
MatrixTimer.Stop();
// Alpha
sliceInnerTimer.Start();
// sliceInnerProductMatrix(m_pAp,P,AP,Orthog);
InnerProductMatrix(m_pAp,P,AP);
HermCheck(m_pAp, "P AP",1,if_print);
if(if_print) HermCheck(m_pAp, "P AP",0,if_print);
#if 0
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<Nblock;bp++) {
m_pAp(b,bp) = innerProduct(P[b],AP[bp]);
std::cout << k <<" : m_pAp "<< b <<" "<<bp<<" "<<innerProduct(P[b],AP[bp]) <<std::endl;
}
#endif
sliceInnerTimer.Stop();
m_pAp_inv = m_pAp.inverse();
HermCheck(m_pAp_inv, "inv (P AP)",1,if_print);
if(if_print) HermCheck(m_pAp_inv, "inv (P AP)",0,if_print);
if(if_print)
{
m_alpha = m_pAp*m_pAp_inv;
for(int b=0;b<Nblock;b++){
for(int bp=0;bp<Nblock;bp++) {
std::cout << k <<" : pAp*pAp_inv "<< b <<" "<<bp<<" "<<m_alpha(b,bp)<<std::endl;
}
}
}
m_alpha = m_pAp_inv * m_rr ; //alpha_k+1 = (P_k+1^t A P_k+1)^-1 (R_k^t R_k)
// Psi, R update
sliceMaddTimer.Start();
// sliceMaddMatrix(Psi,m_alpha, P,Psi,Orthog); // X_k+1=X_k+P_k+1 alpha_k+1
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<Nblock;bp++) {
Psi[b] += m_alpha(bp,b)*P[bp]; // X_k+1 = X_k + P_k+1 alpha_k+1
}
for(int b=0;b<Nblock;b++) TMP[b] = R[b];
// sliceMaddMatrix(R ,m_alpha,AP, R,Orthog,-1.0);// sub alpha * AP to resid
for(int b=0;b<Nblock;b++)
for(int bp=0;bp<Nblock;bp++) {
R[b] -= m_alpha(bp,b)*AP[bp]; // R_k+1 = R_k - AP_k+1 alpha_k+1
}
sliceMaddTimer.Stop();
if(if_print)
{
//check
for(int b=0;b<Nblock;b++){
for(int bp=0;bp<Nblock;bp++) {
std::cout << k <<" : R_k+1 R_k "<< b <<" "<<bp<<" "<<innerProduct(R[b],TMP[bp]) <<std::endl;
std::cout << k <<" : R_k R_k "<< b <<" "<<bp<<" "<<innerProduct(TMP[b],TMP[bp]) <<std::endl;
}
}
}
// Beta
m_rr_inv = m_rr.inverse(); //m_rr_inv = (R_k^t R_k)^-1
HermCheck(m_rr_inv,"m_rr_inv",1,if_print);
if(if_print) HermCheck(m_rr_inv,"m_rr_inv",0,if_print);
sliceInnerTimer.Start();
// sliceInnerProductMatrix(m_rr,R,R,Orthog);
InnerProductMatrix(m_rr,R,R);
HermCheck(m_rr,"m_rr",1,if_print);
if(if_print) HermCheck(m_rr,"m_rr",0,if_print);
sliceInnerTimer.Stop();
m_beta = m_rr_inv * m_rr; // beta_k+2 = (R_k^t R_k)^-1 (R_k+1^t R_k+1)
// HermCheck(m_beta,"m_beta");
// Search update
sliceMaddTimer.Start();
// sliceMaddMatrix(AP,m_beta,P,R,Orthog);
for(int b=0;b<Nblock;b++){
AP[b] = R[b];
for(int bp=0;bp<Nblock;bp++) {
AP[b] += m_beta(bp,b)*P[bp]; //AP = R_k+1 + P_k+1 beta_k+1
}
}
if(if_print)
{
//check
for(int b=0;b<Nblock;b++) Linop.HermOp(P[b], TMP[b]);
for(int b=0;b<Nblock;b++){
for(int bp=0;bp<Nblock;bp++) {
std::cout << k <<" : P_k+2 A P "<< b <<" "<<bp<<" "<<innerProduct(AP[b],TMP[bp]) <<std::endl;
}
}
}
sliceMaddTimer.Stop();
for(int b=0;b<Nblock;b++) P[b]= AP[b]; //P_k+2 = AP
/*********************
* convergence monitor
*********************
*/
RealD max_resid=0;
RealD rr;
for(int b=0;b<Nblock;b++){
rr = real(m_rr(b,b))/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"BlockCG converged in "<<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "
<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
for(int b=0;b<Nblock;b++) {
Linop.HermOp(Psi[b], AP[b]);
AP[b] = AP[b]-Src[b];
std::cout << GridLogMessage <<"\t True residual is " << b<<" "<<std::sqrt(norm2(AP[b])/norm2(Src[b])) <<std::endl;
}
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BlockConjugateGradient did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
};
}
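The BlockCGVecsolve added above follows the O'Leary recurrences quoted in its header comment, with HermCheck symmetrizing the small inner-product matrices along the way. As a compact illustration of the same loop, here is a self-contained dense-matrix sketch using Eigen (which the Grid class already depends on); the matrix A, block size, iteration cap, and tolerance are illustrative, and the lattice-field machinery (HermOp, checkerboards, timers) is deliberately omitted:

#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

int main() {
  const int n = 64, s = 4; // problem size and number of right-hand sides (illustrative)
  Eigen::MatrixXcd M = Eigen::MatrixXcd::Random(n, n);
  Eigen::MatrixXcd A = M.adjoint() * M + Eigen::MatrixXcd::Identity(n, n); // Hermitian positive definite
  Eigen::MatrixXcd B = Eigen::MatrixXcd::Random(n, s); // block of sources, one column per RHS
  Eigen::MatrixXcd X = Eigen::MatrixXcd::Zero(n, s);   // initial guess

  Eigen::MatrixXcd R = B - A * X;        // O'Leary: R = B - A X
  Eigen::MatrixXcd P = R;                // O'Leary: P = M R, preconditioner M = 1
  Eigen::MatrixXcd rr = R.adjoint() * R; // R^H R, Hermitian by construction
  const double tol2 = 1.0e-16;           // Tolerance^2, as in the Grid convergence monitor

  for (int k = 1; k <= 200; ++k) {
    Eigen::MatrixXcd AP    = A * P;
    Eigen::MatrixXcd pAp   = P.adjoint() * AP;   // cf. InnerProductMatrix(m_pAp,P,AP)
    Eigen::MatrixXcd alpha = pAp.inverse() * rr; // alpha = (P^H A P)^{-1} (R^H R)
    X += P * alpha;                              // X_{k+1} = X_k + P alpha
    R -= AP * alpha;                             // R_{k+1} = R_k - AP alpha
    Eigen::MatrixXcd rr_new = R.adjoint() * R;
    Eigen::MatrixXcd beta   = rr.inverse() * rr_new; // beta = (R^H R)_old^{-1} (R^H R)_new
    P = R + P * beta;                            // P_{k+2} = R_{k+1} + P beta
    rr = rr_new;

    double max_resid = 0.0; // per-block relative residual, as in the Grid code
    for (int b = 0; b < s; ++b)
      max_resid = std::max(max_resid, rr(b, b).real() / B.col(b).squaredNorm());
    if (max_resid < tol2) {
      std::cout << "block CG converged in " << k << " iterations\n";
      break;
    }
  }
  std::cout << "true residual " << (B - A * X).norm() / B.norm() << "\n";
  return 0;
}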

View File

@@ -179,6 +179,10 @@ public:
assert(checker_dim_mask.size() == _ndimension);
assert(processor_grid.size() == _ndimension);
assert(simd_layout.size() == _ndimension);
std::cout <<"dimensions processor_grid simd_layout checker_dim_mask"<<std::endl;
for(int i=0;i<_ndimension;i++){
std::cout <<i << " "<<dimensions[i]<<" "<<processor_grid[i]<<" "<< simd_layout[i]<<" "<< checker_dim_mask[i]<<std::endl;
}
_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);

View File

@@ -44,11 +44,11 @@ namespace QCD {
struct WilsonImplParams {
bool overlapCommsCompute;
-std::vector<Complex> boundary_phases;
+std::vector<ComplexD> boundary_phases;
WilsonImplParams() : overlapCommsCompute(false) {
boundary_phases.resize(Nd, 1.0);
};
-WilsonImplParams(const std::vector<Complex> phi)
+WilsonImplParams(const std::vector<ComplexD> phi)
: boundary_phases(phi), overlapCommsCompute(false) {}
};
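The motivation for the Complex -> ComplexD change here (commit 751fae9f0d, "Changing boundary phase to be always double"): assuming the usual Grid convention that the Complex typedef follows the default build precision, a single-precision build would store the boundary phases as complex<float>. A minimal sketch of the truncation that pinning the parameter type to ComplexD avoids; the type aliases below are illustrative stand-ins, not Grid's typedefs:

#include <cmath>
#include <complex>
#include <iomanip>
#include <iostream>

using ComplexF = std::complex<float>;  // what Complex would resolve to in a float build
using ComplexD = std::complex<double>; // the type the commit pins the parameters to

int main() {
  ComplexD phase(std::cos(1.0), std::sin(1.0));    // a generic boundary phase exp(i*1.0)
  ComplexF asFloat = static_cast<ComplexF>(phase); // what a float-typed parameter would keep
  std::cout << std::setprecision(17)
            << "double: " << phase.imag() << "\n"
            << "float : " << asFloat.imag() << "\n"; // agrees to only ~7 digits
  return 0;
}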

View File

@@ -68,6 +68,7 @@ int main (int argc, char ** argv)
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++) cout <<" node split = "<<mpi_layout[i]<<" "<<mpi_split[i]<<endl;
for(int i=0;i<mpi_layout.size();i++) nrhs *= (mpi_layout[i]/mpi_split[i]);
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
@@ -99,12 +100,6 @@ int main (int argc, char ** argv)
// Bounce these fields to disk
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Writing out in parallel view "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
emptyUserRecord record;
std::string file("./scratch.scidac");
std::string filef("./scratch.scidac.ferm");
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
@@ -114,57 +109,10 @@ int main (int argc, char ** argv)
{
FGrid->Barrier();
ScidacWriter _ScidacWriter(FGrid->IsBoss());
_ScidacWriter.open(file);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Writing out gauge field "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
_ScidacWriter.writeScidacFieldRecord(Umu,record);
_ScidacWriter.close();
FGrid->Barrier();
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Reading in gauge field "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
ScidacReader _ScidacReader;
_ScidacReader.open(file);
_ScidacReader.readScidacFieldRecord(s_Umu,record);
_ScidacReader.close();
FGrid->Barrier();
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Read in gauge field "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
}
{
for(int n=0;n<nrhs;n++){
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Writing out record "<<n<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::stringstream filefn; filefn << filef << "."<< n;
ScidacWriter _ScidacWriter(FGrid->IsBoss());
_ScidacWriter.open(filefn.str());
_ScidacWriter.writeScidacFieldRecord(src[n],record);
_ScidacWriter.close();
}
FGrid->Barrier();
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Reading back in the single process view "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
for(int n=0;n<nrhs;n++){
if ( n==me ) {
std::stringstream filefn; filefn << filef << "."<< n;
ScidacReader _ScidacReader;
_ScidacReader.open(filefn.str());
_ScidacReader.readScidacFieldRecord(s_src,record);
_ScidacReader.close();
}
}
FGrid->Barrier();
}
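Both mrhs tests size the problem with the same arithmetic seen near the top of this file's hunks: the job's MPI grid is cut into sub-grids of shape mpi_split, and each sub-grid solves one right-hand side, so nrhs equals the number of sub-grids. A standalone sketch of that computation, with illustrative layout values:

#include <iostream>
#include <vector>

int main() {
  std::vector<int> mpi_layout = {2, 2, 2, 2}; // e.g. 16 ranks as a 2^4 grid (illustrative)
  std::vector<int> mpi_split  = {1, 1, 1, 1}; // each split solver runs on a single rank
  int nrhs = 1;
  for (size_t i = 0; i < mpi_layout.size(); ++i)
    nrhs *= mpi_layout[i] / mpi_split[i];     // sub-grids per dimension multiply
  std::cout << "nrhs = " << nrhs << "\n";     // 16 independent right-hand sides
  return 0;
}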

View File

@@ -38,7 +38,7 @@ int main (int argc, char ** argv)
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
-const int Ls=4;
+const int Ls=8;
Grid_init(&argc,&argv);
@@ -69,6 +69,8 @@ int main (int argc, char ** argv)
}
}
double stp = 1.e-5;
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++) nrhs *= (mpi_layout[i]/mpi_split[i]);
@@ -90,7 +92,7 @@ int main (int argc, char ** argv)
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
@@ -123,25 +125,34 @@ int main (int argc, char ** argv)
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
tmp = 10.0*s;
-src[s] = (src[s] * 0.1) + tmp;
+// src[s] = (src[s] * 0.1) + tmp;
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src[s])<<std::endl;
}
#endif
std::cout << GridLogMessage << "Intialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
if(1) {
FieldMetaData header;
std::string file("./lat.in");
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Intialised the COLD Gauge Field"<<std::endl;
if(0) {
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " "<<file<<" successfully read" <<std::endl;
} else {
GridParallelRNG pRNG(UGrid );
std::cout << GridLogMessage << "Intialising 4D RNG "<<std::endl;
pRNG.SeedFixedIntegers(seeds);
std::cout << GridLogMessage << "Intialised 4D RNG "<<std::endl;
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Intialised the HOT Gauge Field"<<std::endl;
// std::cout << " Site zero "<< Umu._odata[0] <<std::endl;
} else {
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Intialised the COLD Gauge Field"<<std::endl;
}
std::cout << " Site zero "<< Umu._odata[0] <<std::endl;
}
int precision32 = 0;
int tworow = 0;
std::string file2("./lat.out");
NerscIO::writeConfiguration(Umu,file2,tworow,precision32);
std::cout << GridLogMessage << " Successfully saved to " <<file2 <<std::endl;
/////////////////
// MPI only sends
/////////////////
@@ -197,9 +208,9 @@ int main (int argc, char ** argv)
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
-ConjugateGradient<FermionField> CG((1.0e-2),10000);
+ConjugateGradient<FermionField> CG((stp),10000);
s_res = zero;
-CG(HermOp,s_src,s_res);
+// CG(HermOp,s_src,s_res);
std::cout << GridLogMessage << " split residual norm "<<norm2(s_res)<<std::endl;
/////////////////////////////////////////////////////////////
@@ -227,5 +238,15 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)/norm2(src[n])<<std::endl;
}
for(int s=0;s<nrhs;s++) result[s]=zero;
int blockDim = 0;//not used for BlockCGVec
BlockConjugateGradient<FermionField> BCGV (BlockCGVec,blockDim,stp,10000);
BCGV.PrintInterval=10;
{
BCGV(HermOpCk,src,result);
}
Grid_finalize();
}

View File

@@ -0,0 +1,277 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename MobiusFermionR::FermionField FermionField;
typedef typename MobiusFermionR::ComplexField ComplexField;
typename MobiusFermionR::ImplParams params;
const int Ls=12;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (mpi_layout.size(),1);
std::vector<int> split_coor (mpi_layout.size(),1);
std::vector<int> split_dim (mpi_layout.size(),1);
std::vector<ComplexD> boundary_phases(Nd,1.);
boundary_phases[Nd-1]=-1.;
params.boundary_phases = boundary_phases;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
for(int i=0;i<argc;i++){
if(std::string(argv[i]) == "--split"){
for(int k=0;k<mpi_layout.size();k++){
std::stringstream ss;
ss << argv[i+1+k];
ss >> mpi_split[k];
}
break;
}
}
double stp = 1.e-5;
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++){
// split_dim[i] = (mpi_layout[i]/mpi_split[i]);
nrhs *= (mpi_layout[i]/mpi_split[i]);
// split_coor[i] = FGrid._processor_coor[i]/mpi_split[i];
}
std::cout << GridLogMessage << "Creating split grids " <<std::endl;
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid,me);
std::cout << GridLogMessage <<"Creating split ferm grids " <<std::endl;
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
std::cout << GridLogMessage <<"Creating split rb grids " <<std::endl;
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
std::cout << GridLogMessage <<"Creating split ferm rb grids " <<std::endl;
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
std::cout << GridLogMessage << "Made the grids"<<std::endl;
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
std::cout << GridLogMessage << "Made the Fermion Fields"<<std::endl;
for(int s=0;s<nrhs;s++) result[s]=zero;
#undef LEXICO_TEST
#ifdef LEXICO_TEST
{
LatticeFermion lex(FGrid); lex = zero;
LatticeFermion ftmp(FGrid);
Integer stride =10000;
double nrm;
LatticeComplex coor(FGrid);
for(int d=0;d<5;d++){
LatticeCoordinate(coor,d);
ftmp = stride;
ftmp = ftmp * coor;
lex = lex + ftmp;
stride=stride/10;
}
for(int s=0;s<nrhs;s++) {
src[s]=lex;
ftmp = 1000*1000*s;
src[s] = src[s] + ftmp;
}
}
#else
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
tmp = 10.0*s;
// src[s] = (src[s] * 0.1) + tmp;
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src[s])<<std::endl;
}
#endif
std::cout << GridLogMessage << "Intialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
FieldMetaData header;
std::string file("./lat.in.32IDfine");
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Intialised the COLD Gauge Field"<<std::endl;
if(1) {
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " "<<file<<" successfully read" <<std::endl;
} else {
GridParallelRNG pRNG(UGrid );
std::cout << GridLogMessage << "Intialising 4D RNG "<<std::endl;
pRNG.SeedFixedIntegers(seeds);
std::cout << GridLogMessage << "Intialised 4D RNG "<<std::endl;
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Intialised the HOT Gauge Field"<<std::endl;
std::cout << " Site zero "<< Umu._odata[0] <<std::endl;
}
int precision32 = 0;
int tworow = 0;
std::string file2("./lat.out");
NerscIO::writeConfiguration(Umu,file2,tworow,precision32);
std::cout << GridLogMessage << " Successfully saved to " <<file2 <<std::endl;
/////////////////
// MPI only sends
/////////////////
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_tmp(SFGrid);
FermionField s_res(SFGrid);
std::cout << GridLogMessage << "Made the split grid fields"<<std::endl;
///////////////////////////////////////////////////////////////
// split the source out using MPI instead of I/O
///////////////////////////////////////////////////////////////
Grid_split (Umu,s_Umu);
Grid_split (src,s_src);
std::cout << GridLogMessage << " split rank " <<me << " s_src "<<norm2(s_src)<<std::endl;
#ifdef LEXICO_TEST
FermionField s_src_tmp(SFGrid);
FermionField s_src_diff(SFGrid);
{
LatticeFermion lex(SFGrid); lex = zero;
LatticeFermion ftmp(SFGrid);
Integer stride =10000;
double nrm;
LatticeComplex coor(SFGrid);
for(int d=0;d<5;d++){
LatticeCoordinate(coor,d);
ftmp = stride;
ftmp = ftmp * coor;
lex = lex + ftmp;
stride=stride/10;
}
s_src_tmp=lex;
ftmp = 1000*1000*me;
s_src_tmp = s_src_tmp + ftmp;
}
s_src_diff = s_src_tmp - s_src;
std::cout << GridLogMessage <<" LEXICO test: s_src_diff " << norm2(s_src_diff)<<std::endl;
#endif
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << " Building the solvers"<<std::endl;
RealD mass=0.00107;
// RealD mass=0.01;
RealD M5=1.8;
RealD mobius_factor=32./12.;
RealD mobius_b=0.5*(mobius_factor+1.);
RealD mobius_c=0.5*(mobius_factor-1.);
MobiusFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5,mobius_b,mobius_c,params);
MobiusFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,mobius_b,mobius_c,params);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<MobiusFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<MobiusFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((stp),100000);
s_res = zero;
if(0){
// CG(HermOp,s_src,s_res);
std::cout << GridLogMessage << " split residual norm "<<norm2(s_res)<<std::endl;
/////////////////////////////////////////////////////////////
// Report how long they all took
/////////////////////////////////////////////////////////////
std::vector<uint32_t> iterations(nrhs,0);
iterations[me] = CG.IterationsToComplete;
for(int n=0;n<nrhs;n++){
UGrid->GlobalSum(iterations[n]);
std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
}
/////////////////////////////////////////////////////////////
// Gather and residual check on the results
/////////////////////////////////////////////////////////////
std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
Grid_unsplit(result,s_res);
std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
for(int n=0;n<nrhs;n++){
std::cout << GridLogMessage<< " res["<<n<<"] norm "<<norm2(result[n])<<std::endl;
HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)/norm2(src[n])<<std::endl;
}
}
// faking enlarged/cooperative CG
if(0){
std::cout << GridLogMessage<<" Trying blocking enlarged CG" <<std::endl;
assert(me < nrhs);
if (me>0) src[me] = src[0];
for(int s=0;s<nrhs;s++){
result[s]=zero;
if(s!=me) src[s] = zero;
}
}
int blockDim = 0;//not used for BlockCGVec
BlockConjugateGradient<FermionField> BCGV (BlockCGVec,blockDim,stp,100000);
BCGV.PrintInterval=10;
{
BCGV(HermOpCk,src,result);
}
Grid_finalize();
}
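A quick check of the Mobius parameters hard-coded in this new test: with the common convention that b - c = 1 (the standard domain-wall normalization) and b + c sets the Mobius scale, mobius_factor = 32/12 gives b = 11/6 and c = 5/6. A worked verification of that arithmetic:

#include <cstdio>

int main() {
  double mobius_factor = 32. / 12.;              // b + c, the Mobius scale (8/3)
  double mobius_b = 0.5 * (mobius_factor + 1.);  // 11/6
  double mobius_c = 0.5 * (mobius_factor - 1.);  // 5/6
  std::printf("b = %f  c = %f  b-c = %f  b+c = %f\n",
              mobius_b, mobius_c,
              mobius_b - mobius_c,               // 1, as required
              mobius_b + mobius_c);              // recovers mobius_factor
  return 0;
}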