mirror of https://github.com/paboyle/Grid.git synced 2024-09-20 01:05:38 +01:00

Merge pull request #415 from paboyle/feature/block_lanczos22

Feature/block lanczos22
Peter Boyle 2023-03-24 12:08:16 -04:00 committed by GitHub
commit dcf172da3b
9 changed files with 2783 additions and 27 deletions

File diff suppressed because it is too large.


@@ -440,7 +440,17 @@ public:
 _grid->GlobalCoorToGlobalIndex(gcoor,gidx);
 _grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
+#if 1
 assert(rank == _grid->ThisRank() );
+#else
+//
+if (rank != _grid->ThisRank() ){
+std::cout <<"rank "<<rank<<" _grid->ThisRank() "<<_grid->ThisRank()<< std::endl;
+// exit(-42);
+// assert(0);
+}
+#endif
 int l_idx=generator_idx(o_idx,i_idx);
 _generators[l_idx] = master_engine;


@@ -167,14 +167,13 @@ void GridCmdOptionInt(std::string &str,int & val)
 return;
 }
-void GridCmdOptionFloat(std::string &str,float & val)
+void GridCmdOptionFloat(std::string &str,double & val)
 {
 std::stringstream ss(str);
 ss>>val;
 return;
 }
 void GridParseLayout(char **argv,int argc,
 Coordinate &latt_c,
 Coordinate &mpi_c)


@@ -57,7 +57,7 @@ void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
 template<class VectorInt>
 void GridCmdOptionIntVector(const std::string &str,VectorInt & vec);
 void GridCmdOptionInt(std::string &str,int & val);
-void GridCmdOptionFloat(std::string &str,float & val);
+void GridCmdOptionFloat(std::string &str,double & val);
 void GridParseLayout(char **argv,int argc,
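The change above widens GridCmdOptionFloat to fill a double, so callers such as CmdJobParams::Parse in the new tests can hand it double members (mass, M5, resid, cheby_l, cheby_u) directly. A minimal standalone sketch of the same stringstream parsing pattern, with a hypothetical helper name, not part of this commit:

#include <iostream>
#include <sstream>
#include <string>

// Hypothetical helper mirroring the new GridCmdOptionFloat(std::string&, double&):
// extract a double from the option payload string.
static void OptionToDouble(const std::string &str, double &val)
{
  std::stringstream ss(str);
  ss >> val;
}

int main()
{
  double mass = 0.0;
  OptionToDouble("0.01", mass);   // e.g. the payload of --mass 0.01
  std::cout << "mass " << mass << std::endl;
  return 0;
}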


@@ -0,0 +1,73 @@
#Example script
DIR=/gpfs/alpine/phy157/proj-shared/phy157dwf/chulwoo/Grid/BL/build/tests/lanczos
BIN=${DIR}/Test_dwf_block_lanczos
VOL='--grid 16.16.16.32 '
GRID='--mpi 1.1.1.4 '
CONF='--gconf ckpoint_lat.IEEE64BIG.2000 '
OPT='--mass 0.01 --M5 1.8 --phase in.params --omega in.params --shm 4096'
#BL='--rbl 16.1024.128.1000.10 --split 1.1.4.4 --check_int 100 --resid 1.0e-5 --cheby_l 0.007 --cheby_u 7 --cheby_n 51'
BL='--rbl 4.128.16.100.10 --split 1.1.1.4 --check_int 25 --resid 1.0e-5 --cheby_l 0.007 --cheby_u 7 --cheby_n 51'
ARGS=${CONF}" "${OPT}" "${BL}" "${VOL}" "${GRID}
export APP="${BIN} ${ARGS}"
echo APP=${APP}
#export JS="jsrun --nrs 32 -a4 -g4 -c42 -dpacked -b packed:7 --smpiargs="-gpu" "
export JS="jsrun --nrs 1 -a4 -g4 -c42 -dpacked -b packed:10 --smpiargs="-gpu" "
$JS $APP
#sample in.param
boundary_phase 0 1 0
boundary_phase 1 1 0
boundary_phase 2 1 0
boundary_phase 3 -1 0
omega 0 0.5 0
omega 1 0.5 0
omega 2 0.5 0
omega 3 0.5 0
omega 4 0.5 0
omega 5 0.5 0
omega 6 0.5 0
omega 7 0.5 0
omega 8 0.5 0
omega 9 0.5 0
omega 10 0.5 0
omega 11 0.5 0
#output
Grid : Message : 1.717474 s : Gauge Configuration ckpoint_lat.IEEE64BIG.2000
Grid : Message : 1.717478 s : boundary_phase[0] = (1,0)
Grid : Message : 1.717497 s : boundary_phase[1] = (1,0)
Grid : Message : 1.717500 s : boundary_phase[2] = (1,0)
Grid : Message : 1.717503 s : boundary_phase[3] = (-1,0)
Grid : Message : 1.717506 s : Ls 12
Grid : Message : 1.717507 s : mass 0.01
Grid : Message : 1.717510 s : M5 1.8
Grid : Message : 1.717512 s : mob_b 1.5
Grid : Message : 1.717514 s : omega[0] = (0.5,0)
Grid : Message : 1.717517 s : omega[1] = (0.5,0)
Grid : Message : 1.717520 s : omega[2] = (0.5,0)
Grid : Message : 1.717523 s : omega[3] = (0.5,0)
Grid : Message : 1.717526 s : omega[4] = (0.5,0)
Grid : Message : 1.717529 s : omega[5] = (0.5,0)
Grid : Message : 1.717532 s : omega[6] = (0.5,0)
Grid : Message : 1.717535 s : omega[7] = (0.5,0)
Grid : Message : 1.717538 s : omega[8] = (0.5,0)
Grid : Message : 1.717541 s : omega[9] = (0.5,0)
Grid : Message : 1.717544 s : omega[10] = (0.5,0)
Grid : Message : 1.717547 s : omega[11] = (0.5,0)
Grid : Message : 1.717550 s : Nu 4
Grid : Message : 1.717551 s : Nk 128
Grid : Message : 1.717552 s : Np 16
Grid : Message : 1.717553 s : Nm 288
Grid : Message : 1.717554 s : Nstop 100
Grid : Message : 1.717555 s : Ntest 25
Grid : Message : 1.717557 s : MaxIter 10
Grid : Message : 1.717558 s : resid 1e-05
Grid : Message : 1.717560 s : Cheby Poly 0.007,7,51
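For the restarted block Lanczos mode used above, --rbl Nu.Nk.Np.Nstop.MaxIter = 4.128.16.100.10, the vector space is enlarged by Np vectors at each restart, so the subspace size reported in the log follows Nm = Nk + Np*MaxIter = 128 + 16*10 = 288, matching the Nu 4, Nk 128, Np 16, Nstop 100, MaxIter 10 and Nm 288 lines in the output.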


@@ -0,0 +1,410 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_block_lanczos.cc
Copyright (C) 2022
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Yong-Chull Jang <ypj@quark.phy.bnl.gov>
Author: Chulwoo Jung <chulwoo@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/util/Init.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
using namespace std;
using namespace Grid;
//using namespace Grid::QCD;
//typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename ZMobiusFermionF::FermionField FermionField;
RealD AllZero(RealD x){ return 0.;}
class CmdJobParams
{
public:
std::string gaugefile;
int Ls;
double mass;
double M5;
double mob_b;
std::vector<ComplexD> omega;
std::vector<Complex> boundary_phase;
std::vector<int> mpi_split;
LanczosType Impl;
int Nu;
int Nk;
int Np;
int Nm;
int Nstop;
int Ntest;
int MaxIter;
double resid;
double low;
double high;
int order;
CmdJobParams()
: gaugefile("Hot"),
Ls(8), mass(0.01), M5(1.8), mob_b(1.5),
Impl(LanczosType::irbl),mpi_split(4,1),
Nu(4), Nk(200), Np(200), Nstop(100), Ntest(1), MaxIter(10), resid(1.0e-8),
low(0.2), high(5.5), order(11)
{Nm=Nk+Np;};
void Parse(char **argv, int argc);
};
void CmdJobParams::Parse(char **argv,int argc)
{
std::string arg;
std::vector<int> vi;
double re,im;
int expect, idx;
std::string vstr;
std::ifstream pfile;
if( GridCmdOptionExists(argv,argv+argc,"--gconf") ){
gaugefile = GridCmdOptionPayload(argv,argv+argc,"--gconf");
}
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
boundary_phase.push_back({re,im});
expect++;
}
}
pfile.close();
} else {
for (int i=0; i<4; ++i) boundary_phase.push_back({1.,0.});
}
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
omega.push_back({re,im});
Ls++;
}
}
pfile.close();
} else {
if( GridCmdOptionExists(argv,argv+argc,"--Ls") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--Ls");
GridCmdOptionInt(arg,Ls);
}
}
if( GridCmdOptionExists(argv,argv+argc,"--mass") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
GridCmdOptionFloat(arg,mass);
}
if( GridCmdOptionExists(argv,argv+argc,"--M5") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--M5");
GridCmdOptionFloat(arg,M5);
}
if( GridCmdOptionExists(argv,argv+argc,"--mob_b") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mob_b");
GridCmdOptionFloat(arg,mob_b);
}
if( GridCmdOptionExists(argv,argv+argc,"--irbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--irbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2];
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::irbl;
Nm = Nk+Np;
}
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--rbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--rbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2]; // vector space is enlarged by adding Np vectors
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::rbl;
Nm = Nk+Np*MaxIter;
}
#if 1
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--split") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--split");
GridCmdOptionIntVector(arg,vi);
for(int i=0;i<mpi_split.size();i++)
mpi_split[i] = vi[i];
}
#endif
if( GridCmdOptionExists(argv,argv+argc,"--check_int") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--check_int");
GridCmdOptionInt(arg,Ntest);
}
if( GridCmdOptionExists(argv,argv+argc,"--resid") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--resid");
GridCmdOptionFloat(arg,resid);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_l") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_l");
GridCmdOptionFloat(arg,low);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_u") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_u");
GridCmdOptionFloat(arg,high);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_n") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_n");
GridCmdOptionInt(arg,order);
}
if ( CartesianCommunicator::RankWorld() == 0 ) {
std::streamsize ss = std::cout.precision();
std::cout << GridLogMessage <<" Gauge Configuration "<< gaugefile << '\n';
std::cout.precision(15);
for ( int i=0; i<4; ++i ) std::cout << GridLogMessage <<" boundary_phase["<< i << "] = " << boundary_phase[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Ls "<< Ls << '\n';
std::cout << GridLogMessage <<" mass "<< mass << '\n';
std::cout << GridLogMessage <<" M5 "<< M5 << '\n';
std::cout << GridLogMessage <<" mob_b "<< mob_b << '\n';
std::cout.precision(15);
for ( int i=0; i<Ls; ++i ) std::cout << GridLogMessage <<" omega["<< i << "] = " << omega[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Nu "<< Nu << '\n';
std::cout << GridLogMessage <<" Nk "<< Nk << '\n';
std::cout << GridLogMessage <<" Np "<< Np << '\n';
std::cout << GridLogMessage <<" Nm "<< Nm << '\n';
std::cout << GridLogMessage <<" Nstop "<< Nstop << '\n';
std::cout << GridLogMessage <<" Ntest "<< Ntest << '\n';
std::cout << GridLogMessage <<" MaxIter "<< MaxIter << '\n';
std::cout << GridLogMessage <<" resid "<< resid << '\n';
std::cout << GridLogMessage <<" Cheby Poly "<< low << "," << high << "," << order << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
CmdJobParams JP;
JP.Parse(argv,argc);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGrid);
// printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
GridCartesian * UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian * FGridF = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGridF);
GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGridF);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGridF); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
// ypj [note] why seed RNG5 again? bug? In this case, run with a default seed().
GridParallelRNG RNG5rb(FrbGridF); RNG5rb.SeedFixedIntegers(seeds5);
LatticeGaugeField Umu(UGrid);
std::vector<LatticeColourMatrix> U(4,UGrid);
LatticeGaugeFieldF UmuF(UGridF);
std::vector<LatticeColourMatrix> UF(4,UGridF);
if ( JP.gaugefile.compare("Hot") == 0 ) {
SU3::HotConfiguration(RNG4, Umu);
} else {
FieldMetaData header;
NerscIO::readConfiguration(Umu,header,JP.gaugefile);
// ypj [fixme] additional checks for the loaded configuration?
}
precisionChange (UmuF,Umu);
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
}
RealD mass = JP.mass;
RealD M5 = JP.M5;
// ypj [fixme] flexible support for a various Fermions
// RealD mob_b = JP.mob_b; // Gparity
// std::vector<ComplexD> omega; // ZMobius
// GparityMobiusFermionD ::ImplParams params;
// std::vector<int> twists({1,1,1,0});
// params.twists = twists;
// GparityMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
// SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
// int mrhs = JP.Nu;
int Ndir=4;
auto mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (Ndir,1);
#if 0
int tmp=mrhs, dir=0;
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_split[dir]<<std::endl;
while ( tmp> 1) {
if ((mpi_split[dir]*2) <= mpi_layout[dir]){
mpi_split[dir] *=2;
tmp = tmp/2;
}
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_layout[dir]<<std::endl;
dir = (dir+1)%Ndir;
}
#endif
int mrhs=1;
for(int i =0;i<Ndir;i++){
mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] ;
mrhs *= JP.mpi_split[i];
}
std::cout << GridLogMessage << "mpi_layout= " << mpi_layout << std::endl;
std::cout << GridLogMessage << "mpi_split= " << mpi_split << std::endl;
std::cout << GridLogMessage << "mrhs= " << mrhs << std::endl;
// assert(JP.Nu==tmp);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators, keeping it explicitly single
/////////////////////////////////////////////
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplexF::Nsimd()),
mpi_split,
*UGrid);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,SGrid);
LatticeGaugeFieldF s_Umu(SGrid);
Grid_split (UmuF,s_Umu);
//WilsonFermionR::ImplParams params;
ZMobiusFermionF::ImplParams params;
params.overlapCommsCompute = true;
params.boundary_phases = JP.boundary_phase;
ZMobiusFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionF,FermionField> HermOp(Ddwf);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> HermOp(DdwfF);
ZMobiusFermionF Dsplit(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionF,FermionField> SHermOp(Dsplit);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> SHermOp(Dsplit);
//std::vector<double> Coeffs { 0.,-1.};
// ypj [note] this may not be supported by some compilers
std::vector<double> Coeffs({ 0.,-1.});
Polynomial<FermionField> PolyX(Coeffs);
//Chebyshev<FermionField> Cheb(0.2,5.5,11);
Chebyshev<FermionField> Cheb(JP.low,JP.high,JP.order);
// Cheb.csv(std::cout);
ImplicitlyRestartedBlockLanczos<FermionField> IRBL(HermOp, SHermOp,
FrbGridF,SFrbGrid,mrhs,
Cheb,
JP.Nstop, JP.Ntest,
JP.Nu, JP.Nk, JP.Nm,
JP.resid,
JP.MaxIter,
IRBLdiagonaliseWithEigen);
// IRBLdiagonaliseWithLAPACK);
IRBL.split_test=1;
std::vector<RealD> eval(JP.Nm);
std::vector<FermionField> src(JP.Nu,FrbGridF);
if (0)
{
// in case RNG is too slow
std::cout << GridLogMessage << "Using RNG5"<<std::endl;
FermionField src_tmp(FGrid);
for ( int i=0; i<JP.Nu; ++i ){
// gaussian(RNG5,src_tmp);
ComplexD rnd;
RealD re;
fillScalar(re,RNG5._gaussian[0],RNG5._generators[0]);
std::cout << i <<" / "<< JP.Nm <<" re "<< re << std::endl;
// printf("%d / %d re %e\n",i,FGrid->_processor,re);
src_tmp=re;
pickCheckerboard(Odd,src[i],src_tmp);
}
RNG5.Report();
} else {
std::cout << GridLogMessage << "Using RNG5rb"<<std::endl;
for ( int i=0; i<JP.Nu; ++i )
gaussian(RNG5rb,src[i]);
RNG5rb.Report();
}
std::vector<FermionField> evec(JP.Nm,FrbGridF);
for(int i=0;i<1;++i){
std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" grid pointer "<< evec[i].Grid() << std::endl;
};
int Nconv;
IRBL.calc(eval,evec,src,Nconv,JP.Impl);
Grid_finalize();
}
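As a worked example of the split-grid bookkeeping in main() above: with --mpi 1.1.1.4 and --split 1.1.1.4, as in the sample script, mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] evaluates to {1,1,1,1} and mrhs = 1*1*1*4 = 4, i.e. the job is split into 1^4 single-rank communicators and four right-hand sides (matching Nu 4 in the log) are distributed across them.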


@@ -0,0 +1,401 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_block_lanczos.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/util/Init.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
using namespace std;
using namespace Grid;
//using namespace Grid::QCD;
//typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename ZMobiusFermionR::FermionField FermionField;
RealD AllZero(RealD x){ return 0.;}
class CmdJobParams
{
public:
std::string gaugefile;
int Ls;
double mass;
double M5;
double mob_b;
std::vector<ComplexD> omega;
std::vector<Complex> boundary_phase;
std::vector<int> mpi_split;
LanczosType Impl;
int Nu;
int Nk;
int Np;
int Nm;
int Nstop;
int Ntest;
int MaxIter;
double resid;
double low;
double high;
int order;
CmdJobParams()
: gaugefile("Hot"),
Ls(8), mass(0.01), M5(1.8), mob_b(1.5),
Impl(LanczosType::irbl),mpi_split(4,1),
Nu(4), Nk(200), Np(200), Nstop(100), Ntest(1), MaxIter(10), resid(1.0e-8),
low(0.2), high(5.5), order(11)
{Nm=Nk+Np;};
void Parse(char **argv, int argc);
};
void CmdJobParams::Parse(char **argv,int argc)
{
std::string arg;
std::vector<int> vi;
double re,im;
int expect, idx;
std::string vstr;
std::ifstream pfile;
if( GridCmdOptionExists(argv,argv+argc,"--gconf") ){
gaugefile = GridCmdOptionPayload(argv,argv+argc,"--gconf");
}
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
boundary_phase.push_back({re,im});
expect++;
}
}
pfile.close();
} else {
for (int i=0; i<4; ++i) boundary_phase.push_back({1.,0.});
}
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
omega.push_back({re,im});
Ls++;
}
}
pfile.close();
} else {
if( GridCmdOptionExists(argv,argv+argc,"--Ls") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--Ls");
GridCmdOptionInt(arg,Ls);
}
}
if( GridCmdOptionExists(argv,argv+argc,"--mass") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
GridCmdOptionFloat(arg,mass);
}
if( GridCmdOptionExists(argv,argv+argc,"--M5") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--M5");
GridCmdOptionFloat(arg,M5);
}
if( GridCmdOptionExists(argv,argv+argc,"--mob_b") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mob_b");
GridCmdOptionFloat(arg,mob_b);
}
if( GridCmdOptionExists(argv,argv+argc,"--irbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--irbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2];
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::irbl;
Nm = Nk+Np;
}
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--rbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--rbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2]; // vector space is enlarged by adding Np vectors
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::rbl;
Nm = Nk+Np*MaxIter;
}
#if 1
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--split") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--split");
GridCmdOptionIntVector(arg,vi);
for(int i=0;i<mpi_split.size();i++)
mpi_split[i] = vi[i];
}
#endif
if( GridCmdOptionExists(argv,argv+argc,"--check_int") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--check_int");
GridCmdOptionInt(arg,Ntest);
}
if( GridCmdOptionExists(argv,argv+argc,"--resid") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--resid");
GridCmdOptionFloat(arg,resid);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_l") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_l");
GridCmdOptionFloat(arg,low);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_u") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_u");
GridCmdOptionFloat(arg,high);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_n") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_n");
GridCmdOptionInt(arg,order);
}
if ( CartesianCommunicator::RankWorld() == 0 ) {
std::streamsize ss = std::cout.precision();
std::cout << GridLogMessage <<" Gauge Configuration "<< gaugefile << '\n';
std::cout.precision(15);
for ( int i=0; i<4; ++i ) std::cout << GridLogMessage <<" boundary_phase["<< i << "] = " << boundary_phase[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Ls "<< Ls << '\n';
std::cout << GridLogMessage <<" mass "<< mass << '\n';
std::cout << GridLogMessage <<" M5 "<< M5 << '\n';
std::cout << GridLogMessage <<" mob_b "<< mob_b << '\n';
std::cout.precision(15);
for ( int i=0; i<Ls; ++i ) std::cout << GridLogMessage <<" omega["<< i << "] = " << omega[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Nu "<< Nu << '\n';
std::cout << GridLogMessage <<" Nk "<< Nk << '\n';
std::cout << GridLogMessage <<" Np "<< Np << '\n';
std::cout << GridLogMessage <<" Nm "<< Nm << '\n';
std::cout << GridLogMessage <<" Nstop "<< Nstop << '\n';
std::cout << GridLogMessage <<" Ntest "<< Ntest << '\n';
std::cout << GridLogMessage <<" MaxIter "<< MaxIter << '\n';
std::cout << GridLogMessage <<" resid "<< resid << '\n';
std::cout << GridLogMessage <<" Cheby Poly "<< low << "," << high << "," << order << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
CmdJobParams JP;
JP.Parse(argv,argc);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGrid);
// printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
// ypj [note] why seed RNG5 again? bug? In this case, run with a default seed().
GridParallelRNG RNG5rb(FrbGrid); RNG5rb.SeedFixedIntegers(seeds5);
LatticeGaugeField Umu(UGrid);
std::vector<LatticeColourMatrix> U(4,UGrid);
if ( JP.gaugefile.compare("Hot") == 0 ) {
SU3::HotConfiguration(RNG4, Umu);
} else {
FieldMetaData header;
NerscIO::readConfiguration(Umu,header,JP.gaugefile);
// ypj [fixme] additional checks for the loaded configuration?
}
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
}
RealD mass = JP.mass;
RealD M5 = JP.M5;
// ypj [fixme] flexible support for a various Fermions
// RealD mob_b = JP.mob_b; // Gparity
// std::vector<ComplexD> omega; // ZMobius
// GparityMobiusFermionD ::ImplParams params;
// std::vector<int> twists({1,1,1,0});
// params.twists = twists;
// GparityMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
// SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
// int mrhs = JP.Nu;
int Ndir=4;
auto mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (Ndir,1);
#if 0
int tmp=mrhs, dir=0;
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_split[dir]<<std::endl;
while ( tmp> 1) {
if ((mpi_split[dir]*2) <= mpi_layout[dir]){
mpi_split[dir] *=2;
tmp = tmp/2;
}
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_layout[dir]<<std::endl;
dir = (dir+1)%Ndir;
}
#endif
int mrhs=1;
for(int i =0;i<Ndir;i++){
mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] ;
mrhs *= JP.mpi_split[i];
}
std::cout << GridLogMessage << "mpi_layout= " << mpi_layout << std::endl;
std::cout << GridLogMessage << "mpi_split= " << mpi_split << std::endl;
std::cout << GridLogMessage << "mrhs= " << mrhs << std::endl;
// assert(JP.Nu==tmp);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,SGrid);
LatticeGaugeField s_Umu(SGrid);
Grid_split (Umu,s_Umu);
//WilsonFermionR::ImplParams params;
ZMobiusFermionR::ImplParams params;
params.overlapCommsCompute = true;
params.boundary_phases = JP.boundary_phase;
ZMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionR,FermionField> HermOp(Ddwf);
SchurDiagOneOperator<ZMobiusFermionR,FermionField> HermOp(Ddwf);
ZMobiusFermionR Dsplit(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionR,FermionField> SHermOp(Dsplit);
SchurDiagOneOperator<ZMobiusFermionR,FermionField> SHermOp(Dsplit);
//std::vector<double> Coeffs { 0.,-1.};
// ypj [note] this may not be supported by some compilers
std::vector<double> Coeffs({ 0.,-1.});
Polynomial<FermionField> PolyX(Coeffs);
//Chebyshev<FermionField> Cheb(0.2,5.5,11);
Chebyshev<FermionField> Cheb(JP.low,JP.high,JP.order);
// Cheb.csv(std::cout);
ImplicitlyRestartedBlockLanczos<FermionField> IRBL(HermOp, SHermOp,
FrbGrid,SFrbGrid,mrhs,
Cheb,
JP.Nstop, JP.Ntest,
JP.Nu, JP.Nk, JP.Nm,
JP.resid,
JP.MaxIter,
IRBLdiagonaliseWithEigen);
// IRBLdiagonaliseWithLAPACK);
IRBL.split_test=0;
std::vector<RealD> eval(JP.Nm);
std::vector<FermionField> src(JP.Nu,FrbGrid);
if (0)
{
// in case RNG is too slow
std::cout << GridLogMessage << "Using RNG5"<<std::endl;
FermionField src_tmp(FGrid);
for ( int i=0; i<JP.Nu; ++i ){
// gaussian(RNG5,src_tmp);
ComplexD rnd;
RealD re;
fillScalar(re,RNG5._gaussian[0],RNG5._generators[0]);
std::cout << i <<" / "<< JP.Nm <<" re "<< re << std::endl;
// printf("%d / %d re %e\n",i,FGrid->_processor,re);
src_tmp=re;
pickCheckerboard(Odd,src[i],src_tmp);
}
RNG5.Report();
} else {
std::cout << GridLogMessage << "Using RNG5rb"<<std::endl;
for ( int i=0; i<JP.Nu; ++i )
gaussian(RNG5rb,src[i]);
RNG5rb.Report();
}
std::vector<FermionField> evec(JP.Nm,FrbGrid);
for(int i=0;i<1;++i){
std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" grid pointer "<< evec[i].Grid() << std::endl;
};
int Nconv;
IRBL.calc(eval,evec,src,Nconv,JP.Impl);
Grid_finalize();
}


@@ -0,0 +1,408 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_block_lanczos.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/util/Init.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedBlockLanczos.h>
using namespace std;
using namespace Grid;
//using namespace Grid::QCD;
//typedef typename GparityDomainWallFermionR::FermionField FermionField;
typedef typename ZMobiusFermionF::FermionField FermionField;
RealD AllZero(RealD x){ return 0.;}
class CmdJobParams
{
public:
std::string gaugefile;
int Ls;
double mass;
double M5;
double mob_b;
std::vector<ComplexD> omega;
std::vector<Complex> boundary_phase;
std::vector<int> mpi_split;
LanczosType Impl;
int Nu;
int Nk;
int Np;
int Nm;
int Nstop;
int Ntest;
int MaxIter;
double resid;
double low;
double high;
int order;
CmdJobParams()
: gaugefile("Hot"),
Ls(8), mass(0.01), M5(1.8), mob_b(1.5),
Impl(LanczosType::irbl),mpi_split(4,1),
Nu(4), Nk(200), Np(200), Nstop(100), Ntest(1), MaxIter(10), resid(1.0e-8),
low(0.2), high(5.5), order(11)
{Nm=Nk+Np;};
void Parse(char **argv, int argc);
};
void CmdJobParams::Parse(char **argv,int argc)
{
std::string arg;
std::vector<int> vi;
double re,im;
int expect, idx;
std::string vstr;
std::ifstream pfile;
if( GridCmdOptionExists(argv,argv+argc,"--gconf") ){
gaugefile = GridCmdOptionPayload(argv,argv+argc,"--gconf");
}
if( GridCmdOptionExists(argv,argv+argc,"--phase") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--phase");
pfile.open(arg);
assert(pfile);
expect = 0;
while( pfile >> vstr ) {
if ( vstr.compare("boundary_phase") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(expect==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
boundary_phase.push_back({re,im});
expect++;
}
}
pfile.close();
} else {
for (int i=0; i<4; ++i) boundary_phase.push_back({1.,0.});
}
if( GridCmdOptionExists(argv,argv+argc,"--omega") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--omega");
pfile.open(arg);
assert(pfile);
Ls = 0;
while( pfile >> vstr ) {
if ( vstr.compare("omega") == 0 ) {
pfile >> vstr;
GridCmdOptionInt(vstr,idx);
assert(Ls==idx);
pfile >> vstr;
GridCmdOptionFloat(vstr,re);
pfile >> vstr;
GridCmdOptionFloat(vstr,im);
omega.push_back({re,im});
Ls++;
}
}
pfile.close();
} else {
if( GridCmdOptionExists(argv,argv+argc,"--Ls") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--Ls");
GridCmdOptionInt(arg,Ls);
}
}
if( GridCmdOptionExists(argv,argv+argc,"--mass") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mass");
GridCmdOptionFloat(arg,mass);
}
if( GridCmdOptionExists(argv,argv+argc,"--M5") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--M5");
GridCmdOptionFloat(arg,M5);
}
if( GridCmdOptionExists(argv,argv+argc,"--mob_b") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--mob_b");
GridCmdOptionFloat(arg,mob_b);
}
if( GridCmdOptionExists(argv,argv+argc,"--irbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--irbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2];
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::irbl;
Nm = Nk+Np;
}
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--rbl") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--rbl");
GridCmdOptionIntVector(arg,vi);
Nu = vi[0];
Nk = vi[1];
Np = vi[2]; // vector space is enlarged by adding Np vectors
Nstop = vi[3];
MaxIter = vi[4];
// ypj[fixme] mode overriding message is needed.
Impl = LanczosType::rbl;
Nm = Nk+Np*MaxIter;
}
#if 1
// block Lanczos with explicit extension of its dimensions
if( GridCmdOptionExists(argv,argv+argc,"--split") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--split");
GridCmdOptionIntVector(arg,vi);
for(int i=0;i<mpi_split.size();i++)
mpi_split[i] = vi[i];
}
#endif
if( GridCmdOptionExists(argv,argv+argc,"--check_int") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--check_int");
GridCmdOptionInt(arg,Ntest);
}
if( GridCmdOptionExists(argv,argv+argc,"--resid") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--resid");
GridCmdOptionFloat(arg,resid);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_l") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_l");
GridCmdOptionFloat(arg,low);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_u") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_u");
GridCmdOptionFloat(arg,high);
}
if( GridCmdOptionExists(argv,argv+argc,"--cheby_n") ){
arg = GridCmdOptionPayload(argv,argv+argc,"--cheby_n");
GridCmdOptionInt(arg,order);
}
if ( CartesianCommunicator::RankWorld() == 0 ) {
std::streamsize ss = std::cout.precision();
std::cout << GridLogMessage <<" Gauge Configuration "<< gaugefile << '\n';
std::cout.precision(15);
for ( int i=0; i<4; ++i ) std::cout << GridLogMessage <<" boundary_phase["<< i << "] = " << boundary_phase[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Ls "<< Ls << '\n';
std::cout << GridLogMessage <<" mass "<< mass << '\n';
std::cout << GridLogMessage <<" M5 "<< M5 << '\n';
std::cout << GridLogMessage <<" mob_b "<< mob_b << '\n';
std::cout.precision(15);
for ( int i=0; i<Ls; ++i ) std::cout << GridLogMessage <<" omega["<< i << "] = " << omega[i] << '\n';
std::cout.precision(ss);
std::cout << GridLogMessage <<" Nu "<< Nu << '\n';
std::cout << GridLogMessage <<" Nk "<< Nk << '\n';
std::cout << GridLogMessage <<" Np "<< Np << '\n';
std::cout << GridLogMessage <<" Nm "<< Nm << '\n';
std::cout << GridLogMessage <<" Nstop "<< Nstop << '\n';
std::cout << GridLogMessage <<" Ntest "<< Ntest << '\n';
std::cout << GridLogMessage <<" MaxIter "<< MaxIter << '\n';
std::cout << GridLogMessage <<" resid "<< resid << '\n';
std::cout << GridLogMessage <<" Cheby Poly "<< low << "," << high << "," << order << std::endl;
}
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
CmdJobParams JP;
JP.Parse(argv,argc);
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGrid);
// printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
GridCartesian * UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian * FGridF = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,UGridF);
GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,UGridF);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGridF); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
// ypj [note] why seed RNG5 again? bug? In this case, run with a default seed().
GridParallelRNG RNG5rb(FrbGridF); RNG5rb.SeedFixedIntegers(seeds5);
LatticeGaugeField Umu(UGrid);
std::vector<LatticeColourMatrix> U(4,UGrid);
LatticeGaugeFieldF UmuF(UGridF);
std::vector<LatticeColourMatrix> UF(4,UGridF);
if ( JP.gaugefile.compare("Hot") == 0 ) {
SU3::HotConfiguration(RNG4, Umu);
} else {
FieldMetaData header;
NerscIO::readConfiguration(Umu,header,JP.gaugefile);
// ypj [fixme] additional checks for the loaded configuration?
}
precisionChange (UmuF,Umu);
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
}
RealD mass = JP.mass;
RealD M5 = JP.M5;
// ypj [fixme] flexible support for a various Fermions
// RealD mob_b = JP.mob_b; // Gparity
// std::vector<ComplexD> omega; // ZMobius
// GparityMobiusFermionD ::ImplParams params;
// std::vector<int> twists({1,1,1,0});
// params.twists = twists;
// GparityMobiusFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
// SchurDiagTwoOperator<GparityMobiusFermionR,FermionField> HermOp(Ddwf);
// int mrhs = JP.Nu;
int Ndir=4;
auto mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (Ndir,1);
#if 0
int tmp=mrhs, dir=0;
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_split[dir]<<std::endl;
while ( tmp> 1) {
if ((mpi_split[dir]*2) <= mpi_layout[dir]){
mpi_split[dir] *=2;
tmp = tmp/2;
}
std::cout << GridLogMessage << "dir= "<<dir <<"tmp= "<<tmp<<"mpi_split= "<<mpi_split[dir]<<"mpi_layout= "<<mpi_layout[dir]<<std::endl;
dir = (dir+1)%Ndir;
}
#endif
int mrhs=1;
for(int i =0;i<Ndir;i++){
mpi_split[i] = mpi_layout[i] / JP.mpi_split[i] ;
mrhs *= JP.mpi_split[i];
}
std::cout << GridLogMessage << "mpi_layout= " << mpi_layout << std::endl;
std::cout << GridLogMessage << "mpi_split= " << mpi_split << std::endl;
std::cout << GridLogMessage << "mrhs= " << mrhs << std::endl;
// assert(JP.Nu==tmp);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators, keeping it explicitly single
/////////////////////////////////////////////
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplexF::Nsimd()),
mpi_split,
*UGrid);
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(JP.Ls,SGrid);
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(JP.Ls,SGrid);
LatticeGaugeFieldF s_Umu(SGrid);
Grid_split (UmuF,s_Umu);
//WilsonFermionR::ImplParams params;
ZMobiusFermionF::ImplParams params;
params.overlapCommsCompute = true;
params.boundary_phases = JP.boundary_phase;
ZMobiusFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionF,FermionField> HermOp(Ddwf);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> HermOp(DdwfF);
ZMobiusFermionF Dsplit(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,JP.omega,1.,0.,params);
// SchurDiagTwoOperator<ZMobiusFermionF,FermionField> SHermOp(Dsplit);
SchurDiagOneOperator<ZMobiusFermionF,FermionField> SHermOp(Dsplit);
//std::vector<double> Coeffs { 0.,-1.};
// ypj [note] this may not be supported by some compilers
std::vector<double> Coeffs({ 0.,-1.});
Polynomial<FermionField> PolyX(Coeffs);
//Chebyshev<FermionField> Cheb(0.2,5.5,11);
Chebyshev<FermionField> Cheb(JP.low,JP.high,JP.order);
// Cheb.csv(std::cout);
ImplicitlyRestartedBlockLanczos<FermionField> IRBL(HermOp, SHermOp,
FrbGridF,SFrbGrid,mrhs,
Cheb,
JP.Nstop, JP.Ntest,
JP.Nu, JP.Nk, JP.Nm,
JP.resid,
JP.MaxIter,
IRBLdiagonaliseWithEigen);
// IRBLdiagonaliseWithLAPACK);
IRBL.split_test=1;
std::vector<RealD> eval(JP.Nm);
std::vector<FermionField> src(JP.Nu,FrbGridF);
if (0)
{
// in case RNG is too slow
std::cout << GridLogMessage << "Using RNG5"<<std::endl;
FermionField src_tmp(FGrid);
for ( int i=0; i<JP.Nu; ++i ){
// gaussian(RNG5,src_tmp);
ComplexD rnd;
RealD re;
fillScalar(re,RNG5._gaussian[0],RNG5._generators[0]);
std::cout << i <<" / "<< JP.Nm <<" re "<< re << std::endl;
// printf("%d / %d re %e\n",i,FGrid->_processor,re);
src_tmp=re;
pickCheckerboard(Odd,src[i],src_tmp);
}
RNG5.Report();
} else {
std::cout << GridLogMessage << "Using RNG5rb"<<std::endl;
for ( int i=0; i<JP.Nu; ++i )
gaussian(RNG5rb,src[i]);
RNG5rb.Report();
}
std::vector<FermionField> evec(JP.Nm,FrbGridF);
for(int i=0;i<1;++i){
std::cout << GridLogMessage << i <<" / "<< JP.Nm <<" grid pointer "<< evec[i].Grid() << std::endl;
};
int Nconv;
IRBL.calc(eval,evec,src,Nconv,JP.Impl);
Grid_finalize();
}


@@ -35,26 +35,45 @@ template<typename Action>
 struct Setup{};
 template<>
-struct Setup<GparityMobiusFermionD>{
-static GparityMobiusFermionD* getAction(LatticeGaugeField &Umu,
+struct Setup<GparityMobiusFermionF>{
+static GparityMobiusFermionF* getAction(LatticeGaugeFieldF &Umu,
 GridCartesian* FGrid, GridRedBlackCartesian* FrbGrid, GridCartesian* UGrid, GridRedBlackCartesian* UrbGrid){
-RealD mass=0.01;
+RealD mass=0.00054;
 RealD M5=1.8;
 RealD mob_b=1.5;
 GparityMobiusFermionD ::ImplParams params;
 std::vector<int> twists({1,1,1,0});
 params.twists = twists;
-return new GparityMobiusFermionD(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
+return new GparityMobiusFermionF(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,params);
 }
 };
 template<>
-struct Setup<DomainWallFermionD>{
-static DomainWallFermionD* getAction(LatticeGaugeField &Umu,
+struct Setup<DomainWallFermionF>{
+static DomainWallFermionF* getAction(LatticeGaugeFieldF &Umu,
 GridCartesian* FGrid, GridRedBlackCartesian* FrbGrid, GridCartesian* UGrid, GridRedBlackCartesian* UrbGrid){
-RealD mass=0.01;
+RealD mass=0.00054;
 RealD M5=1.8;
-return new DomainWallFermionD(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
+return new DomainWallFermionF(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
+}
+};
+template<>
+struct Setup<MobiusFermionF>{
+static MobiusFermionF* getAction(LatticeGaugeFieldF &Umu,
+GridCartesian* FGrid, GridRedBlackCartesian* FrbGrid, GridCartesian* UGrid, GridRedBlackCartesian* UrbGrid){
+RealD mass=0.00054;
+RealD M5=1.8;
+RealD mob_b=1.5;
+std::vector<Complex> boundary = {1,1,1,-1};
+MobiusFermionF::ImplParams Params(boundary);
+std::cout << GridLogMessage << "mass "<<mass<<std::endl;
+std::cout << GridLogMessage << "M5 "<<M5<<std::endl;
+std::cout << GridLogMessage << "mob_b "<<mob_b<<std::endl;
+return new MobiusFermionF(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,mob_b,mob_b-1.,Params);
 }
 };

@@ -63,48 +82,70 @@ struct Setup<DomainWallFermionD>{
 template<typename Action>
 void run(){
 typedef typename Action::FermionField FermionField;
-const int Ls=8;
+const int Ls=12;
 GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
 GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
 GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
 GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
-printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
+// printf("UGrid=%p UrbGrid=%p FGrid=%p FrbGrid=%p\n",UGrid,UrbGrid,FGrid,FrbGrid);
+GridCartesian* UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vComplexF::Nsimd()), GridDefaultMpi());
+GridRedBlackCartesian* UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
+GridCartesian* FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls, UGridF);
+GridRedBlackCartesian* FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGridF);
 std::vector<int> seeds4({1,2,3,4});
 std::vector<int> seeds5({5,6,7,8});
-GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
-GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
-GridParallelRNG RNG5rb(FrbGrid); RNG5.SeedFixedIntegers(seeds5);
+GridParallelRNG RNG5(FGridF); RNG5.SeedFixedIntegers(seeds5);
+GridParallelRNG RNG4(UGridF); RNG4.SeedFixedIntegers(seeds4);
+GridParallelRNG RNG5rb(FrbGridF); RNG5.SeedFixedIntegers(seeds5);
 LatticeGaugeField Umu(UGrid);
-SU<Nc>::HotConfiguration(RNG4, Umu);
+// SU<Nc>::HotConfiguration(RNG4, Umu);
+FieldMetaData header;
+std::string file("./config");
-Action *action = Setup<Action>::getAction(Umu,FGrid,FrbGrid,UGrid,UrbGrid);
+// int precision32 = 0;
+// int tworow = 0;
+// NerscIO::writeConfiguration(Umu,file,tworow,precision32);
+NerscIO::readConfiguration(Umu,header,file);
+LatticeGaugeFieldF UmuF(UGridF);
+precisionChange(UmuF, Umu);
+Action *action = Setup<Action>::getAction(UmuF,FGridF,FrbGridF,UGridF,UrbGridF);
 //MdagMLinearOperator<Action,FermionField> HermOp(Ddwf);
-SchurDiagTwoOperator<Action,FermionField> HermOp(*action);
+// SchurDiagTwoOperator<Action,FermionField> HermOp(*action);
+SchurDiagOneOperator<Action,FermionField> HermOp(*action);
-const int Nstop = 30;
-const int Nk = 40;
+const int Nstop = 150;
+const int Nk = 160;
 const int Np = 40;
 const int Nm = Nk+Np;
 const int MaxIt= 10000;
-RealD resid = 1.0e-8;
+RealD resid = 1.0e-6;
+std::cout << GridLogMessage << "Nstop "<<Nstop<<std::endl;
+std::cout << GridLogMessage << "Nk "<<Nk<<std::endl;
+std::cout << GridLogMessage << "Np "<<Np<<std::endl;
+std::cout << GridLogMessage << "resid "<<resid<<std::endl;
 std::vector<double> Coeffs { 0.,-1.};
 Polynomial<FermionField> PolyX(Coeffs);
-Chebyshev<FermionField> Cheby(0.2,5.,11);
+Chebyshev<FermionField> Cheby(0.0000006,5.5,4001);
+std::cout << GridLogMessage << "Cheby(0.0000006,5.5,4001) "<<std::endl;
 FunctionHermOp<FermionField> OpCheby(Cheby,HermOp);
 PlainHermOp<FermionField> Op (HermOp);
 ImplicitlyRestartedLanczos<FermionField> IRL(OpCheby,Op,Nstop,Nk,Nm,resid,MaxIt);
 std::vector<RealD> eval(Nm);
-FermionField src(FrbGrid);
+FermionField src(FrbGridF);
 gaussian(RNG5rb,src);
-std::vector<FermionField> evec(Nm,FrbGrid);
+std::vector<FermionField> evec(Nm,FrbGridF);
 for(int i=0;i<1;i++){
 std::cout << GridLogMessage <<i<<" / "<< Nm<< " grid pointer "<<evec[i].Grid()<<std::endl;
 };

@@ -119,7 +160,7 @@ int main (int argc, char ** argv)
 {
 Grid_init(&argc,&argv);
-std::string action = "GparityMobius";
+std::string action = "Mobius";
 for(int i=1;i<argc;i++){
 if(std::string(argv[i]) == "-action"){
 action = argv[i+1];

@@ -127,9 +168,11 @@ int main (int argc, char ** argv)
 }
 if(action == "GparityMobius"){
-run<GparityMobiusFermionD>();
+run<GparityMobiusFermionF>();
 }else if(action == "DWF"){
-run<DomainWallFermionD>();
+run<DomainWallFermionF>();
+}else if(action == "Mobius"){
+run<MobiusFermionF>();
 }else{
 std::cout << "Unknown action" << std::endl;
 exit(1);
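The -action dispatch added above selects a Setup<Action> specialization at compile time from a runtime string. A minimal, self-contained sketch of the same pattern, using hypothetical placeholder action types rather than the Grid fermion classes:

#include <iostream>
#include <string>

// Hypothetical stand-ins for GparityMobiusFermionF / DomainWallFermionF / MobiusFermionF.
struct ActionA {};
struct ActionB {};

template<typename Action> struct Setup {};   // primary template: no getAction defined

template<> struct Setup<ActionA> {
  static ActionA* getAction(){ return new ActionA(); }
};
template<> struct Setup<ActionB> {
  static ActionB* getAction(){ return new ActionB(); }
};

template<typename Action>
void run(){
  Action *action = Setup<Action>::getAction();   // per-action construction lives in the trait
  // ... build operators and run the eigensolver here ...
  delete action;
}

int main(int argc, char **argv)
{
  std::string action = "A";                      // default, overridden by "-action <name>"
  for(int i=1;i<argc-1;i++)
    if(std::string(argv[i]) == "-action") action = argv[i+1];

  if(action == "A"){ run<ActionA>(); }
  else if(action == "B"){ run<ActionB>(); }
  else { std::cout << "Unknown action" << std::endl; return 1; }
  return 0;
}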