/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/Test_cayley_ldop_cr.cc

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
    *************************************************************************************/
    /*  END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;
using namespace Grid::QCD;

// Test driver: build a coarsened ("little") Dirac operator from a DWF
// g5R5-Hermitian operator via aggregation, then solve on the coarse grid
// with both positive-definite CG (on MdagM) and indefinite MCR.
int main (int argc, char ** argv)
{
  Grid_init(&argc,&argv);

  const int Ls=8;  // fifth-dimension extent for domain wall fermions

  // Fine 4d/5d grids and their red-black (even-odd) counterparts.
  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);

  ///////////////////////////////////////////////////
  // Construct a coarsened grid; utility for this?
  ///////////////////////////////////////////////////
  std::vector<int> clatt = GridDefaultLatt();
  for(size_t d=0;d<clatt.size();d++){
    clatt[d] = clatt[d]/2;   // block factor 2 in every direction
  }
  GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
  GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);  // Ls=1 on the coarse level

  // Fixed seeds so runs are reproducible.
  std::vector<int> seeds4({1,2,3,4});
  std::vector<int> seeds5({5,6,7,8});
  std::vector<int> cseeds({5,6,7,8});
  GridParallelRNG RNG5(FGrid);    RNG5.SeedFixedIntegers(seeds5);
  GridParallelRNG RNG4(UGrid);    RNG4.SeedFixedIntegers(seeds4);
  GridParallelRNG CRNG(Coarse5d); CRNG.SeedFixedIntegers(cseeds);

  LatticeFermion src(FGrid); gaussian(RNG5,src);
  LatticeFermion result(FGrid); result=zero;
  LatticeFermion ref(FGrid); ref=zero;
  LatticeFermion tmp(FGrid);
  LatticeFermion err(FGrid);
  LatticeGaugeField Umu(UGrid);

  // Load a stored NERSC-format gauge configuration.
  FieldMetaData header;
  std::string file("./ckpoint_lat.400");
  NerscIO::readConfiguration(Umu,header,file);

  // Alternative starting configurations for debugging:
  //  SU3::ColdConfiguration(RNG4,Umu);
  //  SU3::TepidConfiguration(RNG4,Umu);
  //  SU3::HotConfiguration(RNG4,Umu);
  //  Umu=zero;

  RealD mass=0.1;   // quark mass
  RealD M5=1.5;     // domain wall height
  int cb=0;         // checkerboard used when building the aggregates

  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  std::cout<<GridLogMessage << "Building g5R5 hermitian DWF operator" <<std::endl;
  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
  // Gamma5 R5 hermitian form: indefinite but hermitian, suitable for MCR.
  Gamma5R5HermitianLinearOperator<DomainWallFermionR,LatticeFermion> HermIndefOp(Ddwf);

  const int nbasis = 8;   // number of near-null subspace vectors

  typedef Aggregation<vSpinColourVector,vTComplex,nbasis>     Subspace;
  typedef CoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
  typedef LittleDiracOperator::CoarseVector                   CoarseVector;

  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  std::cout<<GridLogMessage << "Calling Aggregation class to build subspace" <<std::endl;
  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  // Subspace vectors are generated with the positive-definite MdagM operator.
  MdagMLinearOperator<DomainWallFermionR,LatticeFermion> HermDefOp(Ddwf);
  Subspace Aggregates(Coarse5d,FGrid,cb);
  Aggregates.CreateSubspace(RNG5,HermDefOp);

  // Coarsen the indefinite operator onto the little Dirac operator.
  LittleDiracOperator LittleDiracOp(*Coarse5d);
  LittleDiracOp.CoarsenOperator(FGrid,HermIndefOp,Aggregates);

  CoarseVector c_src (Coarse5d);
  CoarseVector c_res (Coarse5d);
  gaussian(CRNG,c_src);
  c_res=zero;

  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  std::cout<<GridLogMessage << "Solving mdagm-CG on coarse space "<< std::endl;
  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  MdagMLinearOperator<LittleDiracOperator,CoarseVector> PosdefLdop(LittleDiracOp);
  ConjugateGradient<CoarseVector> CG(1.0e-6,10000);
  CG(PosdefLdop,c_src,c_res);

  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  std::cout<<GridLogMessage << "Solving indef-MCR on coarse space "<< std::endl;
  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  HermitianLinearOperator<LittleDiracOperator,CoarseVector> HermIndefLdop(LittleDiracOp);
  ConjugateResidual<CoarseVector> MCR(1.0e-6,10000);
  MCR(HermIndefLdop,c_src,c_res);

  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  std::cout<<GridLogMessage << "Done "<< std::endl;
  std::cout<<GridLogMessage << "**************************************************"<< std::endl;
  Grid_finalize();
}