mirror of https://github.com/paboyle/Grid.git synced 2024-11-10 07:55:35 +00:00
This commit is contained in:
Michael Marshall 2019-04-30 14:41:48 +01:00
parent d74d443d1b
commit e56ead55ef
7 changed files with 230 additions and 334 deletions

View File

@ -47,9 +47,7 @@
/******************************************************************************
A consistent set of cross-platform methods for big endian <-> host byte ordering
I imagine this exists already?
RELEASE NOTE:
This can be removed once the (deprecated) NamedTensor::ReadBinary & WriteBinary methods are deleted
******************************************************************************/
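For orientation: on Linux these are typically thin wrappers over the <endian.h> byte-order functions. A minimal illustrative sketch — the wrapper names below are hypothetical, not the ones this header defines:

#include <endian.h>
#include <cstdint>
inline uint64_t host_to_big_endian_u64( uint64_t x ) { return htobe64(x); } // host -> big endian
inline uint64_t big_endian_to_host_u64( uint64_t x ) { return be64toh(x); } // big endian -> host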
#if defined(__linux__)
@ -81,7 +79,6 @@
/******************************************************************************
This potentially belongs in CartesianCommunicator
Turns out I don't actually need this when running inside hadrons
******************************************************************************/
BEGIN_MODULE_NAMESPACE(Grid)
@ -139,14 +136,12 @@ inline void SliceShare( GridBase * gridLowDim, GridBase * gridHighDim, void * Bu
/*************************************************************************************
-Grad^2 (Peardon, 2009, pg 2, equation 3)
Not sure where the right home for this is? But presumably in Grid
-Grad^2 (Peardon, 2009, pg 2, equation 3, https://arxiv.org/abs/0905.2160)
Field Type of field the operator will be applied to
GaugeField Gauge field the operator will smear using
TODO CANDIDATE for integration into laplacian operator
should just require adding number of dimensions to act on to constructor,
where the default=all dimensions, but we could specify 3 spatial dimensions
*************************************************************************************/
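For reference, the operator described above is the standard gauge-covariant 3-d Laplacian used in distillation (cf. arXiv:0905.2160); in that paper's notation it takes, roughly, the form

    -\nabla^2_{xy}(t) = 6\,\delta_{xy} - \sum_{j=1}^{3}\left[ \tilde{U}_j(x,t)\,\delta_{x+\hat{j},y} + \tilde{U}_j^{\dagger}(x-\hat{j},t)\,\delta_{x-\hat{j},y} \right]

where the \tilde{U}_j are the (possibly smeared) spatial links supplied via the GaugeField parameter.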
template<typename Field, typename GaugeField=LatticeGaugeField>
@ -202,15 +197,6 @@ public:
}
};
class BFieldIO: Serializable{
public:
using BaryonTensorSet = Eigen::Tensor<Complex, 7>;
GRID_SERIALIZABLE_CLASS_MEMBERS(BFieldIO,
BaryonTensorSet, BField
);
};
END_MODULE_NAMESPACE // Grid
/******************************************************************************
@ -221,29 +207,33 @@ BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MDistil)
typedef Grid::Hadrons::EigenPack<LatticeColourVector> DistilEP;
typedef std::vector<std::vector<std::vector<SpinVector> > > DistilNoises;
using DistilEP = Grid::Hadrons::EigenPack<LatticeColourVector>;
// Noise vector index order: nnoise, nt, nvec, ns
using NoiseTensor = Eigen::Tensor<Complex, 4, Eigen::RowMajor>;
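The index-order comment above is what the modules below rely on when they switch from flat std::vector indexing to tensor indexing. A minimal standalone sketch of the two access patterns (illustrative sizes, assuming only Eigen and std::complex; not code from this commit):

#include <unsupported/Eigen/CXX11/Tensor>
#include <complex>
using Complex     = std::complex<double>;
using NoiseTensor = Eigen::Tensor<Complex, 4, Eigen::RowMajor>;
int main()
{
  const int nnoise = 1, Nt = 8, nvec = 5, Ns = 4; // illustrative sizes only
  NoiseTensor noise( nnoise, Nt, nvec, Ns );      // index order: nnoise, nt, nvec, ns
  noise.setZero();
  // Old flat layout:  noise[inoise + nnoise*(t + Nt*(ivec + nvec*is))]
  // New tensor access:
  noise( 0, 3, 2, 1 ) = Complex( 1.0, 0.0 );
  return 0;
}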
struct DistilParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(DistilParameters,
int, TI,
int, LI,
int, nnoise,
int, tsrc,
int, SI,
int, Ns,
int, Nt_inv)
DistilParameters() = default;
template <class ReaderClass> DistilParameters(Reader<ReaderClass>& Reader){read(Reader,"Distil",*this);}
};
class BFieldIO: Serializable{
public:
using BaryonTensorSet = Eigen::Tensor<Complex, 7>;
GRID_SERIALIZABLE_CLASS_MEMBERS(BFieldIO, BaryonTensorSet, BField );
};
/******************************************************************************
Make a lower dimensional grid
******************************************************************************/
inline GridCartesian * MakeLowerDimGrid( GridCartesian * gridHD )
{
//LOG(Message) << "MakeLowerDimGrid() begin" << std::endl;
int nd{static_cast<int>(gridHD->_ndimension)};
std::vector<int> latt_size = gridHD->_gdimensions;
latt_size[nd-1] = 1;
std::vector<int> simd_layout = GridDefaultSimd(nd-1, vComplex::Nsimd());
simd_layout.push_back( 1 );
std::vector<int> mpi_layout = gridHD->_processors;
mpi_layout[nd-1] = 1;
GridCartesian * gridLD = new GridCartesian(latt_size,simd_layout,mpi_layout,*gridHD);
//LOG(Message) << "MakeLowerDimGrid() end" << std::endl;
return gridLD;
}
Default for distillation file operations. For now only used by NamedTensor
******************************************************************************/
#ifdef HAVE_HDF5
using Default_Reader = Grid::Hdf5Reader;
@ -348,6 +338,14 @@ template<typename T, typename V = void> struct is_named_tensor : public std::fal
template<typename Scalar_, int NumIndices_, uint16_t Endian_Scalar_Size_> struct is_named_tensor<NamedTensor<Scalar_, NumIndices_, Endian_Scalar_Size_>> : public std::true_type {};
template<typename T> struct is_named_tensor<T, typename std::enable_if<std::is_base_of<NamedTensor<typename T::Scalar, T::NumIndices, T::Endian_Scalar_Size_>, T>::value>::type> : public std::true_type {};
/******************************************************************************
Perambulator object
Endian_Scalar_Size can be removed once (deprecated) NamedTensor::ReadBinary & WriteBinary methods deleted
******************************************************************************/
template<typename Scalar_, int NumIndices_, uint16_t Endian_Scalar_Size = sizeof(Scalar_)>
using Perambulator = NamedTensor<Scalar_, NumIndices_, Endian_Scalar_Size>;
/******************************************************************************
Save NamedTensor binary format (NB: On-disk format is Big Endian)
Assumes the Scalar_ objects are contiguous (no padding)
@ -586,31 +584,25 @@ void NamedTensor<Scalar_, NumIndices_, Endian_Scalar_Size>::read(const char * fi
}
/******************************************************************************
Perambulator object
Make a lower dimensional grid in preparation for local slice operations
******************************************************************************/
template<typename Scalar_, int NumIndices_, uint16_t Endian_Scalar_Size = sizeof(Scalar_)>
using Perambulator = NamedTensor<Scalar_, NumIndices_, Endian_Scalar_Size>;
struct DistilParameters: Serializable {
GRID_SERIALIZABLE_CLASS_MEMBERS(DistilParameters,
int, TI,
int, LI,
int, nnoise,
int, tsrc,
int, SI,
int, Ns,
int, Nt,
int, Nt_inv)
DistilParameters() = default;
template <class ReaderClass> DistilParameters(Reader<ReaderClass>& Reader){read(Reader,"Distil",*this);}
};
inline GridCartesian * MakeLowerDimGrid( GridCartesian * gridHD )
{
int nd{static_cast<int>(gridHD->_ndimension)};
std::vector<int> latt_size = gridHD->_gdimensions;
latt_size[nd-1] = 1;
std::vector<int> simd_layout = GridDefaultSimd(nd-1, vComplex::Nsimd());
simd_layout.push_back( 1 );
std::vector<int> mpi_layout = gridHD->_processors;
mpi_layout[nd-1] = 1;
GridCartesian * gridLD = new GridCartesian(latt_size,simd_layout,mpi_layout,*gridHD);
return gridLD;
}
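A usage sketch of this helper, following the pattern of the module setup() functions later in this diff: the returned grid keeps the spatial layout and MPI decomposition of the 4-d grid, with the time direction collapsed to a single slice.

GridCartesian * grid4d = env().getGrid();            // full 4-d grid
GridCartesian * grid3d = MakeLowerDimGrid( grid4d ); // 3-d grid for per-timeslice work
LatticeColourVector evec3d( grid3d );                // e.g. a temporary living on one timeslice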
/*************************************************************************************
Rotate eigenvectors into our phase convention
First component of first eigenvector is real and positive
*************************************************************************************/
inline void RotateEigen(std::vector<LatticeColourVector> & evec)
@ -642,7 +634,5 @@ inline void RotateEigen(std::vector<LatticeColourVector> & evec)
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MDistil_Distil_hpp_

View File

@ -60,13 +60,11 @@ public:
std::string, sink,
bool, multiFile,
int, tsrc,
//int, nnoise,
int, LI,
int, SI,
int, TI,
int, nvec,
int, Ns,
int, Nt,
int, Nt_inv);
};
@ -172,7 +170,7 @@ template <typename FImpl>
void TDistilVectors<FImpl>::setup(void)
{
Cleanup();
auto &noise = envGet(std::vector<Complex>, NoiseVectorName);
auto &noise = envGet(NoiseTensor, NoiseVectorName);
auto &perambulator = envGet(DistilPeramb, PerambulatorName);
// We expect the perambulator to have been created with these indices
@ -180,7 +178,8 @@ void TDistilVectors<FImpl>::setup(void)
for(int i = 0; i < DistilPeramb::NumIndices; i++ )
assert( sIndexNames[i] == perambulator.IndexNames[i] && "Perambulator indices bad" );
nnoise = static_cast<int>( noise.size() );
nnoise = static_cast<int>( noise.dimension(0) );
LOG(Message) << "NoiseTensor has " << nnoise << " noise vectors" << std::endl;
int LI=par().LI;
int Ns=par().Ns;
int SI=par().SI;
@ -225,7 +224,7 @@ void TDistilVectors<FImpl>::Cleanup(void)
template <typename FImpl>
void TDistilVectors<FImpl>::execute(void)
{
auto &noise = envGet(std::vector<Complex>, NoiseVectorName);
auto &noise = envGet(NoiseTensor, NoiseVectorName);
auto &perambulator = envGet(DistilPeramb, PerambulatorName);
auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, LapEvecName);
@ -244,7 +243,7 @@ void TDistilVectors<FImpl>::execute(void)
int LI=par().LI;
int Ns=par().Ns;
int Nt_inv=par().Nt_inv; // TODO: No input, but define through Nt, TI
int Nt=par().Nt;
const int Nt{grid4d->GlobalDimensions()[Tdir]};
int TI=par().TI;
int nvec=par().nvec;
int SI=par().SI;
@ -255,10 +254,10 @@ void TDistilVectors<FImpl>::execute(void)
int t_inv;
if( bMakeSource ) {
auto &rho = envGet(std::vector<FermionField>, SourceName);
for (int inoise = 0; inoise < nnoise; inoise++) {
for (int dk = 0; dk < LI; dk++) {
for (int dt = 0; dt < Nt_inv; dt++) {
for (int ds = 0; ds < SI; ds++) {
for( int inoise = 0; inoise < nnoise; inoise++ ) {
for( int dk = 0; dk < LI; dk++ ) {
for( int dt = 0; dt < Nt_inv; dt++ ) {
for( int ds = 0; ds < SI; ds++ ) {
vecindex = inoise + nnoise * dk + nnoise * LI * ds + nnoise *LI * SI*dt;
rho[vecindex] = zero;
tmp3d_nospin = zero;
@ -268,7 +267,8 @@ void TDistilVectors<FImpl>::execute(void)
for (int ik = dk; ik < nvec; ik += LI){
for (int is = ds; is < Ns; is += SI){
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv,3);
tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
//tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
tmp3d_nospin = evec3d * noise(inoise, t_inv, ik, is);
tmp3d=zero;
pokeSpin(tmp3d,tmp3d_nospin,is);
tmp2=zero;
@ -285,10 +285,10 @@ void TDistilVectors<FImpl>::execute(void)
}
if( bMakeSink ) {
auto &phi = envGet(std::vector<FermionField>, SinkName);
for (int inoise = 0; inoise < nnoise; inoise++) {
for (int dk = 0; dk < LI; dk++) {
for (int dt = 0; dt < Nt_inv; dt++) {
for (int ds = 0; ds < SI; ds++) {
for( int inoise = 0; inoise < nnoise; inoise++ ) {
for( int dk = 0; dk < LI; dk++ ) {
for( int dt = 0; dt < Nt_inv; dt++ ) {
for( int ds = 0; ds < SI; ds++ ) {
vecindex = inoise + nnoise * dk + nnoise * LI * ds + nnoise *LI * SI*dt;
phi[vecindex] = zero;
for (int t = Ntfirst; t < Ntfirst + Ntlocal; t++) {
@ -307,7 +307,5 @@ void TDistilVectors<FImpl>::execute(void)
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MDistil_DistilVectors_hpp_

View File

@ -106,11 +106,11 @@ std::vector<std::string> TNoises<FImpl>::getOutput(void)
template <typename FImpl>
void TNoises<FImpl>::setup(void)
{
const DistilParameters & Distil{par().Distil};
const int Nt{env().getGrid()->GlobalDimensions()[Tdir]};
const int nvec{par().nvec};
envCreate(std::vector<Complex>, getName(), 1, nvec*Distil.Ns*Distil.Nt*Distil.nnoise);
const DistilParameters & Distil{par().Distil};
//envCreate(std::vector<Complex>, getName(), 1, nvec*Distil.Ns*Distil.Nt*Distil.nnoise);
envCreate(NoiseTensor, getName(), 1, Distil.nnoise, Nt, nvec, Distil.Ns);
}
// execution ///////////////////////////////////////////////////////////////////
@ -118,11 +118,11 @@ template <typename FImpl>
void TNoises<FImpl>::execute(void)
{
const std::string &UniqueIdentifier{par().UniqueIdentifier};
auto &noise = envGet(std::vector<Complex>, getName());
auto &noise = envGet(NoiseTensor, getName());
const int nvec{par().nvec};
const DistilParameters & Distil{par().Distil};
const int nnoise{Distil.nnoise};
const int Nt{Distil.Nt};
const int Nt{env().getGrid()->GlobalDimensions()[Tdir]};
const int Ns{Distil.Ns};
const int TI{Distil.TI};
const int LI{Distil.LI};
@ -133,27 +133,26 @@ void TNoises<FImpl>::execute(void)
sRNG.SeedUniqueString(UniqueIdentifier + std::to_string(vm().getTrajectory())); //maybe add more??
Real rn;
for (int inoise=0;inoise<nnoise;inoise++) {
for (int t=0;t<Nt;t++) {
for (int ivec=0;ivec<nvec;ivec++) {
for (int is=0;is<Ns;is++) {
if (exact_distillation)
noise[inoise + nnoise*(t + Nt*(ivec+nvec*is))] = 1.;
for( int inoise = 0; inoise < nnoise; inoise++ ) {
for( int t = 0; t < Nt; t++ ) {
for( int ivec = 0; ivec < nvec; ivec++ ) {
for( int is = 0; is < Ns; is++ ) {
if( exact_distillation )
//noise[inoise + nnoise*(t + Nt*(ivec+nvec*is))] = 1.;
noise(inoise, t, ivec, is) = 1.;
else{
random(sRNG,rn);
// We could use a greater number of complex roots of unity
// ... but this seems to work well
noise[inoise + nnoise*(t + Nt*(ivec+nvec*is))] = (rn > 0.5) ? -1 : 1;
//noise[inoise + nnoise*(t + Nt*(ivec+nvec*is))] = (rn > 0.5) ? -1 : 1;
noise(inoise, t, ivec, is) = (rn > 0.5) ? -1 : 1;
}
}
}
}
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MDistil_Noises_hpp_

View File

@ -154,6 +154,7 @@ void TPerambFromSolve<FImpl>::setup(void)
grid4d = env().getGrid();
grid3d = MakeLowerDimGrid(grid4d);
const int Nt{grid4d->GlobalDimensions()[Tdir]};
const int nvec_reduced{par().nvec_reduced};
@ -161,9 +162,9 @@ void TPerambFromSolve<FImpl>::setup(void)
//envCreate(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName(), 1,
// sIndexNames,Distil.Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
envCreate(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName(), 1,
sIndexNames,Distil.Nt,nvec_reduced,LI_reduced,Distil.nnoise,Distil.Nt_inv,Distil.SI);
sIndexNames,Nt,nvec_reduced,LI_reduced,Distil.nnoise,Distil.Nt_inv,Distil.SI);
envCreate(std::vector<Complex>, getName() + "_noise", 1,
nvec*Distil.Ns*Distil.Nt*Distil.nnoise);
nvec*Distil.Ns*Nt*Distil.nnoise);
envTmp(LatticeColourVector, "result_3d",1,LatticeColourVector(grid3d));
envTmp(LatticeColourVector, "evec3d",1,LatticeColourVector(grid3d));
@ -187,6 +188,11 @@ void TPerambFromSolve<FImpl>::Cleanup(void)
template <typename FImpl>
void TPerambFromSolve<FImpl>::execute(void)
{
GridCartesian * grid4d = env().getGrid();
const int Nt{grid4d->GlobalDimensions()[Tdir]};
const int Ntlocal{grid4d->LocalDimensions()[3]};
const int Ntfirst{grid4d->LocalStarts()[3]};
const int nvec_reduced{par().nvec_reduced};
const int LI_reduced{par().LI_reduced};
const int nvec{par().nvec};
@ -195,14 +201,12 @@ void TPerambFromSolve<FImpl>::execute(void)
const int TI{Distil.TI};
const int SI{Distil.SI};
const int nnoise{Distil.nnoise};
const int Nt{Distil.Nt};
const int Nt_inv{Distil.Nt_inv};
const int tsrc{Distil.tsrc};
const int Ns{Distil.Ns};
const bool full_tdil{TI==Nt};
const bool exact_distillation{full_tdil && LI==nvec};
auto &perambulator = envGet(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>,
getName());
auto &perambulator = envGet(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName());
auto &solve = envGet(std::vector<FermionField>, par().solve);
auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
@ -210,11 +214,6 @@ void TPerambFromSolve<FImpl>::execute(void)
envGetTmp(LatticeColourVector, result_3d);
envGetTmp(LatticeColourVector, evec3d);
GridCartesian * grid4d = env().getGrid();
int Ntlocal = grid4d->LocalDimensions()[3];
int Ntfirst = grid4d->LocalStarts()[3];
for (int inoise = 0; inoise < nnoise; inoise++) {
for (int dk = 0; dk < LI_reduced; dk++) {
for (int dt = 0; dt < Nt_inv; dt++) {

View File

@ -135,7 +135,9 @@ template <typename FImpl>
void TPerambulator<FImpl>::setup(void)
{
Cleanup();
grid4d = env().getGrid();
grid3d = MakeLowerDimGrid(grid4d);
const int Nt{grid4d->GlobalDimensions()[Tdir]};
const int nvec{par().nvec};
const DistilParameters & Distil{par().Distil};
const int LI{Distil.LI};
@ -145,13 +147,10 @@ void TPerambulator<FImpl>::setup(void)
std::array<std::string,6> sIndexNames{"Nt", "nvec", "LI", "nnoise", "Nt_inv", "SI"};
envCreate(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName(), 1,
sIndexNames,Distil.Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
sIndexNames,Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
envCreate(std::vector<FermionField>, getName() + "_unsmeared_sink", 1,
nnoise*LI*Ns*Nt_inv, envGetGrid(FermionField));
grid4d = env().getGrid();
grid3d = MakeLowerDimGrid(grid4d);//new GridCartesian(latt_size,simd_layout_3,mpi_layout,*grid4d);
envTmpLat(LatticeSpinColourVector, "dist_source");
envTmpLat(LatticeSpinColourVector, "tmp2");
envTmpLat(LatticeSpinColourVector, "result");
@ -165,7 +164,6 @@ void TPerambulator<FImpl>::setup(void)
envTmpLat(FermionField, "v4dtmp");
envTmpLat(FermionField, "v5dtmp", Ls_);
envTmpLat(FermionField, "v5dtmp_sol", Ls_);
}
// clean up any temporaries created by setup (that aren't stored in the environment)
@ -183,16 +181,18 @@ void TPerambulator<FImpl>::Cleanup(void)
template <typename FImpl>
void TPerambulator<FImpl>::execute(void)
{
const int Nt{grid4d->GlobalDimensions()[Tdir]};
const int nvec{par().nvec};
const DistilParameters & Distil{par().Distil};
const int LI{Distil.LI};
const int SI{Distil.SI};
const int TI{Distil.TI};
const int nnoise{Distil.nnoise};
const int Nt{Distil.Nt};
const int Nt_inv{Distil.Nt_inv}; // TODO: PROBABLY BETTER: if (full_tdil) Nt_inv=1; else Nt_inv = TI;
const int tsrc{Distil.tsrc};
const int Ns{Distil.Ns};
const bool full_tdil{TI==Nt};
const bool exact_distillation{full_tdil && LI==nvec};
const int Nt_inv{full_tdil ? 1 : TI}; // TODO: PROBABLY BETTER: if (full_tdil) Nt_inv=1; else Nt_inv = TI;
auto &solver=envGet(Solver, par().solver);
auto &mat = solver.getFMat();
@ -200,13 +200,10 @@ void TPerambulator<FImpl>::execute(void)
envGetTmp(FermionField, v5dtmp);
envGetTmp(FermionField, v5dtmp_sol);
const bool full_tdil{TI==Nt};
const bool exact_distillation{full_tdil && LI==nvec};
const std::string &UniqueIdentifier{par().UniqueIdentifier};
auto &noise = envGet(std::vector<Complex>, par().noise);
//auto &noise = envGet(std::vector<Complex>, par().noise);
auto &noise = envGet(NoiseTensor, par().noise);
auto &perambulator = envGet(Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName());
auto &epack = envGet(Grid::Hadrons::EigenPack<LatticeColourVector>, par().eigenPack);
auto &unsmeared_sink = envGet(std::vector<FermionField>, getName() + "_unsmeared_sink");
@ -245,7 +242,8 @@ void TPerambulator<FImpl>::execute(void)
for (int ik = dk; ik < nvec; ik += LI){
for (int is = ds; is < Ns; is += SI){
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv,3);
tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
//tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
tmp3d_nospin = evec3d * noise(inoise, t_inv, ik, is);
tmp3d=zero;
pokeSpin(tmp3d,tmp3d_nospin,is);
tmp2=zero;

View File

@ -101,17 +101,13 @@ std::vector<std::string> TLoadPerambulator<FImpl>::getOutput(void)
template <typename FImpl>
void TLoadPerambulator<FImpl>::setup(void)
{
GridCartesian * grid4d = env().getGrid();
const int Nt{grid4d->GlobalDimensions()[Tdir]};
const int nvec{par().nvec};
const MDistil::DistilParameters & Distil{par().Distil};
const int LI{Distil.LI};
const int nnoise{Distil.nnoise};
const int Nt_inv{Distil.Nt_inv}; // TODO: PROBABLY BETTER: if (full_tdil) Nt_inv=1; else Nt_inv = TI;
const int Ns{Distil.Ns};
std::array<std::string,6> sIndexNames{"Nt", "nvec", "LI", "nnoise", "Nt_inv", "SI"};
envCreate(MDistil::Perambulator<SpinVector COMMA 6 COMMA sizeof(Real)>, getName(), 1,
sIndexNames,Distil.Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
sIndexNames,Nt,nvec,Distil.LI,Distil.nnoise,Distil.Nt_inv,Distil.SI);
}
// execution ///////////////////////////////////////////////////////////////////

View File

@ -69,8 +69,6 @@ void test_Global(Application &application)
void test_SolverS(Application &application)
{
std::string boundary = "1 1 1 -1";
MAction::DWF::Par actionPar;
actionPar.gauge = "gauge";
actionPar.Ls = 16;
@ -79,14 +77,13 @@ void test_SolverS(Application &application)
actionPar.boundary = boundary;
actionPar.twist = "0. 0. 0. 0.";
application.createModule<MAction::DWF>("DWF_s", actionPar);
MSolver::RBPrecCG::Par solverPar;
solverPar.action = "DWF_s";
solverPar.residual = 1.0e-7;
solverPar.residual = 1.0e-2;
solverPar.maxIteration = 10000;
application.createModule<MSolver::RBPrecCG>("CG_s", solverPar);
}
/////////////////////////////////////////////////////////////
// Test creation of laplacian eigenvectors
/////////////////////////////////////////////////////////////
@ -138,7 +135,6 @@ void test_Perambulators(Application &application)
PerambPar.Distil.TI=8;
PerambPar.nvec=5;
PerambPar.Distil.Ns=4;
PerambPar.Distil.Nt=8;
PerambPar.Distil.Nt_inv=1;
//PerambPar.Solver.mass=0.005;
//PerambPar.Solver.M5=1.8;
@ -166,7 +162,6 @@ void test_MultiPerambulators(Application &application)
PerambPar.Distil.TI=8;
PerambPar.nvec=5;
PerambPar.Distil.Ns=4;
PerambPar.Distil.Nt=8;
PerambPar.Distil.Nt_inv=1;
application.createModule<MDistil::Peramb>("Peramb5",PerambPar);
MDistil::PerambFromSolve::Par SolvePar;
@ -181,7 +176,6 @@ void test_MultiPerambulators(Application &application)
SolvePar.nvec_reduced=2;
SolvePar.LI_reduced=2;
SolvePar.Distil.Ns=4;
SolvePar.Distil.Nt=8;
SolvePar.Distil.Nt_inv=1;
application.createModule<MDistil::PerambFromSolve>("Peramb2",SolvePar);
SolvePar.PerambFileName="Peramb3";
@ -199,7 +193,6 @@ void test_MultiPerambulators(Application &application)
DistilVecPar.TI=8;
DistilVecPar.nvec=2;
DistilVecPar.Ns=4;
DistilVecPar.Nt=8;
DistilVecPar.Nt_inv=1;
application.createModule<MDistil::DistilVectors>("DistilVecs2",DistilVecPar);
DistilVecPar.perambulator="Peramb3";
@ -215,7 +208,7 @@ void test_MultiPerambulators(Application &application)
A2AMesonFieldPar.left="DistilVecs2_rho";
A2AMesonFieldPar.right="DistilVecs2_rho";
A2AMesonFieldPar.output="MesonSinksRho2";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.gammas="Identity";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
@ -247,11 +240,13 @@ void test_Noises(Application &application) {
MDistil::NoisesPar NoisePar;
NoisePar.UniqueIdentifier = "full_dilution";
NoisePar.nvec = 5;
NoisePar.Distil.TI = 8;
NoisePar.Distil.SI = 4;
NoisePar.Distil.LI = 5;
NoisePar.Distil.nnoise = 1;
NoisePar.Distil.Ns = 4;
NoisePar.Distil.Nt = 8;
NoisePar.Distil.Nt = 1;
NoisePar.Distil.Nt_inv = 1;
NoisePar.Distil.tsrc = 0;
application.createModule<MDistil::Noises>("Peramb_noise",NoisePar);
}
@ -269,13 +264,11 @@ void test_DistilVectors(Application &application)
DistilVecPar.perambulator="Peramb";
DistilVecPar.lapevec="LapEvec";
DistilVecPar.tsrc = 0;
//DistilVecPar.nnoise = 1;
DistilVecPar.LI=5;
DistilVecPar.SI=4;
DistilVecPar.TI=8;
DistilVecPar.nvec=5;
DistilVecPar.Ns=4;
DistilVecPar.Nt=8;
DistilVecPar.Nt_inv=1;
application.createModule<MDistil::DistilVectors>("DistilVecs",DistilVecPar);
}
@ -294,7 +287,6 @@ void test_PerambulatorsS(Application &application)
PerambPar.Distil.TI=8;
PerambPar.nvec=5;
PerambPar.Distil.Ns=4;
PerambPar.Distil.Nt=8;
PerambPar.Distil.Nt_inv=1;
//PerambPar.Solver.mass=0.005; //strange mass???
//PerambPar.Solver.M5=1.8;
@ -322,7 +314,6 @@ void test_DistilVectorsS(Application &application)
DistilVecPar.TI=32;
DistilVecPar.nvec=5;
DistilVecPar.Ns=4;
DistilVecPar.Nt=8;
DistilVecPar.Nt_inv=1;
application.createModule<MDistil::DistilVectors>("DistilVecsS",DistilVecPar);
}
@ -338,12 +329,40 @@ void test_MesonSink(Application &application)
A2AMesonFieldPar.left="g5phi";
A2AMesonFieldPar.right="Peramb_unsmeared_sink";
A2AMesonFieldPar.output="DistilFields";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.gammas="Identity";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonSink",A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// MesonFields
/////////////////////////////////////////////////////////////
void test_MesonField(Application &application, const char * pszFileSuffix,
const char * pszObjectLeft = nullptr, const char * pszObjectRight = nullptr )
{
// DistilVectors parameters
if( pszObjectLeft == nullptr )
pszObjectLeft = pszFileSuffix;
if( pszObjectRight == nullptr )
pszObjectRight = pszObjectLeft;
MContraction::A2AMesonField::Par A2AMesonFieldPar;
A2AMesonFieldPar.left="DistilVecs";
A2AMesonFieldPar.right=A2AMesonFieldPar.left;
A2AMesonFieldPar.left.append( pszObjectLeft );
A2AMesonFieldPar.right.append( pszObjectRight );
A2AMesonFieldPar.output="MesonSinks";
A2AMesonFieldPar.output.append( pszFileSuffix );
A2AMesonFieldPar.gammas="Identity";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
std::string sObjectName{"DistilMesonField"};
sObjectName.append( pszFileSuffix );
application.createModule<MContraction::A2AMesonField>(sObjectName, A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// g5*unsmeared
/////////////////////////////////////////////////////////////
@ -360,86 +379,11 @@ void test_g5_sinks(Application &application)
g5_multiplyPar.Nt_inv=1;
application.createModule<MDistil::g5_multiply>("g5phi",g5_multiplyPar);
}
#endif
/////////////////////////////////////////////////////////////
// MesonFields
/////////////////////////////////////////////////////////////
void test_MesonFieldSL(Application &application)
{
// DistilVectors parameters
MContraction::A2AMesonField::Par A2AMesonFieldPar;
A2AMesonFieldPar.left="DistilVecsS_phi";
//A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.right="DistilVecs_phi";
A2AMesonFieldPar.output="DistilFieldsS";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonFieldS",A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// MesonFields - phiphi
/////////////////////////////////////////////////////////////
void test_MesonField(Application &application)
{
// DistilVectors parameters
MContraction::A2AMesonField::Par A2AMesonFieldPar;
A2AMesonFieldPar.left="DistilVecs_phi";
//A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.right="DistilVecs_phi";
A2AMesonFieldPar.output="MesonSinksPhi";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonField",A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// MesonFields - rhorho
/////////////////////////////////////////////////////////////
void test_MesonFieldRho(Application &application)
{
// DistilVectors parameters
MContraction::A2AMesonField::Par A2AMesonFieldPar;
A2AMesonFieldPar.left="DistilVecs_rho";
//A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.output="MesonSinksRho";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonFieldRho",A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// MesonFields - rhorhoall
/////////////////////////////////////////////////////////////
void test_MesonFieldRhoAll(Application &application)
{
// DistilVectors parameters
MContraction::A2AMesonField::Par A2AMesonFieldPar;
//A2AMesonFieldPar.left="DistilVecs_rho_all_tsrc";
//A2AMesonFieldPar.right="DistilVecs_rho_all_tsrc";
A2AMesonFieldPar.left="DistilVecs_rho";
A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.output="MesonSinksRhoAll";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonFieldRhoAll",A2AMesonFieldPar);
}
/////////////////////////////////////////////////////////////
// BaryonFields - phiphiphi - efficient
/////////////////////////////////////////////////////////////
#ifdef DISTIL_PRE_RELEASE
void test_BaryonFieldPhi2(Application &application)
{
// DistilVectors parameters
@ -577,7 +521,6 @@ void test_PerambulatorsSolve(Application &application)
PerambFromSolvePar.Distil.TI=8;
PerambFromSolvePar.nvec=5;
PerambFromSolvePar.Distil.Ns=4;
PerambFromSolvePar.Distil.Nt=8;
PerambFromSolvePar.Distil.Nt_inv=1;
application.createModule<MDistil::PerambFromSolve>("PerambAslashS",PerambFromSolvePar);
}
@ -602,25 +545,6 @@ void test_DistilVectorsAslashSeq(Application &application)
DistilSinkPar.Nt_inv=1;
application.createModule<MDistil::DistilSink>("DistilVecsAslashSeq",DistilSinkPar);
}
/////////////////////////////////////////////////////////////
// MesonFields - aslaaaash
/////////////////////////////////////////////////////////////
void test_MesonFieldAslashSeq(Application &application)
{
// DistilVectors parameters
MContraction::A2AMesonField::Par A2AMesonFieldPar;
A2AMesonFieldPar.left="DistilVecsAslashSeq";
//A2AMesonFieldPar.right="DistilVecs_rho";
A2AMesonFieldPar.right="DistilVecsAslashSeq";
A2AMesonFieldPar.output="MesonSinksAslashSeq";
A2AMesonFieldPar.gammas="all";
A2AMesonFieldPar.mom={"0 0 0"};
A2AMesonFieldPar.cacheBlock=2;
A2AMesonFieldPar.block=4;
application.createModule<MContraction::A2AMesonField>("DistilMesonFieldAslashSeq",A2AMesonFieldPar);
}
bool bNumber( int &ri, const char * & pstr, bool bGobbleWhiteSpace = true )
{
if( bGobbleWhiteSpace )
@ -774,7 +698,7 @@ bool DebugEigenTest()
std::array<std::string,3> as={"Alpha", "Beta", "Gamma"};
MyTensor x(as, 2,1,4);
DebugShowTensor(x, "x");
x.WriteBinary(pszTestFileName);
x.write(pszTestFileName);
// Test initialisation of an array of strings
for( auto a : as )
std::cout << a << std::endl;
@ -787,7 +711,7 @@ bool DebugEigenTest()
// Now see whether we can read a tensor back
std::array<std::string,3> Names2={"Alpha", "Gamma", "Delta"};
MyTensor y(Names2, 2,4,1);
y.ReadBinary(pszTestFileName);
y.read(pszTestFileName);
DebugShowTensor(y, "y");
// Now see whether we can read a tensor back from an hdf5 file
@ -1037,15 +961,6 @@ int main(int argc, char *argv[])
{
#ifdef DEBUG
// Debug only - test of Eigen::Tensor
std::cout << "sizeof(int) = " << sizeof(int)
<< ", sizeof(long) = " << sizeof(long)
<< ", sizeof(size_t) = " << sizeof(size_t)
<< ", sizeof(std::size_t) = " << sizeof(std::size_t)
<< ", sizeof(std::streamsize) = " << sizeof(std::streamsize)
<< ", sizeof(Eigen::Index) = " << sizeof(Eigen::Index)
<< ", sizeof(hsize_t) = " << sizeof(hsize_t)
<< ", sizeof(unsigned long long) = " << sizeof(unsigned long long)
<< std::endl;
//if( DebugEigenTest() ) return 0;
//if(DebugGridTensorTest()) return 0;
//if(ConvertPeramb("PerambL_100_tsrc0.3000","PerambL_100_tsrc0.3000")) return 0;
@ -1105,21 +1020,21 @@ int main(int argc, char *argv[])
test_LapEvec( application );
test_Perambulators( application );
break;
case 3: // 3
default: // 3
test_Global( application );
test_SolverS( application );
test_LapEvec( application );
test_Perambulators( application );
test_DistilVectors( application );
break;
default: // 4
case 4:
test_Global( application );
test_SolverS( application );
test_LapEvec( application );
test_Perambulators( application );
test_DistilVectors( application );
test_MesonField( application );
test_MesonFieldRhoAll( application );
test_MesonField( application, "Phi", "_phi" );
test_MesonField( application, "Rho", "_rho" );
break;
case 5: // 3
test_Global( application );
@ -1129,7 +1044,8 @@ int main(int argc, char *argv[])
test_DistilVectors( application );
test_PerambulatorsS( application );
test_DistilVectorsS( application );
test_MesonFieldSL( application );
test_MesonField( application, "SPhi", "S_phi" );
test_MesonField( application, "SRho", "S_rho" );
break;
#ifdef DISTIL_PRE_RELEASE
case 6: // 3
@ -1156,7 +1072,8 @@ int main(int argc, char *argv[])
test_LapEvec( application );
test_Perambulators( application );
test_DistilVectors( application );
test_MesonField( application );
test_MesonField( application, "Phi", "_phi" );
test_MesonField( application, "Rho", "_rho" );
break;
#ifdef DISTIL_PRE_RELEASE
case 9: // 3
@ -1192,7 +1109,7 @@ int main(int argc, char *argv[])
test_AslashSeq( application );
test_PerambulatorsSolve( application );
test_DistilVectorsAslashSeq( application );
test_MesonFieldAslashSeq( application );
test_MesonField( application, "AslashSeq" );
break;
case 13:
test_Global( application );
@ -1201,10 +1118,9 @@ int main(int argc, char *argv[])
test_MultiPerambulators( application );
break;
}
LOG(Message) << "====== XML creation for test " << iTestNum << " complete ======" << std::endl;
// execution
application.saveParameterFile("test_distil.xml");
LOG(Warning) << "These parameters are designed to run on a laptop using --grid 4.4.4.8" << std::endl;
application.run();
// epilogue