mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Compare commits


9 Commits

Author SHA1 Message Date
e307bb7528 Reorganise to abstract kernels that know about the lattice layout.
Move these back into Grid.
2018-09-04 12:30:00 +01:00
5b8b630919 Finished the four-quark optimisation for the bag parameters.
To do:

   Abstract the cache blocking from the contraction with lambda functions (a sketch follows the commit list below).
   Share code between PionFieldXX with and without momentum; share with the Meson field code somehow.
   Assemble the WWVV in a standalone routine.
   Play the same lambda-function trick for the four-quark operator.
   Hack it first by doing the MesonField routine in here too.
2018-08-28 14:11:03 +01:00
81287133f3 New files 2018-08-28 11:32:41 +01:00
bd27940f78 Reorder the loop 2018-08-28 11:32:23 +01:00
d45647698d Extra test code 2018-08-28 11:31:19 +01:00
d6ac6e75cc Some query functions 2018-08-28 11:30:51 +01:00
ba34d7b206 Pion Field test module 2018-08-28 11:30:14 +01:00
80003787c9 Use MKL DGEMM 2018-08-28 11:14:27 +01:00
f523dddef0 Remove verbose 2018-08-28 11:14:07 +01:00
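The cache-blocking item in the to-do list above could be factored out roughly as in the sketch below. This is a hedged illustration, not code from the repository: cacheBlockedLoop and kernel are hypothetical names, and the loop simply mirrors the ii/jj cache-block loops that appear in TA2AMesonField<FImpl>::execute further down.

#include <algorithm>   // std::min

// Hedged sketch: hoist the cache-block loop out of the contraction and pass the
// per-sub-block work in as a lambda.
template <typename Kernel>
void cacheBlockedLoop(int N_ii, int N_jj, int cacheBlock, Kernel &&kernel)
{
    for (int ii = 0; ii < N_ii; ii += cacheBlock)
    for (int jj = 0; jj < N_jj; jj += cacheBlock)
    {
        int N_iii = std::min(N_ii - ii, cacheBlock);
        int N_jjj = std::min(N_jj - jj, cacheBlock);
        kernel(ii, jj, N_iii, N_jjj);   // e.g. run makeMesonFieldBlock on this sub-block
    }
}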
554 changed files with 7728 additions and 18317 deletions

.gitignore

@@ -83,7 +83,6 @@ ltmain.sh
.Trashes
ehthumbs.db
Thumbs.db
.dirstamp
# build directory #
###################
@@ -98,8 +97,11 @@ build.sh
# Eigen source #
################
Grid/Eigen
Eigen/*
lib/Eigen/*
# FFTW source #
################
lib/fftw/*
# libtool macros #
##################
@@ -110,7 +112,21 @@ m4/libtool.m4
################
gh-pages/
# Buck files #
##############
.buck*
buck-out
BUCK
make-bin-BUCK.sh
# generated sources #
#####################
Grid/qcd/spin/gamma-gen/*.h
Grid/qcd/spin/gamma-gen/*.cc
lib/qcd/spin/gamma-gen/*.h
lib/qcd/spin/gamma-gen/*.cc
lib/version.h
# vs code editor files #
########################
.vscode/
.vscode/settings.json
settings.json

.travis.yml

@@ -9,11 +9,6 @@ matrix:
- os: osx
osx_image: xcode8.3
compiler: clang
env: PREC=single
- os: osx
osx_image: xcode8.3
compiler: clang
env: PREC=double
before_install:
- export GRIDDIR=`pwd`
@@ -21,7 +16,7 @@ before_install:
- if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export PATH="${GRIDDIR}/clang/bin:${PATH}"; fi
- if [[ "$TRAVIS_OS_NAME" == "linux" ]] && [[ "$CC" == "clang" ]]; then export LD_LIBRARY_PATH="${GRIDDIR}/clang/lib:${LD_LIBRARY_PATH}"; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc openssl; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install libmpc; fi
install:
- export CWD=`pwd`
@@ -38,7 +33,6 @@ install:
- which $CXX
- $CXX --version
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export LDFLAGS='-L/usr/local/lib'; fi
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then export EXTRACONF='--with-openssl=/usr/local/opt/openssl'; fi
script:
- ./bootstrap.sh
@@ -55,7 +49,12 @@ script:
- make -j4
- make install
- cd $CWD/build
- ../configure --enable-precision=$PREC --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install ${EXTRACONF}
- ../configure --enable-precision=single --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install
- make -j4
- ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
- echo make clean
- ../configure --enable-precision=double --enable-simd=SSE4 --enable-comms=none --with-lime=$CWD/build/lime/install
- make -j4
- ./benchmarks/Benchmark_dwf --threads 1 --debug-signals
- make check


@@ -1,186 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/SchurRedBlack.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
namespace Grid {
namespace QCD {
template<class Field>
class PauliVillarsSolverUnprec
{
public:
ConjugateGradient<Field> & CG;
PauliVillarsSolverUnprec( ConjugateGradient<Field> &_CG) : CG(_CG){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
MdagMLinearOperator<Matrix,Field> HermOp(_Matrix);
_Matrix.SetMass(1.0);
_Matrix.Mdag(src,A);
CG(HermOp,A,sol);
_Matrix.SetMass(m);
};
};
template<class Field>
class PauliVillarsSolverRBprec
{
public:
ConjugateGradient<Field> & CG;
PauliVillarsSolverRBprec( ConjugateGradient<Field> &_CG) : CG(_CG){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
_Matrix.SetMass(1.0);
SchurRedBlackDiagMooeeSolve<Field> SchurSolver(CG);
SchurSolver(_Matrix,src,sol);
_Matrix.SetMass(m);
};
};
template<class Field,class PVinverter> class Reconstruct5DfromPhysical {
private:
PVinverter & PauliVillarsSolver;
public:
/////////////////////////////////////////////////////
// First cut works, 10 Oct 2018.
//
// Must form a plan to get this into production for Zmobius acceleration
// of the Mobius exact AMA corrections.
//
// TODO : understand absence of contact term in eqns in Hantao's thesis
// sol4 is contact term subtracted.
//
// Options
// a) Defect correction approach:
// 1) Compute defect from current soln (initially guess).
// This is ...... outerToInner check !!!!
// 2) Deflated Zmobius solve to get 4d soln
// Ensure deflation is working
// 3) Refine 5d Outer using the inner 4d delta soln
//
// Step 1: localise PV inverse in a routine. [DONE]
// Step 2: Schur based PV inverse [DONE]
// Step 3: Fourier accelerated PV inverse
// Step 4:
/////////////////////////////////////////////////////
Reconstruct5DfromPhysical(PVinverter &_PauliVillarsSolver)
: PauliVillarsSolver(_PauliVillarsSolver)
{
};
template<class Matrix>
void PV(Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
_Matrix.SetMass(1.0);
_Matrix.M(src,sol);
_Matrix.SetMass(m);
}
template<class Matrix>
void PVdag(Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
_Matrix.SetMass(1.0);
_Matrix.Mdag(src,sol);
_Matrix.SetMass(m);
}
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &sol4,const Field &src4, Field &sol5){
int Ls = _Matrix.Ls;
Field psi4(_Matrix.GaugeGrid());
Field psi(_Matrix.FermionGrid());
Field A (_Matrix.FermionGrid());
Field B (_Matrix.FermionGrid());
Field c (_Matrix.FermionGrid());
typedef typename Matrix::Coeff_t Coeff_t;
std::cout << GridLogMessage<< " ************************************************" << std::endl;
std::cout << GridLogMessage<< " Reconstruct5Dprop: c.f. MADWF algorithm " << std::endl;
std::cout << GridLogMessage<< " ************************************************" << std::endl;
///////////////////////////////////////
//Import source, include Dminus factors
///////////////////////////////////////
_Matrix.ImportPhysicalFermionSource(src4,B);
///////////////////////////////////////
// Set up c from src4
///////////////////////////////////////
PauliVillarsSolver(_Matrix,B,A);
_Matrix.Pdag(A,c);
//////////////////////////////////////
// Build Pdag PV^-1 Dm P [-sol4,c2,c3... cL]
//////////////////////////////////////
psi4 = - sol4;
InsertSlice(psi4, psi, 0 , 0);
for (int s=1;s<Ls;s++) {
ExtractSlice(psi4,c,s,0);
InsertSlice(psi4,psi,s,0);
}
/////////////////////////////
// Pdag PV^-1 Dm P
/////////////////////////////
_Matrix.P(psi,B);
_Matrix.M(B,A);
PauliVillarsSolver(_Matrix,A,B);
_Matrix.Pdag(B,A);
//////////////////////////////
// Reinsert surface prop
//////////////////////////////
InsertSlice(sol4,A,0,0);
//////////////////////////////
// Convert from y back to x
//////////////////////////////
_Matrix.P(A,sol5);
}
};
}
}
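A minimal usage sketch for the classes above (not part of the diff): it assumes the Grid::QCD namespace is in scope and that the 5D domain-wall-type action Ddwf, the 4D fields src4 and sol4, and the 5D field sol5 have been constructed elsewhere; only calls shown in this file are used.

// Hedged usage sketch: reconstruct a 5D propagator from a 4D solution using the
// red-black preconditioned Pauli-Villars solver defined above.
ConjugateGradient<LatticeFermion> CG(1.0e-10, 10000);   // tolerance, max iterations
PauliVillarsSolverRBprec<LatticeFermion> PVsolver(CG);
Reconstruct5DfromPhysical<LatticeFermion, PauliVillarsSolverRBprec<LatticeFermion>> Reconstruct(PVsolver);
// Ddwf : assumed 5D action; src4 : 4D source; sol4 : 4D solution; sol5 : reconstructed 5D solution
Reconstruct(Ddwf, sol4, src4, sol5);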


@@ -1,70 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/util/Sha.h
Copyright (C) 2018
Author: Peter Boyle
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
extern "C" {
#include <openssl/sha.h>
}
#pragma once
class GridChecksum
{
public:
static inline uint32_t crc32(void *data,size_t bytes)
{
return ::crc32(0L,(unsigned char *)data,bytes);
}
static inline std::vector<unsigned char> sha256(void *data,size_t bytes)
{
std::vector<unsigned char> hash(SHA256_DIGEST_LENGTH);
SHA256_CTX sha256;
SHA256_Init (&sha256);
SHA256_Update(&sha256, data,bytes);
SHA256_Final (&hash[0], &sha256);
return hash;
}
static inline std::vector<int> sha256_seeds(const std::string &s)
{
std::vector<int> seeds;
std::vector<unsigned char> uchars = sha256((void *)s.c_str(),s.size());
for(int i=0;i<uchars.size();i++) seeds.push_back(uchars[i]);
return seeds;
}
};
/*
int main(int argc,char **argv)
{
std::string s("The quick brown fox jumps over the lazy dog");
auto csum = GridChecksum::sha256_seeds(s);
std::cout << "SHA256 sum is 0x";
for(int i=0;i<csum.size();i++) {
std::cout << std::hex << csum[i];
}
std::cout << std::endl;
}
*/


@@ -1,126 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AMatrix.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef A2A_Matrix_hpp_
#define A2A_Matrix_hpp_
#include <Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE
template <typename T, typename MetadataType>
class A2AMatrixIo
{
public:
A2AMatrixIo(void) = default;
A2AMatrixIo(std::string filename, std::string dataname,
const unsigned int nt, const unsigned int ni,
const unsigned int nj);
~A2AMatrixIo(void) = default;
void initFile(const MetadataType &d, const unsigned int chunkSize);
void saveBlock(const T *data, const unsigned int i, const unsigned int j,
const unsigned int blockSizei, const unsigned int blockSizej);
private:
std::string filename_, dataname_;
unsigned int nt_, ni_, nj_;
};
template <typename T, typename MetadataType>
A2AMatrixIo<T, MetadataType>::A2AMatrixIo(std::string filename,
std::string dataname,
const unsigned int nt,
const unsigned int ni,
const unsigned int nj)
: filename_(filename), dataname_(dataname)
, nt_(nt), ni_(ni), nj_(nj)
{}
template <typename T, typename MetadataType>
void A2AMatrixIo<T, MetadataType>::initFile(const MetadataType &d, const unsigned int chunkSize)
{
#ifdef HAVE_HDF5
std::vector<hsize_t> dim = {static_cast<hsize_t>(nt_),
static_cast<hsize_t>(ni_),
static_cast<hsize_t>(nj_)},
chunk = {static_cast<hsize_t>(nt_),
static_cast<hsize_t>(chunkSize),
static_cast<hsize_t>(chunkSize)};
H5NS::DataSpace dataspace(dim.size(), dim.data());
H5NS::DataSet dataset;
H5NS::DSetCreatPropList plist;
// create empty file just with metadata
{
Hdf5Writer writer(filename_);
write(writer, dataname_, d);
}
// create the dataset
Hdf5Reader reader(filename_);
push(reader, dataname_);
auto &group = reader.getGroup();
plist.setChunk(chunk.size(), chunk.data());
dataset = group.createDataSet("data", Hdf5Type<T>::type(), dataspace, plist);
#else
HADRONS_ERROR(Implementation, "all-to-all matrix I/O needs HDF5 library");
#endif
}
template <typename T, typename MetadataType>
void A2AMatrixIo<T, MetadataType>::saveBlock(const T *data,
const unsigned int i,
const unsigned int j,
const unsigned int blockSizei,
const unsigned int blockSizej)
{
#ifdef HAVE_HDF5
Hdf5Reader reader(filename_);
std::vector<hsize_t> count = {nt_, blockSizei, blockSizej},
offset = {0, static_cast<hsize_t>(i),
static_cast<hsize_t>(j)},
stride = {1, 1, 1},
block = {1, 1, 1};
H5NS::DataSpace memspace(count.size(), count.data()), dataspace;
H5NS::DataSet dataset;
size_t shift;
push(reader, dataname_);
auto &group = reader.getGroup();
dataset = group.openDataSet("data");
dataspace = dataset.getSpace();
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data(),
stride.data(), block.data());
dataset.write(data, Hdf5Type<T>::type(), memspace, dataspace);
#else
HADRONS_ERROR(Implementation, "all-to-all matrix I/O needs HDF5 library");
#endif
}
END_HADRONS_NAMESPACE
#endif // A2A_Matrix_hpp_
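A hedged usage sketch of the I/O helper above (it assumes HDF5 support is enabled; MyMetadata is a hypothetical serialisable metadata class, and buffer, sizes and names are illustrative). The A2AMesonField module further down drives the class in exactly this pattern.

// Hedged usage sketch of A2AMatrixIo.
MyMetadata md;                                          // assumed serialisable metadata object
A2AMatrixIo<ComplexF, MyMetadata> io("mf.h5", "mf_block", nt, ni, nj);
io.initFile(md, blockSize);                             // write metadata, create chunked "data" set
io.saveBlock(buffer, i, j, blockSizei, blockSizej);     // write one block of time-slices at offset (i, j)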


@@ -1,222 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/A2AVectors.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: fionnoh <fionnoh@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef A2A_Vectors_hpp_
#define A2A_Vectors_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Environment.hpp>
#include <Hadrons/Solver.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Classes to generate V & W all-to-all vectors *
******************************************************************************/
template <typename FImpl>
class A2AVectorsSchurDiagTwo
{
public:
FERM_TYPE_ALIASES(FImpl,);
SOLVER_TYPE_ALIASES(FImpl,);
public:
A2AVectorsSchurDiagTwo(FMat &action, Solver &solver);
virtual ~A2AVectorsSchurDiagTwo(void) = default;
void makeLowModeV(FermionField &vout,
const FermionField &evec, const Real &eval);
void makeLowModeV5D(FermionField &vout_4d, FermionField &vout_5d,
const FermionField &evec, const Real &eval);
void makeLowModeW(FermionField &wout,
const FermionField &evec, const Real &eval);
void makeLowModeW5D(FermionField &wout_4d, FermionField &wout_5d,
const FermionField &evec, const Real &eval);
void makeHighModeV(FermionField &vout, const FermionField &noise);
void makeHighModeV5D(FermionField &vout_4d, FermionField &vout_5d,
const FermionField &noise_5d);
void makeHighModeW(FermionField &wout, const FermionField &noise);
void makeHighModeW5D(FermionField &vout_5d, FermionField &wout_5d,
const FermionField &noise_5d);
private:
FMat &action_;
Solver &solver_;
GridBase *fGrid_, *frbGrid_, *gGrid_;
bool is5d_;
FermionField src_o_, sol_e_, sol_o_, tmp_, tmp5_;
SchurDiagTwoOperator<FMat, FermionField> op_;
};
/******************************************************************************
* A2AVectorsSchurDiagTwo template implementation *
******************************************************************************/
template <typename FImpl>
A2AVectorsSchurDiagTwo<FImpl>::A2AVectorsSchurDiagTwo(FMat &action, Solver &solver)
: action_(action)
, solver_(solver)
, fGrid_(action_.FermionGrid())
, frbGrid_(action_.FermionRedBlackGrid())
, gGrid_(action_.GaugeGrid())
, src_o_(frbGrid_)
, sol_e_(frbGrid_)
, sol_o_(frbGrid_)
, tmp_(frbGrid_)
, tmp5_(fGrid_)
, op_(action_)
{}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeV(FermionField &vout, const FermionField &evec, const Real &eval)
{
src_o_ = evec;
src_o_.checkerboard = Odd;
pickCheckerboard(Even, sol_e_, vout);
pickCheckerboard(Odd, sol_o_, vout);
/////////////////////////////////////////////////////
// v_ie = -(1/eval_i) * MeeInv Meo MooInv evec_i
/////////////////////////////////////////////////////
action_.MooeeInv(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
action_.Meooe(tmp_, sol_e_);
assert(sol_e_.checkerboard == Even);
action_.MooeeInv(sol_e_, tmp_);
assert(tmp_.checkerboard == Even);
sol_e_ = (-1.0 / eval) * tmp_;
assert(sol_e_.checkerboard == Even);
/////////////////////////////////////////////////////
// v_io = (1/eval_i) * MooInv evec_i
/////////////////////////////////////////////////////
action_.MooeeInv(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
sol_o_ = (1.0 / eval) * tmp_;
assert(sol_o_.checkerboard == Odd);
setCheckerboard(vout, sol_e_);
assert(sol_e_.checkerboard == Even);
setCheckerboard(vout, sol_o_);
assert(sol_o_.checkerboard == Odd);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeV5D(FermionField &vout_4d, FermionField &vout_5d, const FermionField &evec, const Real &eval)
{
makeLowModeV(vout_5d, evec, eval);
action_.ExportPhysicalFermionSolution(vout_5d, vout_4d);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeW(FermionField &wout, const FermionField &evec, const Real &eval)
{
src_o_ = evec;
src_o_.checkerboard = Odd;
pickCheckerboard(Even, sol_e_, wout);
pickCheckerboard(Odd, sol_o_, wout);
/////////////////////////////////////////////////////
// w_ie = - MeeInvDag MoeDag Doo evec_i
/////////////////////////////////////////////////////
op_.Mpc(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
action_.MeooeDag(tmp_, sol_e_);
assert(sol_e_.checkerboard == Even);
action_.MooeeInvDag(sol_e_, tmp_);
assert(tmp_.checkerboard == Even);
sol_e_ = (-1.0) * tmp_;
/////////////////////////////////////////////////////
// w_io = Doo evec_i
/////////////////////////////////////////////////////
op_.Mpc(src_o_, sol_o_);
assert(sol_o_.checkerboard == Odd);
setCheckerboard(wout, sol_e_);
assert(sol_e_.checkerboard == Even);
setCheckerboard(wout, sol_o_);
assert(sol_o_.checkerboard == Odd);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeW5D(FermionField &wout_4d,
FermionField &wout_5d,
const FermionField &evec,
const Real &eval)
{
makeLowModeW(tmp5_, evec, eval);
action_.DminusDag(tmp5_, wout_5d);
action_.ExportPhysicalFermionSource(wout_5d, wout_4d);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeV(FermionField &vout,
const FermionField &noise)
{
solver_(vout, noise);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeV5D(FermionField &vout_4d,
FermionField &vout_5d,
const FermionField &noise)
{
if (noise._grid->Dimensions() == fGrid_->Dimensions() - 1)
{
action_.ImportPhysicalFermionSource(noise, tmp5_);
}
else
{
tmp5_ = noise;
}
makeHighModeV(vout_5d, tmp5_);
action_.ExportPhysicalFermionSolution(vout_5d, vout_4d);
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeW(FermionField &wout,
const FermionField &noise)
{
wout = noise;
}
template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeW5D(FermionField &wout_4d,
FermionField &wout_5d,
const FermionField &noise)
{
if (noise._grid->Dimensions() == fGrid_->Dimensions() - 1)
{
action_.ImportUnphysicalFermion(noise, wout_5d);
wout_4d = noise;
}
else
{
wout_5d = noise;
action_.ExportPhysicalFermionSource(wout_5d, wout_4d);
}
}
END_HADRONS_NAMESPACE
#endif // A2A_Vectors_hpp_
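A hedged sketch of how the class above is typically driven (the action, solver, eigenvectors, eigenvalues, noise sources and the pre-sized v/w vectors, as well as the counts nl and nh, are all assumed to exist; this loosely mirrors the MSolver::A2AVectors module rather than reproducing it).

// Hedged usage sketch: fill nl low-mode and nh high-mode pairs of V/W vectors.
A2AVectorsSchurDiagTwo<FIMPL> a2a(action, solver);
for (unsigned int i = 0; i < nl; ++i)
{
    a2a.makeLowModeV(v[i], evec[i], eval[i]);
    a2a.makeLowModeW(w[i], evec[i], eval[i]);
}
for (unsigned int i = nl; i < nl + nh; ++i)
{
    a2a.makeHighModeV(v[i], noise[i - nl]);
    a2a.makeHighModeW(w[i], noise[i - nl]);
}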


@@ -1,191 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DilutedNoise.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_DilutedNoise_hpp_
#define Hadrons_DilutedNoise_hpp_
#include <Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Abstract container for diluted noise *
******************************************************************************/
template <typename FImpl>
class DilutedNoise
{
public:
typedef typename FImpl::FermionField FermionField;
public:
// constructor/destructor
DilutedNoise(GridCartesian *g);
DilutedNoise(GridCartesian *g, const unsigned int nNoise);
virtual ~DilutedNoise(void) = default;
// access
std::vector<FermionField> & getNoise(void);
const std::vector<FermionField> & getNoise(void) const;
const FermionField & operator[](const unsigned int i) const;
FermionField & operator[](const unsigned int i);
void resize(const unsigned int nNoise);
unsigned int size(void) const;
GridCartesian *getGrid(void) const;
// generate noise (pure virtual)
virtual void generateNoise(GridParallelRNG &rng) = 0;
private:
std::vector<FermionField> noise_;
GridCartesian *grid_;
unsigned int nNoise_;
};
template <typename FImpl>
class TimeDilutedSpinColorDiagonalNoise: public DilutedNoise<FImpl>
{
public:
typedef typename FImpl::FermionField FermionField;
public:
// constructor/destructor
TimeDilutedSpinColorDiagonalNoise(GridCartesian *g);
virtual ~TimeDilutedSpinColorDiagonalNoise(void) = default;
// generate noise
virtual void generateNoise(GridParallelRNG &rng);
private:
unsigned int nt_;
};
/******************************************************************************
* DilutedNoise template implementation *
******************************************************************************/
template <typename FImpl>
DilutedNoise<FImpl>::DilutedNoise(GridCartesian *g)
: grid_(g)
{}
template <typename FImpl>
DilutedNoise<FImpl>::DilutedNoise(GridCartesian *g,
const unsigned int nNoise)
: DilutedNoise(g)
{
resize(nNoise);
}
template <typename FImpl>
std::vector<typename DilutedNoise<FImpl>::FermionField> & DilutedNoise<FImpl>::
getNoise(void)
{
return noise_;
}
template <typename FImpl>
const std::vector<typename DilutedNoise<FImpl>::FermionField> & DilutedNoise<FImpl>::
getNoise(void) const
{
return noise_;
}
template <typename FImpl>
const typename DilutedNoise<FImpl>::FermionField &
DilutedNoise<FImpl>::operator[](const unsigned int i) const
{
return noise_[i];
}
template <typename FImpl>
typename DilutedNoise<FImpl>::FermionField &
DilutedNoise<FImpl>::operator[](const unsigned int i)
{
return noise_[i];
}
template <typename FImpl>
void DilutedNoise<FImpl>::resize(const unsigned int nNoise)
{
nNoise_ = nNoise;
noise_.resize(nNoise, grid_);
}
template <typename FImpl>
unsigned int DilutedNoise<FImpl>::size(void) const
{
return noise_.size();
}
template <typename FImpl>
GridCartesian * DilutedNoise<FImpl>::getGrid(void) const
{
return grid_;
}
/******************************************************************************
* TimeDilutedSpinColorDiagonalNoise template implementation *
******************************************************************************/
template <typename FImpl>
TimeDilutedSpinColorDiagonalNoise<FImpl>::
TimeDilutedSpinColorDiagonalNoise(GridCartesian *g)
: DilutedNoise<FImpl>(g)
{
nt_ = this->getGrid()->GlobalDimensions().back();
this->resize(nt_*Ns*FImpl::Dimension);
}
template <typename FImpl>
void TimeDilutedSpinColorDiagonalNoise<FImpl>::generateNoise(GridParallelRNG &rng)
{
typedef decltype(peekColour((*this)[0], 0)) SpinField;
auto &noise = *this;
auto g = this->getGrid();
auto nd = g->GlobalDimensions().size();
auto nc = FImpl::Dimension;
Complex shift(1., 1.);
Lattice<iScalar<vInteger>> tLat(g);
LatticeComplex eta(g), etaCut(g);
SpinField etas(g);
unsigned int i = 0;
LatticeCoordinate(tLat, nd - 1);
bernoulli(rng, eta);
eta = (2.*eta - shift)*(1./::sqrt(2.));
for (unsigned int t = 0; t < nt_; ++t)
{
etaCut = where((tLat == t), eta, 0.*eta);
for (unsigned int s = 0; s < Ns; ++s)
{
etas = zero;
pokeSpin(etas, etaCut, s);
for (unsigned int c = 0; c < nc; ++c)
{
noise[i] = zero;
pokeColour(noise[i], etas, c);
i++;
}
}
}
}
END_HADRONS_NAMESPACE
#endif // Hadrons_DilutedNoise_hpp_
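A hedged usage sketch for the noise container above (the GridCartesian pointer grid, the GridParallelRNG rng and the fermion implementation FIMPL are assumed to be set up elsewhere).

// Hedged usage sketch: one complex Z_2-type noise hit, diluted in time, spin and colour.
TimeDilutedSpinColorDiagonalNoise<FIMPL> noise(grid);   // holds nt * Ns * Nc diluted vectors
noise.generateNoise(rng);
for (unsigned int i = 0; i < noise.size(); ++i)
{
    auto &eta_i = noise[i];                             // one diluted component
    // ... use eta_i as a solver source ...
}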


@@ -1,300 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/DiskVector.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_DiskVector_hpp_
#define Hadrons_DiskVector_hpp_
#include <Hadrons/Global.hpp>
#include <deque>
#include <sys/stat.h>
#include <ftw.h>
#include <unistd.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Abstract base class *
******************************************************************************/
template <typename T>
class DiskVectorBase
{
public:
typedef T ObjectType;
// helper for read/write vector access
class RwAccessHelper
{
public:
RwAccessHelper(DiskVectorBase<T> &master, const unsigned int i)
: master_(master), cmaster_(master), i_(i) {}
// operator=: somebody is trying to store a vector element
// write to disk and cache
T &operator=(const T &obj) const
{
#ifdef DV_DEBUG
LOG(Debug) << "diskvector " << &master_ << ": writing to " << i_ << std::endl;
#endif
master_.cacheInsert(i_, obj);
master_.save(master_.filename(i_), obj);
return master_.cachePtr_->at(i_);
}
// implicit cast to const object reference and redirection
// to the const operator[] for read-only operations
operator const T&() const
{
return cmaster_[i_];
}
private:
DiskVectorBase<T> &master_;
const DiskVectorBase<T> &cmaster_;
const unsigned int i_;
};
public:
DiskVectorBase(const std::string dirname, const unsigned int size = 0,
const unsigned int cacheSize = 1, const bool clean = true);
virtual ~DiskVectorBase(void);
const T & operator[](const unsigned int i) const;
RwAccessHelper operator[](const unsigned int i);
private:
virtual void load(T &obj, const std::string filename) const = 0;
virtual void save(const std::string filename, const T &obj) const = 0;
virtual std::string filename(const unsigned int i) const;
void evict(void) const;
void fetch(const unsigned int i) const;
void cacheInsert(const unsigned int i, const T &obj) const;
void clean(void);
private:
std::string dirname_;
unsigned int size_, cacheSize_;
bool clean_;
// using pointers to allow modifications when class is const
// semantic: const means data unmodified, but cache modification allowed
std::unique_ptr<std::map<unsigned int, T>> cachePtr_;
std::unique_ptr<std::deque<unsigned int>> loadsPtr_;
};
/******************************************************************************
* Specialisation for serialisable classes *
******************************************************************************/
template <typename T, typename Reader, typename Writer>
class SerializableDiskVector: public DiskVectorBase<T>
{
public:
using DiskVectorBase<T>::DiskVectorBase;
private:
virtual void load(T &obj, const std::string filename) const
{
Reader reader(filename);
read(reader, basename(filename), obj);
}
virtual void save(const std::string filename, const T &obj) const
{
Writer writer(filename);
write(writer, basename(filename), obj);
}
};
/******************************************************************************
* DiskVectorBase implementation *
******************************************************************************/
#ifdef DV_DEBUG
#define DV_DEBUG_MSG(stream) LOG(Debug) << "diskvector " << this << ": " << stream << std::endl
#endif
template <typename T>
DiskVectorBase<T>::DiskVectorBase(const std::string dirname,
const unsigned int size,
const unsigned int cacheSize,
const bool clean)
: dirname_(dirname), size_(size), cacheSize_(cacheSize), clean_(clean)
, cachePtr_(new std::map<unsigned int, T>())
, loadsPtr_(new std::deque<unsigned int>())
{
struct stat s;
if(stat(dirname.c_str(), &s) == 0)
{
HADRONS_ERROR(Io, "directory '" + dirname + "' already exists")
}
mkdir(dirname);
}
template <typename T>
DiskVectorBase<T>::~DiskVectorBase(void)
{
if (clean_)
{
clean();
}
}
template <typename T>
const T & DiskVectorBase<T>::operator[](const unsigned int i) const
{
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
DV_DEBUG_MSG("accessing " << i << " (RO)");
if (i >= size_)
{
HADRONS_ERROR(Size, "index out of range");
}
if (cache.find(i) == cache.end())
{
// cache miss
DV_DEBUG_MSG("cache miss");
fetch(i);
}
else
{
DV_DEBUG_MSG("cache hit");
auto pos = std::find(loads.begin(), loads.end(), i);
loads.erase(pos);
loads.push_back(i);
}
#ifdef DV_DEBUG
std::string msg;
for (auto &p: loads)
{
msg += std::to_string(p) + " ";
}
DV_DEBUG_MSG("in cache: " << msg);
#endif
return cache.at(i);
}
template <typename T>
typename DiskVectorBase<T>::RwAccessHelper DiskVectorBase<T>::operator[](const unsigned int i)
{
DV_DEBUG_MSG("accessing " << i << " (RW)");
if (i >= size_)
{
HADRONS_ERROR(Size, "index out of range");
}
return RwAccessHelper(*this, i);
}
template <typename T>
std::string DiskVectorBase<T>::filename(const unsigned int i) const
{
return dirname_ + "/elem_" + std::to_string(i);
}
template <typename T>
void DiskVectorBase<T>::evict(void) const
{
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
if (cache.size() >= cacheSize_)
{
DV_DEBUG_MSG("evicting " << loads.front());
cache.erase(loads.front());
loads.pop_front();
}
}
template <typename T>
void DiskVectorBase<T>::fetch(const unsigned int i) const
{
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
struct stat s;
DV_DEBUG_MSG("loading " << i << " from disk");
evict();
if(stat(filename(i).c_str(), &s) != 0)
{
HADRONS_ERROR(Io, "disk vector element " + std::to_string(i) + " uninitialised");
}
load(cache[i], filename(i));
loads.push_back(i);
}
template <typename T>
void DiskVectorBase<T>::cacheInsert(const unsigned int i, const T &obj) const
{
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
evict();
cache[i] = obj;
loads.push_back(i);
#ifdef DV_DEBUG
std::string msg;
for (auto &p: loads)
{
msg += std::to_string(p) + " ";
}
DV_DEBUG_MSG("in cache: " << msg);
#endif
}
#ifdef DV_DEBUG
#undef DV_DEBUG_MSG
#endif
template <typename T>
void DiskVectorBase<T>::clean(void)
{
auto unlink = [](const char *fpath, const struct stat *sb,
int typeflag, struct FTW *ftwbuf)
{
int rv = remove(fpath);
if (rv)
{
HADRONS_ERROR(Io, "cannot remove '" + std::string(fpath) + "': "
+ std::string(std::strerror(errno)));
}
return rv;
};
nftw(dirname_.c_str(), unlink, 64, FTW_DEPTH | FTW_PHYS);
}
END_HADRONS_NAMESPACE
#endif // Hadrons_DiskVector_hpp_
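A hedged usage sketch of the serialisable specialisation above (MyType is a hypothetical Grid-serialisable class; the scratch directory must not already exist and is removed on destruction when clean is true).

// Hedged usage sketch: a disk-backed vector of 100 elements with a 4-element RAM cache.
SerializableDiskVector<MyType, XmlReader, XmlWriter> dv("dv_scratch", 100, 4);
MyType obj;
dv[0] = obj;          // RW access: writes dv_scratch/elem_0 and inserts it in the cache
MyType ro = dv[0];    // RO access via the implicit conversion; loads from disk on a cache miss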


@@ -1,190 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Module.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
/******************************************************************************
* ModuleBase implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
ModuleBase::ModuleBase(const std::string name)
: name_(name)
{}
// access //////////////////////////////////////////////////////////////////////
std::string ModuleBase::getName(void) const
{
return name_;
}
// get factory registration name if available
std::string ModuleBase::getRegisteredName(void)
{
HADRONS_ERROR(Definition, "module '" + getName() + "' has no registered type"
+ " in the factory");
}
// execution ///////////////////////////////////////////////////////////////////
void ModuleBase::operator()(void)
{
resetTimers();
startTimer("_total");
startTimer("_setup");
setup();
stopTimer("_setup");
startTimer("_execute");
execute();
stopAllTimers();
}
// timers //////////////////////////////////////////////////////////////////////
void ModuleBase::startTimer(const std::string &name)
{
if (!name.empty())
{
timer_[name].Start();
}
}
GridTime ModuleBase::getTimer(const std::string &name)
{
GridTime t;
if (!name.empty())
{
try
{
bool running = timer_.at(name).isRunning();
if (running) stopTimer(name);
t = timer_.at(name).Elapsed();
if (running) startTimer(name);
}
catch (std::out_of_range &)
{
t = GridTime::zero();
}
}
else
{
t = GridTime::zero();
}
return t;
}
double ModuleBase::getDTimer(const std::string &name)
{
return static_cast<double>(getTimer(name).count());
}
void ModuleBase::startCurrentTimer(const std::string &name)
{
if (!name.empty())
{
stopCurrentTimer();
startTimer(name);
currentTimer_ = name;
}
}
void ModuleBase::stopTimer(const std::string &name)
{
if (timer_.at(name).isRunning())
{
timer_.at(name).Stop();
}
}
void ModuleBase::stopCurrentTimer(void)
{
if (!currentTimer_.empty())
{
stopTimer(currentTimer_);
currentTimer_ = "";
}
}
void ModuleBase::stopAllTimers(void)
{
for (auto &t: timer_)
{
stopTimer(t.first);
}
currentTimer_ = "";
}
void ModuleBase::resetTimers(void)
{
timer_.clear();
currentTimer_ = "";
}
std::map<std::string, GridTime> ModuleBase::getTimings(void)
{
std::map<std::string, GridTime> timing;
for (auto &t: timer_)
{
timing[t.first] = t.second.Elapsed();
}
return timing;
}
std::string ModuleBase::makeSeedString(void)
{
std::string seed;
if (!vm().getRunId().empty())
{
seed += vm().getRunId() + "-";
}
seed += getName() + "-" + std::to_string(vm().getTrajectory());
return seed;
}
GridParallelRNG & ModuleBase::rng4d(void)
{
auto &r = *env().get4dRng();
if (makeSeedString() != seed_)
{
seed_ = makeSeedString();
LOG(Message) << "Seeding 4D RNG " << &r << " with string '"
<< seed_ << "'" << std::endl;
r.SeedUniqueString(seed_);
}
return r;
}


@@ -1,66 +0,0 @@
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/WardIdentity.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
#include <Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MSink/Smear.hpp>
#include <Hadrons/Modules/MSink/Point.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqGamma.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqConserved.hpp>
#include <Hadrons/Modules/MLoop/NoiseLoop.hpp>
#include <Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Hadrons/Modules/MScalar/VPCounterTerms.hpp>
#include <Hadrons/Modules/MScalar/ScalarVP.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/ShiftProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Hadrons/Modules/MScalarSUN/TrPhi.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>


@@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/MobiusDWF.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TMobiusDWF<FIMPL>;
template class Grid::Hadrons::MAction::TMobiusDWF<FIMPLF>;


@@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ScaledDWF.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TScaledDWF<FIMPL>;
template class Grid::Hadrons::MAction::TScaledDWF<FIMPLF>;


@@ -1,445 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AMesonField.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MContraction_A2AMesonField_hpp_
#define Hadrons_MContraction_A2AMesonField_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/A2AVectors.hpp>
#include <Hadrons/A2AMatrix.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp>
#define MF_PARALLEL_IO
#ifndef MF_IO_TYPE
#define MF_IO_TYPE ComplexF
#endif
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* All-to-all meson field creation *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
class A2AMesonFieldPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonFieldPar,
int, cacheBlock,
int, block,
std::string, v,
std::string, w,
std::string, output,
std::string, gammas,
std::vector<std::string>, mom);
};
class A2AMesonFieldMetadata: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonFieldMetadata,
std::vector<RealF>, momentum,
Gamma::Algebra, gamma);
};
template <typename FImpl>
class TA2AMesonField : public Module<A2AMesonFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
SOLVER_TYPE_ALIASES(FImpl,);
typedef Eigen::TensorMap<Eigen::Tensor<Complex, 5, Eigen::RowMajor>> MesonField;
typedef Eigen::TensorMap<Eigen::Tensor<MF_IO_TYPE, 5, Eigen::RowMajor>> MesonFieldIo;
typedef A2AMatrixIo<MF_IO_TYPE, A2AMesonFieldMetadata> MatrixIo;
struct IoHelper
{
MatrixIo io;
A2AMesonFieldMetadata metadata;
size_t offset;
unsigned int i, j, blockSizei, blockSizej;
};
public:
// constructor
TA2AMesonField(const std::string name);
// destructor
virtual ~TA2AMesonField(void){};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
private:
// IO
std::string ioname(const unsigned int m, const unsigned int g) const;
std::string filename(const unsigned int m, const unsigned int g) const;
void saveBlock(const MF_IO_TYPE *data, IoHelper &h);
private:
bool hasPhase_{false};
std::string momphName_;
std::vector<Gamma::Algebra> gamma_;
std::vector<std::vector<Real>> mom_;
std::vector<IoHelper> nodeIo_;
};
MODULE_REGISTER(A2AMesonField, ARG(TA2AMesonField<FIMPL>), MContraction);
MODULE_REGISTER(ZA2AMesonField, ARG(TA2AMesonField<ZFIMPL>), MContraction);
/******************************************************************************
* TA2AMesonField implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TA2AMesonField<FImpl>::TA2AMesonField(const std::string name)
: Module<A2AMesonFieldPar>(name)
, momphName_(name + "_momph")
{
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().v, par().w};
return in;
}
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::setup(void)
{
gamma_.clear();
mom_.clear();
if (par().gammas == "all")
{
gamma_ = {
Gamma::Algebra::Gamma5,
Gamma::Algebra::Identity,
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaXGamma5,
Gamma::Algebra::GammaYGamma5,
Gamma::Algebra::GammaZGamma5,
Gamma::Algebra::GammaTGamma5,
Gamma::Algebra::SigmaXY,
Gamma::Algebra::SigmaXZ,
Gamma::Algebra::SigmaXT,
Gamma::Algebra::SigmaYZ,
Gamma::Algebra::SigmaYT,
Gamma::Algebra::SigmaZT
};
}
else
{
gamma_ = strToVec<Gamma::Algebra>(par().gammas);
}
for (auto &pstr: par().mom)
{
auto p = strToVec<Real>(pstr);
if (p.size() != env().getNd() - 1)
{
HADRONS_ERROR(Size, "Momentum has " + std::to_string(p.size())
+ " components instead of "
+ std::to_string(env().getNd() - 1));
}
mom_.push_back(p);
}
envCache(std::vector<ComplexField>, momphName_, 1,
par().mom.size(), envGetGrid(ComplexField));
envTmpLat(ComplexField, "coor");
// preallocate memory for meson field block
auto tgp = env().getDim().back()*gamma_.size()*mom_.size();
envTmp(Vector<MF_IO_TYPE>, "mfBuf", 1, tgp*par().block*par().block);
envTmp(Vector<Complex>, "mfCache", 1, tgp*par().cacheBlock*par().cacheBlock);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::execute(void)
{
auto &v = envGet(std::vector<FermionField>, par().v);
auto &w = envGet(std::vector<FermionField>, par().w);
int nt = env().getDim().back();
int N_i = w.size();
int N_j = v.size();
int ngamma = gamma_.size();
int nmom = mom_.size();
int block = par().block;
int cacheBlock = par().cacheBlock;
LOG(Message) << "Computing all-to-all meson fields" << std::endl;
LOG(Message) << "W: '" << par().w << "' V: '" << par().v << "'" << std::endl;
LOG(Message) << "Momenta:" << std::endl;
for (auto &p: mom_)
{
LOG(Message) << " " << p << std::endl;
}
LOG(Message) << "Spin bilinears:" << std::endl;
for (auto &g: gamma_)
{
LOG(Message) << " " << g << std::endl;
}
LOG(Message) << "Meson field size: " << nt << "*" << N_i << "*" << N_j
<< " (filesize " << sizeString(nt*N_i*N_j*sizeof(MF_IO_TYPE))
<< "/momentum/bilinear)" << std::endl;
///////////////////////////////////////////////
// Momentum setup
///////////////////////////////////////////////
auto &ph = envGet(std::vector<ComplexField>, momphName_);
if (!hasPhase_)
{
startTimer("Momentum phases");
for (unsigned int j = 0; j < nmom; ++j)
{
Complex i(0.0,1.0);
std::vector<Real> p;
envGetTmp(ComplexField, coor);
ph[j] = zero;
for(unsigned int mu = 0; mu < mom_[j].size(); mu++)
{
LatticeCoordinate(coor, mu);
ph[j] = ph[j] + (mom_[j][mu]/env().getDim(mu))*coor;
}
ph[j] = exp((Real)(2*M_PI)*i*ph[j]);
}
hasPhase_ = true;
stopTimer("Momentum phases");
}
//////////////////////////////////////////////////////////////////////////
// i,j is first loop over SchurBlock factors reusing 5D matrices
// ii,jj is second loop over cacheBlock factors for high perf contraction
// iii,jjj are loops within cacheBlock
// Total index is sum of these i+ii+iii etc...
//////////////////////////////////////////////////////////////////////////
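// Worked example (illustrative numbers, not from the original source): with
// N_i = 1000, block = 200 and cacheBlock = 50, the outer loop visits i = 0, 200, 400, ...,
// the middle loop visits ii = 0, 50, 100, 150 within each block, and iii runs over 0..49
// within each cache block, so the global left index of a term is i + ii + iii
// (and likewise j + jj + jjj on the right).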
double flops;
double bytes;
double vol = env().getVolume();
double t_kernel = 0.0;
double nodes = env().getGrid()->NodeCount();
double tot_kernel;
envGetTmp(Vector<MF_IO_TYPE>, mfBuf);
envGetTmp(Vector<Complex>, mfCache);
double t0 = usecond();
int NBlock_i = N_i/block + (((N_i % block) != 0) ? 1 : 0);
int NBlock_j = N_j/block + (((N_j % block) != 0) ? 1 : 0);
for(int i=0;i<N_i;i+=block)
for(int j=0;j<N_j;j+=block)
{
// Get the W and V vectors for this block^2 set of terms
int N_ii = MIN(N_i-i,block);
int N_jj = MIN(N_j-j,block);
LOG(Message) << "Meson field block "
<< j/block + NBlock_j*i/block + 1
<< "/" << NBlock_i*NBlock_j << " [" << i <<" .. "
<< i+N_ii-1 << ", " << j <<" .. " << j+N_jj-1 << "]"
<< std::endl;
MesonFieldIo mfBlock(mfBuf.data(),nmom,ngamma,nt,N_ii,N_jj);
// Series of cache blocked chunks of the contractions within this block
flops = 0.0;
bytes = 0.0;
for(int ii=0;ii<N_ii;ii+=cacheBlock)
for(int jj=0;jj<N_jj;jj+=cacheBlock)
{
int N_iii = MIN(N_ii-ii,cacheBlock);
int N_jjj = MIN(N_jj-jj,cacheBlock);
MesonField mfCacheBlock(mfCache.data(),nmom,ngamma,nt,N_iii,N_jjj);
startTimer("contraction: total");
makeMesonFieldBlock(mfCacheBlock, &w[i+ii], &v[j+jj], gamma_, ph,
env().getNd() - 1, this);
stopTimer("contraction: total");
// flops for general N_c & N_s
flops += vol * ( 2 * 8.0 + 6.0 + 8.0*nmom) * N_iii*N_jjj*ngamma;
bytes += vol * (12.0 * sizeof(Complex) ) * N_iii*N_jjj
+ vol * ( 2.0 * sizeof(Complex) *nmom ) * N_iii*N_jjj* ngamma;
startTimer("cache copy");
parallel_for_nest5(int m =0;m< nmom;m++)
for(int g =0;g< ngamma;g++)
for(int t =0;t< nt;t++)
for(int iii=0;iii< N_iii;iii++)
for(int jjj=0;jjj< N_jjj;jjj++)
{
mfBlock(m,g,t,ii+iii,jj+jjj) = mfCacheBlock(m,g,t,iii,jjj);
}
stopTimer("cache copy");
}
// perf
tot_kernel = getDTimer("contraction: colour trace & mom.")
+ getDTimer("contraction: local space sum");
t_kernel = tot_kernel - t_kernel;
LOG(Message) << "Kernel perf " << flops/t_kernel/1.0e3/nodes
<< " Gflop/s/node " << std::endl;
LOG(Message) << "Kernel perf " << bytes/t_kernel*1.0e6/1024/1024/1024/nodes
<< " GB/s/node " << std::endl;
t_kernel = tot_kernel;
// IO
if (!par().output.empty())
{
double blockSize, ioTime;
unsigned int myRank = env().getGrid()->ThisRank(),
nRank = env().getGrid()->RankCount();
LOG(Message) << "Writing block to disk" << std::endl;
ioTime = -getDTimer("IO: write block");
startTimer("IO: total");
makeFileDir(filename(0, 0), env().getGrid());
#ifdef MF_PARALLEL_IO
env().getGrid()->Barrier();
nodeIo_.clear();
for(int f = myRank; f < nmom*ngamma; f += nRank)
{
const unsigned int m = f/ngamma, g = f % ngamma;
IoHelper h;
h.io = MatrixIo(filename(m, g), ioname(m, g), nt, N_i, N_j);
for (auto pmu: mom_[m])
{
h.metadata.momentum.push_back(pmu);
}
h.metadata.gamma = gamma_[g];
h.i = i;
h.j = j;
h.blockSizei = mfBlock.dimension(3);
h.blockSizej = mfBlock.dimension(4);
h.offset = (m*ngamma + g)*nt*h.blockSizei*h.blockSizej;
nodeIo_.push_back(h);
}
// parallel IO
for (auto &h: nodeIo_)
{
saveBlock(mfBlock.data(), h);
}
env().getGrid()->Barrier();
#else
// serial IO
for(int m = 0; m < nmom; m++)
for(int g = 0; g < ngamma; g++)
{
IoHelper h;
h.io = MatrixIo(filename(m, g), ioname(m, g), nt, N_i, N_j);
for (auto pmu: mom_[m])
{
h.metadata.momentum.push_back(pmu);
}
h.metadata.gamma = gamma_[g];
h.i = i;
h.j = j;
h.blockSizei = mfBlock.dimension(3);
h.blockSizej = mfBlock.dimension(4);
h.offset = (m*ngamma + g)*nt*h.blockSizei*h.blockSizej;
saveBlock(mfBlock.data(), h);
}
#endif
stopTimer("IO: total");
blockSize = static_cast<double>(nmom*ngamma*nt*N_ii*N_jj*sizeof(MF_IO_TYPE));
ioTime += getDTimer("IO: write block");
LOG(Message) << "HDF5 IO done " << sizeString(blockSize) << " in "
<< ioTime << " us ("
<< blockSize/ioTime*1.0e6/1024/1024
<< " MB/s)" << std::endl;
}
}
}
// IO
template <typename FImpl>
std::string TA2AMesonField<FImpl>::ioname(unsigned int m, unsigned int g) const
{
std::stringstream ss;
ss << gamma_[g] << "_";
for (unsigned int mu = 0; mu < mom_[m].size(); ++mu)
{
ss << mom_[m][mu] << ((mu == mom_[m].size() - 1) ? "" : "_");
}
return ss.str();
}
template <typename FImpl>
std::string TA2AMesonField<FImpl>::filename(unsigned int m, unsigned int g) const
{
return par().output + "." + std::to_string(vm().getTrajectory())
+ "/" + ioname(m, g) + ".h5";
}
template <typename FImpl>
void TA2AMesonField<FImpl>::saveBlock(const MF_IO_TYPE *data, IoHelper &h)
{
if ((h.i == 0) and (h.j == 0))
{
startTimer("IO: file creation");
h.io.initFile(h.metadata, par().block);
stopTimer("IO: file creation");
}
startTimer("IO: write block");
h.io.saveBlock(data + h.offset, h.i, h.j, h.blockSizei, h.blockSizej);
stopTimer("IO: write block");
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2AMesonField_hpp_
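A hedged sketch of how the module above might be wired into a Hadrons application (the Application object, module name and parameter values are illustrative, and "a2a_v"/"a2a_w" must name previously created A2A vector containers; the createModule call follows the usual Hadrons Application interface).

// Hedged configuration sketch for the A2AMesonField module.
MContraction::A2AMesonField::Par mfPar;
mfPar.v          = "a2a_v";             // name of the V vector container
mfPar.w          = "a2a_w";             // name of the W vector container
mfPar.block      = 200;                 // outer block size
mfPar.cacheBlock = 50;                  // cache block size
mfPar.gammas     = "all";               // all 16 spin bilinears
mfPar.mom        = {"0 0 0", "1 0 0"};  // momenta, one fewer component than Nd
mfPar.output     = "mesonfields/mf";    // HDF5 output stem
application.createModule<MContraction::A2AMesonField>("mf", mfPar);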


@@ -1,224 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MContraction_A2AMesonFieldKernels_hpp_
#define Hadrons_MContraction_A2AMesonFieldKernels_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MContraction)
////////////////////////////////////////////////////////////////////////////////
// Cache blocked arithmetic routine
// Could move to Grid ???
////////////////////////////////////////////////////////////////////////////////
template <typename Field, typename MesonField>
void makeMesonFieldBlock(MesonField &mat,
const Field *lhs_wi,
const Field *rhs_vj,
std::vector<Gamma::Algebra> gamma,
const std::vector<LatticeComplex> &mom,
int orthogdim,
ModuleBase *caller = nullptr)
{
typedef typename Field::vector_object vobj;
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef iSpinMatrix<vector_type> SpinMatrix_v;
typedef iSpinMatrix<scalar_type> SpinMatrix_s;
int Lblock = mat.dimension(3);
int Rblock = mat.dimension(4);
GridBase *grid = lhs_wi[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
int Ngamma = gamma.size();
int Nmom = mom.size();
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
// Strategy: first accumulate SIMD-vector sums on each reduced time slice,
// then split the SIMD lanes and reduce these down to scalar sums per local time slice
int MFrvol = rd*Lblock*Rblock*Nmom;
int MFlvol = ld*Lblock*Rblock*Nmom;
Vector<SpinMatrix_v > lvSum(MFrvol);
parallel_for (int r = 0; r < MFrvol; r++)
{
lvSum[r] = zero;
}
Vector<SpinMatrix_s > lsSum(MFlvol);
parallel_for (int r = 0; r < MFlvol; r++)
{
lsSum[r]=scalar_type(0.0);
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
if (caller) caller->startTimer("contraction: colour trace & mom.");
// Nested parallelism would be ok
// Wasting cores here: only the r loop is parallelised, so cores idle when rd is small
parallel_for(int r=0;r<rd;r++)
{
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++)
for(int b=0;b<e2;b++)
{
int ss= so+n*stride+b;
for(int i=0;i<Lblock;i++)
{
auto left = conjugate(lhs_wi[i]._odata[ss]);
for(int j=0;j<Rblock;j++)
{
SpinMatrix_v vv;
auto right = rhs_vj[j]._odata[ss];
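// colour trace: contract the three colour components of the (already conjugated) left with right for each spin pair (s1, s2)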
for(int s1=0;s1<Ns;s1++)
for(int s2=0;s2<Ns;s2++)
{
vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
+ left()(s2)(1) * right()(s1)(1)
+ left()(s2)(2) * right()(s1)(2);
}
// After forming the site-wise spin matrix, loop over the momenta and accumulate with each phase
int base = Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*r;
for ( int m=0;m<Nmom;m++)
{
int idx = m+base;
auto phase = mom[m]._odata[ss];
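// multiply-accumulate: lvSum[idx] += vv * phase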
mac(&lvSum[idx],&vv,&phase);
}
}
}
}
}
if (caller) caller->stopTimer("contraction: colour trace & mom.");
// Sum across simd lanes in the plane, breaking out orthog dir.
if (caller) caller->startTimer("contraction: local space sum");
parallel_for(int rt=0;rt<rd;rt++)
{
std::vector<int> icoor(Nd);
std::vector<SpinMatrix_s> extracted(Nsimd);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nmom;m++)
{
int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;
extract(lvSum[ij_rdx],extracted);
for(int idx=0;idx<Nsimd;idx++)
{
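// each SIMD lane holds a distinct site; recover its local time slice from the lane's internal coordinate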
grid->iCoorFromIindex(icoor,idx);
int ldx = rt+icoor[orthogdim]*rd;
int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
}
}
}
if (caller) caller->stopTimer("contraction: local space sum");
// ld loop and local only??
if (caller) caller->startTimer("contraction: spin trace");
int pd = grid->_processors[orthogdim];
int pc = grid->_processor_coor[orthogdim];
parallel_for_nest2(int lt=0;lt<ld;lt++)
{
for(int pt=0;pt<pd;pt++)
{
int t = lt + pt*ld;
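// only the rank owning this time slice computes the trace; the others write zero and the global sum below completes the result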
if (pt == pc)
{
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nmom;m++)
{
int ij_dx = m+Nmom*i + Nmom*Lblock * j + Nmom*Lblock * Rblock * lt;
for(int mu=0;mu<Ngamma;mu++)
{
// this is a bit slow
mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gamma[mu]));
}
}
}
else
{
const scalar_type zz(0.0);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int mu=0;mu<Ngamma;mu++)
for(int m=0;m<Nmom;m++)
{
mat(m,mu,t,i,j) =zz;
}
}
}
}
if (caller) caller->stopTimer("contraction: spin trace");
////////////////////////////////////////////////////////////////////
// This global sum is taking as much as 50% of time on 16 nodes
// Vector size is 7 x 16 x 32 x 16 x 16 x sizeof(complex) = 2MB - 60MB depending on volume
// Healthy size that should suffice
////////////////////////////////////////////////////////////////////
if (caller) caller->startTimer("contraction: global sum");
grid->GlobalSumVector(&mat(0,0,0,0,0),Nmom*Ngamma*Nt*Lblock*Rblock);
if (caller) caller->stopTimer("contraction: global sum");
}
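// Minimal usage sketch (illustrative only; the tensor type and names here are assumptions, not part of this file):
//   Eigen::Tensor<ComplexD, 5, Eigen::RowMajor> mat(Nmom, Ngamma, Nt, Nw, Nv);
//   makeMesonFieldBlock(mat, &w[0], &v[0], gammas, phases, Tdir, this);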
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2AMesonFieldKernels_hpp_


@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/Random.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MGauge/Random.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MGauge;
template class Grid::Hadrons::MGauge::TRandom<GIMPL>;


@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/StoutSmearing.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MGauge;
template class Grid::Hadrons::MGauge::TStoutSmearing<GIMPL>;


@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/Unit.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MGauge/Unit.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MGauge;
template class Grid::Hadrons::MGauge::TUnit<GIMPL>;


@ -1,38 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MIO/LoadCosmHol.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MIO;
template class Grid::Hadrons::MIO::TLoadCosmHol<ScalarNxNAdjImplR<2>>;
template class Grid::Hadrons::MIO::TLoadCosmHol<ScalarNxNAdjImplR<3>>;
template class Grid::Hadrons::MIO::TLoadCosmHol<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MIO::TLoadCosmHol<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MIO::TLoadCosmHol<ScalarNxNAdjImplR<6>>;


@ -1,146 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MIO/LoadCosmHol.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MIO_LoadCosmHol_hpp_
#define Hadrons_MIO_LoadCosmHol_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Load scalar SU(N) configurations *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MIO)
class LoadCosmHolPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(LoadCosmHolPar,
std::string, file);
};
class ScalarActionParameters: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(ScalarActionParameters,
double, mass_squared,
double, lambda,
double, g);
};
template <typename SImpl>
class TLoadCosmHol: public Module<LoadCosmHolPar>
{
public:
typedef typename SImpl::Field Field;
public:
// constructor
TLoadCosmHol(const std::string name);
// destructor
virtual ~TLoadCosmHol(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(LoadCosmHolSU2, TLoadCosmHol<ScalarNxNAdjImplR<2>>, MIO);
MODULE_REGISTER_TMP(LoadCosmHolSU3, TLoadCosmHol<ScalarNxNAdjImplR<3>>, MIO);
MODULE_REGISTER_TMP(LoadCosmHolSU4, TLoadCosmHol<ScalarNxNAdjImplR<4>>, MIO);
MODULE_REGISTER_TMP(LoadCosmHolSU5, TLoadCosmHol<ScalarNxNAdjImplR<5>>, MIO);
MODULE_REGISTER_TMP(LoadCosmHolSU6, TLoadCosmHol<ScalarNxNAdjImplR<6>>, MIO);
/******************************************************************************
* TLoadCosmHol implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename SImpl>
TLoadCosmHol<SImpl>::TLoadCosmHol(const std::string name)
: Module<LoadCosmHolPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename SImpl>
std::vector<std::string> TLoadCosmHol<SImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename SImpl>
std::vector<std::string> TLoadCosmHol<SImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename SImpl>
void TLoadCosmHol<SImpl>::setup(void)
{
envCreateLat(Field, getName());
}
// execution ///////////////////////////////////////////////////////////////////
template <typename SImpl>
void TLoadCosmHol<SImpl>::execute(void)
{
ScalarActionParameters md;
std::string filename = par().file + "."
+ std::to_string(vm().getTrajectory());
ScidacReader reader;
const unsigned int N = SImpl::Group::Dimension;
auto &phi = envGet(Field, getName());
LOG(Message) << "Loading CosmHol configuration from file '" << filename
<< "'" << std::endl;
reader.open(filename);
reader.readScidacFieldRecord(phi, md);
reader.close();
LOG(Message) << "tr(phi^2) = "
<< -TensorRemove(sum(trace(phi*phi))).real()/env().getVolume()
<< std::endl;
LOG(Message) << "Configuration parameters:" << std::endl;
LOG(Message) << " N = " << N << std::endl;
LOG(Message) << " m^2 = " << md.mass_squared << std::endl;
LOG(Message) << "lambda = " << md.lambda << std::endl;
LOG(Message) << " g = " << md.g << std::endl;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MIO_LoadCosmHol_hpp_


@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MIO/LoadNersc.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MIO;
template class Grid::Hadrons::MIO::TLoadNersc<GIMPL>;


@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MNoise;
template class Grid::Hadrons::MNoise::TTimeDilutedSpinColorDiagonal<FIMPL>;
template class Grid::Hadrons::MNoise::TTimeDilutedSpinColorDiagonal<ZFIMPL>;


@ -1,114 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MNoise_TimeDilutedSpinColorDiagonal_hpp_
#define Hadrons_MNoise_TimeDilutedSpinColorDiagonal_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/DilutedNoise.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Generate time diluted spin-color diagonal noise *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MNoise)
template <typename FImpl>
class TTimeDilutedSpinColorDiagonal: public Module<NoPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
TTimeDilutedSpinColorDiagonal(const std::string name);
// destructor
virtual ~TTimeDilutedSpinColorDiagonal(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(TimeDilutedSpinColorDiagonal, TTimeDilutedSpinColorDiagonal<FIMPL>, MNoise);
MODULE_REGISTER_TMP(ZTimeDilutedSpinColorDiagonal, TTimeDilutedSpinColorDiagonal<ZFIMPL>, MNoise);
/******************************************************************************
* TTimeDilutedSpinColorDiagonal implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TTimeDilutedSpinColorDiagonal<FImpl>::TTimeDilutedSpinColorDiagonal(const std::string name)
: Module<NoPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TTimeDilutedSpinColorDiagonal<FImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImpl>
std::vector<std::string> TTimeDilutedSpinColorDiagonal<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TTimeDilutedSpinColorDiagonal<FImpl>::setup(void)
{
envCreateDerived(DilutedNoise<FImpl>,
TimeDilutedSpinColorDiagonalNoise<FImpl>,
getName(), 1, envGetGrid(FermionField));
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TTimeDilutedSpinColorDiagonal<FImpl>::execute(void)
{
auto &noise = envGet(DilutedNoise<FImpl>, getName());
LOG(Message) << "Generating time-diluted, spin-color diagonal noise" << std::endl;
noise.generateNoise(rng4d());
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MNoise_TimeDilutedSpinColorDiagonal_hpp_


@ -1,38 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalarSUN/StochFreeField.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MScalarSUN;
template class Grid::Hadrons::MScalarSUN::TStochFreeField<ScalarNxNAdjImplR<2>>;
template class Grid::Hadrons::MScalarSUN::TStochFreeField<ScalarNxNAdjImplR<3>>;
template class Grid::Hadrons::MScalarSUN::TStochFreeField<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MScalarSUN::TStochFreeField<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MScalarSUN::TStochFreeField<ScalarNxNAdjImplR<6>>;


@ -1,38 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalarSUN/TimeMomProbe.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MScalarSUN;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<2>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<3>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<6>>;


@ -1,38 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalarSUN/TwoPointNPR.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MScalarSUN;
template class Grid::Hadrons::MScalarSUN::TTwoPointNPR<ScalarNxNAdjImplR<2>>;
template class Grid::Hadrons::MScalarSUN::TTwoPointNPR<ScalarNxNAdjImplR<3>>;
template class Grid::Hadrons::MScalarSUN::TTwoPointNPR<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MScalarSUN::TTwoPointNPR<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MScalarSUN::TTwoPointNPR<ScalarNxNAdjImplR<6>>;


@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSolver/A2AVectors.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: fionnoh <fionnoh@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MSolver;
template class Grid::Hadrons::MSolver::TA2AVectors<FIMPL, FermionEigenPack<FIMPL>>;
template class Grid::Hadrons::MSolver::TA2AVectors<ZFIMPL, FermionEigenPack<ZFIMPL>>;


@ -1,245 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSolver/A2AVectors.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: fionnoh <fionnoh@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MSolver_A2AVectors_hpp_
#define Hadrons_MSolver_A2AVectors_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/Solver.hpp>
#include <Hadrons/EigenPack.hpp>
#include <Hadrons/A2AVectors.hpp>
#include <Hadrons/DilutedNoise.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Create all-to-all V & W vectors *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MSolver)
class A2AVectorsPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AVectorsPar,
std::string, noise,
std::string, action,
std::string, eigenPack,
std::string, solver);
};
template <typename FImpl, typename Pack>
class TA2AVectors : public Module<A2AVectorsPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
SOLVER_TYPE_ALIASES(FImpl,);
typedef HADRONS_DEFAULT_SCHUR_A2A<FImpl> A2A;
public:
// constructor
TA2AVectors(const std::string name);
// destructor
virtual ~TA2AVectors(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
private:
std::string solverName_;
unsigned int Nl_{0};
};
MODULE_REGISTER_TMP(A2AVectors,
ARG(TA2AVectors<FIMPL, FermionEigenPack<FIMPL>>), MSolver);
MODULE_REGISTER_TMP(ZA2AVectors,
ARG(TA2AVectors<ZFIMPL, FermionEigenPack<ZFIMPL>>), MSolver);
/******************************************************************************
* TA2AVectors implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl, typename Pack>
TA2AVectors<FImpl, Pack>::TA2AVectors(const std::string name)
: Module<A2AVectorsPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl, typename Pack>
std::vector<std::string> TA2AVectors<FImpl, Pack>::getInput(void)
{
std::string sub_string;
std::vector<std::string> in;
if (!par().eigenPack.empty())
{
in.push_back(par().eigenPack);
sub_string = (!par().eigenPack.empty()) ? "_subtract" : "";
}
in.push_back(par().solver + sub_string);
in.push_back(par().noise);
return in;
}
template <typename FImpl, typename Pack>
std::vector<std::string> TA2AVectors<FImpl, Pack>::getOutput(void)
{
std::vector<std::string> out = {getName() + "_v", getName() + "_w"};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl, typename Pack>
void TA2AVectors<FImpl, Pack>::setup(void)
{
bool hasLowModes = (!par().eigenPack.empty());
std::string sub_string = (hasLowModes) ? "_subtract" : "";
auto &noise = envGet(DilutedNoise<FImpl>, par().noise);
auto &action = envGet(FMat, par().action);
auto &solver = envGet(Solver, par().solver + sub_string);
int Ls = env().getObjectLs(par().action);
if (hasLowModes)
{
auto &epack = envGet(Pack, par().eigenPack);
Nl_ = epack.evec.size();
}
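// allocate Nl_ + noise.size() vectors: the exact low modes plus one vector per stochastic noise component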
envCreate(std::vector<FermionField>, getName() + "_v", 1,
Nl_ + noise.size(), envGetGrid(FermionField));
envCreate(std::vector<FermionField>, getName() + "_w", 1,
Nl_ + noise.size(), envGetGrid(FermionField));
if (Ls > 1)
{
envTmpLat(FermionField, "f5", Ls);
}
envTmp(A2A, "a2a", 1, action, solver);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl, typename Pack>
void TA2AVectors<FImpl, Pack>::execute(void)
{
std::string sub_string = (Nl_ > 0) ? "_subtract" : "";
auto &action = envGet(FMat, par().action);
auto &solver = envGet(Solver, par().solver + sub_string);
auto &noise = envGet(DilutedNoise<FImpl>, par().noise);
auto &v = envGet(std::vector<FermionField>, getName() + "_v");
auto &w = envGet(std::vector<FermionField>, getName() + "_w");
int Ls = env().getObjectLs(par().action);
envGetTmp(A2A, a2a);
if (Nl_ > 0)
{
LOG(Message) << "Computing all-to-all vectors "
<< " using eigenpack '" << par().eigenPack << "' ("
<< Nl_ << " low modes) and noise '"
<< par().noise << "' (" << noise.size()
<< " noise vectors)" << std::endl;
}
else
{
LOG(Message) << "Computing all-to-all vectors "
<< " using noise '" << par().noise << "' (" << noise.size()
<< " noise vectors)" << std::endl;
}
// Low modes
for (unsigned int il = 0; il < Nl_; il++)
{
auto &epack = envGet(Pack, par().eigenPack);
startTimer("V low mode");
LOG(Message) << "V vector i = " << il << " (low mode)" << std::endl;
if (Ls == 1)
{
a2a.makeLowModeV(v[il], epack.evec[il], epack.eval[il]);
}
else
{
envGetTmp(FermionField, f5);
a2a.makeLowModeV5D(v[il], f5, epack.evec[il], epack.eval[il]);
}
stopTimer("V low mode");
startTimer("W low mode");
LOG(Message) << "W vector i = " << il << " (low mode)" << std::endl;
if (Ls == 1)
{
a2a.makeLowModeW(w[il], epack.evec[il], epack.eval[il]);
}
else
{
envGetTmp(FermionField, f5);
a2a.makeLowModeW5D(w[il], f5, epack.evec[il], epack.eval[il]);
}
stopTimer("W low mode");
}
// High modes
for (unsigned int ih = 0; ih < noise.size(); ih++)
{
startTimer("V high mode");
LOG(Message) << "V vector i = " << Nl_ + ih
<< " (" << ((Nl_ > 0) ? "high " : "")
<< "stochastic mode)" << std::endl;
if (Ls == 1)
{
a2a.makeHighModeV(v[Nl_ + ih], noise[ih]);
}
else
{
envGetTmp(FermionField, f5);
a2a.makeHighModeV5D(v[Nl_ + ih], f5, noise[ih]);
}
stopTimer("V high mode");
startTimer("W high mode");
LOG(Message) << "W vector i = " << Nl_ + ih
<< " (" << ((Nl_ > 0) ? "high " : "")
<< "stochastic mode)" << std::endl;
if (Ls == 1)
{
a2a.makeHighModeW(w[Nl_ + ih], noise[ih]);
}
else
{
envGetTmp(FermionField, f5);
a2a.makeHighModeW5D(w[Nl_ + ih], f5, noise[ih]);
}
stopTimer("W high mode");
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MSolver_A2AVectors_hpp_


@ -1,85 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSolver/Guesser.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MSolver_Guesser_hpp_
#define Hadrons_MSolver_Guesser_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/EigenPack.hpp>
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MSolver)
template <typename FImpl, int nBasis>
std::shared_ptr<LinearFunction<typename FImpl::FermionField>>
makeGuesser(const std::string epackName)
{
typedef typename FImpl::FermionField FermionField;
typedef FermionEigenPack<FImpl> EPack;
typedef CoarseFermionEigenPack<FImpl, nBasis> CoarseEPack;
typedef DeflatedGuesser<FermionField> FineGuesser;
typedef LocalCoherenceDeflatedGuesser<
FermionField, typename CoarseEPack::CoarseField> CoarseGuesser;
std::shared_ptr<LinearFunction<typename FImpl::FermionField>> guesserPt;
DEFINE_ENV_LAMBDA;
if (epackName.empty())
{
guesserPt.reset(new ZeroGuesser<FermionField>());
}
else
{
try
{
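// first try a local-coherence (coarse) eigenpack; if the stored object is a plain fine pack, the catch below falls back to fine deflation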
auto &epack = envGetDerived(EPack, CoarseEPack, epackName);
LOG(Message) << "using low-mode deflation with coarse eigenpack '"
<< epackName << "' ("
<< epack.evecCoarse.size() << " modes)" << std::endl;
guesserPt.reset(new CoarseGuesser(epack.evec, epack.evecCoarse,
epack.evalCoarse));
}
catch (Exceptions::ObjectType &e)
{
auto &epack = envGet(EPack, epackName);
LOG(Message) << "using low-mode deflation with eigenpack '"
<< epackName << "' ("
<< epack.evec.size() << " modes)" << std::endl;
guesserPt.reset(new FineGuesser(epack.evec, epack.eval));
}
}
return guesserPt;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif


@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MSolver;
template class Grid::Hadrons::MSolver::TMixedPrecisionRBPrecCG<FIMPLF, FIMPLD, HADRONS_DEFAULT_LANCZOS_NBASIS>;
template class Grid::Hadrons::MSolver::TMixedPrecisionRBPrecCG<ZFIMPLF, ZFIMPLD, HADRONS_DEFAULT_LANCZOS_NBASIS>;


@ -1,197 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MSolver_MixedPrecisionRBPrecCG_hpp_
#define Hadrons_MSolver_MixedPrecisionRBPrecCG_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/Solver.hpp>
#include <Hadrons/EigenPack.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Mixed precision schur red-black preconditioned CG *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MSolver)
class MixedPrecisionRBPrecCGPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(MixedPrecisionRBPrecCGPar,
std::string , innerAction,
std::string , outerAction,
unsigned int, maxInnerIteration,
unsigned int, maxOuterIteration,
double , residual,
std::string , eigenPack);
};
template <typename FImplInner, typename FImplOuter, int nBasis>
class TMixedPrecisionRBPrecCG: public Module<MixedPrecisionRBPrecCGPar>
{
public:
FERM_TYPE_ALIASES(FImplInner, Inner);
FERM_TYPE_ALIASES(FImplOuter, Outer);
SOLVER_TYPE_ALIASES(FImplOuter,);
typedef HADRONS_DEFAULT_SCHUR_OP<FMatInner, FermionFieldInner> SchurFMatInner;
typedef HADRONS_DEFAULT_SCHUR_OP<FMatOuter, FermionFieldOuter> SchurFMatOuter;
private:
template <typename Field>
class OperatorFunctionWrapper: public OperatorFunction<Field>
{
public:
OperatorFunctionWrapper(LinearFunction<Field> &fn): fn_(fn) {};
virtual ~OperatorFunctionWrapper(void) = default;
virtual void operator()(LinearOperatorBase<Field> &op,
const Field &in, Field &out)
{
fn_(in, out);
}
private:
LinearFunction<Field> &fn_;
};
public:
// constructor
TMixedPrecisionRBPrecCG(const std::string name);
// destructor
virtual ~TMixedPrecisionRBPrecCG(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getReference(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(MixedPrecisionRBPrecCG,
ARG(TMixedPrecisionRBPrecCG<FIMPLF, FIMPLD, HADRONS_DEFAULT_LANCZOS_NBASIS>), MSolver);
MODULE_REGISTER_TMP(ZMixedPrecisionRBPrecCG,
ARG(TMixedPrecisionRBPrecCG<ZFIMPLF, ZFIMPLD, HADRONS_DEFAULT_LANCZOS_NBASIS>), MSolver);
/******************************************************************************
* TMixedPrecisionRBPrecCG implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImplInner, typename FImplOuter, int nBasis>
TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::TMixedPrecisionRBPrecCG(const std::string name)
: Module<MixedPrecisionRBPrecCGPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImplInner, typename FImplOuter, int nBasis>
std::vector<std::string> TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImplInner, typename FImplOuter, int nBasis>
std::vector<std::string> TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::getReference(void)
{
std::vector<std::string> ref = {par().innerAction, par().outerAction};
if (!par().eigenPack.empty())
{
ref.push_back(par().eigenPack);
}
return ref;
}
template <typename FImplInner, typename FImplOuter, int nBasis>
std::vector<std::string> TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::getOutput(void)
{
std::vector<std::string> out = {getName(), getName() + "_subtract"};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImplInner, typename FImplOuter, int nBasis>
void TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::setup(void)
{
LOG(Message) << "Setting up Schur red-black preconditioned mixed-precision "
<< "CG for inner/outer action '" << par().innerAction
<< "'/'" << par().outerAction << "', residual "
<< par().residual << ", and maximum inner/outer iteration "
<< par().maxInnerIteration << "/" << par().maxOuterIteration
<< std::endl;
auto Ls = env().getObjectLs(par().innerAction);
auto &imat = envGet(FMatInner, par().innerAction);
auto &omat = envGet(FMatOuter, par().outerAction);
auto guesserPt = makeGuesser<FImplOuter, nBasis>(par().eigenPack);
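// solver factory: subGuess selects whether the initial guess is subtracted from the returned solution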
auto makeSolver = [&imat, &omat, guesserPt, Ls, this](bool subGuess)
{
return [&imat, &omat, guesserPt, subGuess, Ls, this]
(FermionFieldOuter &sol, const FermionFieldOuter &source)
{
typedef typename FermionFieldInner::vector_type VTypeInner;
SchurFMatInner simat(imat);
SchurFMatOuter somat(omat);
MixedPrecisionConjugateGradient<FermionFieldOuter, FermionFieldInner>
mpcg(par().residual, par().maxInnerIteration,
par().maxOuterIteration,
env().template getRbGrid<VTypeInner>(Ls),
simat, somat);
OperatorFunctionWrapper<FermionFieldOuter> wmpcg(mpcg);
HADRONS_DEFAULT_SCHUR_SOLVE<FermionFieldOuter> schurSolver(wmpcg);
schurSolver.subtractGuess(subGuess);
schurSolver(omat, source, sol, *guesserPt);
};
};
auto solver = makeSolver(false);
envCreate(Solver, getName(), Ls, solver, omat);
auto solver_subtract = makeSolver(true);
envCreate(Solver, getName() + "_subtract", Ls, solver_subtract, omat);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImplInner, typename FImplOuter, int nBasis>
void TMixedPrecisionRBPrecCG<FImplInner, FImplOuter, nBasis>
::execute(void)
{}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MSolver_MixedPrecisionRBPrecCG_hpp_


@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/PrecisionCast.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MUtilities;
template class Grid::Hadrons::MUtilities::TPrecisionCast<GIMPLD::GaugeField, GIMPLF::GaugeField>;
template class Grid::Hadrons::MUtilities::TPrecisionCast<FIMPLD::FermionField, FIMPLF::FermionField>;


@ -1,124 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/PrecisionCast.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MUtilities_PrecisionCast_hpp_
#define Hadrons_MUtilities_PrecisionCast_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Precision cast module *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MUtilities)
class PrecisionCastPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(PrecisionCastPar,
std::string, field);
};
template <typename FieldIn, typename FieldOut>
class TPrecisionCast: public Module<PrecisionCastPar>
{
public:
// constructor
TPrecisionCast(const std::string name);
// destructor
virtual ~TPrecisionCast(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(GaugeSinglePrecisionCast,
ARG(TPrecisionCast<GIMPLD::GaugeField, GIMPLF::GaugeField>),
MUtilities);
MODULE_REGISTER_TMP(FermionSinglePrecisionCast,
ARG(TPrecisionCast<FIMPLD::FermionField, FIMPLF::FermionField>),
MUtilities);
/******************************************************************************
* TPrecisionCast implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FieldIn, typename FieldOut>
TPrecisionCast<FieldIn, FieldOut>::TPrecisionCast(const std::string name)
: Module<PrecisionCastPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FieldIn, typename FieldOut>
std::vector<std::string> TPrecisionCast<FieldIn, FieldOut>::getInput(void)
{
std::vector<std::string> in = {par().field};
return in;
}
template <typename FieldIn, typename FieldOut>
std::vector<std::string> TPrecisionCast<FieldIn, FieldOut>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FieldIn, typename FieldOut>
void TPrecisionCast<FieldIn, FieldOut>::setup(void)
{
envCreateLat(FieldOut, getName());
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FieldIn, typename FieldOut>
void TPrecisionCast<FieldIn, FieldOut>::execute(void)
{
LOG(Message) << "Casting field '" << par().field << "'" << std::endl;
LOG(Message) << "In type: " << typeName<FieldIn>() << std::endl;
LOG(Message) << "Out type: " << typeName<FieldOut>() << std::endl;
auto &in = envGet(FieldIn, par().field);
auto &out = envGet(FieldOut, getName());
precisionChange(out, in);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MUtilities_PrecisionCast_hpp_


@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/RandomVectors.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MUtilities;
template class Grid::Hadrons::MUtilities::TRandomVectors<FIMPL::FermionField>;


@ -1,128 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MUtilities/RandomVectors.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MUtilities_RandomVectors_hpp_
#define Hadrons_MUtilities_RandomVectors_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Module generating random lattices for testing purposes *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MUtilities)
class RandomVectorsPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(RandomVectorsPar,
unsigned int, size,
unsigned int, Ls);
};
template <typename Field>
class TRandomVectors: public Module<RandomVectorsPar>
{
public:
// constructor
TRandomVectors(const std::string name);
// destructor
virtual ~TRandomVectors(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(RandomFermions, TRandomVectors<FIMPL::FermionField>, MUtilities);
/******************************************************************************
* TRandomVectors implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename Field>
TRandomVectors<Field>::TRandomVectors(const std::string name)
: Module<RandomVectorsPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename Field>
std::vector<std::string> TRandomVectors<Field>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename Field>
std::vector<std::string> TRandomVectors<Field>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename Field>
void TRandomVectors<Field>::setup(void)
{
if (par().Ls > 1)
{
envCreate(std::vector<Field>, getName(), par().Ls, par().size,
envGetGrid(Field, par().Ls));
}
else
{
envCreate(std::vector<Field>, getName(), 1, par().size, envGetGrid(Field));
}
}
// execution ///////////////////////////////////////////////////////////////////
template <typename Field>
void TRandomVectors<Field>::execute(void)
{
LOG(Message) << "Generating " << par().size << " random vectors" << std::endl;
auto &vec = envGet(std::vector<Field>, getName());
for (unsigned int i = 0; i < vec.size(); ++i)
{
random(rng4d(), vec[i]);
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MUtilities_RandomVectors_hpp_

View File

@ -1,179 +0,0 @@
#include <Hadrons/EigenPack.hpp>
#include <Hadrons/Environment.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
template <typename FOut, typename FIn>
void convert(const std::string outFilename, const std::string inFilename,
const unsigned int Ls, const bool rb, const unsigned int size,
const bool multiFile)
{
assert(outFilename != inFilename);
typedef EigenPack<FOut> EPOut;
typedef EigenPack<FIn> EPIn;
typedef typename FOut::vector_type VTypeOut;
typedef typename FIn::vector_type VTypeIn;
std::shared_ptr<GridCartesian> gInBase, gOutBase, gIn5, gOut5;
std::shared_ptr<GridRedBlackCartesian> rbgIn, rbgOut;
GridBase *gIn, *gOut;
auto dim = GridDefaultLatt();
unsigned int nd = dim.size();
auto simdOut = GridDefaultSimd(nd, VTypeOut::Nsimd());
auto simdIn = GridDefaultSimd(nd, VTypeIn::Nsimd());
gOutBase.reset(SpaceTimeGrid::makeFourDimGrid(dim, simdOut, GridDefaultMpi()));
gInBase.reset(SpaceTimeGrid::makeFourDimGrid(dim, simdIn, GridDefaultMpi()));
if (rb)
{
if (Ls > 1)
{
rbgOut.reset(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, gOutBase.get()));
rbgIn.reset(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, gInBase.get()));
}
else
{
rbgOut.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(gOutBase.get()));
rbgIn.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(gInBase.get()));
}
gOut = rbgOut.get();
gIn = rbgIn.get();
}
else
{
if (Ls > 1)
{
gOut5.reset(SpaceTimeGrid::makeFiveDimGrid(Ls, gOutBase.get()));
gIn5.reset(SpaceTimeGrid::makeFiveDimGrid(Ls, gInBase.get()));
gOut = gOut5.get();
gIn = gIn5.get();
}
else
{
gOut = gOutBase.get();
gIn = gInBase.get();
}
}
FOut bufOut(gOut);
FIn bufIn(gIn), testIn(gIn);
LOG(Message) << "==== EIGENPACK CONVERSION" << std::endl;
LOG(Message) << "Lattice : " << gIn->GlobalDimensions() << std::endl;
LOG(Message) << "Checkerboarded: " << (rb ? "yes" : "no") << std::endl;
LOG(Message) << "In path : " << inFilename << std::endl;
LOG(Message) << "In type : " << typeName<FIn>() << std::endl;
LOG(Message) << "Out path : " << outFilename << std::endl;
LOG(Message) << "Out type : " << typeName<FOut>() << std::endl;
LOG(Message) << "#vectors : " << size << std::endl;
LOG(Message) << "Multifile : " << (multiFile ? "yes" : "no") << std::endl;
if (multiFile)
{
for(unsigned int k = 0; k < size; ++k)
{
ScidacWriter binWriter(gOut->IsBoss());
ScidacReader binReader;
PackRecord record;
VecRecord vecRecord;
std::string outV = outFilename + "/v" + std::to_string(k) + ".bin";
std::string inV = inFilename + "/v" + std::to_string(k) + ".bin";
LOG(Message) << "==== Converting vector " << k << std::endl;
LOG(Message) << "In : " << inV << std::endl;
LOG(Message) << "Out: " << outV << std::endl;
makeFileDir(outV, gOut);
binWriter.open(outV);
binReader.open(inV);
EPIn::readHeader(record, binReader);
EPOut::writeHeader(binWriter, record);
EPIn::readElement(bufIn, vecRecord, binReader);
precisionChange(bufOut, bufIn);
precisionChange(testIn, bufOut);
testIn -= bufIn;
LOG(Message) << "Diff norm^2: " << norm2(testIn) << std::endl;
EPOut::writeElement(binWriter, bufOut, vecRecord);
binWriter.close();
binReader.close();
}
}
else
{
ScidacWriter binWriter(gOut->IsBoss());
ScidacReader binReader;
PackRecord record;
makeFileDir(outFilename, gOut);
binWriter.open(outFilename);
binReader.open(inFilename);
EPIn::readHeader(record, binReader);
EPOut::writeHeader(binWriter, record);
for(unsigned int k = 0; k < size; ++k)
{
VecRecord vecRecord;
LOG(Message) << "==== Converting vector " << k << std::endl;
EPIn::readElement(bufIn, vecRecord, binReader);
precisionChange(bufOut, bufIn);
precisionChange(testIn, bufOut);
testIn -= bufIn;
LOG(Message) << "Diff norm^2: " << norm2(testIn) << std::endl;
EPOut::writeElement(binWriter, bufOut, vecRecord);
}
binWriter.close();
binReader.close();
}
}
#ifndef FOUT
#warning "FOUT undefined (set to WilsonImplF::FermionField by default)"
#define FOUT WilsonImplF::FermionField
#endif
#ifndef FIN
#warning "FIN undefined (set to WilsonImplD::FermionField by default)"
#define FIN WilsonImplD::FermionField
#endif
int main(int argc, char *argv[])
{
// parse command line
std::string outFilename, inFilename;
unsigned int size, Ls;
bool rb, multiFile;
if (argc < 7)
{
std::cerr << "usage: " << argv[0] << " <out eigenpack> <in eigenpack> <Ls> <red-black (0|1)> <#vector> <multifile (0|1)> [Grid options]";
std::cerr << std::endl;
std::exit(EXIT_FAILURE);
}
outFilename = argv[1];
inFilename = argv[2];
Ls = std::stoi(std::string(argv[3]));
rb = (std::string(argv[4]) != "0");
size = std::stoi(std::string(argv[5]));
multiFile = (std::string(argv[6]) != "0");
// initialization
Grid_init(&argc, &argv);
initLogger();
// execution
try
{
convert<FOUT, FIN>(outFilename, inFilename, Ls, rb, size, multiFile);
}
catch (const std::exception& e)
{
Exceptions::abort(e);
}
// epilogue
LOG(Message) << "Grid is finalizing now" << std::endl;
Grid_finalize();
return EXIT_SUCCESS;
}

View File

@ -1,10 +0,0 @@
AM_LDFLAGS += -L../../Hadrons
bin_PROGRAMS = HadronsXmlRun HadronsFermionEP64To32
HadronsXmlRun_SOURCES = HadronsXmlRun.cc
HadronsXmlRun_LDADD = -lHadrons -lGrid
HadronsFermionEP64To32_SOURCES = EigenPackCast.cc
HadronsFermionEP64To32_CXXFLAGS = $(AM_CXXFLAGS) -DFIN=WilsonImplD::FermionField -DFOUT=WilsonImplF::FermionField
HadronsFermionEP64To32_LDADD = -lHadrons -lGrid

View File

@ -1,10 +1,15 @@
# additional include paths necessary to compile the C++ library
SUBDIRS = Grid Hadrons benchmarks tests
SUBDIRS = lib benchmarks tests extras
include $(top_srcdir)/doxygen.inc
bin_SCRIPTS=grid-config
BUILT_SOURCES = version.h
version.h:
echo "`git log -n 1 --format=format:"#define GITHASH \\"%H:%d\\"%n" HEAD`" > $(srcdir)/lib/version.h
.PHONY: bench check tests doxygen-run doxygen-doc $(DX_PS_GOAL) $(DX_PDF_GOAL)
tests-local: all

View File

@ -76,9 +76,9 @@ int main (int argc, char ** argv)
std::vector<int> seeds5({5,6,7,8});
std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString(std::string("The 4D RNG"));
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
GridParallelRNG RNG5(FGrid); RNG5.SeedUniqueString(std::string("The 5D RNG"));
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
LatticeFermion src (FGrid); random(RNG5,src);

View File

@ -3,12 +3,7 @@
EIGEN_URL='http://bitbucket.org/eigen/eigen/get/3.3.5.tar.bz2'
echo "-- deploying Eigen source..."
ARC=`basename ${EIGEN_URL}`
wget ${EIGEN_URL} --no-check-certificate && ./scripts/update_eigen.sh ${ARC} && rm ${ARC}
# patch for non-portable includes in Eigen 3.3.5
# apparently already fixed in Eigen HEAD so it should not be
# a problem in the future (A.P.)
patch Grid/Eigen/unsupported/CXX11/Tensor scripts/eigen-3.3.5.Tensor.patch
wget ${EIGEN_URL} --no-check-certificate && ./scripts/update_eigen.sh `basename ${EIGEN_URL}` && rm `basename ${EIGEN_URL}`
echo '-- generating Make.inc files...'
./scripts/filelist

View File

@ -6,8 +6,8 @@ AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE([subdir-objects 1.13])
AM_EXTRA_RECURSIVE_TARGETS([tests bench])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_SRCDIR([Grid/Grid.h])
AC_CONFIG_HEADERS([Grid/Config.h],[sed -i 's|PACKAGE_|GRID_|' Grid/Config.h])
AC_CONFIG_SRCDIR([lib/Grid.h])
AC_CONFIG_HEADERS([lib/Config.h],[sed -i 's|PACKAGE_|GRID_|' lib/Config.h])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
################ Get git info
@ -41,7 +41,7 @@ AC_TYPE_UINT64_T
############### OpenMP
AC_OPENMP
ac_openmp=no
if test "${ac_cv_prog_cxx_openmp}X" != "noX"; then
if test "${OPENMP_CXXFLAGS}X" != "X"; then
ac_openmp=yes
AM_CXXFLAGS="$OPENMP_CXXFLAGS $AM_CXXFLAGS"
AM_LDFLAGS="$OPENMP_CXXFLAGS $AM_LDFLAGS"
@ -88,13 +88,6 @@ AC_ARG_WITH([lime],
[AM_CXXFLAGS="-I$with_lime/include $AM_CXXFLAGS"]
[AM_LDFLAGS="-L$with_lime/lib $AM_LDFLAGS"])
############### OpenSSL
AC_ARG_WITH([openssl],
[AS_HELP_STRING([--with-openssl=prefix],
[try this for a non-standard install prefix of the OpenSSL library])],
[AM_CXXFLAGS="-I$with_openssl/include $AM_CXXFLAGS"]
[AM_LDFLAGS="-L$with_openssl/lib $AM_LDFLAGS"])
############### lapack
AC_ARG_ENABLE([lapack],
[AC_HELP_STRING([--enable-lapack=yes|no|prefix], [enable LAPACK])],
@ -195,13 +188,9 @@ AC_SEARCH_LIBS([fftw_execute], [fftw3],
AC_SEARCH_LIBS([limeCreateReader], [lime],
[AC_DEFINE([HAVE_LIME], [1], [Define to 1 if you have the `LIME' library])]
[have_lime=true],
[AC_MSG_ERROR(LIME library was not found in your system.)])
AC_SEARCH_LIBS([SHA256_Init], [crypto],
[AC_DEFINE([HAVE_CRYPTO], [1], [Define to 1 if you have the `OpenSSL' library])]
[have_crypto=true],
[AC_MSG_ERROR(OpenSSL library was not found in your system.)])
AC_CHECK_HEADER([openssl/sha.h], [], [AC_MSG_ERROR(OpenSSL library found but without headers.)], [AC_INCLUDES_DEFAULT([])])
[AC_MSG_WARN(C-LIME library was not found in your system.
In order to use ILGG file format please install or provide the correct path to your installation
Info at: http://usqcd.jlab.org/usqcd-docs/c-lime/)])
AC_SEARCH_LIBS([crc32], [z],
[AC_DEFINE([HAVE_ZLIB], [1], [Define to 1 if you have the `LIBZ' library])]
@ -491,9 +480,9 @@ GRID_LIBS=$LIBS
GRID_SHORT_SHA=`git rev-parse --short HEAD`
GRID_SHA=`git rev-parse HEAD`
GRID_BRANCH=`git rev-parse --abbrev-ref HEAD`
AM_CXXFLAGS="-I${abs_srcdir} $AM_CXXFLAGS"
AM_CFLAGS="-I${abs_srcdir} $AM_CFLAGS"
AM_LDFLAGS="-L${cwd}/Grid $AM_LDFLAGS"
AM_CXXFLAGS="-I${abs_srcdir}/include -I${abs_srcdir}/Eigen/ -I${abs_srcdir}/Eigen/unsupported $AM_CXXFLAGS"
AM_CFLAGS="-I${abs_srcdir}/include -I${abs_srcdir}/Eigen/ -I${abs_srcdir}/Eigen/unsupported $AM_CFLAGS"
AM_LDFLAGS="-L${cwd}/lib $AM_LDFLAGS"
AC_SUBST([AM_CFLAGS])
AC_SUBST([AM_CXXFLAGS])
AC_SUBST([AM_LDFLAGS])
@ -547,7 +536,7 @@ AC_SUBST([GRID_SUMMARY])
AC_CONFIG_FILES([grid-config], [chmod +x grid-config])
AC_CONFIG_FILES(Makefile)
AC_CONFIG_FILES(Grid/Makefile)
AC_CONFIG_FILES(lib/Makefile)
AC_CONFIG_FILES(tests/Makefile)
AC_CONFIG_FILES(tests/IO/Makefile)
AC_CONFIG_FILES(tests/core/Makefile)
@ -561,8 +550,8 @@ AC_CONFIG_FILES(tests/smearing/Makefile)
AC_CONFIG_FILES(tests/qdpxx/Makefile)
AC_CONFIG_FILES(tests/testu01/Makefile)
AC_CONFIG_FILES(benchmarks/Makefile)
AC_CONFIG_FILES(Hadrons/Makefile)
AC_CONFIG_FILES(Hadrons/Utilities/Makefile)
AC_CONFIG_FILES(extras/Makefile)
AC_CONFIG_FILES(extras/Hadrons/Makefile)
AC_OUTPUT
echo ""

Binary file not shown.

View File

@ -1,20 +0,0 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = Grid
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@ -1,170 +0,0 @@
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Grid'
copyright = '2018, Peter Boyle, Guido Cossu, Antonin Portelli, Azusa Yamaguchi'
author = 'Peter Boyle, Guido Cossu, Antonin Portelli, Azusa Yamaguchi'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'manual'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
primary_domain = 'cpp'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_use_smartypants = False
smart_quotes = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Griddoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
#'papersize': 'a4paper',
'extraclassoptions': 'openany,oneside',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '8pt',
# Additional stuff for the LaTeX preamble.
#
# Latex figure (float) alignment
#
'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Grid.tex', ' Grid Documentation ',
'\includegraphics[width=.4\\textwidth]{logo.png} \\\\Peter Boyle, Guido Cossu, Antonin Portelli, Azusa Yamaguchi', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'grid', 'Grid Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Grid', 'Grid Documentation',
author, 'Grid', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

View File

@ -1,232 +0,0 @@
Interfacing with external software
========================================
Grid provides a number of important modules, such as solvers and
eigensolvers, that are highly optimized for complex vector/SIMD
architectures, such as the Intel Xeon Phi KNL and Skylake processors.
This growing library, with appropriate interfacing, can be accessed
from existing code. Here we describe interfacing issues and provide
examples.
MPI initialization
--------------------
Grid supports threaded MPI sends and receives and, if running with
more than one thread, requires the MPI_THREAD_MULTIPLE mode of message
passing. If the user initializes MPI before starting Grid, the
appropriate initialization call is::
MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, &provided);
assert(MPI_THREAD_MULTIPLE == provided);
Grid Initialization
---------------------
Grid itself is initialized with a call::
Grid_init(&argc, &argv);
Command line options include::
--mpi n.n.n.n : default MPI decomposition
--threads n : default number of OMP threads
--grid n.n.n.n : default Grid size
where `argc` and `argv` are constructed to simulate the command-line
options described above. At a minimum one usually provides the
`--grid` and `--mpi` parameters. The former specifies the lattice
dimensions and the latter specifies the grid of processors (MPI
ranks). If these parameters are not specified with the `Grid_init`
call, they need to be supplied later when creating Grid fields.
The following Grid procedures are useful for verifying that Grid
"default" values are properly initialized.
============================================================= ===========================================================================================================
Grid procedure returns
============================================================= ===========================================================================================================
std::vector<int> GridDefaultLatt(); lattice size
std::vector<int> GridDefaultSimd(int Nd,vComplex::Nsimd()); SIMD layout
std::vector<int> GridDefaultMpi(); MPI layout
int Grid::GridThread::GetThreads(); number of threads
============================================================= ===========================================================================================================
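A minimal sketch that prints these defaults (assuming `Grid_init` has
already been called as above and `using namespace Grid;` is in effect) is::
std::cout << "Grid default lattice :";
for (auto d: GridDefaultLatt()) std::cout << " " << d;
std::cout << std::endl;
std::cout << "Grid default MPI grid:";
for (auto d: GridDefaultMpi())  std::cout << " " << d;
std::cout << std::endl;
std::cout << "Grid OMP threads : " << GridThread::GetThreads() << std::endl;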
MPI coordination
----------------
Grid wants to use its own numbering of MPI ranks and its own
assignment of lattice coordinates to each rank. Obviously, the
calling program and Grid must agree on these conventions. One should
use Grid's Cartesian communicator class to discover the processor
assignments. For a four-dimensional processor grid one can define::
static Grid::CartesianCommunicator *grid_cart = NULL;
grid_cart = new Grid::CartesianCommunicator(processors);
where `processors` is of type `std::vector<int>`, with values matching
the MPI processor-layout dimensions specified with the `--mpi`
argument in the `Grid_Init` call. Then each MPI rank can obtain its
processor coordinate using the Cartesian communicator instantiated
above. For example, in four dimensions::
std::vector<int> pePos(4);
for(int i=0; i<4; i++)
pePos[i] = grid_cart->_processor_coor[i];
and each MPI process can get its world rank from its processor
coordinates using::
int peRank = grid_cart->RankFromProcessorCoor(pePos);
Conversely, each MPI process can get its processor coordinates from
its world rank using::
grid_cart->ProcessorCoorFromRank(peRank, pePos);
If the calling program initialized MPI before initializing Grid, it is
then important for each MPI process in the calling program to reset
its rank number so it agrees with Grid::
MPI_Comm comm;
MPI_Comm_split(MPI_COMM_THISJOB,jobid,peRank,&comm);
MPI_COMM_THISJOB = comm;
where `MPI_COMM_THISJOB` is initially a copy of `MPI_COMM_WORLD` (with
`jobid = 0`), or it is a split communicator with `jobid` equal to the
index number of the subcommunicator. Once this is done,::
MPI_Comm_rank(MPI_COMM_THISJOB, &myrank);
returns a rank that agrees with Grid's `peRank`.
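Putting these calls together, a minimal sketch of the whole rank
reassignment (the `processors` vector below is a hypothetical 2.2.2.2
example and must match the `--mpi` argument; `MPI_COMM_THISJOB` and
`jobid` are as described above) is::
std::vector<int> processors = {2, 2, 2, 2};  // must match --mpi 2.2.2.2
Grid::CartesianCommunicator *grid_cart = new Grid::CartesianCommunicator(processors);
std::vector<int> pePos(4);
for(int i=0; i<4; i++)
pePos[i] = grid_cart->_processor_coor[i];
int peRank = grid_cart->RankFromProcessorCoor(pePos);
MPI_Comm comm;
MPI_Comm_split(MPI_COMM_THISJOB, jobid, peRank, &comm);
MPI_COMM_THISJOB = comm;
int myrank;
MPI_Comm_rank(MPI_COMM_THISJOB, &myrank);   // now agrees with Grid's peRank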
QMP coordination
----------------
If the calling program uses the SciDAC QMP message-passing package, a
call to QMP_comm_split() instead can be used to reassign the ranks.
In the example below, `peGrid` gives the processor-grid dimensions,
usually set on the command line with `-qmp-geom`.
**Example**::
int NDIM = QMP_get_allocated_number_of_dimensions();
Grid::Grid_init(argc,argv);
FgridBase::grid_initted=true;
std::vector<int> processors;
for(int i=0;i<NDIM;i++) processors.push_back(peGrid[i]);
Grid::CartesianCommunicator grid_cart(processors);
std::vector<int> pePos(NDIM);
for(int i=NDIM-1;i>=0;i--)
pePos[i] = grid_cart._processor_coor[i];
int peRank = grid_cart.RankFromProcessorCoor(pePos);
QMP_comm_split(QMP_comm_get_default(),0,peRank,&qmp_comm);
QMP_comm_set_default(qmp_comm);
Mapping fields between Grid and user layouts
---------------------------------------------
In order to map data between calling-program and Grid layouts, it is
important to know how the lattice sites are distributed across the
processor grid. A lattice site with coordinates `r[mu]` is assigned
to the processor with processor coordinates `pePos[mu]` according to
the rule::
pePos[mu] = r[mu]/dim[mu]
where `dim[mu]` is the local sub-lattice extent in the `mu` direction,
i.e. the global lattice dimension divided by the number of MPI ranks in
that direction. For
performance reasons, it is important that the external data layout
follow the same rule. Then data mapping can be done without
requiring costly communication between ranks. We assume this is the
case here.
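For example, a global 16.16.16.32 lattice distributed over a 2.2.2.4
processor grid has a local extent of 8.8.8.8 in each direction, so the site
`r = (11,3,9,17)` is owned by the rank with processor coordinates
`pePos = (11/8, 3/8, 9/8, 17/8) = (1,0,1,2)`.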
When mapping data to and from Grid, one must choose a lattice object
defined on the appropriate grid, whether it be a full lattice (4D
`GridCartesian`), one of the checkerboards (4D
`GridRedBlackCartesian`), a five-dimensional full grid (5D
`GridCartesian`), or a five-dimensional checkerboard (5D
`GridRedBlackCartesian`). For example, an improved staggered-fermion
color-vector field `cv` on a single checkerboard would be constructed
using
**Example**::
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
GridCartesian Grid(latt_size,simd_layout,mpi_layout);
GridRedBlackCartesian RBGrid(&Grid);
typename ImprovedStaggeredFermion::FermionField cv(RBGrid);
The example above assumes that the grid default values were set in the
`Grid_init` call. If not, they can be set at this point and passed
when `GridCartesian` is instantiated here. To map data within an MPI
rank, the external code must iterate over the sites belonging to that
rank (full or checkerboard as appropriate). Note that the site
coordinates are specified relative to the origin of the lattice
subvolume on that rank. To import data into Grid, the external data on
a single site with coordinates `r` is first copied into the
appropriate Grid scalar object `s`. Then it is copied into the Grid
lattice field `l` with `pokeLocalSite`::
pokeLocalSite(const sobj &s, Lattice<vobj> &l, Coordinate &r);
To export data from Grid, the reverse operation starts with::
peekLocalSite(sobj &s, const Lattice<vobj> &l, Coordinate &r);
and then copies the single-site data from `s` into the corresponding
external type.
Here is an example that maps a single site's worth of data in a MILC
color-vector field to a Grid scalar ColourVector object `cVec` and from
there to the lattice colour-vector field `cv`, as defined above.
**Example**::
indexToCoords(idx,r);
ColourVector cVec;
for(int col=0; col<Nc; col++)
cVec()()(col) =
Complex(src[idx].c[col].real, src[idx].c[col].imag);
pokeLocalSite(cVec, cv, r);
Here the `indexToCoords()` function is the MILC-side mapping from the
MILC site index `idx` to the 4D lattice coordinate `r`.
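The export direction follows the same pattern in reverse. Below is a minimal
sketch, where `dest` stands for a hypothetical MILC-side destination array
with the same layout as `src` above.
**Example**::
indexToCoords(idx,r);
ColourVector cVec;
peekLocalSite(cVec, cv, r); // copy the Grid lattice site into the scalar object
for(int col=0; col<Nc; col++){
dest[idx].c[col].real = cVec()()(col).real();
dest[idx].c[col].imag = cVec()()(col).imag();
}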
Grid provides block- and multiple-rhs conjugate-gradient solvers. For
this purpose it uses a 5D lattice. To map data to and from Grid data
types, the index for the right-hand-side vector becomes the zeroth
coordinate of a five-dimensional vector `r5`. The remaining
components of `r5` contain the 4D space-time coordinates. The
`pokeLocalSite/peekLocalSite` operations then accept the coordinate
`r5`, provided the destination/source lattice object is also 5D. In
the example below data from a single site specified by `idx`,
belonging to a set of `Ls` MILC color-vector fields, are copied into a
Grid 5D fermion field `cv5`.
**Example**::
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, UGrid);
typename ImprovedStaggeredFermion5D::FermionField cv5(FrbGrid);
std::vector<int> r(4);
indexToCoords(idx,r);
std::vector<int> r5(1,0);
for( int d = 0; d < 4; d++ ) r5.push_back(r[d]);
for( int j = 0; j < Ls; j++ ){
r5[0] = j;
ColourVector cVec;
for(int col=0; col<Nc; col++){
cVec()()(col) =
Complex(src[j][idx].c[col].real, src[j][idx].c[col].imag);
}
pokeLocalSite(cVec, cv5, r5);
}

Binary file not shown.


File diff suppressed because it is too large.

View File

@ -0,0 +1,146 @@
#ifndef A2A_Reduction_hpp_
#define A2A_Reduction_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Environment.hpp>
#include <Grid/Hadrons/Solver.hpp>
BEGIN_HADRONS_NAMESPACE
////////////////////////////////////////////
// A2A Meson Field Inner Product
////////////////////////////////////////////
template <class FermionField>
void sliceInnerProductMesonField(std::vector<std::vector<ComplexD>> &mat,
const std::vector<Lattice<FermionField>> &lhs,
const std::vector<Lattice<FermionField>> &rhs,
int orthogdim)
{
typedef typename FermionField::scalar_type scalar_type;
typedef typename FermionField::vector_type vector_type;
int Lblock = lhs.size();
int Rblock = rhs.size();
GridBase *grid = lhs[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
assert(mat.size() == Lblock * Rblock);
for (int t = 0; t < mat.size(); t++)
{
assert(mat[t].size() == Nt);
}
int fd = grid->_fdimensions[orthogdim];
int ld = grid->_ldimensions[orthogdim];
int rd = grid->_rdimensions[orthogdim];
// will locally sum vectors first
// sum across these down to scalars
// splitting the SIMD
std::vector<vector_type, alignedAllocator<vector_type>> lvSum(rd * Lblock * Rblock);
for(int r=0;r<rd * Lblock * Rblock;r++)
{
lvSum[r]=zero;
}
std::vector<scalar_type> lsSum(ld * Lblock * Rblock, scalar_type(0.0));
int e1 = grid->_slice_nblock[orthogdim];
int e2 = grid->_slice_block[orthogdim];
int stride = grid->_slice_stride[orthogdim];
// std::cout << GridLogMessage << " Entering first parallel loop " << std::endl;
// Parallelise over t-direction doesn't expose as much parallelism as needed for KNL
parallel_for(int r = 0; r < rd; r++)
{
int so = r * grid->_ostride[orthogdim]; // base offset for start of plane
for (int n = 0; n < e1; n++)
{
for (int b = 0; b < e2; b++)
{
int ss = so + n * stride + b;
for (int i = 0; i < Lblock; i++)
{
auto left = conjugate(lhs[i]._odata[ss]);
for (int j = 0; j < Rblock; j++)
{
int idx = i + Lblock * j + Lblock * Rblock * r;
auto right = rhs[j]._odata[ss];
vector_type vv = left()(0)(0) * right()(0)(0)
+ left()(0)(1) * right()(0)(1)
+ left()(0)(2) * right()(0)(2)
+ left()(1)(0) * right()(1)(0)
+ left()(1)(1) * right()(1)(1)
+ left()(1)(2) * right()(1)(2)
+ left()(2)(0) * right()(2)(0)
+ left()(2)(1) * right()(2)(1)
+ left()(2)(2) * right()(2)(2)
+ left()(3)(0) * right()(3)(0)
+ left()(3)(1) * right()(3)(1)
+ left()(3)(2) * right()(3)(2);
lvSum[idx] = lvSum[idx] + vv;
}
}
}
}
}
// std::cout << GridLogMessage << " Entering second parallel loop " << std::endl;
// Sum across simd lanes in the plane, breaking out orthog dir.
parallel_for(int rt = 0; rt < rd; rt++)
{
std::vector<int> icoor(Nd);
for (int i = 0; i < Lblock; i++)
{
for (int j = 0; j < Rblock; j++)
{
iScalar<vector_type> temp;
std::vector<iScalar<scalar_type>> extracted(Nsimd);
temp._internal = lvSum[i + Lblock * j + Lblock * Rblock * rt];
extract(temp, extracted);
for (int idx = 0; idx < Nsimd; idx++)
{
grid->iCoorFromIindex(icoor, idx);
int ldx = rt + icoor[orthogdim] * rd;
int ij_dx = i + Lblock * j + Lblock * Rblock * ldx;
lsSum[ij_dx] = lsSum[ij_dx] + extracted[idx]._internal;
}
}
}
}
// std::cout << GridLogMessage << " Entering non parallel loop " << std::endl;
for (int t = 0; t < fd; t++)
{
int pt = t/ld; // processor plane
int lt = t%ld;
for (int i = 0; i < Lblock; i++)
{
for (int j = 0; j < Rblock; j++)
{
if (pt == grid->_processor_coor[orthogdim])
{
int ij_dx = i + Lblock * j + Lblock * Rblock * lt;
mat[i + j * Lblock][t] = lsSum[ij_dx];
}
else
{
mat[i + j * Lblock][t] = scalar_type(0.0);
}
}
}
}
// std::cout << GridLogMessage << " Done " << std::endl;
// defer sum over nodes.
return;
}
END_HADRONS_NAMESPACE
#endif // A2A_Reduction_hpp_

View File

@ -0,0 +1,210 @@
#ifndef A2A_Vectors_hpp_
#define A2A_Vectors_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Environment.hpp>
#include <Grid/Hadrons/Solver.hpp>
BEGIN_HADRONS_NAMESPACE
////////////////////////////////
// A2A Modes
////////////////////////////////
template <class Field, class Matrix, class Solver>
class A2AModesSchurDiagTwo
{
private:
const std::vector<Field> *evec;
const std::vector<RealD> *eval;
Matrix &action;
Solver &solver;
std::vector<Field> w_high_5d, v_high_5d, w_high_4d, v_high_4d;
const int Nl, Nh;
const bool return_5d;
public:
int getNl (void ) {return Nl;}
int getNh (void ) {return Nh;}
int getN (void ) {return Nh+Nl;}
A2AModesSchurDiagTwo(const std::vector<Field> *_evec, const std::vector<RealD> *_eval,
Matrix &_action,
Solver &_solver,
std::vector<Field> _w_high_5d, std::vector<Field> _v_high_5d,
std::vector<Field> _w_high_4d, std::vector<Field> _v_high_4d,
const int _Nl, const int _Nh,
const bool _return_5d)
: evec(_evec), eval(_eval),
action(_action),
solver(_solver),
w_high_5d(_w_high_5d), v_high_5d(_v_high_5d),
w_high_4d(_w_high_4d), v_high_4d(_v_high_4d),
Nl(_Nl), Nh(_Nh),
return_5d(_return_5d){};
void high_modes(Field &source_5d, Field &w_source_5d, Field &source_4d, int i)
{
int i5d;
LOG(Message) << "A2A high modes for i = " << i << std::endl;
i5d = 0;
if (return_5d) i5d = i;
this->high_mode_v(action, solver, source_5d, v_high_5d[i5d], v_high_4d[i]);
this->high_mode_w(w_source_5d, source_4d, w_high_5d[i5d], w_high_4d[i]);
}
void return_v(int i, Field &vout_5d, Field &vout_4d)
{
if (i < Nl)
{
this->low_mode_v(action, evec->at(i), eval->at(i), vout_5d, vout_4d);
}
else
{
vout_4d = v_high_4d[i - Nl];
if (!(return_5d)) i = Nl;
vout_5d = v_high_5d[i - Nl];
}
}
void return_w(int i, Field &wout_5d, Field &wout_4d)
{
if (i < Nl)
{
this->low_mode_w(action, evec->at(i), eval->at(i), wout_5d, wout_4d);
}
else
{
wout_4d = w_high_4d[i - Nl];
if (!(return_5d)) i = Nl;
wout_5d = w_high_5d[i - Nl];
}
}
void low_mode_v(Matrix &action, const Field &evec, const RealD &eval, Field &vout_5d, Field &vout_4d)
{
GridBase *grid = action.RedBlackGrid();
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
src_o = evec;
src_o.checkerboard = Odd;
pickCheckerboard(Even, sol_e, vout_5d);
pickCheckerboard(Odd, sol_o, vout_5d);
/////////////////////////////////////////////////////
// v_ie = -(1/eval_i) * MeeInv Meo MooInv evec_i
/////////////////////////////////////////////////////
action.MooeeInv(src_o, tmp);
assert(tmp.checkerboard == Odd);
action.Meooe(tmp, sol_e);
assert(sol_e.checkerboard == Even);
action.MooeeInv(sol_e, tmp);
assert(tmp.checkerboard == Even);
sol_e = (-1.0 / eval) * tmp;
assert(sol_e.checkerboard == Even);
/////////////////////////////////////////////////////
// v_io = (1/eval_i) * MooInv evec_i
/////////////////////////////////////////////////////
action.MooeeInv(src_o, tmp);
assert(tmp.checkerboard == Odd);
sol_o = (1.0 / eval) * tmp;
assert(sol_o.checkerboard == Odd);
setCheckerboard(vout_5d, sol_e);
assert(sol_e.checkerboard == Even);
setCheckerboard(vout_5d, sol_o);
assert(sol_o.checkerboard == Odd);
action.ExportPhysicalFermionSolution(vout_5d, vout_4d);
}
void low_mode_w(Matrix &action, const Field &evec, const RealD &eval, Field &wout_5d, Field &wout_4d)
{
GridBase *grid = action.RedBlackGrid();
SchurDiagTwoOperator<Matrix, Field> _HermOpEO(action);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
GridBase *fgrid = action.Grid();
Field tmp_wout(fgrid);
src_o = evec;
src_o.checkerboard = Odd;
pickCheckerboard(Even, sol_e, tmp_wout);
pickCheckerboard(Odd, sol_o, tmp_wout);
/////////////////////////////////////////////////////
// w_ie = - MeeInvDag MoeDag Doo evec_i
/////////////////////////////////////////////////////
_HermOpEO.Mpc(src_o, tmp);
assert(tmp.checkerboard == Odd);
action.MeooeDag(tmp, sol_e);
assert(sol_e.checkerboard == Even);
action.MooeeInvDag(sol_e, tmp);
assert(tmp.checkerboard == Even);
sol_e = (-1.0) * tmp;
/////////////////////////////////////////////////////
// w_io = Doo evec_i
/////////////////////////////////////////////////////
_HermOpEO.Mpc(src_o, sol_o);
assert(sol_o.checkerboard == Odd);
setCheckerboard(tmp_wout, sol_e);
assert(sol_e.checkerboard == Even);
setCheckerboard(tmp_wout, sol_o);
assert(sol_o.checkerboard == Odd);
action.DminusDag(tmp_wout, wout_5d);
action.ExportPhysicalFermionSource(wout_5d, wout_4d);
}
void high_mode_v(Matrix &action, Solver &solver, const Field &source, Field &vout_5d, Field &vout_4d)
{
GridBase *fgrid = action.Grid();
solver(vout_5d, source); // Note: solver is solver(out, in)
action.ExportPhysicalFermionSolution(vout_5d, vout_4d);
}
void high_mode_w(const Field &w_source_5d, const Field &source_4d, Field &wout_5d, Field &wout_4d)
{
wout_5d = w_source_5d;
wout_4d = source_4d;
}
};
// TODO: A2A for coarse eigenvectors
// template <class FineField, class CoarseField, class Matrix, class Solver>
// class A2ALMSchurDiagTwoCoarse : public A2AModesSchurDiagTwo<FineField, Matrix, Solver>
// {
// private:
// const std::vector<FineField> &subspace;
// const std::vector<CoarseField> &evec_coarse;
// const std::vector<RealD> &eval_coarse;
// Matrix &action;
// public:
// A2ALMSchurDiagTwoCoarse(const std::vector<FineField> &_subspace, const std::vector<CoarseField> &_evec_coarse, const std::vector<RealD> &_eval_coarse, Matrix &_action)
// : subspace(_subspace), evec_coarse(_evec_coarse), eval_coarse(_eval_coarse), action(_action){};
// void operator()(int i, FineField &vout, FineField &wout)
// {
// FineField prom_evec(subspace[0]._grid);
// blockPromote(evec_coarse[i], prom_evec, subspace);
// this->low_mode_v(action, prom_evec, eval_coarse[i], vout);
// this->low_mode_w(action, prom_evec, eval_coarse[i], wout);
// }
// };
END_HADRONS_NAMESPACE
#endif // A2A_Vectors_hpp_

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.cc
Source file: extras/Hadrons/Application.cc
Copyright (C) 2015-2018
@ -26,16 +26,16 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Application.hpp>
#include <Hadrons/GeneticScheduler.hpp>
#include <Hadrons/Modules.hpp>
#include <Grid/Hadrons/Application.hpp>
#include <Grid/Hadrons/GeneticScheduler.hpp>
#include <Grid/Hadrons/Modules.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
#define BIG_SEP "================"
#define SEP "----------------"
#define BIG_SEP "==============="
#define SEP "---------------"
/******************************************************************************
* Application implementation *
@ -54,18 +54,18 @@ Application::Application(void)
loc[d] /= mpi[d];
locVol_ *= loc[d];
}
LOG(Message) << "====== HADRONS APPLICATION INITIALISATION ======" << std::endl;
LOG(Message) << "====== HADRONS APPLICATION STARTING ======" << std::endl;
LOG(Message) << "** Dimensions" << std::endl;
LOG(Message) << "Global lattice: " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << "Global lattice : " << dim << std::endl;
LOG(Message) << "MPI partition : " << mpi << std::endl;
LOG(Message) << "Local lattice : " << loc << std::endl;
LOG(Message) << std::endl;
LOG(Message) << "** Default parameters (and associated C macros)" << std::endl;
LOG(Message) << "** Default parameters (and associated C macro)" << std::endl;
LOG(Message) << "ASCII output precision : " << MACOUT(DEFAULT_ASCII_PREC) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPLBASE) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPLBASE) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPLBASE) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPLBASE) << std::endl;
LOG(Message) << "Fermion implementation : " << MACOUTS(FIMPL) << std::endl;
LOG(Message) << "z-Fermion implementation: " << MACOUTS(ZFIMPL) << std::endl;
LOG(Message) << "Scalar implementation : " << MACOUTS(SIMPL) << std::endl;
LOG(Message) << "Gauge implementation : " << MACOUTS(GIMPL) << std::endl;
LOG(Message) << "Eigenvector base size : "
<< MACOUT(HADRONS_DEFAULT_LANCZOS_NBASIS) << std::endl;
LOG(Message) << "Schur decomposition : " << MACOUTS(HADRONS_DEFAULT_SCHUR) << std::endl;
@ -88,6 +88,7 @@ Application::Application(const std::string parameterFileName)
void Application::setPar(const Application::GlobalPar &par)
{
par_ = par;
env().setSeed(strToVec<int>(par_.seed));
}
const Application::GlobalPar & Application::getPar(void)
@ -98,26 +99,14 @@ const Application::GlobalPar & Application::getPar(void)
// execute /////////////////////////////////////////////////////////////////////
void Application::run(void)
{
LOG(Message) << "====== HADRONS APPLICATION START ======" << std::endl;
if (!parameterFileName_.empty() and (vm().getNModule() == 0))
{
parseParameterFile(parameterFileName_);
}
if (getPar().runId.empty())
{
HADRONS_ERROR(Definition, "run id is empty");
}
LOG(Message) << "RUN ID '" << getPar().runId << "'" << std::endl;
vm().setRunId(getPar().runId);
vm().printContent();
env().printContent();
schedule();
printSchedule();
if (!getPar().graphFile.empty())
{
makeFileDir(getPar().graphFile, env().getGrid());
vm().dumpModuleGraph(getPar().graphFile);
}
configLoop();
}
@ -229,12 +218,11 @@ void Application::loadSchedule(const std::string filename)
program_.push_back(vm().getModuleAddress(name));
}
loadedSchedule_ = true;
scheduled_ = true;
}
void Application::printSchedule(void)
{
if (!scheduled_ and !loadedSchedule_)
if (!scheduled_)
{
HADRONS_ERROR(Definition, "Computation not scheduled");
}

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Application.hpp
Source file: extras/Hadrons/Application.hpp
Copyright (C) 2015-2018
@ -29,9 +29,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_Application_hpp_
#define Hadrons_Application_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/VirtualMachine.hpp>
#include <Hadrons/Module.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/VirtualMachine.hpp>
#include <Grid/Hadrons/Module.hpp>
BEGIN_HADRONS_NAMESPACE
@ -55,8 +55,7 @@ public:
GRID_SERIALIZABLE_CLASS_MEMBERS(GlobalPar,
TrajRange, trajCounter,
VirtualMachine::GeneticPar, genetic,
std::string, runId,
std::string, graphFile);
std::string, seed);
};
public:
// constructors

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/EigenPack.hpp
Source file: extras/Hadrons/EigenPack.hpp
Copyright (C) 2015-2018
@ -28,7 +28,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_EigenPack_hpp_
#define Hadrons_EigenPack_hpp_
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/algorithms/iterative/Deflation.h>
#include <Grid/algorithms/iterative/LocalCoherenceLanczos.h>
@ -39,31 +39,22 @@ BEGIN_HADRONS_NAMESPACE
#define HADRONS_DEFAULT_LANCZOS_NBASIS 60
#endif
#define HADRONS_DUMP_EP_METADATA \
LOG(Message) << "Eigenpack metadata:" << std::endl;\
LOG(Message) << "* operator" << std::endl;\
LOG(Message) << record.operatorXml << std::endl;\
LOG(Message) << "* solver" << std::endl;\
LOG(Message) << record.solverXml << std::endl;
struct PackRecord
{
std::string operatorXml, solverXml;
};
struct VecRecord: Serializable
{
GRID_SERIALIZABLE_CLASS_MEMBERS(VecRecord,
unsigned int, index,
double, eval);
VecRecord(void): index(0), eval(0.) {}
};
template <typename F>
class EigenPack
{
public:
typedef F Field;
struct PackRecord
{
std::string operatorXml, solverXml;
};
struct VecRecord: Serializable
{
GRID_SERIALIZABLE_CLASS_MEMBERS(VecRecord,
unsigned int, index,
double, eval);
VecRecord(void): index(0), eval(0.) {}
};
public:
std::vector<RealD> eval;
std::vector<F> evec;
@ -90,16 +81,11 @@ public:
for(int k = 0; k < evec.size(); ++k)
{
basicReadSingle(evec[k], eval[k], evecFilename(fileStem, k, traj), k);
if (k == 0)
{
HADRONS_DUMP_EP_METADATA;
}
}
}
else
{
basicRead(evec, eval, evecFilename(fileStem, -1, traj), evec.size());
HADRONS_DUMP_EP_METADATA;
}
}
@ -117,39 +103,6 @@ public:
basicWrite(evecFilename(fileStem, -1, traj), evec, eval, evec.size());
}
}
static void readHeader(PackRecord &record, ScidacReader &binReader)
{
std::string recordXml;
binReader.readLimeObject(recordXml, SCIDAC_FILE_XML);
XmlReader xmlReader(recordXml, true, "eigenPackPar");
xmlReader.push();
xmlReader.readCurrentSubtree(record.operatorXml);
xmlReader.nextElement();
xmlReader.readCurrentSubtree(record.solverXml);
}
template <typename T>
static void readElement(T &evec, VecRecord &vecRecord, ScidacReader &binReader)
{
binReader.readScidacFieldRecord(evec, vecRecord);
}
static void writeHeader(ScidacWriter &binWriter, PackRecord &record)
{
XmlWriter xmlWriter("", "eigenPackPar");
xmlWriter.pushXmlString(record.operatorXml);
xmlWriter.pushXmlString(record.solverXml);
binWriter.writeLimeObject(1, 1, xmlWriter, "parameters", SCIDAC_FILE_XML);
}
template <typename T>
static void writeElement(ScidacWriter &binWriter, T &evec, VecRecord &vecRecord)
{
binWriter.writeScidacFieldRecord(evec, vecRecord, DEFAULT_ASCII_PREC);
}
protected:
std::string evecFilename(const std::string stem, const int vec, const int traj)
{
@ -166,19 +119,19 @@ protected:
}
template <typename T>
void basicRead(std::vector<T> &evec, std::vector<RealD> &eval,
void basicRead(std::vector<T> &evec, std::vector<double> &eval,
const std::string filename, const unsigned int size)
{
ScidacReader binReader;
ScidacReader binReader;
binReader.open(filename);
readHeader(record, binReader);
binReader.skipPastObjectRecord(SCIDAC_FILE_XML);
for(int k = 0; k < size; ++k)
{
VecRecord vecRecord;
LOG(Message) << "Reading eigenvector " << k << std::endl;
readElement(evec[k], vecRecord, binReader);
binReader.readScidacFieldRecord(evec[k], vecRecord);
if (vecRecord.index != k)
{
HADRONS_ERROR(Io, "Eigenvector " + std::to_string(k) + " has a"
@ -191,16 +144,16 @@ protected:
}
template <typename T>
void basicReadSingle(T &evec, RealD &eval, const std::string filename,
void basicReadSingle(T &evec, double &eval, const std::string filename,
const unsigned int index)
{
ScidacReader binReader;
VecRecord vecRecord;
binReader.open(filename);
readHeader(record, binReader);
binReader.skipPastObjectRecord(SCIDAC_FILE_XML);
LOG(Message) << "Reading eigenvector " << index << std::endl;
readElement(evec, vecRecord, binReader);
binReader.readScidacFieldRecord(evec, vecRecord);
if (vecRecord.index != index)
{
HADRONS_ERROR(Io, "Eigenvector " + std::to_string(index) + " has a"
@ -213,13 +166,16 @@ protected:
template <typename T>
void basicWrite(const std::string filename, std::vector<T> &evec,
const std::vector<RealD> &eval, const unsigned int size)
const std::vector<double> &eval, const unsigned int size)
{
ScidacWriter binWriter(evec[0]._grid->IsBoss());
XmlWriter xmlWriter("", "eigenPackPar");
makeFileDir(filename, evec[0]._grid);
xmlWriter.pushXmlString(record.operatorXml);
xmlWriter.pushXmlString(record.solverXml);
binWriter.open(filename);
writeHeader(binWriter, record);
binWriter.writeLimeObject(1, 1, xmlWriter, "parameters", SCIDAC_FILE_XML);
for(int k = 0; k < size; ++k)
{
VecRecord vecRecord;
@ -227,25 +183,28 @@ protected:
vecRecord.index = k;
vecRecord.eval = eval[k];
LOG(Message) << "Writing eigenvector " << k << std::endl;
writeElement(binWriter, evec[k], vecRecord);
binWriter.writeScidacFieldRecord(evec[k], vecRecord, DEFAULT_ASCII_PREC);
}
binWriter.close();
}
template <typename T>
void basicWriteSingle(const std::string filename, T &evec,
const RealD eval, const unsigned int index)
const double eval, const unsigned int index)
{
ScidacWriter binWriter(evec._grid->IsBoss());
XmlWriter xmlWriter("", "eigenPackPar");
VecRecord vecRecord;
makeFileDir(filename, evec._grid);
xmlWriter.pushXmlString(record.operatorXml);
xmlWriter.pushXmlString(record.solverXml);
binWriter.open(filename);
writeHeader(binWriter, record);
binWriter.writeLimeObject(1, 1, xmlWriter, "parameters", SCIDAC_FILE_XML);
vecRecord.index = index;
vecRecord.eval = eval;
LOG(Message) << "Writing eigenvector " << index << std::endl;
writeElement(binWriter, evec, vecRecord);
binWriter.writeScidacFieldRecord(evec, vecRecord, DEFAULT_ASCII_PREC);
binWriter.close();
}
};
@ -359,8 +318,6 @@ using CoarseFermionEigenPack = CoarseEigenPack<
typename FImpl::SiteComplex,
nBasis>::CoarseField>;
#undef HADRONS_DUMP_EP_METADATA
END_HADRONS_NAMESPACE
#endif // Hadrons_EigenPack_hpp_

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.cc
Source file: extras/Hadrons/Environment.cc
Copyright (C) 2015-2018
@ -26,16 +26,16 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Environment.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Environment.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
#define ERROR_NO_ADDRESS(address)\
HADRONS_ERROR_REF(ObjectDefinition, "no object with address " + std::to_string(address), address);
HADRONS_ERROR(Definition, "no object with address " + std::to_string(address));
/******************************************************************************
* Environment implementation *
@ -43,18 +43,137 @@ HADRONS_ERROR_REF(ObjectDefinition, "no object with address " + std::to_string(a
// constructor /////////////////////////////////////////////////////////////////
Environment::Environment(void)
{
dim_ = GridDefaultLatt();
nd_ = dim_.size();
createGrid<vComplex>(1);
dim_ = GridDefaultLatt();
nd_ = dim_.size();
grid4d_.reset(SpaceTimeGrid::makeFourDimGrid(
dim_, GridDefaultSimd(nd_, vComplex::Nsimd()),
GridDefaultMpi()));
gridRb4d_.reset(SpaceTimeGrid::makeFourDimRedBlackGrid(grid4d_.get()));
vol_ = 1.;
for (auto d: dim_)
{
vol_ *= d;
}
rng4d_.reset(new GridParallelRNG(getGrid()));
rng4d_.reset(new GridParallelRNG(grid4d_.get()));
}
// grids ///////////////////////////////////////////////////////////////////////
void Environment::createGrid(const unsigned int Ls)
{
if ((Ls > 1) and (grid5d_.find(Ls) == grid5d_.end()))
{
auto g = getGrid();
grid5d_[Ls].reset(SpaceTimeGrid::makeFiveDimGrid(Ls, g));
gridRb5d_[Ls].reset(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, g));
}
}
void Environment::createCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls)
{
int nd = getNd();
std::vector<int> fineDim = getDim(), coarseDim;
unsigned int cLs;
auto key4d = blockSize, key5d = blockSize;
createGrid(Ls);
coarseDim.resize(nd);
for (int d = 0; d < coarseDim.size(); d++)
{
coarseDim[d] = fineDim[d]/blockSize[d];
if (coarseDim[d]*blockSize[d] != fineDim[d])
{
HADRONS_ERROR(Size, "Fine dimension " + std::to_string(d)
+ " (" + std::to_string(fineDim[d])
+ ") not divisible by coarse dimension ("
+ std::to_string(coarseDim[d]) + ")");
}
}
if (blockSize.size() > nd)
{
cLs = Ls/blockSize[nd];
if (cLs*blockSize[nd] != Ls)
{
HADRONS_ERROR(Size, "Fine Ls (" + std::to_string(Ls)
+ ") not divisible by coarse Ls ("
+ std::to_string(cLs) + ")");
}
key4d.resize(nd);
key5d.push_back(Ls);
}
gridCoarse4d_[key4d].reset(
SpaceTimeGrid::makeFourDimGrid(coarseDim,
GridDefaultSimd(nd, vComplex::Nsimd()), GridDefaultMpi()));
if (Ls > 1)
{
gridCoarse5d_[key5d].reset(
SpaceTimeGrid::makeFiveDimGrid(cLs, gridCoarse4d_[key4d].get()));
}
}
GridCartesian * Environment::getGrid(const unsigned int Ls) const
{
try
{
if (Ls == 1)
{
return grid4d_.get();
}
else
{
return grid5d_.at(Ls).get();
}
}
catch(std::out_of_range &)
{
HADRONS_ERROR(Definition, "no grid with Ls= " + std::to_string(Ls));
}
}
GridRedBlackCartesian * Environment::getRbGrid(const unsigned int Ls) const
{
try
{
if (Ls == 1)
{
return gridRb4d_.get();
}
else
{
return gridRb5d_.at(Ls).get();
}
}
catch(std::out_of_range &)
{
HADRONS_ERROR(Definition, "no red-black grid with Ls= " + std::to_string(Ls));
}
}
GridCartesian * Environment::getCoarseGrid(
const std::vector<int> &blockSize, const unsigned int Ls) const
{
auto key = blockSize;
try
{
if (Ls == 1)
{
key.resize(getNd());
return gridCoarse4d_.at(key).get();
}
else
{
key.push_back(Ls);
return gridCoarse5d_.at(key).get();
}
}
catch(std::out_of_range &)
{
HADRONS_ERROR(Definition, "no coarse grid with Ls= " + std::to_string(Ls));
}
}
unsigned int Environment::getNd(void) const
{
return nd_;
@ -76,6 +195,11 @@ double Environment::getVolume(void) const
}
// random number generator /////////////////////////////////////////////////////
void Environment::setSeed(const std::vector<int> &seed)
{
rng4d_->SeedFixedIntegers(seed);
}
GridParallelRNG * Environment::get4dRng(void) const
{
return rng4d_.get();
@ -96,8 +220,7 @@ void Environment::addObject(const std::string name, const int moduleAddress)
}
else
{
HADRONS_ERROR_REF(ObjectDefinition, "object '" + name + "' already exists",
getObjectAddress(name));
HADRONS_ERROR(Definition, "object '" + name + "' already exists");
}
}

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Environment.hpp
Source file: extras/Hadrons/Environment.hpp
Copyright (C) 2015-2018
@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_Environment_hpp_
#define Hadrons_Environment_hpp_
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE
@ -63,9 +63,6 @@ inline Environment & env(void) const\
return Environment::getInstance();\
}
#define DEFINE_ENV_LAMBDA \
auto env = [](void)->Environment &{return Environment::getInstance();}
class Environment
{
SINGLETON(Environment);
@ -86,33 +83,21 @@ private:
int module{-1};
std::unique_ptr<Object> data{nullptr};
};
typedef std::pair<size_t, unsigned int> FineGridKey;
typedef std::pair<size_t, std::vector<int>> CoarseGridKey;
public:
// grids
template <typename VType = vComplex>
void createGrid(const unsigned int Ls);
template <typename VType = vComplex>
void createCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls);
template <typename VType = vComplex>
GridCartesian * getGrid(void);
template <typename VType = vComplex>
GridRedBlackCartesian * getRbGrid(void);
template <typename VType = vComplex>
GridCartesian * getCoarseGrid(const std::vector<int> &blockSize);
template <typename VType = vComplex>
GridCartesian * getGrid(const unsigned int Ls);
template <typename VType = vComplex>
GridRedBlackCartesian * getRbGrid(const unsigned int Ls);
template <typename VType = vComplex>
const unsigned int Ls = 1);
GridCartesian * getGrid(const unsigned int Ls = 1) const;
GridRedBlackCartesian * getRbGrid(const unsigned int Ls = 1) const;
GridCartesian * getCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls);
const unsigned int Ls = 1) const;
std::vector<int> getDim(void) const;
int getDim(const unsigned int mu) const;
unsigned int getNd(void) const;
double getVolume(void) const;
// random number generator
void setSeed(const std::vector<int> &seed);
GridParallelRNG * get4dRng(void) const;
// general memory management
void addObject(const std::string name,
@ -170,22 +155,22 @@ public:
void printContent(void) const;
private:
// general
double vol_;
bool protect_{true};
double vol_;
bool protect_{true};
// grids
std::vector<int> dim_;
std::map<FineGridKey, GridPt> grid4d_;
std::map<FineGridKey, GridPt> grid5d_;
std::map<FineGridKey, GridRbPt> gridRb4d_;
std::map<FineGridKey, GridRbPt> gridRb5d_;
std::map<CoarseGridKey, GridPt> gridCoarse4d_;
std::map<CoarseGridKey, GridPt> gridCoarse5d_;
unsigned int nd_;
std::vector<int> dim_;
GridPt grid4d_;
std::map<unsigned int, GridPt> grid5d_;
GridRbPt gridRb4d_;
std::map<unsigned int, GridRbPt> gridRb5d_;
std::map<std::vector<int>, GridPt> gridCoarse4d_;
std::map<std::vector<int>, GridPt> gridCoarse5d_;
unsigned int nd_;
// random number generator
RngPt rng4d_;
RngPt rng4d_;
// object store
std::vector<ObjInfo> object_;
std::map<std::string, unsigned int> objectAddress_;
std::vector<ObjInfo> object_;
std::map<std::string, unsigned int> objectAddress_;
};
/******************************************************************************
@ -219,219 +204,6 @@ void Holder<T>::reset(T *pt)
/******************************************************************************
* Environment template implementation *
******************************************************************************/
// grids ///////////////////////////////////////////////////////////////////////
#define HADRONS_DUMP_GRID(...)\
LOG(Debug) << "New grid " << (__VA_ARGS__) << std::endl;\
LOG(Debug) << " - cb : " << (__VA_ARGS__)->_isCheckerBoarded << std::endl;\
LOG(Debug) << " - fdim: " << (__VA_ARGS__)->_fdimensions << std::endl;\
LOG(Debug) << " - gdim: " << (__VA_ARGS__)->_gdimensions << std::endl;\
LOG(Debug) << " - ldim: " << (__VA_ARGS__)->_ldimensions << std::endl;\
LOG(Debug) << " - rdim: " << (__VA_ARGS__)->_rdimensions << std::endl;
template <typename VType>
void Environment::createGrid(const unsigned int Ls)
{
size_t hash = typeHash<VType>();
if (grid4d_.find({hash, 1}) == grid4d_.end())
{
grid4d_[{hash, 1}].reset(
SpaceTimeGrid::makeFourDimGrid(getDim(),
GridDefaultSimd(getNd(), VType::Nsimd()),
GridDefaultMpi()));
HADRONS_DUMP_GRID(grid4d_[{hash, 1}].get());
gridRb4d_[{hash, 1}].reset(
SpaceTimeGrid::makeFourDimRedBlackGrid(grid4d_[{hash, 1}].get()));
HADRONS_DUMP_GRID(gridRb4d_[{hash, 1}].get());
}
if (grid5d_.find({hash, Ls}) == grid5d_.end())
{
auto g = grid4d_[{hash, 1}].get();
grid5d_[{hash, Ls}].reset(SpaceTimeGrid::makeFiveDimGrid(Ls, g));
HADRONS_DUMP_GRID(grid5d_[{hash, Ls}].get());
gridRb5d_[{hash, Ls}].reset(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, g));
HADRONS_DUMP_GRID(gridRb5d_[{hash, Ls}].get());
}
}
template <typename VType>
void Environment::createCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls)
{
int nd = getNd();
std::vector<int> fineDim = getDim(), coarseDim(nd);
unsigned int cLs;
auto key4d = blockSize, key5d = blockSize;
size_t hash = typeHash<VType>();
createGrid(Ls);
for (int d = 0; d < coarseDim.size(); d++)
{
coarseDim[d] = fineDim[d]/blockSize[d];
if (coarseDim[d]*blockSize[d] != fineDim[d])
{
HADRONS_ERROR(Size, "Fine dimension " + std::to_string(d)
+ " (" + std::to_string(fineDim[d])
+ ") not divisible by coarse dimension ("
+ std::to_string(coarseDim[d]) + ")");
}
}
if (blockSize.size() > nd)
{
cLs = Ls/blockSize[nd];
if (cLs*blockSize[nd] != Ls)
{
HADRONS_ERROR(Size, "Fine Ls (" + std::to_string(Ls)
+ ") not divisible by coarse Ls ("
+ std::to_string(cLs) + ")");
}
}
else
{
cLs = Ls;
}
key4d.resize(nd);
key5d.push_back(Ls);
CoarseGridKey hkey4d = {hash, key4d}, hkey5d = {hash, key5d};
if (gridCoarse4d_.find(hkey4d) == gridCoarse4d_.end())
{
gridCoarse4d_[hkey4d].reset(
SpaceTimeGrid::makeFourDimGrid(coarseDim,
GridDefaultSimd(nd, VType::Nsimd()), GridDefaultMpi()));
HADRONS_DUMP_GRID(gridCoarse4d_[hkey4d].get());
}
if (gridCoarse5d_.find(hkey5d) == gridCoarse5d_.end())
{
gridCoarse5d_[hkey5d].reset(
SpaceTimeGrid::makeFiveDimGrid(cLs, gridCoarse4d_[hkey4d].get()));
HADRONS_DUMP_GRID(gridCoarse5d_[hkey5d].get());
}
}
#undef HADRONS_DUMP_GRID
template <typename VType>
GridCartesian * Environment::getGrid(void)
{
FineGridKey key = {typeHash<VType>(), 1};
auto it = grid4d_.find(key);
if (it != grid4d_.end())
{
return it->second.get();
}
else
{
createGrid<VType>(1);
return grid4d_.at(key).get();
}
}
template <typename VType>
GridRedBlackCartesian * Environment::getRbGrid(void)
{
FineGridKey key = {typeHash<VType>(), 1};
auto it = gridRb4d_.find(key);
if (it != gridRb4d_.end())
{
return it->second.get();
}
else
{
createGrid<VType>(1);
return gridRb4d_.at(key).get();
}
}
template <typename VType>
GridCartesian * Environment::getCoarseGrid(const std::vector<int> &blockSize)
{
std::vector<int> s = blockSize;
s.resize(getNd());
CoarseGridKey key = {typeHash<VType>(), s};
auto it = gridCoarse4d_.find(key);
if (it != gridCoarse4d_.end())
{
return it->second.get();
}
else
{
createCoarseGrid<VType>(blockSize, 1);
return gridCoarse4d_.at(key).get();
}
}
template <typename VType>
GridCartesian * Environment::getGrid(const unsigned int Ls)
{
FineGridKey key = {typeHash<VType>(), Ls};
auto it = grid5d_.find(key);
if (it != grid5d_.end())
{
return it->second.get();
}
else
{
createGrid<VType>(Ls);
return grid5d_.at(key).get();
}
}
template <typename VType>
GridRedBlackCartesian * Environment::getRbGrid(const unsigned int Ls)
{
FineGridKey key = {typeHash<VType>(), Ls};
auto it = gridRb5d_.find(key);
if (it != gridRb5d_.end())
{
return it->second.get();
}
else
{
createGrid<VType>(Ls);
return gridRb5d_.at(key).get();
}
}
template <typename VType>
GridCartesian * Environment::getCoarseGrid(const std::vector<int> &blockSize,
const unsigned int Ls)
{
std::vector<int> s = blockSize;
s.push_back(Ls);
CoarseGridKey key = {typeHash<VType>(), s};
auto it = gridCoarse5d_.find(key);
if (it != gridCoarse5d_.end())
{
return it->second.get();
}
else
{
createCoarseGrid<VType>(blockSize, Ls);
return gridCoarse5d_.at(key).get();
}
}
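// Illustrative usage (hypothetical caller code, with vComplexF and Ls = 12 as
// example parameters): the typeHash-keyed caches above give every SIMD vector
// type its own lazily created grid set, so single- and double-precision actions
// can coexist within one run.
//
//     GridCartesian         *g4  = env.getGrid<vComplexF>();        // key {typeHash<vComplexF>(), 1}
//     GridCartesian         *g5  = env.getGrid<vComplexF>(12);      // key {typeHash<vComplexF>(), 12}
//     GridRedBlackCartesian *rb5 = env.getRbGrid<vComplexF>(12);
//     GridCartesian         *gc5 = env.getCoarseGrid<vComplexF>({4, 4, 4, 4}, 12);
//
// Module code reaches the same grids through the envGetGrid/envGetRbGrid macros,
// which deduce the vector type from the lattice type.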
// general memory management ///////////////////////////////////////////////////
template <typename B, typename T, typename ... Ts>
void Environment::createDerivedObject(const std::string name,
@ -459,21 +231,21 @@ void Environment::createDerivedObject(const std::string name,
object_[address].Ls = Ls;
object_[address].data.reset(new Holder<B>(new T(std::forward<Ts>(args)...)));
object_[address].size = MemoryProfiler::stats->maxAllocated - initMem;
object_[address].type = typeIdPt<B>();
object_[address].derivedType = typeIdPt<T>();
object_[address].type = &typeid(B);
object_[address].derivedType = &typeid(T);
if (MemoryProfiler::stats == &memStats)
{
MemoryProfiler::stats = nullptr;
}
}
// object already exists, no error if it is a cache, error otherwise
else if ((object_[address].storage != Storage::cache) or
(object_[address].storage != storage) or
(object_[address].name != name) or
(typeHash(object_[address].type) != typeHash<B>()) or
(typeHash(object_[address].derivedType) != typeHash<T>()))
else if ((object_[address].storage != Storage::cache) or
(object_[address].storage != storage) or
(object_[address].name != name) or
(object_[address].type != &typeid(B)) or
(object_[address].derivedType != &typeid(T)))
{
HADRONS_ERROR_REF(ObjectDefinition, "object '" + name + "' already allocated", address);
HADRONS_ERROR(Definition, "object '" + name + "' already allocated");
}
}
@ -507,31 +279,28 @@ T * Environment::getDerivedObject(const unsigned int address) const
}
else
{
HADRONS_ERROR_REF(ObjectType, "object with address " +
std::to_string(address) +
HADRONS_ERROR(Definition, "object with address " + std::to_string(address) +
" cannot be casted to '" + typeName(&typeid(T)) +
"' (has type '" + typeName(&typeid(h->get())) + "')", address);
"' (has type '" + typeName(&typeid(h->get())) + "')");
}
}
}
else
{
HADRONS_ERROR_REF(ObjectType, "object with address " +
std::to_string(address) +
HADRONS_ERROR(Definition, "object with address " + std::to_string(address) +
" does not have type '" + typeName(&typeid(B)) +
"' (has type '" + getObjectType(address) + "')", address);
"' (has type '" + getObjectType(address) + "')");
}
}
else
{
HADRONS_ERROR_REF(ObjectDefinition, "object with address " +
std::to_string(address) + " is empty", address);
HADRONS_ERROR(Definition, "object with address " + std::to_string(address) +
" is empty");
}
}
else
{
HADRONS_ERROR_REF(ObjectDefinition, "no object with address " +
std::to_string(address), address);
HADRONS_ERROR(Definition, "no object with address " + std::to_string(address));
}
}
@ -569,8 +338,7 @@ bool Environment::isObjectOfType(const unsigned int address) const
}
else
{
HADRONS_ERROR_REF(ObjectDefinition, "no object with address "
+ std::to_string(address), address);
HADRONS_ERROR(Definition, "no object with address " + std::to_string(address));
}
}

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.cc
Source file: extras/Hadrons/Exceptions.cc
Copyright (C) 2015-2018
@ -26,50 +26,38 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Exceptions.hpp>
#include <Hadrons/VirtualMachine.hpp>
#include <Hadrons/Module.hpp>
#include <Grid/Hadrons/Exceptions.hpp>
#include <Grid/Hadrons/VirtualMachine.hpp>
#include <Grid/Hadrons/Module.hpp>
#ifndef ERR_SUFF
#define ERR_SUFF " (" + loc + ")"
#endif
#define CTOR_EXC(name, init) \
#define CONST_EXC(name, init) \
name::name(std::string msg, std::string loc)\
:init\
{}
#define CTOR_EXC_REF(name, init) \
name::name(std::string msg, std::string loc, const unsigned int address)\
:init\
{}
using namespace Grid;
using namespace Hadrons;
using namespace Exceptions;
// backtrace cache
std::vector<std::string> Grid::Hadrons::Exceptions::backtraceStr;
// logic errors
CTOR_EXC(Logic, logic_error(msg + ERR_SUFF))
CTOR_EXC(Definition, Logic("definition error: " + msg, loc))
CTOR_EXC(Implementation, Logic("implementation error: " + msg, loc))
CTOR_EXC(Range, Logic("range error: " + msg, loc))
CTOR_EXC(Size, Logic("size error: " + msg, loc))
CONST_EXC(Logic, logic_error(msg + ERR_SUFF))
CONST_EXC(Definition, Logic("definition error: " + msg, loc))
CONST_EXC(Implementation, Logic("implementation error: " + msg, loc))
CONST_EXC(Range, Logic("range error: " + msg, loc))
CONST_EXC(Size, Logic("size error: " + msg, loc))
// runtime errors
CTOR_EXC(Runtime, runtime_error(msg + ERR_SUFF))
CTOR_EXC(Argument, Runtime("argument error: " + msg, loc))
CTOR_EXC(Io, Runtime("IO error: " + msg, loc))
CTOR_EXC(Memory, Runtime("memory error: " + msg, loc))
CTOR_EXC(Parsing, Runtime("parsing error: " + msg, loc))
CTOR_EXC(Program, Runtime("program error: " + msg, loc))
CTOR_EXC(System, Runtime("system error: " + msg, loc))
// virtual machine errors
CTOR_EXC_REF(ObjectDefinition, RuntimeRef("object definition error: " + msg, loc, address));
CTOR_EXC_REF(ObjectType, RuntimeRef("object type error: " + msg, loc, address));
CONST_EXC(Runtime, runtime_error(msg + ERR_SUFF))
CONST_EXC(Argument, Runtime("argument error: " + msg, loc))
CONST_EXC(Io, Runtime("IO error: " + msg, loc))
CONST_EXC(Memory, Runtime("memory error: " + msg, loc))
CONST_EXC(Parsing, Runtime("parsing error: " + msg, loc))
CONST_EXC(Program, Runtime("program error: " + msg, loc))
CONST_EXC(System, Runtime("system error: " + msg, loc))
// abort functions
void Grid::Hadrons::Exceptions::abort(const std::exception& e)
@ -86,15 +74,6 @@ void Grid::Hadrons::Exceptions::abort(const std::exception& e)
<< std::endl;
}
LOG(Error) << e.what() << std::endl;
if (!backtraceStr.empty())
{
LOG(Error) << "-- BACKTRACE --------------" << std::endl;
for (auto &s: backtraceStr)
{
LOG(Error) << s << std::endl;
}
LOG(Error) << "---------------------------" << std::endl;
}
LOG(Error) << "Aborting program" << std::endl;
Grid_finalize();

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Exceptions.hpp
Source file: extras/Hadrons/Exceptions.hpp
Copyright (C) 2015-2018
@ -30,41 +30,15 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define Hadrons_Exceptions_hpp_
#include <stdexcept>
#include <execinfo.h>
#ifndef Hadrons_Global_hpp_
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
#endif
#define HADRONS_SRC_LOC std::string(__FUNCTION__) + " at " \
+ std::string(__FILE__) + ":" + std::to_string(__LINE__)
#define HADRONS_BACKTRACE_MAX 128
#ifdef HAVE_EXECINFO_H
#define HADRONS_CACHE_BACKTRACE \
{\
void* _callstack[HADRONS_BACKTRACE_MAX];\
int _i, _frames = backtrace(_callstack, HADRONS_BACKTRACE_MAX);\
char** _strs = backtrace_symbols(_callstack, _frames);\
Grid::Hadrons::Exceptions::backtraceStr.clear();\
for (_i = 0; _i < _frames; ++_i)\
{\
Hadrons::Exceptions::backtraceStr.push_back(std::string(_strs[_i]));\
}\
free(_strs);\
}
#else
#define HADRONS_CACHE_BACKTRACE \
Grid::Hadrons::Exceptions::backtraceStr.clear();\
Grid::Hadrons::Exceptions::backtraceStr.push_back("<backtrace not supported>");
#endif
#define HADRONS_ERROR(exc, msg)\
HADRONS_CACHE_BACKTRACE \
throw(Exceptions::exc(msg, HADRONS_SRC_LOC));
#define HADRONS_ERROR_REF(exc, msg, address)\
HADRONS_CACHE_BACKTRACE \
throw(Exceptions::exc(msg, HADRONS_SRC_LOC, address));
#define DECL_EXC(name, base) \
class name: public base\
{\
@ -72,20 +46,10 @@ public:\
name(std::string msg, std::string loc);\
}
#define DECL_EXC_REF(name, base) \
class name: public base\
{\
public:\
name(std::string msg, std::string loc, const unsigned int address);\
}
BEGIN_HADRONS_NAMESPACE
namespace Exceptions
{
// backtrace cache
extern std::vector<std::string> backtraceStr;
// logic errors
DECL_EXC(Logic, std::logic_error);
DECL_EXC(Definition, Logic);
@ -102,24 +66,6 @@ namespace Exceptions
DECL_EXC(Program, Runtime);
DECL_EXC(System, Runtime);
// virtual machine errors
class RuntimeRef: public Runtime
{
public:
RuntimeRef(std::string msg, std::string loc, const unsigned int address)
: Runtime(msg, loc), address_(address)
{}
unsigned int getAddress(void) const
{
return address_;
}
private:
unsigned int address_;
};
DECL_EXC_REF(ObjectDefinition, RuntimeRef);
DECL_EXC_REF(ObjectType, RuntimeRef);
// abort functions
void abort(const std::exception& e);
}

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Factory.hpp
Source file: extras/Hadrons/Factory.hpp
Copyright (C) 2015-2018
@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_Factory_hpp_
#define Hadrons_Factory_hpp_
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/GeneticScheduler.hpp
Source file: extras/Hadrons/GeneticScheduler.hpp
Copyright (C) 2015-2018
@ -29,8 +29,8 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_GeneticScheduler_hpp_
#define Hadrons_GeneticScheduler_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Graph.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Graph.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.cc
Source file: extras/Hadrons/Global.cc
Copyright (C) 2015-2018
@ -26,7 +26,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
using namespace Grid;
using namespace QCD;
@ -72,11 +72,6 @@ void Hadrons::initLogger(void)
}
// type utilities //////////////////////////////////////////////////////////////
size_t Hadrons::typeHash(const std::type_info *info)
{
return info->hash_code();
}
constexpr unsigned int maxNameSize = 1024u;
std::string Hadrons::typeName(const std::type_info *info)
@ -178,31 +173,3 @@ void Hadrons::makeFileDir(const std::string filename, GridBase *g)
}
}
}
void Hadrons::printTimeProfile(const std::map<std::string, GridTime> &timing,
GridTime total)
{
typedef decltype(total.count()) Count;
std::map<Count, std::string, std::greater<Count>> rtiming;
const double dtotal = static_cast<double>(total.count());
auto cf = std::cout.flags();
auto p = std::cout.precision();
unsigned int width = 0;
for (auto &t: timing)
{
width = std::max(width, static_cast<unsigned int>(t.first.length()));
rtiming[t.second.count()] = t.first;
}
for (auto &rt: rtiming)
{
LOG(Message) << std::setw(width) << rt.second << ": "
<< rt.first << " us (" << std::fixed
<< std::setprecision(1)
<< static_cast<double>(rt.first)/dtotal*100 << "%)"
<< std::endl;
}
std::cout.flags(cf);
std::cout.precision(p);
}
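The descending ordering above comes from re-keying the name-to-time map by the raw count with a std::greater comparator. A minimal standalone sketch of that idiom (timer names and values are hypothetical; note that two entries with identical counts collapse onto one key, as in the original):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    int main(void)
    {
        // forward map: timer name -> elapsed microseconds
        std::map<std::string, long> timing = {{"setup", 120}, {"solver", 5400}, {"io", 800}};
        // invert onto the count with std::greater so iteration runs largest-first
        std::map<long, std::string, std::greater<long>> rtiming;
        for (auto &t: timing)
        {
            rtiming[t.second] = t.first;
        }
        for (auto &rt: rtiming)
        {
            std::cout << rt.second << ": " << rt.first << " us" << std::endl;
        }
        return 0;
    }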

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Global.hpp
Source file: extras/Hadrons/Global.hpp
Copyright (C) 2015-2018
@ -62,56 +62,35 @@ using Grid::operator>>;
#define END_MODULE_NAMESPACE }
#define _HADRONS_IMPL(impl, sub) impl##sub
#define HADRONS_IMPL(impl, sub) _HADRONS_IMPL(impl, sub)
#ifndef FIMPLBASE
#define FIMPLBASE WilsonImpl
#ifndef FIMPL
#define FIMPL WilsonImplR
#endif
#define FIMPL HADRONS_IMPL(FIMPLBASE, R)
#define FIMPLF HADRONS_IMPL(FIMPLBASE, F)
#define FIMPLD HADRONS_IMPL(FIMPLBASE, D)
#ifndef ZFIMPLBASE
#define ZFIMPLBASE ZWilsonImpl
#ifndef ZFIMPL
#define ZFIMPL ZWilsonImplR
#endif
#define ZFIMPL HADRONS_IMPL(ZFIMPLBASE, R)
#define ZFIMPLF HADRONS_IMPL(ZFIMPLBASE, F)
#define ZFIMPLD HADRONS_IMPL(ZFIMPLBASE, D)
#ifndef SIMPLBASE
#define SIMPLBASE ScalarImplC
#ifndef SIMPL
#define SIMPL ScalarImplCR
#endif
#define SIMPL HADRONS_IMPL(SIMPLBASE, R)
#define SIMPLF HADRONS_IMPL(SIMPLBASE, F)
#define SIMPLD HADRONS_IMPL(SIMPLBASE, D)
#ifndef GIMPLBASE
#define GIMPLBASE PeriodicGimpl
#ifndef GIMPL
#define GIMPL PeriodicGimplR
#endif
#define GIMPL HADRONS_IMPL(GIMPLBASE, R)
#define GIMPLF HADRONS_IMPL(GIMPLBASE, F)
#define GIMPLD HADRONS_IMPL(GIMPLBASE, D)
BEGIN_HADRONS_NAMESPACE
// type aliases
#define BASIC_TYPE_ALIASES(Impl, suffix)\
typedef typename Impl::Field ScalarField##suffix;\
typedef typename Impl::PropagatorField PropagatorField##suffix;\
typedef typename Impl::SitePropagator::scalar_object SitePropagator##suffix;\
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;
#define FERM_TYPE_ALIASES(FImpl, suffix)\
BASIC_TYPE_ALIASES(FImpl, suffix);\
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef typename FImpl::ComplexField ComplexField##suffix;
typedef FermionOperator<FImpl> FMat##suffix; \
typedef typename FImpl::FermionField FermionField##suffix; \
typedef typename FImpl::PropagatorField PropagatorField##suffix; \
typedef typename FImpl::SitePropagator::scalar_object SitePropagator##suffix; \
typedef std::vector<SitePropagator##suffix> SlicedPropagator##suffix;
#define GAUGE_TYPE_ALIASES(GImpl, suffix)\
typedef typename GImpl::GaugeField GaugeField##suffix;
#define GAUGE_TYPE_ALIASES(FImpl, suffix)\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;
#define SCALAR_TYPE_ALIASES(SImpl, suffix)\
typedef typename SImpl::Field ScalarField##suffix;\
typedef typename SImpl::Field PropagatorField##suffix;
#define SOLVER_TYPE_ALIASES(FImpl, suffix)\
typedef Solver<FImpl> Solver##suffix;
@ -120,6 +99,10 @@ typedef Solver<FImpl> Solver##suffix;
typedef std::function<SlicedPropagator##suffix\
(const PropagatorField##suffix &)> SinkFn##suffix;
#define FG_TYPE_ALIASES(FImpl, suffix)\
FERM_TYPE_ALIASES(FImpl, suffix)\
GAUGE_TYPE_ALIASES(FImpl, suffix)
// logger
class HadronsLogger: public Logger
{
@ -172,28 +155,14 @@ const std::type_info * typeIdPt(const T &x)
return &typeid(x);
}
std::string typeName(const std::type_info *info);
template <typename T>
const std::type_info * typeIdPt(void)
{
return &typeid(T);
}
size_t typeHash(const std::type_info *info);
template <typename T>
size_t typeHash(const T &x)
{
return typeHash(typeIdPt(x));
}
template <typename T>
size_t typeHash(void)
{
return typeHash(typeIdPt<T>());
}
std::string typeName(const std::type_info *info);
template <typename T>
std::string typeName(const T &x)
{
@ -237,19 +206,13 @@ void makeFileDir(const std::string filename, GridBase *g);
#define _HADRONS_SCHUR_SOLVE_(conv) SchurRedBlack##conv##Solve
#define HADRONS_SCHUR_SOLVE(conv) _HADRONS_SCHUR_SOLVE_(conv)
#define HADRONS_DEFAULT_SCHUR_SOLVE HADRONS_SCHUR_SOLVE(HADRONS_DEFAULT_SCHUR)
#define _HADRONS_SCHUR_A2A_(conv) A2AVectorsSchur##conv
#define HADRONS_SCHUR_A2A(conv) _HADRONS_SCHUR_A2A_(conv)
#define HADRONS_DEFAULT_SCHUR_A2A HADRONS_SCHUR_A2A(HADRONS_DEFAULT_SCHUR)
// stringify macro
#define _HADRONS_STR(x) #x
#define HADRONS_STR(x) _HADRONS_STR(x)
// pretty print time profile
void printTimeProfile(const std::map<std::string, GridTime> &timing, GridTime total);
END_HADRONS_NAMESPACE
#include <Hadrons/Exceptions.hpp>
#include <Grid/Hadrons/Exceptions.hpp>
#endif // Hadrons_Global_hpp_

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Graph.hpp
Source file: extras/Hadrons/Graph.hpp
Copyright (C) 2015-2018
@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_Graph_hpp_
#define Hadrons_Graph_hpp_
#include <Hadrons/Global.hpp>
#include <Grid/Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/HadronsXmlRun.cc
Source file: extras/Hadrons/HadronsXmlRun.cc
Copyright (C) 2015-2018
@ -26,7 +26,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Application.hpp>
#include <Grid/Hadrons/Application.hpp>
using namespace Grid;
using namespace QCD;

View File

@ -1,6 +1,5 @@
SUBDIRS = . Utilities
lib_LIBRARIES = libHadrons.a
bin_PROGRAMS = HadronsXmlRun
include modules.inc
@ -12,14 +11,12 @@ libHadrons_a_SOURCES = \
Global.cc \
Module.cc \
VirtualMachine.cc
libHadrons_adir = $(includedir)/Hadrons
libHadrons_adir = $(pkgincludedir)/Hadrons
nobase_libHadrons_a_HEADERS = \
$(modules_hpp) \
A2AVectors.hpp \
A2AMatrix.hpp \
AllToAllVectors.hpp \
AllToAllReduction.hpp \
Application.hpp \
DilutedNoise.hpp \
DiskVector.hpp \
EigenPack.hpp \
Environment.hpp \
Exceptions.hpp \
@ -32,3 +29,6 @@ nobase_libHadrons_a_HEADERS = \
ModuleFactory.hpp \
Solver.hpp \
VirtualMachine.hpp
HadronsXmlRun_SOURCES = HadronsXmlRun.cc
HadronsXmlRun_LDADD = libHadrons.a -lGrid

View File

@ -2,12 +2,11 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AMesonField.cc
Source file: extras/Hadrons/Module.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -26,11 +25,37 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Grid/Hadrons/Module.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TA2AMesonField<FIMPL>;
template class Grid::Hadrons::MContraction::TA2AMesonField<ZFIMPL>;
/******************************************************************************
* ModuleBase implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
ModuleBase::ModuleBase(const std::string name)
: name_(name)
{}
// access //////////////////////////////////////////////////////////////////////
std::string ModuleBase::getName(void) const
{
return name_;
}
// get factory registration name if available
std::string ModuleBase::getRegisteredName(void)
{
HADRONS_ERROR(Definition, "module '" + getName() + "' has no registered type"
+ " in the factory");
}
// execution ///////////////////////////////////////////////////////////////////
void ModuleBase::operator()(void)
{
setup();
execute();
}

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Module.hpp
Source file: extras/Hadrons/Module.hpp
Copyright (C) 2015-2018
@ -29,8 +29,8 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_Module_hpp_
#define Hadrons_Module_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/VirtualMachine.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/VirtualMachine.hpp>
BEGIN_HADRONS_NAMESPACE
@ -65,26 +65,7 @@ extern template class base;\
MODULE_REGISTER(mod, ARG(base), ns);
#define ARG(...) __VA_ARGS__
#define HADRONS_MACRO_REDIRECT_12(arg1, arg2, macro, ...) macro
#define HADRONS_MACRO_REDIRECT_23(arg1, arg2, arg3, macro, ...) macro
#define envGetGrid4(latticeType)\
env().template getGrid<typename latticeType::vector_type>()
#define envGetGrid5(latticeType, Ls)\
env().template getGrid<typename latticeType::vector_type>(Ls)
#define envGetGrid(...)\
HADRONS_MACRO_REDIRECT_12(__VA_ARGS__, envGetGrid5, envGetGrid4)(__VA_ARGS__)
#define envGetRbGrid4(latticeType)\
env().template getRbGrid<typename latticeType::vector_type>()
#define envGetRbGrid5(latticeType, Ls)\
env().template getRbGrid<typename latticeType::vector_type>(Ls)
#define envGetRbGrid(...)\
HADRONS_MACRO_REDIRECT_12(__VA_ARGS__, envGetRbGrid5, envGetRbGrid4)(__VA_ARGS__)
#define MACRO_REDIRECT(arg1, arg2, arg3, macro, ...) macro
#define envGet(type, name)\
*env().template getObject<type>(name)
@ -105,38 +86,38 @@ env().template createObject<type>(name, Environment::Storage::object, Ls, __VA_A
env().template createDerivedObject<base, type>(name, Environment::Storage::object, Ls, __VA_ARGS__)
#define envCreateLat4(type, name)\
envCreate(type, name, 1, envGetGrid(type))
envCreate(type, name, 1, env().getGrid())
#define envCreateLat5(type, name, Ls)\
envCreate(type, name, Ls, envGetGrid(type, Ls))
envCreate(type, name, Ls, env().getGrid(Ls))
#define envCreateLat(...)\
HADRONS_MACRO_REDIRECT_23(__VA_ARGS__, envCreateLat5, envCreateLat4)(__VA_ARGS__)
MACRO_REDIRECT(__VA_ARGS__, envCreateLat5, envCreateLat4)(__VA_ARGS__)
#define envCache(type, name, Ls, ...)\
env().template createObject<type>(name, Environment::Storage::cache, Ls, __VA_ARGS__)
#define envCacheLat4(type, name)\
envCache(type, name, 1, envGetGrid(type))
envCache(type, name, 1, env().getGrid())
#define envCacheLat5(type, name, Ls)\
envCache(type, name, Ls, envGetGrid(type, Ls))
envCache(type, name, Ls, env().getGrid(Ls))
#define envCacheLat(...)\
HADRONS_MACRO_REDIRECT_23(__VA_ARGS__, envCacheLat5, envCacheLat4)(__VA_ARGS__)
MACRO_REDIRECT(__VA_ARGS__, envCacheLat5, envCacheLat4)(__VA_ARGS__)
#define envTmp(type, name, Ls, ...)\
env().template createObject<type>(getName() + "_tmp_" + name, \
Environment::Storage::temporary, Ls, __VA_ARGS__)
#define envTmpLat4(type, name)\
envTmp(type, name, 1, envGetGrid(type))
envTmp(type, name, 1, env().getGrid())
#define envTmpLat5(type, name, Ls)\
envTmp(type, name, Ls, envGetGrid(type, Ls))
envTmp(type, name, Ls, env().getGrid(Ls))
#define envTmpLat(...)\
HADRONS_MACRO_REDIRECT_23(__VA_ARGS__, envTmpLat5, envTmpLat4)(__VA_ARGS__)
MACRO_REDIRECT(__VA_ARGS__, envTmpLat5, envTmpLat4)(__VA_ARGS__)
#define saveResult(ioStem, name, result)\
if (env().getGrid()->IsBoss() and !ioStem.empty())\
@ -180,28 +161,13 @@ public:
virtual void execute(void) = 0;
// execution
void operator()(void);
// timers
void startTimer(const std::string &name);
GridTime getTimer(const std::string &name);
double getDTimer(const std::string &name);
void startCurrentTimer(const std::string &name);
void stopTimer(const std::string &name);
void stopCurrentTimer(void);
void stopAllTimers(void);
void resetTimers(void);
std::map<std::string, GridTime> getTimings(void);
protected:
// environment shortcut
DEFINE_ENV_ALIAS;
// virtual machine shortcut
DEFINE_VM_ALIAS;
// RNG seeded from module string
GridParallelRNG &rng4d(void);
private:
std::string makeSeedString(void);
private:
std::string name_, currentTimer_, seed_;
std::map<std::string, GridStopWatch> timer_;
std::string name_;
};
// derived class, templating the parameter class

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/ModuleFactory.hpp
Source file: extras/Hadrons/ModuleFactory.hpp
Copyright (C) 2015-2018
@ -29,9 +29,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_ModuleFactory_hpp_
#define Hadrons_ModuleFactory_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Factory.hpp>
#include <Hadrons/Module.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Factory.hpp>
#include <Grid/Hadrons/Module.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -0,0 +1,62 @@
#include <Grid/Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/ShiftProbe.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Grid/Hadrons/Modules/MScalarSUN/TrPhi.hpp>
#include <Grid/Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Grid/Hadrons/Modules/MScalar/Scalar.hpp>
#include <Grid/Hadrons/Modules/MScalar/ScalarVP.hpp>
#include <Grid/Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Grid/Hadrons/Modules/MScalar/VPCounterTerms.hpp>
#include <Grid/Hadrons/Modules/MLoop/NoiseLoop.hpp>
#include <Grid/Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Grid/Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Grid/Hadrons/Modules/MIO/LoadBinary.hpp>
#include <Grid/Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Grid/Hadrons/Modules/MSink/Smear.hpp>
#include <Grid/Hadrons/Modules/MSink/Point.hpp>
#include <Grid/Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Grid/Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Grid/Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Grid/Hadrons/Modules/MGauge/Random.hpp>
#include <Grid/Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Grid/Hadrons/Modules/MGauge/Unit.hpp>
#include <Grid/Hadrons/Modules/MGauge/StochEm.hpp>
#include <Grid/Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Grid/Hadrons/Modules/MUtilities/TestSeqGamma.hpp>
#include <Grid/Hadrons/Modules/MUtilities/TestSeqConserved.hpp>
#include <Grid/Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Grid/Hadrons/Modules/MSource/Z2.hpp>
#include <Grid/Hadrons/Modules/MSource/Wall.hpp>
#include <Grid/Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Grid/Hadrons/Modules/MSource/Point.hpp>
#include <Grid/Hadrons/Modules/MContraction/MesonFieldGamma.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
#include <Grid/Hadrons/Modules/MContraction/Baryon.hpp>
#include <Grid/Hadrons/Modules/MContraction/A2APionField.hpp>
#include <Grid/Hadrons/Modules/MContraction/Meson.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakNeutral4ptDisc.hpp>
#include <Grid/Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Grid/Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp>
#include <Grid/Hadrons/Modules/MContraction/A2AMeson.hpp>
#include <Grid/Hadrons/Modules/MContraction/WardIdentity.hpp>
#include <Grid/Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Grid/Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Grid/Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Grid/Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Grid/Hadrons/Modules/MAction/Wilson.hpp>
#include <Grid/Hadrons/Modules/MAction/DWF.hpp>
#include <Grid/Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Grid/Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Grid/Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Grid/Hadrons/Modules/MSolver/A2AVectors.hpp>

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/DWF.cc
Source file: extras/Hadrons/Modules/MAction/DWF.cc
Copyright (C) 2015-2018
@ -25,11 +25,11 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Grid/Hadrons/Modules/MAction/DWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TDWF<FIMPL>;
template class Grid::Hadrons::MAction::TDWF<FIMPLF>;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/DWF.hpp
Source file: extras/Hadrons/Modules/MAction/DWF.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MAction_DWF_hpp_
#define Hadrons_MAction_DWF_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -56,7 +56,7 @@ template <typename FImpl>
class TDWF: public Module<DWFPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TDWF(const std::string name);
@ -72,8 +72,8 @@ protected:
virtual void execute(void);
};
extern template class TDWF<FIMPL>;
MODULE_REGISTER_TMP(DWF, TDWF<FIMPL>, MAction);
MODULE_REGISTER_TMP(DWFF, TDWF<FIMPLF>, MAction);
/******************************************************************************
* DWF template implementation *
@ -112,11 +112,12 @@ void TDWF<FImpl>::setup(void)
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
auto &grb4 = *envGetRbGrid(FermionField);
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
env().createGrid(par().Ls);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &g4 = *env().getGrid();
auto &grb4 = *env().getRbGrid();
auto &g5 = *env().getGrid(par().Ls);
auto &grb5 = *env().getRbGrid(par().Ls);
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename DomainWallFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, DomainWallFermion<FImpl>, getName(), par().Ls, U, g5,

View File

@ -0,0 +1,7 @@
#include <Grid/Hadrons/Modules/MAction/MobiusDWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TMobiusDWF<FIMPL>;

View File

@ -1,36 +1,9 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/MobiusDWF.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MAction_MobiusDWF_hpp_
#define Hadrons_MAction_MobiusDWF_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -56,7 +29,7 @@ template <typename FImpl>
class TMobiusDWF: public Module<MobiusDWFPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TMobiusDWF(const std::string name);
@ -72,7 +45,6 @@ public:
};
MODULE_REGISTER_TMP(MobiusDWF, TMobiusDWF<FIMPL>, MAction);
MODULE_REGISTER_TMP(MobiusDWFF, TMobiusDWF<FIMPLF>, MAction);
/******************************************************************************
* TMobiusDWF implementation *
@ -112,11 +84,12 @@ void TMobiusDWF<FImpl>::setup(void)
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
auto &grb4 = *envGetRbGrid(FermionField);
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
env().createGrid(par().Ls);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &g4 = *env().getGrid();
auto &grb4 = *env().getRbGrid();
auto &g5 = *env().getGrid(par().Ls);
auto &grb5 = *env().getRbGrid(par().Ls);
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename MobiusFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, MobiusFermion<FImpl>, getName(), par().Ls, U, g5,

View File

@ -0,0 +1,7 @@
#include <Grid/Hadrons/Modules/MAction/ScaledDWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TScaledDWF<FIMPL>;

View File

@ -1,36 +1,9 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ScaledDWF.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MAction_ScaledDWF_hpp_
#define Hadrons_MAction_ScaledDWF_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -55,7 +28,7 @@ template <typename FImpl>
class TScaledDWF: public Module<ScaledDWFPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TScaledDWF(const std::string name);
@ -71,7 +44,6 @@ public:
};
MODULE_REGISTER_TMP(ScaledDWF, TScaledDWF<FIMPL>, MAction);
MODULE_REGISTER_TMP(ScaledDWFF, TScaledDWF<FIMPLF>, MAction);
/******************************************************************************
* TScaledDWF implementation *
@ -111,11 +83,12 @@ void TScaledDWF<FImpl>::setup(void)
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
auto &grb4 = *envGetRbGrid(FermionField);
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
env().createGrid(par().Ls);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &g4 = *env().getGrid();
auto &grb4 = *env().getRbGrid();
auto &g5 = *env().getGrid(par().Ls);
auto &grb5 = *env().getRbGrid(par().Ls);
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename MobiusFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, ScaledShamirFermion<FImpl>, getName(), par().Ls, U, g5,

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/Wilson.cc
Source file: extras/Hadrons/Modules/MAction/Wilson.cc
Copyright (C) 2015-2018
@ -25,11 +25,11 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Grid/Hadrons/Modules/MAction/Wilson.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TWilson<FIMPL>;
template class Grid::Hadrons::MAction::TWilson<FIMPLF>;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/Wilson.hpp
Source file: extras/Hadrons/Modules/MAction/Wilson.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MAction_Wilson_hpp_
#define Hadrons_MAction_Wilson_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -54,7 +54,7 @@ template <typename FImpl>
class TWilson: public Module<WilsonPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TWilson(const std::string name);
@ -71,7 +71,6 @@ protected:
};
MODULE_REGISTER_TMP(Wilson, TWilson<FIMPL>, MAction);
MODULE_REGISTER_TMP(WilsonF, TWilson<FIMPLF>, MAction);
/******************************************************************************
* TWilson template implementation *
@ -108,9 +107,9 @@ void TWilson<FImpl>::setup(void)
LOG(Message) << "Fermion boundary conditions: " << par().boundary
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &grid = *envGetGrid(FermionField);
auto &gridRb = *envGetRbGrid(FermionField);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &grid = *env().getGrid();
auto &gridRb = *env().getRbGrid();
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename WilsonFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, WilsonFermion<FImpl>, getName(), 1, U, grid, gridRb,

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/WilsonClover.cc
Source file: extras/Hadrons/Modules/MAction/WilsonClover.cc
Copyright (C) 2015-2018
@ -25,11 +25,11 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Grid/Hadrons/Modules/MAction/WilsonClover.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TWilsonClover<FIMPL>;
template class Grid::Hadrons::MAction::TWilsonClover<FIMPLF>;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/WilsonClover.hpp
Source file: extras/Hadrons/Modules/MAction/WilsonClover.hpp
Copyright (C) 2015-2018
@ -31,9 +31,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MAction_WilsonClover_hpp_
#define Hadrons_MAction_WilsonClover_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -59,7 +59,7 @@ template <typename FImpl>
class TWilsonClover: public Module<WilsonCloverPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TWilsonClover(const std::string name);
@ -75,10 +75,9 @@ public:
};
MODULE_REGISTER_TMP(WilsonClover, TWilsonClover<FIMPL>, MAction);
MODULE_REGISTER_TMP(WilsonCloverF, TWilsonClover<FIMPLF>, MAction);
/******************************************************************************
* TWilsonClover template implementation *
* TWilsonClover template implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
@ -114,14 +113,16 @@ void TWilsonClover<FImpl>::setup(void)
LOG(Message) << "Clover term csw_r: " << par().csw_r
<< " csw_t: " << par().csw_t
<< std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &grid = *envGetGrid(FermionField);
auto &gridRb = *envGetRbGrid(FermionField);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &grid = *env().getGrid();
auto &gridRb = *env().getRbGrid();
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename WilsonCloverFermion<FImpl>::ImplParams implParams(boundary);
envCreateDerived(FMat, WilsonCloverFermion<FImpl>, getName(), 1, U, grid,
gridRb, par().mass, par().csw_r, par().csw_t,
par().clover_anisotropy, implParams);
envCreateDerived(FMat, WilsonCloverFermion<FImpl>, getName(), 1, U, grid, gridRb, par().mass,
par().csw_r,
par().csw_t,
par().clover_anisotropy,
implParams);
}
// execution ///////////////////////////////////////////////////////////////////

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ZMobiusDWF.cc
Source file: extras/Hadrons/Modules/MAction/ZMobiusDWF.cc
Copyright (C) 2015-2018
@ -25,11 +25,11 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Grid/Hadrons/Modules/MAction/ZMobiusDWF.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TZMobiusDWF<ZFIMPL>;
template class Grid::Hadrons::MAction::TZMobiusDWF<ZFIMPLF>;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MAction/ZMobiusDWF.hpp
Source file: extras/Hadrons/Modules/MAction/ZMobiusDWF.hpp
Copyright (C) 2015-2018
@ -28,9 +28,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MAction_ZMobiusDWF_hpp_
#define Hadrons_MAction_ZMobiusDWF_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -57,7 +57,7 @@ template <typename FImpl>
class TZMobiusDWF: public Module<ZMobiusDWFPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
FG_TYPE_ALIASES(FImpl,);
public:
// constructor
TZMobiusDWF(const std::string name);
@ -73,7 +73,6 @@ public:
};
MODULE_REGISTER_TMP(ZMobiusDWF, TZMobiusDWF<ZFIMPL>, MAction);
MODULE_REGISTER_TMP(ZMobiusDWFF, TZMobiusDWF<ZFIMPLF>, MAction);
/******************************************************************************
* TZMobiusDWF implementation *
@ -119,11 +118,11 @@ void TZMobiusDWF<FImpl>::setup(void)
<< std::endl;
env().createGrid(par().Ls);
auto &U = envGet(GaugeField, par().gauge);
auto &g4 = *envGetGrid(FermionField);
auto &grb4 = *envGetRbGrid(FermionField);
auto &g5 = *envGetGrid(FermionField, par().Ls);
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
auto &U = envGet(LatticeGaugeField, par().gauge);
auto &g4 = *env().getGrid();
auto &grb4 = *env().getRbGrid();
auto &g5 = *env().getGrid(par().Ls);
auto &grb5 = *env().getRbGrid(par().Ls);
auto omega = par().omega;
std::vector<Complex> boundary = strToVec<Complex>(par().boundary);
typename ZMobiusFermion<FImpl>::ImplParams implParams(boundary);

View File

@ -0,0 +1,8 @@
#include <Grid/Hadrons/Modules/MContraction/A2AMeson.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TA2AMeson<FIMPL>;
template class Grid::Hadrons::MContraction::TA2AMeson<ZFIMPL>;

View File

@ -0,0 +1,207 @@
#ifndef Hadrons_MContraction_A2AMeson_hpp_
#define Hadrons_MContraction_A2AMeson_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/AllToAllVectors.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* A2AMeson *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
typedef std::pair<Gamma::Algebra, Gamma::Algebra> GammaPair;
class A2AMesonPar : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonPar,
int, Nl,
int, N,
std::string, A2A1,
std::string, A2A2,
std::string, gammas,
std::string, output);
};
template <typename FImpl>
class TA2AMeson : public Module<A2AMesonPar>
{
public:
FERM_TYPE_ALIASES(FImpl, );
SOLVER_TYPE_ALIASES(FImpl, );
typedef A2AModesSchurDiagTwo<typename FImpl::FermionField, FMat, Solver> A2ABase;
class Result : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
Gamma::Algebra, gamma_snk,
Gamma::Algebra, gamma_src,
std::vector<Complex>, corr);
};
public:
// constructor
TA2AMeson(const std::string name);
// destructor
virtual ~TA2AMeson(void){};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
virtual void parseGammaString(std::vector<GammaPair> &gammaList);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER(A2AMeson, ARG(TA2AMeson<FIMPL>), MContraction);
MODULE_REGISTER(ZA2AMeson, ARG(TA2AMeson<ZFIMPL>), MContraction);
/******************************************************************************
* TA2AMeson implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TA2AMeson<FImpl>::TA2AMeson(const std::string name)
: Module<A2AMesonPar>(name)
{
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TA2AMeson<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().A2A1 + "_class", par().A2A2 + "_class"};
in.push_back(par().A2A1 + "_w_high_4d");
in.push_back(par().A2A2 + "_v_high_4d");
return in;
}
template <typename FImpl>
std::vector<std::string> TA2AMeson<FImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
template <typename FImpl>
void TA2AMeson<FImpl>::parseGammaString(std::vector<GammaPair> &gammaList)
{
gammaList.clear();
// Parse individual contractions from input string.
gammaList = strToVec<GammaPair>(par().gammas);
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMeson<FImpl>::setup(void)
{
int nt = env().getDim(Tp);
int N = par().N;
int Ls_ = env().getObjectLs(par().A2A1 + "_class");
envTmp(std::vector<FermionField>, "w1", 1, N, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "v1", 1, N, FermionField(env().getGrid(1)));
envTmpLat(FermionField, "tmpv_5d", Ls_);
envTmpLat(FermionField, "tmpw_5d", Ls_);
envTmp(std::vector<ComplexD>, "MF_x", 1, nt);
envTmp(std::vector<ComplexD>, "MF_y", 1, nt);
envTmp(std::vector<ComplexD>, "tmp", 1, nt);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMeson<FImpl>::execute(void)
{
LOG(Message) << "Computing A2A meson contractions" << std::endl;
Result result;
Gamma g5(Gamma::Algebra::Gamma5);
std::vector<GammaPair> gammaList;
int nt = env().getDim(Tp);
parseGammaString(gammaList);
result.gamma_snk = gammaList[0].first;
result.gamma_src = gammaList[0].second;
result.corr.resize(nt);
int Nl = par().Nl;
int N = par().N;
LOG(Message) << "N for A2A cont: " << N << std::endl;
envGetTmp(std::vector<ComplexD>, MF_x);
envGetTmp(std::vector<ComplexD>, MF_y);
envGetTmp(std::vector<ComplexD>, tmp);
for (unsigned int t = 0; t < nt; ++t)
{
tmp[t] = TensorRemove(MF_x[t] * MF_y[t] * 0.0);
}
Gamma gSnk(gammaList[0].first);
Gamma gSrc(gammaList[0].second);
auto &a2a1_fn = envGet(A2ABase, par().A2A1 + "_class");
envGetTmp(std::vector<FermionField>, w1);
envGetTmp(std::vector<FermionField>, v1);
envGetTmp(FermionField, tmpv_5d);
envGetTmp(FermionField, tmpw_5d);
LOG(Message) << "Finding v and w vectors for N = " << N << std::endl;
for (int i = 0; i < N; i++)
{
a2a1_fn.return_v(i, tmpv_5d, v1[i]);
a2a1_fn.return_w(i, tmpw_5d, w1[i]);
}
LOG(Message) << "Found v and w vectors for N = " << N << std::endl;
for (unsigned int i = 0; i < N; i++)
{
v1[i] = gSnk * v1[i];
}
int ty;
for (unsigned int i = 0; i < N; i++)
{
for (unsigned int j = 0; j < N; j++)
{
mySliceInnerProductVector(MF_x, w1[i], v1[j], Tp);
mySliceInnerProductVector(MF_y, w1[j], v1[i], Tp);
for (unsigned int t = 0; t < nt; ++t)
{
for (unsigned int tx = 0; tx < nt; tx++)
{
ty = (tx + t) % nt;
tmp[t] += TensorRemove((MF_x[tx]) * (MF_y[ty]));
}
}
}
if (i % 10 == 0)
{
LOG(Message) << "MF for i = " << i << " of " << N << std::endl;
}
}
double NTinv = 1.0 / static_cast<double>(nt);
for (unsigned int t = 0; t < nt; ++t)
{
result.corr[t] = NTinv * tmp[t];
}
saveResult(par().output, "meson", result);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2AMeson_hpp_
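A minimal standalone sketch of the two-end time convolution performed in TA2AMeson<FImpl>::execute() above, with MF_x and MF_y standing in for the per-timeslice inner products returned by mySliceInnerProductVector (the function and variable names here are illustrative only):

    #include <complex>
    #include <vector>

    // corr[t] = (1/nt) * sum_tx MF_x[tx] * MF_y[(tx + t) % nt]
    std::vector<std::complex<double>>
    twoEndConvolution(const std::vector<std::complex<double>> &MF_x,
                      const std::vector<std::complex<double>> &MF_y)
    {
        const unsigned int                nt = MF_x.size();
        std::vector<std::complex<double>> corr(nt, 0.);

        for (unsigned int t = 0; t < nt; ++t)
        for (unsigned int tx = 0; tx < nt; ++tx)
        {
            corr[t] += MF_x[tx]*MF_y[(tx + t) % nt];
        }
        for (auto &c: corr)
        {
            c /= static_cast<double>(nt);
        }

        return corr;
    }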

View File

@ -0,0 +1,8 @@
#include <Grid/Hadrons/Modules/MContraction/A2AMesonField.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TA2AMesonField<FIMPL>;
template class Grid::Hadrons::MContraction::TA2AMesonField<ZFIMPL>;

View File

@ -0,0 +1,279 @@
#ifndef Hadrons_MContraction_A2AMesonField_hpp_
#define Hadrons_MContraction_A2AMesonField_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/AllToAllVectors.hpp>
#include <Grid/Hadrons/Modules/MContraction/A2Autils.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* A2AMesonField *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
typedef std::pair<Gamma::Algebra, Gamma::Algebra> GammaPair;
class A2AMesonFieldPar : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonFieldPar,
int, cacheBlock,
int, schurBlock,
int, Nmom,
int, N,
int, Nl,
std::string, A2A,
std::string, output);
};
template <typename FImpl>
class TA2AMesonField : public Module<A2AMesonFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl, );
SOLVER_TYPE_ALIASES(FImpl, );
typedef A2AModesSchurDiagTwo<typename FImpl::FermionField, FMat, Solver> A2ABase;
public:
// constructor
TA2AMesonField(const std::string name);
// destructor
virtual ~TA2AMesonField(void){};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER(A2AMesonField, ARG(TA2AMesonField<FIMPL>), MContraction);
MODULE_REGISTER(ZA2AMesonField, ARG(TA2AMesonField<ZFIMPL>), MContraction);
/******************************************************************************
* TA2AMesonField implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TA2AMesonField<FImpl>::TA2AMesonField(const std::string name)
: Module<A2AMesonFieldPar>(name)
{
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().A2A + "_class"};
in.push_back(par().A2A + "_w_high_4d");
in.push_back(par().A2A + "_v_high_4d");
return in;
}
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::setup(void)
{
auto &a2a = envGet(A2ABase, par().A2A + "_class");
int nt = env().getDim(Tp);
int Nl = par().Nl;
int N = par().N;
int Ls_ = env().getObjectLs(par().A2A + "_class");
// Four D fields
envTmp(std::vector<FermionField>, "w", 1, par().schurBlock, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "v", 1, par().schurBlock, FermionField(env().getGrid(1)));
// 5D tmp
envTmpLat(FermionField, "tmp_5d", Ls_);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::execute(void)
{
LOG(Message) << "Computing A2A meson field" << std::endl;
auto &a2a = envGet(A2ABase, par().A2A + "_class");
// 2+6+4+4 = 16 gammas
// Ordering defined here
std::vector<Gamma::Algebra> gammas ( {
Gamma::Algebra::Gamma5,
Gamma::Algebra::Identity,
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaXGamma5,
Gamma::Algebra::GammaYGamma5,
Gamma::Algebra::GammaZGamma5,
Gamma::Algebra::GammaTGamma5,
Gamma::Algebra::SigmaXY,
Gamma::Algebra::SigmaXZ,
Gamma::Algebra::SigmaXT,
Gamma::Algebra::SigmaYZ,
Gamma::Algebra::SigmaYT,
Gamma::Algebra::SigmaZT
});
///////////////////////////////////////////////
// Square assumption for now Nl = Nr = N
///////////////////////////////////////////////
int nt = env().getDim(Tp);
int nx = env().getDim(Xp);
int ny = env().getDim(Yp);
int nz = env().getDim(Zp);
int N = par().N;
int Nl = par().Nl;
int ngamma = gammas.size();
int schurBlock = par().schurBlock;
int cacheBlock = par().cacheBlock;
int nmom = par().Nmom;
///////////////////////////////////////////////
// Momentum setup
///////////////////////////////////////////////
GridBase *grid = env().getGrid(1);
std::vector<LatticeComplex> phases(nmom,grid);
for(int m=0;m<nmom;m++){
phases[m] = Complex(1.0); // All zero momentum for now
}
Eigen::Tensor<ComplexD,5> mesonField (nmom,ngamma,nt,N,N);
LOG(Message) << "N = Nh+Nl for A2A MesonField is " << N << std::endl;
envGetTmp(std::vector<FermionField>, w);
envGetTmp(std::vector<FermionField>, v);
envGetTmp(FermionField, tmp_5d);
LOG(Message) << "Finding v and w vectors for N = " << N << std::endl;
//////////////////////////////////////////////////////////////////////////
// i,j is the outer loop over schurBlock factors, reusing the 5D matrices
// ii,jj is the second loop over cacheBlock factors for the high-performance contraction
// iii,jjj are the loops within a cacheBlock
// Total index is sum of these i+ii+iii etc...
//////////////////////////////////////////////////////////////////////////
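// Worked example of the index decomposition (hypothetical sizes, not taken
// from the input file): with N = 96, schurBlock = 48, cacheBlock = 8, a mode
// index n decomposes as n = i + ii + iii where
//   i   in {0, 48}              -- SchurBlock origin (5D solve reuse)
//   ii  in {0, 8, 16, ..., 40}  -- cacheBlock origin inside the SchurBlock
//   iii in {0, ..., 7}          -- element inside the cacheBlock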
double flops = 0.0;
double bytes = 0.0;
double vol = nx*ny*nz*nt;
double t_schur=0;
double t_contr=0;
double t_int_0=0;
double t_int_1=0;
double t_int_2=0;
double t_int_3=0;
double t0 = usecond();
int N_i = N;
int N_j = N;
for(int i=0;i<N_i;i+=schurBlock){ //loop over SchurBlocking to suppress 5D matrix overhead
for(int j=0;j<N_j;j+=schurBlock){
///////////////////////////////////////////////////////////////
// Get the W and V vectors for this schurBlock^2 set of terms
///////////////////////////////////////////////////////////////
int N_ii = MIN(N_i-i,schurBlock);
int N_jj = MIN(N_j-j,schurBlock);
t_schur-=usecond();
for(int ii =0;ii < N_ii;ii++) a2a.return_w(i+ii, tmp_5d, w[ii]);
for(int jj =0;jj < N_jj;jj++) a2a.return_v(j+jj, tmp_5d, v[jj]);
t_schur+=usecond();
LOG(Message) << "Found w vectors " << i <<" .. " << i+N_ii-1 << std::endl;
LOG(Message) << "Found v vectors " << j <<" .. " << j+N_jj-1 << std::endl;
///////////////////////////////////////////////////////////////
// Series of cache blocked chunks of the contractions within this SchurBlock
///////////////////////////////////////////////////////////////
for(int ii=0;ii<N_ii;ii+=cacheBlock){
for(int jj=0;jj<N_jj;jj+=cacheBlock){
int N_iii = MIN(N_ii-ii,cacheBlock);
int N_jjj = MIN(N_jj-jj,cacheBlock);
Eigen::Tensor<ComplexD,5> mesonFieldBlocked(nmom,ngamma,nt,N_iii,N_jjj);
t_contr-=usecond();
A2Autils<FImpl>::MesonField(mesonFieldBlocked,
&w[ii],
&v[jj], gammas, phases,Tp);
t_contr+=usecond();
flops += vol * ( 2 * 8.0 + 6.0 + 8.0*nmom) * N_iii*N_jjj*ngamma;
bytes += vol * (12.0 * sizeof(Complex) ) * N_iii*N_jjj
+ vol * ( 2.0 * sizeof(Complex) *nmom ) * N_iii*N_jjj* ngamma;
///////////////////////////////////////////////////////////////
// Copy back to full meson field tensor
///////////////////////////////////////////////////////////////
parallel_for_nest2(int iii=0;iii< N_iii;iii++) {
for(int jjj=0;jjj< N_jjj;jjj++) {
for(int m =0;m< nmom;m++) {
for(int g =0;g< ngamma;g++) {
for(int t =0;t< nt;t++) {
mesonField(m,g,t,i+ii+iii,j+jj+jjj) = mesonFieldBlocked(m,g,t,iii,jjj);
}}}
}}
}}
}}
double nodes=grid->NodeCount();
double t1 = usecond();
LOG(Message) << " Contraction of MesonFields took "<<(t1-t0)/1.0e6<< " seconds " << std::endl;
LOG(Message) << " Schur "<<(t_schur)/1.0e6<< " seconds " << std::endl;
LOG(Message) << " Contr "<<(t_contr)/1.0e6<< " seconds " << std::endl;
/////////////////////////////////////////////////////////////////////////
// Test: Build the pion correlator (two end)
// < PI_ij(t0) PI_ji (t0+t) >
/////////////////////////////////////////////////////////////////////////
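// Concretely, the loops below evaluate
//   corr[t] = (1/nt) * sum_{t0} sum_{i,j} M_{ij}(t0) * M_{ji}(t0+t)
// where M is the zero-momentum gamma5 meson field (the m = 0, g = 0 slice
// selected below).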
std::vector<ComplexD> corr(nt,ComplexD(0.0));
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
int m=0; // first momentum
int g=0; // first gamma in above ordering is gamma5 for pion
for(int t0=0;t0<nt;t0++){
for(int t=0;t<nt;t++){
int tt = (t0+t)%nt;
corr[t] += mesonField(m,g,t0,i,j)* mesonField(m,g,tt,j,i);
}}
}}
for(int t=0;t<nt;t++) corr[t] = corr[t]/ (double)nt;
for(int t=0;t<nt;t++) LOG(Message) << " " << t << " " << corr[t]<<std::endl;
// saveResult(par().output, "meson", result);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2AMesonField_hpp_

View File

@ -0,0 +1,8 @@
#include <Grid/Hadrons/Modules/MContraction/A2APionField.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TA2APionField<FIMPL>;
template class Grid::Hadrons::MContraction::TA2APionField<ZFIMPL>;

View File

@ -0,0 +1,502 @@
#ifndef Hadrons_MContraction_A2APionField_hpp_
#define Hadrons_MContraction_A2APionField_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/AllToAllVectors.hpp>
#include <Grid/Hadrons/Modules/MContraction/A2Autils.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* A2APionField *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
typedef std::pair<Gamma::Algebra, Gamma::Algebra> GammaPair;
class A2APionFieldPar : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2APionFieldPar,
int, cacheBlock,
int, schurBlock,
int, Nmom,
std::string, A2A_i,
std::string, A2A_j,
std::string, output);
};
template <typename FImpl>
class TA2APionField : public Module<A2APionFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl, );
SOLVER_TYPE_ALIASES(FImpl, );
typedef typename FImpl::SiteSpinor vobj;
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef iSpinMatrix<vector_type> SpinMatrix_v;
typedef iSpinMatrix<scalar_type> SpinMatrix_s;
typedef iSinglet<vector_type> Scalar_v;
typedef iSinglet<scalar_type> Scalar_s;
typedef A2AModesSchurDiagTwo<typename FImpl::FermionField, FMat, Solver> A2ABase;
public:
// constructor
TA2APionField(const std::string name);
// destructor
virtual ~TA2APionField(void){};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER(A2APionField, ARG(TA2APionField<FIMPL>), MContraction);
MODULE_REGISTER(ZA2APionField, ARG(TA2APionField<ZFIMPL>), MContraction);
/******************************************************************************
* TA2APionField implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TA2APionField<FImpl>::TA2APionField(const std::string name)
: Module<A2APionFieldPar>(name)
{
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TA2APionField<FImpl>::getInput(void)
{
std::vector<std::string> in;
in.push_back(par().A2A_i + "_class");
in.push_back(par().A2A_i + "_w_high_4d");
in.push_back(par().A2A_i + "_v_high_4d");
in.push_back(par().A2A_j + "_class");
in.push_back(par().A2A_j + "_w_high_4d");
in.push_back(par().A2A_j + "_v_high_4d");
return in;
}
template <typename FImpl>
std::vector<std::string> TA2APionField<FImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2APionField<FImpl>::setup(void)
{
// Four D fields
envTmp(std::vector<FermionField>, "wi", 1, par().schurBlock, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "vi", 1, par().schurBlock, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "wj", 1, par().schurBlock, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "vj", 1, par().schurBlock, FermionField(env().getGrid(1)));
// 5D tmp
int Ls_i = env().getObjectLs(par().A2A_i + "_class");
envTmpLat(FermionField, "tmp_5d", Ls_i);
int Ls_j= env().getObjectLs(par().A2A_j + "_class");
assert ( Ls_i == Ls_j );
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2APionField<FImpl>::execute(void)
{
LOG(Message) << "Computing A2A Pion fields" << std::endl;
auto &a2a_i = envGet(A2ABase, par().A2A_i + "_class");
auto &a2a_j = envGet(A2ABase, par().A2A_j + "_class");
///////////////////////////////////////////////
// Square assumption for now Nl = Nr = N
///////////////////////////////////////////////
int nt = env().getDim(Tp);
int nx = env().getDim(Xp);
int ny = env().getDim(Yp);
int nz = env().getDim(Zp);
// int N_i = a2a_i.par().N;
// int N_j = a2a_j.par().N;
int N_i = a2a_i.getN();
int N_j = a2a_j.getN();
int nmom=par().Nmom;
int schurBlock = par().schurBlock;
int cacheBlock = par().cacheBlock;
///////////////////////////////////////////////
// Momentum setup
///////////////////////////////////////////////
GridBase *grid = env().getGrid(1);
std::vector<LatticeComplex> phases(nmom,grid);
for(int m=0;m<nmom;m++){
phases[m] = Complex(1.0); // All zero momentum for now
}
///////////////////////////////////////////////////////////////////////
// i and j represent different flavours or hits, possibly with different ranks
// in the general non-square case.
///////////////////////////////////////////////////////////////////////
Eigen::Tensor<ComplexD,4> pionFieldWVmom_ij (nmom,nt,N_i,N_j);
Eigen::Tensor<ComplexD,3> pionFieldWV_ij (nt,N_i,N_j);
Eigen::Tensor<ComplexD,4> pionFieldWVmom_ji (nmom,nt,N_j,N_i);
Eigen::Tensor<ComplexD,3> pionFieldWV_ji (nt,N_j,N_i);
LOG(Message) << "Rank for A2A PionField is " << N_i << " x "<<N_j << std::endl;
envGetTmp(std::vector<FermionField>, wi);
envGetTmp(std::vector<FermionField>, vi);
envGetTmp(std::vector<FermionField>, wj);
envGetTmp(std::vector<FermionField>, vj);
envGetTmp(FermionField, tmp_5d);
LOG(Message) << "Finding v and w vectors " << std::endl;
//////////////////////////////////////////////////////////////////////////
// i,j is first loop over SchurBlock factors reusing 5D matrices
// ii,jj is second loop over cacheBlock factors for high performance contraction
// iii,jjj are loops within cacheBlock
// Total index is sum of these i+ii+iii etc...
//////////////////////////////////////////////////////////////////////////
double flops = 0.0;
double bytes = 0.0;
double vol = nx*ny*nz*nt;
double vol3 = nx*ny*nz;
double t_schur=0;
double t_contr_vwm=0;
double t_contr_vw=0;
double t_contr_ww=0;
double t_contr_vv=0;
double tt0 = usecond();
for(int i=0;i<N_i;i+=schurBlock){ //loop over SchurBlocking to suppress 5D matrix overhead
for(int j=0;j<N_j;j+=schurBlock){
///////////////////////////////////////////////////////////////
// Get the W and V vectors for this schurBlock^2 set of terms
///////////////////////////////////////////////////////////////
int N_ii = MIN(N_i-i,schurBlock);
int N_jj = MIN(N_j-j,schurBlock);
t_schur-=usecond();
for(int ii =0;ii < N_ii;ii++) a2a_i.return_w(i+ii, tmp_5d, wi[ii]);
for(int jj =0;jj < N_jj;jj++) a2a_j.return_w(j+jj, tmp_5d, wj[jj]);
for(int ii =0;ii < N_ii;ii++) a2a_i.return_v(i+ii, tmp_5d, vi[ii]);
for(int jj =0;jj < N_jj;jj++) a2a_j.return_v(j+jj, tmp_5d, vj[jj]);
t_schur+=usecond();
LOG(Message) << "Found i w&v vectors " << i <<" .. " << i+N_ii-1 << std::endl;
LOG(Message) << "Found j w&v vectors " << j <<" .. " << j+N_jj-1 << std::endl;
///////////////////////////////////////////////////////////////
// Series of cache blocked chunks of the contractions within this SchurBlock
///////////////////////////////////////////////////////////////
for(int ii=0;ii<N_ii;ii+=cacheBlock){
for(int jj=0;jj<N_jj;jj+=cacheBlock){
int N_iii = MIN(N_ii-ii,cacheBlock);
int N_jjj = MIN(N_jj-jj,cacheBlock);
Eigen::Tensor<ComplexD,4> pionFieldWVmomB_ij(nmom,nt,N_iii,N_jjj);
Eigen::Tensor<ComplexD,4> pionFieldWVmomB_ji(nmom,nt,N_jjj,N_iii);
Eigen::Tensor<ComplexD,3> pionFieldWVB_ij(nt,N_iii,N_jjj);
Eigen::Tensor<ComplexD,3> pionFieldWVB_ji(nt,N_jjj,N_iii);
t_contr_vwm-=usecond();
A2Autils<FImpl>::PionFieldWVmom(pionFieldWVmomB_ij, &wi[ii], &vj[jj], phases,Tp);
A2Autils<FImpl>::PionFieldWVmom(pionFieldWVmomB_ji, &wj[jj], &vi[ii], phases,Tp);
t_contr_vwm+=usecond();
t_contr_vw-=usecond();
A2Autils<FImpl>::PionFieldWV(pionFieldWVB_ij, &wi[ii], &vj[jj],Tp);
A2Autils<FImpl>::PionFieldWV(pionFieldWVB_ji, &wj[jj], &vi[ii],Tp);
t_contr_vw+=usecond();
flops += vol * ( 2 * 8.0 + 6.0 + 8.0*nmom) * N_iii*N_jjj;
bytes += vol * (12.0 * sizeof(Complex) ) * N_iii*N_jjj
+ vol * ( 2.0 * sizeof(Complex) *nmom ) * N_iii*N_jjj;
///////////////////////////////////////////////////////////////
// Copy back to full meson field tensor
///////////////////////////////////////////////////////////////
parallel_for_nest2(int iii=0;iii< N_iii;iii++) {
for(int jjj=0;jjj< N_jjj;jjj++) {
for(int m =0;m< nmom;m++) {
for(int t =0;t< nt;t++) {
pionFieldWVmom_ij(m,t,i+ii+iii,j+jj+jjj) = pionFieldWVmomB_ij(m,t,iii,jjj);
pionFieldWVmom_ji(m,t,j+jj+jjj,i+ii+iii) = pionFieldWVmomB_ji(m,t,jjj,iii);
}}
for(int t =0;t< nt;t++) {
pionFieldWV_ij(t,i+ii+iii,j+jj+jjj) = pionFieldWVB_ij(t,iii,jjj);
pionFieldWV_ji(t,j+jj+jjj,i+ii+iii) = pionFieldWVB_ji(t,jjj,iii);
}
}}
}}
}}
double nodes=grid->NodeCount();
double tt1 = usecond();
LOG(Message) << " Contraction of PionFields took "<<(tt1-tt0)/1.0e6<< " seconds " << std::endl;
LOG(Message) << " Schur "<<(t_schur)/1.0e6<< " seconds " << std::endl;
LOG(Message) << " Contr WVmom "<<(t_contr_vwm)/1.0e6<< " seconds " << std::endl;
LOG(Message) << " Contr WV "<<(t_contr_vw)/1.0e6<< " seconds " << std::endl;
double t_kernel = t_contr_vwm;
LOG(Message) << " Arith "<<flops/(t_kernel)/1.0e3/nodes<< " Gflop/s / node " << std::endl;
LOG(Message) << " Arith "<<bytes/(t_kernel)/1.0e3/nodes<< " GB/s /node " << std::endl;
/////////////////////////////////////////////////////////////////////////
// Test: Build the pion correlator (two end)
// < PI_ij(t0) PI_ji (t0+t) >
/////////////////////////////////////////////////////////////////////////
std::vector<ComplexD> corrMom(nt,ComplexD(0.0));
for(int i=0;i<N_i;i++){
for(int j=0;j<N_j;j++){
int m=0; // first momentum
for(int t0=0;t0<nt;t0++){
for(int t=0;t<nt;t++){
int tt = (t0+t)%nt;
corrMom[t] += pionFieldWVmom_ij(m,t0,i,j)* pionFieldWVmom_ji(m,tt,j,i);
}}
}}
for(int t=0;t<nt;t++) corrMom[t] = corrMom[t]/ (double)nt;
for(int t=0;t<nt;t++) LOG(Message) << " C_vwm " << t << " " << corrMom[t]<<std::endl;
/////////////////////////////////////////////////////////////////////////
// Test: Build the pion correlator (two end) from zero mom contraction
// < PI_ij(t0) PI_ji (t0+t) >
/////////////////////////////////////////////////////////////////////////
std::vector<ComplexD> corr(nt,ComplexD(0.0));
for(int i=0;i<N_i;i++){
for(int j=0;j<N_j;j++){
for(int t0=0;t0<nt;t0++){
for(int t=0;t<nt;t++){
int tt = (t0+t)%nt;
corr[t] += pionFieldWV_ij(t0,i,j)* pionFieldWV_ji(tt,j,i);
}}
}}
for(int t=0;t<nt;t++) corr[t] = corr[t]/ (double)nt;
for(int t=0;t<nt;t++) LOG(Message) << " C_vw " << t << " " << corr[t]<<std::endl;
/////////////////////////////////////////////////////////////////////////
// Test: Build the pion correlator from zero mom contraction with reversed
// charge flow
/////////////////////////////////////////////////////////////////////////
std::vector<ComplexD> corr_wwvv(nt,ComplexD(0.0));
wi.resize(N_i,grid);
vi.resize(N_i,grid);
wj.resize(N_j,grid);
vj.resize(N_j,grid);
for(int i =0;i < N_i;i++) a2a_i.return_v(i, tmp_5d, vi[i]);
for(int i =0;i < N_i;i++) a2a_i.return_w(i, tmp_5d, wi[i]);
for(int j =0;j < N_j;j++) a2a_j.return_v(j, tmp_5d, vj[j]);
for(int j =0;j < N_j;j++) a2a_j.return_w(j, tmp_5d, wj[j]);
Eigen::Tensor<ComplexD,3> pionFieldWW_ij (nt,N_i,N_j);
Eigen::Tensor<ComplexD,3> pionFieldVV_ji (nt,N_j,N_i);
Eigen::Tensor<ComplexD,3> pionFieldWW_ji (nt,N_j,N_i);
Eigen::Tensor<ComplexD,3> pionFieldVV_ij (nt,N_i,N_j);
A2Autils<FImpl>::PionFieldWW(pionFieldWW_ij, &wi[0], &wj[0],Tp);
A2Autils<FImpl>::PionFieldVV(pionFieldVV_ji, &vj[0], &vi[0],Tp);
A2Autils<FImpl>::PionFieldWW(pionFieldWW_ji, &wj[0], &wi[0],Tp);
A2Autils<FImpl>::PionFieldVV(pionFieldVV_ij, &vi[0], &vj[0],Tp);
for(int i=0;i<N_i;i++){
for(int j=0;j<N_j;j++){
for(int t0=0;t0<nt;t0++){
for(int t=0;t<nt;t++){
int tt = (t0+t)%nt;
corr_wwvv[t] += pionFieldWW_ij(t0,i,j)* pionFieldVV_ji(tt,j,i);
corr_wwvv[t] += pionFieldWW_ji(t0,j,i)* pionFieldVV_ij(tt,i,j);
}}
}}
for(int t=0;t<nt;t++) corr_wwvv[t] = corr_wwvv[t] / vol /2.0 ; // (ij+ji noise contribs if i!=j ).
for(int t=0;t<nt;t++) LOG(Message) << " C_wwvv " << t << " " << corr_wwvv[t]<<std::endl;
/////////////////////////////////////////////////////////////////////////
// This is only correct if there are NO low modes
// Use the "ii" case to construct possible Z wall source one end trick
/////////////////////////////////////////////////////////////////////////
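// Sketch of the one-end estimator formed below, valid only when Nl == 0 so
// that every mode index i is a single stochastic wall hit:
//   C_z2(t) = (1/vol) * sum_{t0} sum_i WW(t0,i,i) * VV(t0+t,i,i)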
std::vector<ComplexD> corr_z2(nt,ComplexD(0.0));
Eigen::Tensor<ComplexD,3> pionFieldWW (nt,N_i,N_i);
Eigen::Tensor<ComplexD,3> pionFieldVV (nt,N_i,N_i);
A2Autils<FImpl>::PionFieldWW(pionFieldWW, &wi[0], &wi[0],Tp);
A2Autils<FImpl>::PionFieldVV(pionFieldVV, &vi[0], &vi[0],Tp);
for(int i=0;i<N_i;i++){
for(int t0=0;t0<nt;t0++){
for(int t=0;t<nt;t++){
int tt = (t0+t)%nt;
corr_z2[t] += pionFieldWW(t0,i,i) * pionFieldVV(tt,i,i) /vol ;
}}
}
LOG(Message) << " C_z2 WARNING only correct if Nl == 0 "<<std::endl;
for(int t=0;t<nt;t++) LOG(Message) << " C_z2 " << t << " " << corr_z2[t]<<std::endl;
/////////////////////////////////////////////////////////////////////////
// Test: Build a bag contraction
/////////////////////////////////////////////////////////////////////////
Eigen::Tensor<ComplexD,2> DeltaF2_fig8 (nt,16);
Eigen::Tensor<ComplexD,2> DeltaF2_trtr (nt,16);
Eigen::Tensor<ComplexD,1> denom0 (nt);
Eigen::Tensor<ComplexD,1> denom1 (nt);
const int dT=16;
A2Autils<FImpl>::DeltaFeq2 (dT,dT,DeltaF2_fig8,DeltaF2_trtr,
denom0,denom1,
pionFieldWW_ij,&vi[0],&vj[0],Tp);
{
int g=0; // O_{VV+AA}
for(int t=0;t<nt;t++)
LOG(Message) << " Bag [" << t << ","<<g<<"] "
<< (DeltaF2_fig8(t,g)+DeltaF2_trtr(t,g))
/ ( 8.0/3.0 * denom0[t]*denom1[t])
<<std::endl;
}
/////////////////////////////////////////////////////////////////////////
// Test: Build a bag contraction the Z2 way
// Build a wall bag comparison assuming no low modes
/////////////////////////////////////////////////////////////////////////
LOG(Message) << " Bag_z2 WARNING only correct if Nl == 0 "<<std::endl;
int t0=0;
int t1=dT;
int Nl=0;
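// Index convention assumed here for the high modes: after the Nl low modes
// they are packed timeslice-major with 12 = Ns*Nc = 4*3 spin-colour
// components per wall, hence idx = Nl + t*12 + s*3 + c below.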
LatticePropagator Qd0(grid);
LatticePropagator Qd1(grid);
LatticePropagator Qs0(grid);
LatticePropagator Qs1(grid);
for(int s=0;s<4;s++){
for(int c=0;c<3;c++){
int idx0 = Nl+t0*12+s*3+c;
int idx1 = Nl+t1*12+s*3+c;
FermToProp<FImpl>(Qd0, vi[idx0], s, c);
FermToProp<FImpl>(Qd1, vi[idx1], s, c);
FermToProp<FImpl>(Qs0, vj[idx0], s, c);
FermToProp<FImpl>(Qs1, vj[idx1], s, c);
}
}
std::vector<Gamma::Algebra> gammas ( {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::GammaXGamma5,
Gamma::Algebra::GammaYGamma5,
Gamma::Algebra::GammaZGamma5,
Gamma::Algebra::GammaTGamma5,
Gamma::Algebra::Identity,
Gamma::Algebra::Gamma5,
Gamma::Algebra::SigmaXY,
Gamma::Algebra::SigmaXZ,
Gamma::Algebra::SigmaXT,
Gamma::Algebra::SigmaYZ,
Gamma::Algebra::SigmaYT,
Gamma::Algebra::SigmaZT
});
auto G5 = Gamma::Algebra::Gamma5;
LatticePropagator anti_d0 = adj( Gamma(G5) * Qd0 * Gamma(G5));
LatticePropagator anti_d1 = adj( Gamma(G5) * Qd1 * Gamma(G5));
LatticeComplex TR1(grid);
LatticeComplex TR2(grid);
LatticeComplex Wick1(grid);
LatticeComplex Wick2(grid);
LatticePropagator PR1(grid);
LatticePropagator PR2(grid);
PR1 = Qs0 * Gamma(G5) * anti_d0;
PR2 = Qs1 * Gamma(G5) * anti_d1;
for(int g=0;g<Nd*Nd;g++){
auto g1 = gammas[g];
Gamma G1 (g1);
TR1 = trace( PR1 * G1 );
TR2 = trace( PR2 * G1 );
Wick1 = TR1*TR2;
Wick2 = trace( PR1* G1 * PR2 * G1 );
std::vector<TComplex> C1;
std::vector<TComplex> C2;
std::vector<TComplex> C3;
sliceSum(Wick1,C1, Tp);
sliceSum(Wick2,C2, Tp);
sliceSum(TR1 ,C3, Tp);
/*
if(g<5){
for(int t=0;t<C1.size();t++){
LOG(Message) << " Wick1["<<g<<","<<t<< "] "<< C1[t]<<std::endl;
}
for(int t=0;t<C2.size();t++){
LOG(Message) << " Wick2["<<g<<","<<t<< "] "<< C2[t]<<std::endl;
}
}
if( (g==9) || (g==7) ){ // P and At in above ordering
for(int t=0;t<C3.size();t++){
LOG(Message) << " <G|P>["<<g<<","<<t<< "] "<< C3[t]<<std::endl;
}
}
*/
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2APionField_hpp_

View File

@ -1,11 +1,8 @@
#pragma once
//#include <Grid/Hadrons/Global.hpp>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
#include <Grid/Hadrons/Global.hpp>
#include <unsupported/Eigen/CXX11/Tensor>
namespace Grid {
namespace QCD {
#undef DELTA_F_EQ_2
BEGIN_HADRONS_NAMESPACE
template <typename FImpl>
class A2Autils
@ -64,32 +61,15 @@ public:
const FermionField *vs,
const FermionField *vd);
static void ContractFourQuarkColourDiagonal(const PropagatorField &WWVV0,
const PropagatorField &WWVV1,
const std::vector<Gamma> &gamma0,
const std::vector<Gamma> &gamma1,
ComplexField &O_trtr,
ComplexField &O_fig8);
static void ContractFourQuarkColourMix(const PropagatorField &WWVV0,
const PropagatorField &WWVV1,
const std::vector<Gamma> &gamma0,
const std::vector<Gamma> &gamma1,
ComplexField &O_trtr,
ComplexField &O_fig8);
#ifdef DELTA_F_EQ_2
static void DeltaFeq2(int dt_min,int dt_max,
Eigen::Tensor<ComplexD,2> &dF2_fig8,
Eigen::Tensor<ComplexD,2> &dF2_trtr,
Eigen::Tensor<ComplexD,2> &dF2_fig8_mix,
Eigen::Tensor<ComplexD,2> &dF2_trtr_mix,
Eigen::Tensor<ComplexD,1> &denom_A0,
Eigen::Tensor<ComplexD,1> &denom_P,
Eigen::Tensor<ComplexD,1> &den0,
Eigen::Tensor<ComplexD,1> &den1,
Eigen::Tensor<ComplexD,3> &WW_sd,
const FermionField *vs,
const FermionField *vd,
int orthogdim);
#endif
};
template<class FImpl>
@ -529,6 +509,8 @@ void A2Autils<FImpl>::PionFieldWVmom(Eigen::Tensor<ComplexD,4> &mat,
iScalar<vector_type> temp;
std::vector<iScalar<scalar_type> > extracted(Nsimd);
// std::vector<scalar_type> extracted(Nsimd);
for(int i=0;i<Lblock;i++){
for(int j=0;j<Rblock;j++){
for(int m=0;m<Nmom;m++){
@ -554,7 +536,7 @@ void A2Autils<FImpl>::PionFieldWVmom(Eigen::Tensor<ComplexD,4> &mat,
assert(mat.dimension(0) == Nmom);
assert(mat.dimension(1) == Nt);
// ld loop and local only??
int pd = grid->_processors[orthogdim];
int pc = grid->_processor_coor[orthogdim];
parallel_for_nest2(int lt=0;lt<ld;lt++)
@ -621,7 +603,7 @@ void A2Autils<FImpl>::PionFieldVV(Eigen::Tensor<ComplexD,3> &mat,
// -- Pupil or fig8 type topology (depending on flavour structure) done below
// -- Have Bag style -- WWVV VVWW
// _ _
// / \/ \
// / \/ \
// \_/\_/
//
// - Kpipi style (two pion insertions)
@ -691,7 +673,7 @@ void A2Autils<FImpl>::PionFieldVV(Eigen::Tensor<ComplexD,3> &mat,
// Type 4
// *********
// ___ pi
// K / \/\ |\
// K / \/\ |\
// \___/\/ |/
// pi
//
@ -822,155 +804,18 @@ void A2Autils<FImpl>::ContractWWVV(std::vector<PropagatorField> &WWVV,
}
template<class FImpl>
void A2Autils<FImpl>::ContractFourQuarkColourDiagonal(const PropagatorField &WWVV0,
const PropagatorField &WWVV1,
const std::vector<Gamma> &gamma0,
const std::vector<Gamma> &gamma1,
ComplexField &O_trtr,
ComplexField &O_fig8)
{
assert(gamma0.size()==gamma1.size());
int Ng = gamma0.size();
GridBase *grid = WWVV0._grid;
parallel_for(int ss=0;ss<grid->oSites();ss++){
typedef typename ComplexField::vector_object vobj;
vobj v_trtr;
vobj v_fig8;
auto VV0 = WWVV0._odata[ss];
auto VV1 = WWVV1._odata[ss];
for(int g=0;g<Ng;g++){
v_trtr = trace(VV0 * gamma0[g])* trace(VV1*gamma1[g]);
v_fig8 = trace(VV0 * gamma0[g] * VV1 * gamma1[g]);
if ( g==0 ) {
O_trtr._odata[ss] = v_trtr;
O_fig8._odata[ss] = v_fig8;
} else {
O_trtr._odata[ss]+= v_trtr;
O_fig8._odata[ss]+= v_fig8;
}
}
}
}
template<class FImpl>
void A2Autils<FImpl>::ContractFourQuarkColourMix(const PropagatorField &WWVV0,
const PropagatorField &WWVV1,
const std::vector<Gamma> &gamma0,
const std::vector<Gamma> &gamma1,
ComplexField &O_trtr,
ComplexField &O_fig8)
{
assert(gamma0.size()==gamma1.size());
int Ng = gamma0.size();
GridBase *grid = WWVV0._grid;
parallel_for(int ss=0;ss<grid->oSites();ss++){
typedef typename ComplexField::vector_object vobj;
auto VV0 = WWVV0._odata[ss];
auto VV1 = WWVV1._odata[ss];
for(int g=0;g<Ng;g++){
auto VV0G = VV0 * gamma0[g]; // Spin multiply
auto VV1G = VV1 * gamma1[g];
vobj v_trtr=zero;
vobj v_fig8=zero;
/////////////////////////////////////////
// Colour mixed
/////////////////////////////////////////
// _ _
// s_sa G_st d_tb s_s'b G_s't' d_t'a
//
//
// Contracted with prop factor (VV0)_sd,ab (VV1)_s'd',ba
//
// Wick1 [ spin TR TR ]
//
// (VV0*G0)_ss,ba . (VV1*G1)_tt,ab
//
// Wick2 [ spin fig8 ]
//
// (VV0*G0)_st,aa (VV1*G1)_ts,bb
//
/////////////////////////////////////////
for(int a=0;a<Nc;a++){
for(int b=0;b<Nc;b++){
for(int s=0;s<Ns;s++){
for(int t=0;t<Ns;t++){
// Mixed traces
v_trtr()()() += VV0G()(s,s)(a,b)*VV1G()(t,t)(b,a); // Was the fig8 before Fierzing
v_fig8()()() += VV0G()(s,t)(a,a)*VV1G()(t,s)(b,b); // Was the trtr before Fierzing
/*
* CHECKS -- use Fierz identities as a strong test, 4 Oct 2018.
*
BagMix [8,0] fig8 (21.5596,-3.83908e-17) trtr (0.064326,2.51001e-17) // Fierz -1 0 0 0 0
BagMix [8,1] fig8 (-1346.99,1.2481e-16) trtr (34.2501,-3.36935e-17) // 0 0 2 0 0
BagMix [8,2] fig8 (13.7536,-6.04625e-19) trtr (-215.542,3.24326e-17) // 0 1/2 0 0 0
BagMix [8,3] fig8 (555.878,-7.39942e-17) trtr (463.82,-4.73909e-17) // 0 0 0 1/2 -1/2
BagMix [8,4] fig8 (-1602.48,9.08511e-17) trtr (-936.302,1.14156e-16) // 0 0 0 -3/2 -1/2
Bag [8,0] fig8 (-0.064326,1.06281e-17) trtr (-21.5596,1.06051e-17)
Bag [8,2] fig8 (17.125,-3.40959e-17) trtr (-673.493,7.68134e-17)
Bag [8,1] fig8 (-431.084,2.76423e-17) trtr (27.5073,-5.76967e-18) /////////// TR TR FIG8
Bag [8,3] fig8 (700.061,-1.14925e-16) trtr (1079.18,-1.35476e-16) // 555.878 = 0.5(1079.18+32.5776) ; 463.82 =0.5(700.061+227.58)
Bag [8,4] fig8 (-227.58,3.58808e-17) trtr (-32.5776,1.83286e-17) // - 1602.48 = - 1.5*1079.18 + .5* 32.5776; 936.302=-1.5* 700+0.5*227
*/
//Unmixed debug check consistency
// v_trtr()()() += VV0G()(s,s)(a,a)*VV1G()(t,t)(b,b);
// v_fig8()()() += VV0G()(s,t)(a,b)*VV1G()(t,s)(b,a);
}}}}
if ( g==0 ) {
O_trtr._odata[ss] = v_trtr;
O_fig8._odata[ss] = v_fig8;
} else {
O_trtr._odata[ss]+= v_trtr;
O_fig8._odata[ss]+= v_fig8;
}
}
}
}
#ifdef DELTA_F_EQ_2
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Perhaps this should move out of the utils and into Hadrons module
// Now makes use of the primitives above and doesn't touch inside
// the lattice structures.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<class FImpl>
void A2Autils<FImpl>::DeltaFeq2(int dt_min,int dt_max,
Eigen::Tensor<ComplexD,2> &dF2_fig8,
Eigen::Tensor<ComplexD,2> &dF2_trtr,
Eigen::Tensor<ComplexD,2> &dF2_fig8_mix,
Eigen::Tensor<ComplexD,2> &dF2_trtr_mix,
Eigen::Tensor<ComplexD,1> &denom_A0,
Eigen::Tensor<ComplexD,1> &denom_P,
Eigen::Tensor<ComplexD,3> &WW_sd,
const FermionField *vs,
const FermionField *vd,
int orthogdim)
Eigen::Tensor<ComplexD,2> &dF2_fig8,
Eigen::Tensor<ComplexD,2> &dF2_trtr,
Eigen::Tensor<ComplexD,1> &den0,
Eigen::Tensor<ComplexD,1> &den1,
Eigen::Tensor<ComplexD,3> &WW_sd,
const FermionField *vs,
const FermionField *vd,
int orthogdim)
{
GridBase *grid = vs[0]._grid;
LOG(Message) << "Computing A2A DeltaF=2 graph" << std::endl;
auto G5 = Gamma(Gamma::Algebra::Gamma5);
@ -1005,20 +850,15 @@ void A2Autils<FImpl>::DeltaFeq2(int dt_min,int dt_max,
////////////////////////////////////////////////////////
// Contraction
////////////////////////////////////////////////////////
int Ng=5;
dF2_trtr.resize(N_t,Ng);
dF2_fig8.resize(N_t,Ng);
dF2_trtr_mix.resize(N_t,Ng);
dF2_fig8_mix.resize(N_t,Ng);
denom_A0.resize(N_t);
denom_P.resize(N_t);
dF2_trtr.resize(N_t,5);
dF2_fig8.resize(N_t,5);
den0.resize(N_t);
den1.resize(N_t);
for(int t=0;t<N_t;t++){
for(int g=0;g<Ng;g++) dF2_trtr(t,g)= ComplexD(0.0);
for(int g=0;g<Ng;g++) dF2_fig8(t,g)= ComplexD(0.0);
for(int g=0;g<Ng;g++) dF2_trtr_mix(t,g)= ComplexD(0.0);
for(int g=0;g<Ng;g++) dF2_fig8_mix(t,g)= ComplexD(0.0);
denom_A0(t) =ComplexD(0.0);
denom_P(t) =ComplexD(0.0);
for(int g=0;g<dF2_trtr.dimension(1);g++) dF2_trtr(t,g)= ComplexD(0.0);
for(int g=0;g<dF2_fig8.dimension(1);g++) dF2_fig8(t,g)= ComplexD(0.0);
den0(t) =ComplexD(0.0);
den1(t) =ComplexD(0.0);
}
ComplexField D0(grid); D0 = zero; // <P|A0> correlator from each wall
@ -1036,18 +876,6 @@ void A2Autils<FImpl>::DeltaFeq2(int dt_min,int dt_max,
ComplexField O4_fig8(grid); O4_fig8 = zero;
ComplexField O5_fig8(grid); O5_fig8 = zero;
ComplexField VV_trtr(grid); VV_trtr = zero;
ComplexField AA_trtr(grid); AA_trtr = zero;
ComplexField SS_trtr(grid); SS_trtr = zero;
ComplexField PP_trtr(grid); PP_trtr = zero;
ComplexField TT_trtr(grid); TT_trtr = zero;
ComplexField VV_fig8(grid); VV_fig8 = zero;
ComplexField AA_fig8(grid); AA_fig8 = zero;
ComplexField SS_fig8(grid); SS_fig8 = zero;
ComplexField PP_fig8(grid); PP_fig8 = zero;
ComplexField TT_fig8(grid); TT_fig8 = zero;
//////////////////////////////////////////////////
// Used to store appropriate correlation funcs
//////////////////////////////////////////////////
@ -1085,89 +913,127 @@ void A2Autils<FImpl>::DeltaFeq2(int dt_min,int dt_max,
auto T5 = Gamma(Gamma::Algebra::SigmaZT);
std::cout <<GridLogMessage << " dt " <<dt_min<<"..." <<dt_max<<std::endl;
for(int t0=0;t0<N_t;t0++){
std::cout <<GridLogMessage << " t0 " <<t0<<std::endl;
// for(int dt=dt_min;dt<dt_max;dt++){
{
int dt = dt_min;
int t1 = (t0+dt)%N_t;
std::cout <<GridLogMessage << " t1 " <<t1<<std::endl;
std::vector<Gamma> VV({VX,VY,VZ,VT});
std::vector<Gamma> AA({AX,AY,AZ,AT});
std::vector<Gamma> SS({S});
std::vector<Gamma> PP({P});
std::vector<Gamma> TT({T0,T1,T2,T3,T4,T5});
std::vector<Gamma> A0({AT});
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],VV,VV,VV_trtr,VV_fig8); // VV
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],AA,AA,AA_trtr,AA_fig8); // AA
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],SS,SS,SS_trtr,SS_fig8); // SS
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],PP,PP,PP_trtr,PP_fig8); // PP
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],TT,TT,TT_trtr,TT_fig8); // TT
parallel_for(int ss=0;ss<grid->oSites();ss++){
O1_trtr = VV_trtr+AA_trtr; O2_trtr = VV_trtr-AA_trtr; // VV+AA,VV-AA
O1_fig8 = VV_fig8+AA_fig8; O2_fig8 = VV_fig8-AA_fig8;
auto VV0= WWVV[t0]._odata[ss];
auto VV1= WWVV[t1]._odata[ss];
O3_trtr = SS_trtr-PP_trtr; O4_trtr = SS_trtr+PP_trtr; // SS+PP,SS-PP
O3_fig8 = SS_fig8-PP_fig8; O4_fig8 = SS_fig8+PP_fig8;
O1_trtr._odata[ss] = trace(VX*VV0) * trace(VX*VV1)
+ trace(VY*VV0) * trace(VY*VV1)
+ trace(VZ*VV0) * trace(VZ*VV1)
+ trace(VT*VV0) * trace(VT*VV1)
+ trace(AX*VV0) * trace(AX*VV1)
+ trace(AY*VV0) * trace(AY*VV1)
+ trace(AZ*VV0) * trace(AZ*VV1)
+ trace(AT*VV0) * trace(AT*VV1);
O5_trtr = TT_trtr;
O5_fig8 = TT_fig8;
O2_trtr._odata[ss] = trace(VX*VV0) * trace(VX*VV1)
+ trace(VY*VV0) * trace(VY*VV1)
+ trace(VZ*VV0) * trace(VZ*VV1)
+ trace(VT*VV0) * trace(VT*VV1)
- trace(AX*VV0) * trace(AX*VV1)
- trace(AY*VV0) * trace(AY*VV1)
- trace(AZ*VV0) * trace(AZ*VV1)
- trace(AT*VV0) * trace(AT*VV1);
sliceSum(O1_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr(t,0)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O2_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr(t,1)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O3_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr(t,2)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O4_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr(t,3)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O5_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr(t,4)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
O3_trtr._odata[ss] = trace(S*VV0) * trace(S*VV1)
+ trace(P*VV0) * trace(P*VV1);
sliceSum(O1_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8(t,0)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O2_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8(t,1)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O3_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8(t,2)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O4_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8(t,3)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O5_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8(t,4)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
O4_trtr._odata[ss] = trace(S*VV0) * trace(S*VV1)
- trace(P*VV0) * trace(P*VV1);
ContractFourQuarkColourDiagonal(WWVV[t0], WWVV[t1],A0,A0,AA_trtr,AA_fig8); // A0 insertion
O5_trtr._odata[ss] = trace(T0*VV0) * trace(T0*VV1)
+ trace(T1*VV0) * trace(T1*VV1)
+ trace(T2*VV0) * trace(T2*VV1)
+ trace(T3*VV0) * trace(T3*VV1)
+ trace(T4*VV0) * trace(T4*VV1)
+ trace(T5*VV0) * trace(T5*VV1);
sliceSum(AA_trtr,C1, orthogdim);
sliceSum(PP_trtr,C2, orthogdim);
////////////////////////////////////
// Fig8 Wick contraction
////////////////////////////////////
O1_fig8._odata[ss] = trace (VV0 * VX * VV1 * VX)
+ trace (VV0 * VY * VV1 * VY)
+ trace (VV0 * VZ * VV1 * VZ)
+ trace (VV0 * VT * VV1 * VT)
+ trace (VV0 * AX * VV1 * AX)
+ trace (VV0 * AY * VV1 * AY)
+ trace (VV0 * AZ * VV1 * AZ)
+ trace (VV0 * AT * VV1 * AT);
O2_fig8._odata[ss] = trace (VV0 * VX * VV1 * VX)
+ trace (VV0 * VY * VV1 * VY)
+ trace (VV0 * VZ * VV1 * VZ)
+ trace (VV0 * VT * VV1 * VT)
- trace (VV0 * AX * VV1 * AX)
- trace (VV0 * AY * VV1 * AY)
- trace (VV0 * AZ * VV1 * AZ)
- trace (VV0 * AT * VV1 * AT);
O3_fig8._odata[ss] = trace (VV0 * S * VV1 * S)
+ trace (VV0 * P * VV1 * P);
O4_fig8._odata[ss] = trace (VV0 * S * VV1 * S)
- trace (VV0 * P * VV1 * P);
O5_fig8._odata[ss] = trace (VV0 * T0 * VV1 * T0)
+ trace (VV0 * T1 * VV1 * T1)
+ trace (VV0 * T2 * VV1 * T2)
+ trace (VV0 * T3 * VV1 * T3)
+ trace (VV0 * T4 * VV1 * T4)
+ trace (VV0 * T5 * VV1 * T5);
D0._odata[ss] = trace(AT*VV0);
D1._odata[ss] = trace(AT*VV1);
for(int t=0;t<N_t;t++){
denom_A0(t)+=C1[(t+t0)%N_t]()()()/vol;
denom_P(t) +=C2[(t+t0)%N_t]()()()/vol;
}
///////////////////////////////////////////////////////////////////////////
// Colour mixed contractions
///////////////////////////////////////////////////////////////////////////
sliceSum(O1_trtr,C1, orthogdim);
sliceSum(O2_trtr,C2, orthogdim);
sliceSum(O3_trtr,C3, orthogdim);
sliceSum(O4_trtr,C4, orthogdim);
sliceSum(O5_trtr,C5, orthogdim);
ContractFourQuarkColourMix(WWVV[t0], WWVV[t1],VV,VV,VV_trtr,VV_fig8); // VV
ContractFourQuarkColourMix(WWVV[t0], WWVV[t1],AA,AA,AA_trtr,AA_fig8); // AA
ContractFourQuarkColourMix(WWVV[t0], WWVV[t1],SS,SS,SS_trtr,SS_fig8); // SS
ContractFourQuarkColourMix(WWVV[t0], WWVV[t1],PP,PP,PP_trtr,PP_fig8); // PP
ContractFourQuarkColourMix(WWVV[t0], WWVV[t1],TT,TT,TT_trtr,TT_fig8); // TT
O1_trtr = VV_trtr+AA_trtr; O2_trtr = VV_trtr-AA_trtr; // VV+AA,VV-AA
O1_fig8 = VV_fig8+AA_fig8; O2_fig8 = VV_fig8-AA_fig8;
for(int t=0;t<N_t;t++){// 2x from Wick contraction reordering
dF2_trtr(t,0)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
dF2_trtr(t,1)+= 2.0*C2[(t+t0)%N_t]()()()/vol;
dF2_trtr(t,2)+= 2.0*C3[(t+t0)%N_t]()()()/vol;
dF2_trtr(t,3)+= 2.0*C4[(t+t0)%N_t]()()()/vol;
dF2_trtr(t,4)+= 2.0*C5[(t+t0)%N_t]()()()/vol;
}
O3_trtr = SS_trtr-PP_trtr; O4_trtr = SS_trtr+PP_trtr; // SS+PP,SS-PP
O3_fig8 = SS_fig8-PP_fig8; O4_fig8 = SS_fig8+PP_fig8;
sliceSum(O1_fig8,C1, orthogdim);
sliceSum(O2_fig8,C2, orthogdim);
sliceSum(O3_fig8,C3, orthogdim);
sliceSum(O4_fig8,C4, orthogdim);
sliceSum(O5_fig8,C5, orthogdim);
O5_trtr = TT_trtr;
O5_fig8 = TT_fig8;
for(int t=0;t<N_t;t++){
dF2_fig8(t,0)= 2.0*C1[(t+t0)%N_t]()()()/vol;
dF2_fig8(t,1)= 2.0*C2[(t+t0)%N_t]()()()/vol;
dF2_fig8(t,2)= 2.0*C3[(t+t0)%N_t]()()()/vol;
dF2_fig8(t,3)= 2.0*C4[(t+t0)%N_t]()()()/vol;
dF2_fig8(t,4)= 2.0*C5[(t+t0)%N_t]()()()/vol;
}
sliceSum(O1_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr_mix(t,0)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O2_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr_mix(t,1)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O3_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr_mix(t,2)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O4_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr_mix(t,3)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O5_trtr,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_trtr_mix(t,4)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(D0,C1, orthogdim);
sliceSum(D1,C2, orthogdim);
sliceSum(O1_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8_mix(t,0)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O2_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8_mix(t,1)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O3_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8_mix(t,2)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O4_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8_mix(t,3)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
sliceSum(O5_fig8,C1, orthogdim); for(int t=0;t<N_t;t++) dF2_fig8_mix(t,4)+= 2.0*C1[(t+t0)%N_t]()()()/vol;
for(int t=0;t<N_t;t++){
den0(t)+=C1[(t+t0)%N_t]()()()/vol;
den1(t)+=C2[(t+t0)%N_t]()()()/vol;
}
}
}
@ -1179,7 +1045,5 @@ void A2Autils<FImpl>::DeltaFeq2(int dt_min,int dt_max,
LOG(Message) << "Computing A2A DeltaF=2 graph t_outer " << t_outer /million << " s "<< std::endl;
LOG(Message) << "Computing A2A DeltaF=2 graph t_contr " << t_contr /million << " s "<< std::endl;
}
#endif
}}
END_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Baryon.cc
Source file: extras/Hadrons/Modules/MContraction/Baryon.cc
Copyright (C) 2015-2018
@ -25,7 +25,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Grid/Hadrons/Modules/MContraction/Baryon.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Baryon.hpp
Source file: extras/Hadrons/Modules/MContraction/Baryon.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_Baryon_hpp_
#define Hadrons_MContraction_Baryon_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/DiscLoop.cc
Source file: extras/Hadrons/Modules/MContraction/DiscLoop.cc
Copyright (C) 2015-2018
@ -25,7 +25,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Grid/Hadrons/Modules/MContraction/DiscLoop.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/DiscLoop.hpp
Source file: extras/Hadrons/Modules/MContraction/DiscLoop.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_DiscLoop_hpp_
#define Hadrons_MContraction_DiscLoop_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Gamma3pt.cc
Source file: extras/Hadrons/Modules/MContraction/Gamma3pt.cc
Copyright (C) 2015-2018
@ -25,7 +25,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Grid/Hadrons/Modules/MContraction/Gamma3pt.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Gamma3pt.hpp
Source file: extras/Hadrons/Modules/MContraction/Gamma3pt.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_Gamma3pt_hpp_
#define Hadrons_MContraction_Gamma3pt_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Meson.cc
Source file: extras/Hadrons/Modules/MContraction/Meson.cc
Copyright (C) 2015-2018
@ -25,7 +25,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Grid/Hadrons/Modules/MContraction/Meson.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,13 +2,12 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Meson.hpp
Source file: extras/Hadrons/Modules/MContraction/Meson.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Lanny91 <andrew.lawson@gmail.com>
Author: Vera Guelpers <vmg1n14@soton.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -31,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_Meson_hpp_
#define Hadrons_MContraction_Meson_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
@ -77,7 +76,7 @@ class TMeson: public Module<MesonPar>
public:
FERM_TYPE_ALIASES(FImpl1, 1);
FERM_TYPE_ALIASES(FImpl2, 2);
BASIC_TYPE_ALIASES(ScalarImplCR, Scalar);
FERM_TYPE_ALIASES(ScalarImplCR, Scalar);
SINK_TYPE_ALIASES(Scalar);
class Result: Serializable
{

View File

@ -0,0 +1,8 @@
#include <Grid/Hadrons/Modules/MContraction/MesonFieldGamma.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TMesonFieldGamma<FIMPL>;
template class Grid::Hadrons::MContraction::TMesonFieldGamma<ZFIMPL>;

View File

@ -0,0 +1,269 @@
#ifndef Hadrons_MContraction_MesonFieldGamma_hpp_
#define Hadrons_MContraction_MesonFieldGamma_hpp_
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/AllToAllVectors.hpp>
#include <Grid/Hadrons/AllToAllReduction.hpp>
#include <Grid/Grid_Eigen_Dense.h>
#include <fstream>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* MesonFieldGamma *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
class MesonFieldPar : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFieldPar,
int, Nl,
int, N,
int, Nblock,
std::string, A2A1,
std::string, A2A2,
std::string, gammas,
std::string, output);
};
template <typename FImpl>
class TMesonFieldGamma : public Module<MesonFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl, );
SOLVER_TYPE_ALIASES(FImpl, );
typedef A2AModesSchurDiagTwo<typename FImpl::FermionField, FMat, Solver> A2ABase;
class Result : Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
Gamma::Algebra, gamma,
std::vector<std::vector<std::vector<ComplexD>>>, MesonField);
};
public:
// constructor
TMesonFieldGamma(const std::string name);
// destructor
virtual ~TMesonFieldGamma(void){};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
virtual void parseGammaString(std::vector<Gamma::Algebra> &gammaList);
virtual void vectorOfWs(std::vector<FermionField> &w, int i, int Nblock, FermionField &tmpw_5d, std::vector<FermionField> &vec_w);
virtual void vectorOfVs(std::vector<FermionField> &v, int j, int Nblock, FermionField &tmpv_5d, std::vector<FermionField> &vec_v);
virtual void gammaMult(std::vector<FermionField> &v, Gamma gamma);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER(MesonFieldGamma, ARG(TMesonFieldGamma<FIMPL>), MContraction);
MODULE_REGISTER(ZMesonFieldGamma, ARG(TMesonFieldGamma<ZFIMPL>), MContraction);
/******************************************************************************
* TMesonFieldGamma implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TMesonFieldGamma<FImpl>::TMesonFieldGamma(const std::string name)
: Module<MesonFieldPar>(name)
{
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TMesonFieldGamma<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().A2A1 + "_class", par().A2A2 + "_class"};
in.push_back(par().A2A1 + "_w_high_4d");
in.push_back(par().A2A2 + "_v_high_4d");
return in;
}
template <typename FImpl>
std::vector<std::string> TMesonFieldGamma<FImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
template <typename FImpl>
void TMesonFieldGamma<FImpl>::parseGammaString(std::vector<Gamma::Algebra> &gammaList)
{
gammaList.clear();
// Determine gamma matrices to insert at source/sink.
if (par().gammas.compare("all") == 0)
{
// Do all contractions.
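// Gamma::nGamma counts both signs of every matrix; the stride-2 loop
// starting at 1 is presumably picking the positive-sign partner of each
// sign pair once.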
for (unsigned int i = 1; i < Gamma::nGamma; i += 2)
{
gammaList.push_back(((Gamma::Algebra)i));
}
}
else
{
// Parse individual contractions from input string.
gammaList = strToVec<Gamma::Algebra>(par().gammas);
}
}
template <typename FImpl>
void TMesonFieldGamma<FImpl>::vectorOfWs(std::vector<FermionField> &w, int i, int Nblock, FermionField &tmpw_5d, std::vector<FermionField> &vec_w)
{
for (unsigned int ni = 0; ni < Nblock; ni++)
{
vec_w[ni] = w[i + ni];
}
}
template <typename FImpl>
void TMesonFieldGamma<FImpl>::vectorOfVs(std::vector<FermionField> &v, int j, int Nblock, FermionField &tmpv_5d, std::vector<FermionField> &vec_v)
{
for (unsigned int nj = 0; nj < Nblock; nj++)
{
vec_v[nj] = v[j+nj];
}
}
template <typename FImpl>
void TMesonFieldGamma<FImpl>::gammaMult(std::vector<FermionField> &v, Gamma gamma)
{
int Nblock = v.size();
for (unsigned int nj = 0; nj < Nblock; nj++)
{
v[nj] = gamma * v[nj];
}
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TMesonFieldGamma<FImpl>::setup(void)
{
int nt = env().getDim(Tp);
int N = par().N;
int Nblock = par().Nblock;
int Ls_ = env().getObjectLs(par().A2A1 + "_class");
envTmpLat(FermionField, "tmpv_5d", Ls_);
envTmpLat(FermionField, "tmpw_5d", Ls_);
envTmp(std::vector<FermionField>, "w", 1, N, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "v", 1, N, FermionField(env().getGrid(1)));
envTmp(Eigen::MatrixXcd, "MF", 1, Eigen::MatrixXcd::Zero(nt, N * N));
envTmp(std::vector<FermionField>, "w_block", 1, Nblock, FermionField(env().getGrid(1)));
envTmp(std::vector<FermionField>, "v_block", 1, Nblock, FermionField(env().getGrid(1)));
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TMesonFieldGamma<FImpl>::execute(void)
{
LOG(Message) << "Computing A2A meson field for gamma = " << par().gammas << ", taking w from " << par().A2A1 << " and v from " << par().A2A2 << std::endl;
int N = par().N;
int nt = env().getDim(Tp);
int Nblock = par().Nblock;
std::vector<Result> result;
std::vector<Gamma::Algebra> gammaResultList;
std::vector<Gamma> gammaList;
parseGammaString(gammaResultList);
result.resize(gammaResultList.size());
Gamma g5(Gamma::Algebra::Gamma5);
gammaList.resize(gammaResultList.size(), g5);
for (unsigned int i = 0; i < result.size(); ++i)
{
result[i].gamma = gammaResultList[i];
result[i].MesonField.resize(N, std::vector<std::vector<ComplexD>>(N, std::vector<ComplexD>(nt)));
Gamma gamma(gammaResultList[i]);
gammaList[i] = gamma;
}
auto &a2a1 = envGet(A2ABase, par().A2A1 + "_class");
auto &a2a2 = envGet(A2ABase, par().A2A2 + "_class");
envGetTmp(FermionField, tmpv_5d);
envGetTmp(FermionField, tmpw_5d);
envGetTmp(std::vector<FermionField>, v);
envGetTmp(std::vector<FermionField>, w);
LOG(Message) << "Finding v and w vectors for N = " << N << std::endl;
for (int i = 0; i < N; i++)
{
a2a2.return_v(i, tmpv_5d, v[i]);
a2a1.return_w(i, tmpw_5d, w[i]);
}
LOG(Message) << "Found v and w vectors for N = " << N << std::endl;
std::vector<std::vector<ComplexD>> MesonField_ij;
LOG(Message) << "Before blocked MFs, Nblock = " << Nblock << std::endl;
envGetTmp(std::vector<FermionField>, v_block);
envGetTmp(std::vector<FermionField>, w_block);
MesonField_ij.resize(Nblock * Nblock, std::vector<ComplexD>(nt));
envGetTmp(Eigen::MatrixXcd, MF);
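// MF holds one column per (i,j) mode pair, flattened as column = i + N*j,
// with the nt timeslices down the column; this lets the single
// GlobalSumVector call further down reduce the whole field in one go.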
LOG(Message) << "Before blocked MFs, Nblock = " << Nblock << std::endl;
for (unsigned int i = 0; i < N; i += Nblock)
{
vectorOfWs(w, i, Nblock, tmpw_5d, w_block);
for (unsigned int j = 0; j < N; j += Nblock)
{
vectorOfVs(v, j, Nblock, tmpv_5d, v_block);
for (unsigned int k = 0; k < result.size(); k++)
{
gammaMult(v_block, gammaList[k]);
sliceInnerProductMesonField(MesonField_ij, w_block, v_block, Tp);
for (unsigned int nj = 0; nj < Nblock; nj++)
{
for (unsigned int ni = 0; ni < Nblock; ni++)
{
MF.col((i + ni) + (j + nj) * N) = Eigen::VectorXcd::Map(&MesonField_ij[nj * Nblock + ni][0], MesonField_ij[nj * Nblock + ni].size());
}
}
}
}
if (i % 10 == 0)
{
LOG(Message) << "MF for i = " << i << " of " << N << std::endl;
}
}
LOG(Message) << "Before Global sum, Nblock = " << Nblock << std::endl;
v_block[0]._grid->GlobalSumVector(MF.data(), MF.size());
LOG(Message) << "After Global sum, Nblock = " << Nblock << std::endl;
for (unsigned int i = 0; i < N; i++)
{
for (unsigned int j = 0; j < N; j++)
{
for (unsigned int k = 0; k < result.size(); k++)
{
for (unsigned int t = 0; t < nt; t++)
{
result[k].MesonField[i][j][t] = MF.col(i + N * j)[t];
}
}
}
}
saveResult(par().output, "meson", result);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_MesonFieldGamma_hpp_

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WardIdentity.cc
Source file: extras/Hadrons/Modules/MContraction/WardIdentity.cc
Copyright (C) 2015-2018
@ -25,7 +25,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/WardIdentity.hpp>
#include <Grid/Hadrons/Modules/MContraction/WardIdentity.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WardIdentity.hpp
Source file: extras/Hadrons/Modules/MContraction/WardIdentity.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_WardIdentity_hpp_
#define Hadrons_MContraction_WardIdentity_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonian.hpp
Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonian.hpp
Copyright (C) 2015-2018
@ -30,9 +30,9 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_WeakHamiltonian_hpp_
#define Hadrons_MContraction_WeakHamiltonian_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Hadrons/Global.hpp>
#include <Grid/Hadrons/Module.hpp>
#include <Grid/Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianEye.cc
Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.cc
Copyright (C) 2015-2018
@ -27,7 +27,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp>
using namespace Grid;
using namespace Hadrons;

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp
Source file: extras/Hadrons/Modules/MContraction/WeakHamiltonianEye.hpp
Copyright (C) 2015-2018
@ -30,7 +30,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#ifndef Hadrons_MContraction_WeakHamiltonianEye_hpp_
#define Hadrons_MContraction_WeakHamiltonianEye_hpp_
#include <Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Grid/Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
BEGIN_HADRONS_NAMESPACE

Some files were not shown because too many files have changed in this diff