
Merge GPU support (upstream/develop) into distillation branch.

This compiles and looks right ... but may need some testing

* develop: (762 commits)
  Tensor ambiguous fix
  Fix for GCC preprocessor/pragma handling bug
  Trips up NVCC for reasons I don't understand on Summit
  Fix GCC complaint
  Zero() change
  Force a couple of things to compile on NVCC
  Remove debug code
  nvcc error suppress
  Merge develop
  Reduction finished and hopefully fixes CI regression failure on single precision and force
  Double precision variants for summation accuracy
  Update todo list
  Freeze the seed
  Fix compiling of MSource::Gauss for single precision
  Think the reduction is now sorted and cleaned up
  Fix force term
  Printing improvement
  GPU reduction fix and also exit backtrace option
  GPU friendly
  Simplify the comms benchmark
  ...

# Conflicts:
#	Grid/communicator/SharedMemoryMPI.cc
#	Grid/qcd/action/fermion/WilsonKernelsAsm.cc
#	Grid/qcd/action/fermion/implementation/StaggeredKernelsAsm.h
#	Grid/qcd/smearing/StoutSmearing.h
#	Hadrons/Modules.hpp
#	Hadrons/Utilities/Contractor.cc
#	Hadrons/modules.inc
#	tests/forces/Test_dwf_force_eofa.cc
#	tests/forces/Test_dwf_gpforce_eofa.cc
Michael Marshall
2019-09-13 13:30:00 +01:00
796 changed files with 41536 additions and 52391 deletions
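Most of the hunks below follow a handful of mechanical API renames from the GPU port rather than algorithmic changes. As a hedged, illustrative sketch (assuming the post-merge Grid API seen in the hunks; the function and variable names here are invented, not taken from the diff):

#include <Grid/Grid.h>
using namespace Grid;

// Toy routine written against the post-merge spellings, with the pre-merge
// spellings noted in comments. Purely illustrative.
void apiRenameSketch(GridCartesian *grid)
{
    LatticeComplex c(grid);
    c = Zero();                               // was: c = zero;
    GridBase *g = c.Grid();                   // was: c._grid
    Coordinate dims = g->FullDimensions();    // was: std::vector<int> dims = g->_fdimensions;
    c.Checkerboard() = Odd;                   // was: c.checkerboard = Odd;
    thread_for(i, g->oSites(), {              // was: parallel_for (int i = 0; i < ...; ++i)
        // per-site work would go here
    });
    thread_critical                           // was: parallel_critical
    {
        // serialised section
    }
}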

View File

@ -167,10 +167,12 @@ public:
template <typename C, typename MatLeft, typename MatRight>
static inline void accTrMul(C &acc, const MatLeft &a, const MatRight &b)
{
if ((MatLeft::Options == Eigen::RowMajor) and
(MatRight::Options == Eigen::ColMajor))
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
if ((MatLeft::Options == RowMajor) and
(MatRight::Options == ColMajor))
{
parallel_for (unsigned int r = 0; r < a.rows(); ++r)
thread_for(r,a.rows(),
{
C tmp;
#ifdef USE_MKL
@ -178,15 +180,15 @@ public:
#else
tmp = a.row(r).conjugate().dot(b.col(r));
#endif
parallel_critical
thread_critical
{
acc += tmp;
}
}
});
}
else
{
parallel_for (unsigned int c = 0; c < a.cols(); ++c)
{
thread_for(c,a.cols(),
{
C tmp;
#ifdef USE_MKL
@ -194,11 +196,11 @@ public:
#else
tmp = a.col(c).conjugate().dot(b.row(c));
#endif
parallel_critical
thread_critical
{
acc += tmp;
}
}
});
}
}
@ -218,18 +220,20 @@ public:
const Mat<ComplexD, Opts...> &b)
{
static const ComplexD one(1., 0.), zero(0., 0.);
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
if ((res.rows() != a.rows()) or (res.cols() != b.cols()))
{
res.resize(a.rows(), b.cols());
}
if (Mat<ComplexD, Opts...>::Options == Eigen::RowMajor)
if (Mat<ComplexD, Opts...>::Options == RowMajor)
{
cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.cols(), b.data(), b.cols(), &zero,
res.data(), res.cols());
}
else if (Mat<ComplexD, Opts...>::Options == Eigen::ColMajor)
else if (Mat<ComplexD, Opts...>::Options == ColMajor)
{
cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.rows(), b.data(), b.rows(), &zero,
@ -243,18 +247,20 @@ public:
const Mat<ComplexF, Opts...> &b)
{
static const ComplexF one(1., 0.), zero(0., 0.);
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
if ((res.rows() != a.rows()) or (res.cols() != b.cols()))
{
res.resize(a.rows(), b.cols());
}
if (Mat<ComplexF, Opts...>::Options == Eigen::RowMajor)
if (Mat<ComplexF, Opts...>::Options == RowMajor)
{
cblas_cgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.cols(), b.data(), b.cols(), &zero,
res.data(), res.cols());
}
else if (Mat<ComplexF, Opts...>::Options == Eigen::ColMajor)
else if (Mat<ComplexF, Opts...>::Options == ColMajor)
{
cblas_cgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.rows(), b.data(), b.rows(), &zero,
@ -281,22 +287,25 @@ private:
unsigned int &bInc, const unsigned int aRow,
const MatLeft &a, const MatRight &b)
{
if (MatLeft::Options == Eigen::RowMajor)
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
if (MatLeft::Options == RowMajor)
{
aPt = a.data() + aRow*a.cols();
aInc = 1;
}
else if (MatLeft::Options == Eigen::ColMajor)
else if (MatLeft::Options == ColMajor)
{
aPt = a.data() + aRow;
aInc = a.rows();
}
if (MatRight::Options == Eigen::RowMajor)
if (MatRight::Options == RowMajor)
{
bPt = b.data() + aRow;
bInc = b.cols();
}
else if (MatRight::Options == Eigen::ColMajor)
else if (MatRight::Options == ColMajor)
{
bPt = b.data() + aRow*b.rows();
bInc = 1;
@ -309,22 +318,24 @@ private:
unsigned int &bInc, const unsigned int aCol,
const MatLeft &a, const MatRight &b)
{
if (MatLeft::Options == Eigen::RowMajor)
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
if (MatLeft::Options == RowMajor)
{
aPt = a.data() + aCol;
aInc = a.cols();
}
else if (MatLeft::Options == Eigen::ColMajor)
else if (MatLeft::Options == ColMajor)
{
aPt = a.data() + aCol*a.rows();
aInc = 1;
}
if (MatRight::Options == Eigen::RowMajor)
if (MatRight::Options == RowMajor)
{
bPt = b.data() + aCol*b.cols();
bInc = 1;
}
else if (MatRight::Options == Eigen::ColMajor)
else if (MatRight::Options == ColMajor)
{
bPt = b.data() + aCol;
bInc = b.rows();
@ -466,7 +477,7 @@ void A2AMatrixIo<T>::saveBlock(const T *data,
block = {1, 1, 1};
H5NS::DataSpace memspace(count.size(), count.data()), dataspace;
H5NS::DataSet dataset;
size_t shift;
// size_t shift;
push(reader, dataname_);
auto &group = reader.getGroup();
@ -646,14 +657,15 @@ void A2AMatrixBlockComputation<T, Field, MetadataType, TIo>
bytes += kernel.bytes(N_iii, N_jjj);
START_TIMER("cache copy");
parallel_for_nest5(int e =0;e<next_;e++)
for(int s =0;s< nstr_;s++)
for(int t =0;t< nt_;t++)
for(int iii=0;iii< N_iii;iii++)
for(int jjj=0;jjj< N_jjj;jjj++)
{
thread_for_collapse( 5,e,next_,{
for(int s =0;s< nstr_;s++)
for(int t =0;t< nt_;t++)
for(int iii=0;iii< N_iii;iii++)
for(int jjj=0;jjj< N_jjj;jjj++)
{
mBlock(e,s,t,ii+iii,jj+jjj) = mCacheBlock(e,s,t,iii,jjj);
}
}
});
STOP_TIMER("cache copy");
}
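The parallel_for / parallel_critical pairs in this file are replaced by Grid's thread_for / thread_critical macros (and thread_for_collapse for the nested cache copy). Below is a stand-alone sketch of the same accumulate-into-a-shared-variable pattern, under the assumption that thread_for(i, n, { ... }) and thread_critical keep their usual OpenMP-backed meaning, and using toy data rather than the Eigen matrices above:

#include <vector>
#include <Grid/Grid.h>
using namespace Grid;

// Sum all elements of a ragged array: each thread_for iteration builds a
// private partial sum, then folds it into the shared accumulator inside
// thread_critical, mirroring the accTrMul structure above.
double sumAll(const std::vector<std::vector<double>> &a)
{
    double acc = 0.;
    thread_for(r, a.size(), {
        double tmp = 0.;
        for (double x : a[r]) tmp += x;   // thread-private partial result
        thread_critical
        {
            acc += tmp;                   // serialised accumulation
        }
    });
    return acc;
}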

View File

@ -128,7 +128,7 @@ template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeV(FermionField &vout, const FermionField &evec, const Real &eval)
{
src_o_ = evec;
src_o_.checkerboard = Odd;
src_o_.Checkerboard() = Odd;
pickCheckerboard(Even, sol_e_, vout);
pickCheckerboard(Odd, sol_o_, vout);
@ -136,25 +136,25 @@ void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeV(FermionField &vout, const Fermi
// v_ie = -(1/eval_i) * MeeInv Meo MooInv evec_i
/////////////////////////////////////////////////////
action_.MooeeInv(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
assert(tmp_.Checkerboard() == Odd);
action_.Meooe(tmp_, sol_e_);
assert(sol_e_.checkerboard == Even);
assert(sol_e_.Checkerboard() == Even);
action_.MooeeInv(sol_e_, tmp_);
assert(tmp_.checkerboard == Even);
assert(tmp_.Checkerboard() == Even);
sol_e_ = (-1.0 / eval) * tmp_;
assert(sol_e_.checkerboard == Even);
assert(sol_e_.Checkerboard() == Even);
/////////////////////////////////////////////////////
// v_io = (1/eval_i) * MooInv evec_i
/////////////////////////////////////////////////////
action_.MooeeInv(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
assert(tmp_.Checkerboard() == Odd);
sol_o_ = (1.0 / eval) * tmp_;
assert(sol_o_.checkerboard == Odd);
assert(sol_o_.Checkerboard() == Odd);
setCheckerboard(vout, sol_e_);
assert(sol_e_.checkerboard == Even);
assert(sol_e_.Checkerboard() == Even);
setCheckerboard(vout, sol_o_);
assert(sol_o_.checkerboard == Odd);
assert(sol_o_.Checkerboard() == Odd);
}
template <typename FImpl>
@ -168,7 +168,7 @@ template <typename FImpl>
void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeW(FermionField &wout, const FermionField &evec, const Real &eval)
{
src_o_ = evec;
src_o_.checkerboard = Odd;
src_o_.Checkerboard() = Odd;
pickCheckerboard(Even, sol_e_, wout);
pickCheckerboard(Odd, sol_o_, wout);
@ -176,22 +176,22 @@ void A2AVectorsSchurDiagTwo<FImpl>::makeLowModeW(FermionField &wout, const Fermi
// w_ie = - MeeInvDag MoeDag Doo evec_i
/////////////////////////////////////////////////////
op_.Mpc(src_o_, tmp_);
assert(tmp_.checkerboard == Odd);
assert(tmp_.Checkerboard() == Odd);
action_.MeooeDag(tmp_, sol_e_);
assert(sol_e_.checkerboard == Even);
assert(sol_e_.Checkerboard() == Even);
action_.MooeeInvDag(sol_e_, tmp_);
assert(tmp_.checkerboard == Even);
assert(tmp_.Checkerboard() == Even);
sol_e_ = (-1.0) * tmp_;
/////////////////////////////////////////////////////
// w_io = Doo evec_i
/////////////////////////////////////////////////////
op_.Mpc(src_o_, sol_o_);
assert(sol_o_.checkerboard == Odd);
assert(sol_o_.Checkerboard() == Odd);
setCheckerboard(wout, sol_e_);
assert(sol_e_.checkerboard == Even);
assert(sol_e_.Checkerboard() == Even);
setCheckerboard(wout, sol_o_);
assert(sol_o_.checkerboard == Odd);
assert(sol_o_.Checkerboard() == Odd);
}
template <typename FImpl>
@ -217,7 +217,7 @@ void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeV5D(FermionField &vout_4d,
FermionField &vout_5d,
const FermionField &noise)
{
if (noise._grid->Dimensions() == fGrid_->Dimensions() - 1)
if (noise.Grid()->Dimensions() == fGrid_->Dimensions() - 1)
{
action_.ImportPhysicalFermionSource(noise, tmp5_);
}
@ -241,7 +241,7 @@ void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeW5D(FermionField &wout_4d,
FermionField &wout_5d,
const FermionField &noise)
{
if (noise._grid->Dimensions() == fGrid_->Dimensions() - 1)
if (noise.Grid()->Dimensions() == fGrid_->Dimensions() - 1)
{
action_.ImportUnphysicalFermion(noise, wout_5d);
wout_4d = noise;
@ -261,7 +261,7 @@ void A2AVectorsIo::write(const std::string fileStem, std::vector<Field> &vec,
const bool multiFile, const int trajectory)
{
Record record;
GridBase *grid = vec[0]._grid;
GridBase *grid = vec[0].Grid();
ScidacWriter binWriter(grid->IsBoss());
std::string filename = vecFilename(fileStem, trajectory, multiFile);
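Restating the even/odd low-mode construction that these hunks port (taken directly from the comments in makeLowModeV / makeLowModeW above, with phi_i the odd-checkerboard eigenvector and lambda_i its eigenvalue):

  v_e^{(i)} = -(1/\lambda_i)\, M_{ee}^{-1} M_{eo} M_{oo}^{-1} \phi_i ,
  \qquad v_o^{(i)} = (1/\lambda_i)\, M_{oo}^{-1} \phi_i ,

  w_e^{(i)} = -\,(M_{ee}^{\dagger})^{-1} M_{oe}^{\dagger} D_{oo} \phi_i ,
  \qquad w_o^{(i)} = D_{oo} \phi_i ,

with D_oo the preconditioned odd-odd operator applied through op_.Mpc; the asserts simply check that each intermediate lands on the expected checkerboard.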

View File

@ -31,7 +31,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Modules.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
#define BIG_SEP "================"

View File

@ -269,7 +269,7 @@ void TScalarVP::execute(void)
if (!par().output.empty())
{
outputData.projection.resize(par().outputMom.size());
outputData.lattice_size = env().getGrid()->_fdimensions;
outputData.lattice_size = env().getGrid()->FullDimensions().toVector();
outputData.mass = static_cast<TChargedProp *>(vm().getModule(par().scalarProp))->par().mass;
outputData.charge = q;
for (unsigned int i_p = 0; i_p < par().outputMom.size(); ++i_p)
@ -476,7 +476,7 @@ void TScalarVP::makeCaches(void)
{
LOG(Message) << "Caching phases for momentum projections..."
<< std::endl;
std::vector<int> &l = env().getGrid()->_fdimensions;
auto l = env().getGrid()->FullDimensions();
Complex ci(0.0,1.0);
// Calculate phase factors
@ -484,7 +484,7 @@ void TScalarVP::makeCaches(void)
{
std::vector<int> mom = strToVec<int>(par().outputMom[i_p]);
auto &momph_ip = envGet(ScalarField, momPhaseName_[i_p]);
momph_ip = zero;
momph_ip = Zero();
for (unsigned int j = 0; j < env().getNd()-1; ++j)
{
Real twoPiL = M_PI*2./l[j];
@ -541,7 +541,7 @@ void TScalarVP::momD1(ScalarField &s, FFT &fft)
envGetTmp(ScalarField, result);
envGetTmp(ScalarField, Amu);
result = zero;
result = Zero();
for (unsigned int mu = 0; mu < env().getNd(); ++mu)
{
Amu = peekLorentz(A, mu);

View File

@ -115,10 +115,10 @@ void TVPCounterTerms::execute(void)
// Phases and hat{p}^2
auto &phatsq = envGet(ScalarField, phatsqName_);
std::vector<int> &l = env().getGrid()->_fdimensions;
Coordinate l = env().getGrid()->FullDimensions();
LOG(Message) << "Calculating shift phases..." << std::endl;
phatsq = zero;
phatsq = Zero();
for (unsigned int mu = 0; mu < env().getNd(); ++mu)
{
Real twoPiL = M_PI*2./l[mu];
@ -156,7 +156,7 @@ void TVPCounterTerms::execute(void)
if (!par().output.empty())
{
outputData.projection.resize(par().outputMom.size());
outputData.lattice_size = env().getGrid()->_fdimensions;
outputData.lattice_size = env().getGrid()->FullDimensions().toVector();
outputData.mass = par().mass;
for (unsigned int i_p = 0; i_p < par().outputMom.size(); ++i_p)
{
@ -172,7 +172,7 @@ void TVPCounterTerms::execute(void)
}
// Calculate phase factors
auto &momph_ip = envGet(ScalarField, momPhaseName_[i_p]);
momph_ip = zero;
momph_ip = Zero();
for (unsigned int j = 0; j < env().getNd()-1; ++j)
{
Real twoPiL = M_PI*2./l[j];

View File

@ -146,7 +146,7 @@ void TWardIdentity<FImpl>::execute(void)
// Compute D_mu V_mu, D here is backward derivative.
envGetTmp(PropagatorField, tmp);
envGetTmp(PropagatorField, vector_WI);
vector_WI = zero;
vector_WI = Zero();
for (unsigned int mu = 0; mu < Nd; ++mu)
{
act.ContractConservedCurrent(q, q, tmp, Current::Vector, mu);
@ -167,7 +167,7 @@ void TWardIdentity<FImpl>::execute(void)
std::vector<TComplex> axial_buf;
// Compute <P|D_mu A_mu>, D is backwards derivative.
axial_defect = zero;
axial_defect = Zero();
for (unsigned int mu = 0; mu < Nd; ++mu)
{
act.ContractConservedCurrent(q, q, tmp, Current::Axial, mu);
@ -175,8 +175,8 @@ void TWardIdentity<FImpl>::execute(void)
axial_defect += trace(g5*tmp);
}
// Get <P|J5q> for 5D (zero for 4D) and <P|P>.
PJ5q = zero;
// Get <P|J5q> for 5D (Zero(); for 4D) and <P|P>.
PJ5q = Zero();
if (Ls_ > 1)
{
// <P|P>
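For orientation (a hedged reading of the module, not stated in the diff): the quantities built here enter the standard lattice Ward identities, with \partial_\mu^- the backward derivative mentioned in the comments,

  \partial_\mu^- V_\mu(x) = 0 ,
  \qquad
  \langle P\, \partial_\mu^- A_\mu \rangle = 2 m \langle P P \rangle + 2 \langle P J_{5q} \rangle ,

where the midpoint term J_{5q} exists only for the 5-D (domain-wall) case, which is why PJ5q is initialised to Zero() and only filled when Ls_ > 1.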

View File

@ -56,7 +56,7 @@ for (unsigned int t = 0; t < buf.size(); ++t)\
//// Contraction of mu index: use 'mu' variable in exp.
#define SUM_MU(buf,exp)\
buf = zero;\
buf = Zero(); \
for (unsigned int mu = 0; mu < ndim; ++mu)\
{\
buf += exp;\

View File

@ -165,7 +165,7 @@ TimeDilutedSpinColorDiagonalNoise<FImpl>::
TimeDilutedSpinColorDiagonalNoise(GridCartesian *g)
: DilutedNoise<FImpl>(g)
{
nt_ = this->getGrid()->GlobalDimensions().back();
nt_ = this->getGrid()->GlobalDimensions().size();
this->resize(nt_*Ns*FImpl::Dimension);
}
@ -192,11 +192,11 @@ void TimeDilutedSpinColorDiagonalNoise<FImpl>::generateNoise(GridParallelRNG &rn
etaCut = where((tLat == t), eta, 0.*eta);
for (unsigned int s = 0; s < Ns; ++s)
{
etas = zero;
pokeSpin(etas, etaCut, s);
etas = Zero();
pokeSpin(etas, etaCut, s);
for (unsigned int c = 0; c < nc; ++c)
{
noise[i] = zero;
noise[i] = Zero();
pokeColour(noise[i], etas, c);
i++;
}
@ -233,11 +233,11 @@ void FullVolumeSpinColorDiagonalNoise<FImpl>::generateNoise(GridParallelRNG &rng
{
for (unsigned int s = 0; s < Ns; ++s)
{
etas = zero;
etas = Zero();
pokeSpin(etas, eta, s);
for (unsigned int c = 0; c < nc; ++c)
{
noise[i] = zero;
noise[i] = Zero();
pokeColour(noise[i], etas, c);
i++;
}

View File

@ -84,7 +84,7 @@ inline void SliceShare( GridBase * gridLowDim, GridBase * gridHighDim, void * Bu
const int iNumDims{(const int)gridHighDim->_gdimensions.size()};
assert(iNumDims == gridLowDim->_gdimensions.size());
int dimSpreadOut = -1;
std::vector<int> coor(iNumDims);
Coordinate coor(iNumDims);
for( int i = 0 ; i < iNumDims ; i++ ) {
coor[i] = gridHighDim->_processor_coor[i];
if( gridLowDim->_gdimensions[i] != gridHighDim->_gdimensions[i] ) {
@ -147,7 +147,7 @@ protected: // I don't really mind if _gf is messed with ... so make this public?
std::vector<Lattice<iColourMatrix<vCoeff_t> > > U;
public:
// Construct this operator given a gauge field and the number of dimensions it should act on
LinOpPeardonNabla( GaugeField& gf, int dimSpatial = Grid::QCD::Tdir ) : /*_gf(gf),*/ nd{dimSpatial} {
LinOpPeardonNabla( GaugeField& gf, int dimSpatial = Tdir ) : /*_gf(gf),*/ nd{dimSpatial} {
assert(dimSpatial>=1);
for( int mu = 0 ; mu < nd ; mu++ )
U.push_back(PeekIndex<LorentzIndex>(gf,mu));
@ -155,12 +155,12 @@ public:
// Apply this operator to "in", return result in "out"
void operator()(const Field& in, Field& out) {
assert( nd <= in._grid->Nd() );
assert( nd <= in.Grid()->Nd() );
conformable( in, out );
out = ( ( Real ) ( 2 * nd ) ) * in;
Field _tmp(in._grid);
Field _tmp(in.Grid());
typedef typename GaugeField::vector_type vCoeff_t;
//Lattice<iColourMatrix<vCoeff_t> > U(in._grid);
//Lattice<iColourMatrix<vCoeff_t> > U(in.Grid());
for( int mu = 0 ; mu < nd ; mu++ ) {
//U = PeekIndex<LorentzIndex>(_gf,mu);
out -= U[mu] * Cshift( in, mu, 1);
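The operator applied here is the gauge-covariant 3-D Laplacian used to define the distillation (LapH) eigenvectors. Writing it out as a hedged reconstruction (the forward-hop term is visible above; the backward-hop term falls outside the quoted hunk, so its exact spelling is assumed):

  -\nabla^2 \phi(x) = 2 N_d\, \phi(x)
      - \sum_{\mu=0}^{N_d-1} \left[ U_\mu(x)\, \phi(x+\hat\mu)
      + U_\mu^{\dagger}(x-\hat\mu)\, \phi(x-\hat\mu) \right] ,

with N_d = nd the number of spatial directions the operator acts on.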
@ -238,7 +238,7 @@ struct DistilParameters: Serializable {
#define DISTIL_PARAMETERS_DEFINE( inSetup ) \
const int Nt{env().getDim(Tdir)}; \
const int nvec{par().nvec}; \
const int Ns{Grid::QCD::Ns}; \
const int Ns{Ns}; \
const int nnoise{par().Distil.nnoise}; \
const int tsrc{par().Distil.tsrc}; \
const int TI{par().Distil.getTI(env(), inSetup)}; \
@ -646,11 +646,11 @@ void NamedTensor<Scalar_, NumIndices_, Endian_Scalar_Size>::read(const char * fi
inline GridCartesian * MakeLowerDimGrid( GridCartesian * gridHD )
{
int nd{static_cast<int>(gridHD->_ndimension)};
std::vector<int> latt_size = gridHD->_gdimensions;
Coordinate latt_size = gridHD->_gdimensions;
latt_size[nd-1] = 1;
std::vector<int> simd_layout = GridDefaultSimd(nd-1, vComplex::Nsimd());
Coordinate simd_layout = GridDefaultSimd(nd-1, vComplex::Nsimd());
simd_layout.push_back( 1 );
std::vector<int> mpi_layout = gridHD->_processors;
Coordinate mpi_layout = gridHD->_processors;
mpi_layout[nd-1] = 1;
GridCartesian * gridLD = new GridCartesian(latt_size,simd_layout,mpi_layout,*gridHD);
return gridLD;
@ -664,8 +664,8 @@ inline GridCartesian * MakeLowerDimGrid( GridCartesian * gridHD )
inline void RotateEigen(std::vector<LatticeColourVector> & evec)
{
ColourVector cv0;
auto grid = evec[0]._grid;
std::vector<int> siteFirst(grid->Nd(),0);
auto grid = evec[0].Grid();
Coordinate siteFirst(grid->Nd(),0);
peekSite(cv0, evec[0], siteFirst);
auto & cplx0 = cv0()()(0);
if( std::imag(cplx0) == 0 )

View File

@ -180,7 +180,7 @@ namespace EigenPackIo
const unsigned int size, bool multiFile,
GridBase *gridIo = nullptr)
{
GridBase *grid = evec[0]._grid;
GridBase *grid = evec[0].Grid();
std::unique_ptr<TIo> ioBuf{nullptr};
std::unique_ptr<T> testBuf{nullptr};
ScidacWriter binWriter(grid->IsBoss());

View File

@ -31,7 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/ModuleFactory.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
#define ERROR_NO_ADDRESS(address)\
@ -43,7 +43,7 @@ HADRONS_ERROR_REF(ObjectDefinition, "no object with address " + std::to_string(a
// constructor /////////////////////////////////////////////////////////////////
Environment::Environment(void)
{
dim_ = GridDefaultLatt();
dim_ = GridDefaultLatt().toVector();
nd_ = dim_.size();
vol_ = 1.;
for (auto d: dim_)

View File

@ -133,7 +133,6 @@ void GeneticScheduler<V, T>::nextGeneration(void)
//LOG(Debug) << "Starting population:\n" << *this << std::endl;
// random mutations
//PARALLEL_FOR_LOOP
for (unsigned int i = 0; i < par_.popSize; ++i)
{
doMutation();
@ -141,7 +140,6 @@ void GeneticScheduler<V, T>::nextGeneration(void)
//LOG(Debug) << "After mutations:\n" << *this << std::endl;
// mating
//PARALLEL_FOR_LOOP
for (unsigned int i = 0; i < par_.popSize/2; ++i)
{
doCrossover();
@ -177,7 +175,7 @@ void GeneticScheduler<V, T>::doCrossover(void)
Gene c1, c2;
crossover(c1, c2, p1, p2);
PARALLEL_CRITICAL
thread_critical
{
population_.insert(std::make_pair(func_(c1), c1));
population_.insert(std::make_pair(func_(c2), c2));
@ -197,7 +195,7 @@ void GeneticScheduler<V, T>::doMutation(void)
std::advance(it, pdis(gen_));
mutation(m, it->second);
PARALLEL_CRITICAL
thread_critical
{
population_.insert(std::make_pair(func_(m), m));
}

View File

@ -29,7 +29,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Global.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
HadronsLogger Hadrons::HadronsLogError(1,"Error");
@ -77,7 +76,7 @@ size_t Hadrons::typeHash(const std::type_info *info)
return info->hash_code();
}
constexpr unsigned int maxNameSize = 1024u;
//constexpr unsigned int maxNameSize = 1024u;
std::string Hadrons::typeName(const std::type_info *info)
{

View File

@ -52,7 +52,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define BEGIN_HADRONS_NAMESPACE \
namespace Grid {\
using namespace QCD;\
namespace Hadrons {\
using Grid::operator<<;\
using Grid::operator>>;
@ -109,10 +108,11 @@ typedef std::vector<typename ComplexField##suffix::vector_object::scalar_object>
#define FERM_TYPE_ALIASES(FImpl, suffix)\
BASIC_TYPE_ALIASES(FImpl, suffix);\
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;
typedef FermionOperator<FImpl> FMat##suffix;\
typedef typename FImpl::FermionField FermionField##suffix;\
typedef typename FImpl::GaugeField GaugeField##suffix;\
typedef typename FImpl::DoubledGaugeField DoubledGaugeField##suffix;\
typedef Lattice<iSpinMatrix<typename FImpl::Simd>> SpinMatrixField##suffix;
#define GAUGE_TYPE_ALIASES(GImpl, suffix)\
typedef typename GImpl::GaugeField GaugeField##suffix;

View File

@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Module.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
/******************************************************************************

View File

@ -1,81 +1,81 @@
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Hadrons/Modules/MSource/Gauss.hpp>
#include <Hadrons/Modules/MSource/Momentum.hpp>
#include <Hadrons/Modules/MSource/SeqAslash.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/Convolution.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MScalarSUN/TrPhi.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
#include <Hadrons/Modules/MScalarSUN/Grad.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
#include <Hadrons/Modules/MScalarSUN/StochFreeField.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/TransProj.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MScalar/FreeProp.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MIO/LoadPerambulator.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MSink/Smear.hpp>
#include <Hadrons/Modules/MSink/Point.hpp>
#include <Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Hadrons/Modules/MFermion/EMLepton.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing3D.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MDistil/Noises.hpp>
#include <Hadrons/Modules/MDistil/Perambulator.hpp>
#include <Hadrons/Modules/MDistil/g5_multiply.hpp>
#include <Hadrons/Modules/MDistil/PerambFromSolve.hpp>
#include <Hadrons/Modules/MDistil/Baryon2pt.hpp>
#include <Hadrons/Modules/MDistil/LapEvec.hpp>
#include <Hadrons/Modules/MDistil/BContraction.hpp>
#include <Hadrons/Modules/MDistil/DistilVectors.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MSource/SeqAslash.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/SeqGamma.hpp>
#include <Hadrons/Modules/MSource/Point.hpp>
#include <Hadrons/Modules/MSource/Momentum.hpp>
#include <Hadrons/Modules/MContraction/WeakMesonDecayKl2.hpp>
#include <Hadrons/Modules/MContraction/Nucleon.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/WeakEye3pt.hpp>
#include <Hadrons/Modules/MContraction/WeakNonEye3pt.hpp>
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MContraction/A2ALoop.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/SelfContract.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Hadrons/Modules/MAction/ScaledDWF.hpp>
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MAction/WilsonClover.hpp>
#include <Hadrons/Modules/MAction/ZMobiusDWF.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MGauge/UnitEm.hpp>
#include <Hadrons/Modules/MGauge/Electrify.hpp>
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MIO/LoadPerambulator.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MContraction/WeakEye3pt.hpp>
#include <Hadrons/Modules/MContraction/WeakMesonDecayKl2.hpp>
#include <Hadrons/Modules/MContraction/Gamma3pt.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/A2ALoop.hpp>
#include <Hadrons/Modules/MContraction/WeakNonEye3pt.hpp>
#include <Hadrons/Modules/MContraction/DiscLoop.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MContraction/Nucleon.hpp>
#include <Hadrons/Modules/MContraction/SelfContract.hpp>
#include <Hadrons/Modules/MDistil/BContraction.hpp>
#include <Hadrons/Modules/MDistil/Baryon2pt.hpp>
#include <Hadrons/Modules/MDistil/DistilVectors.hpp>
#include <Hadrons/Modules/MDistil/LapEvec.hpp>
#include <Hadrons/Modules/MDistil/Noises.hpp>
#include <Hadrons/Modules/MDistil/PerambFromSolve.hpp>
#include <Hadrons/Modules/MDistil/Perambulator.hpp>
#include <Hadrons/Modules/MDistil/g5_multiply.hpp>
#include <Hadrons/Modules/MNPR/FourQuark.hpp>
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
#include <Hadrons/Modules/MNPR/Amputate.hpp>
#include <Hadrons/Modules/MSolver/A2AAslashVectors.hpp>
#include <Hadrons/Modules/MSolver/RBPrecCG.hpp>
#include <Hadrons/Modules/MSolver/Guesser.hpp>
#include <Hadrons/Modules/MSolver/LocalCoherenceLanczos.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
#include <Hadrons/Modules/MFermion/FreeProp.hpp>
#include <Hadrons/Modules/MFermion/GaugeProp.hpp>
#include <Hadrons/Modules/MFermion/EMLepton.hpp>
#include <Hadrons/Modules/MSink/Smear.hpp>
#include <Hadrons/Modules/MSink/Point.hpp>

View File

@ -122,10 +122,8 @@ void TWilson<FImpl>::setup(void)
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases << std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L << std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");

View File

@ -49,7 +49,7 @@ public:
double , M5,
double , b,
double , c,
std::vector<std::complex<double>>, omega,
std::vector<std::complex<double> >, omega,
std::string , boundary,
std::string , twist);
};
@ -126,6 +126,8 @@ void TZMobiusDWF<FImpl>::setup(void)
auto &grb5 = *envGetRbGrid(FermionField, par().Ls);
auto omega = par().omega;
typename ZMobiusFermion<FImpl>::ImplParams implParams;
if (!par().boundary.empty())
{
implParams.boundary_phases = strToVec<Complex>(par().boundary);
@ -134,10 +136,8 @@ void TZMobiusDWF<FImpl>::setup(void)
{
implParams.twist_n_2pi_L = strToVec<Real>(par().twist);
}
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases
<< std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L
<< std::endl;
LOG(Message) << "Fermion boundary conditions: " << implParams.boundary_phases << std::endl;
LOG(Message) << "Twists: " << implParams.twist_n_2pi_L << std::endl;
if (implParams.boundary_phases.size() != env().getNd())
{
HADRONS_ERROR(Size, "Wrong number of boundary phase");
@ -146,9 +146,17 @@ void TZMobiusDWF<FImpl>::setup(void)
{
HADRONS_ERROR(Size, "Wrong number of twist");
}
envCreateDerived(FMat, ZMobiusFermion<FImpl>, getName(), par().Ls, U, g5,
grb5, g4, grb4, par().mass, par().M5, omega,
par().b, par().c, implParams);
assert(par().Ls==omega.size());
int Ls=par().Ls;
std::vector<ComplexD> _omega(Ls);
for(int i=0;i<Ls;i++){
_omega[i] = omega[i];
}
envCreateDerived(FMat, ZMobiusFermion<FImpl>, getName(), par().Ls,
U, g5, grb5, g4, grb4,
par().mass, par().M5,
_omega, par().b, par().c, implParams);
}
// execution ///////////////////////////////////////////////////////////////////

View File

@ -174,6 +174,7 @@ void TA2AAslashField<FImpl, PhotonImpl>::setup(void)
template <typename FImpl, typename PhotonImpl>
void TA2AAslashField<FImpl, PhotonImpl>::execute(void)
{
#ifndef GRID_NVCC
auto &left = envGet(std::vector<FermionField>, par().left);
auto &right = envGet(std::vector<FermionField>, par().right);
@ -237,6 +238,7 @@ void TA2AAslashField<FImpl, PhotonImpl>::execute(void)
envGetTmp(Computation, computation);
computation.execute(left, right, kernel, ionameFn, filenameFn, metadataFn);
#endif
}
END_MODULE_NAMESPACE

View File

@ -109,10 +109,10 @@ void TA2ALoop<FImpl>::execute(void)
auto &left = envGet(std::vector<FermionField>, par().left);
auto &right = envGet(std::vector<FermionField>, par().right);
loop = zero;
loop = Zero();
for (unsigned int i = 0; i < left.size(); ++i)
{
loop += outerProduct(adj(left[i]), right[i]);
loop += outerProduct(left[i], right[i]);
}
}

View File

@ -258,7 +258,7 @@ void TA2AMesonField<FImpl>::execute(void)
std::vector<Real> p;
envGetTmp(ComplexField, coor);
ph[j] = zero;
ph[j] = Zero();
for(unsigned int mu = 0; mu < mom_[j].size(); mu++)
{
LatticeCoordinate(coor, mu);

View File

@ -1,35 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Baryon.cc
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/Baryon.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TBaryon<FIMPL,FIMPL,FIMPL>;

View File

@ -1,257 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/Baryon.hpp
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Felix Erben <felix.erben@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MContraction_Baryon_hpp_
#define Hadrons_MContraction_Baryon_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/qcd/utils/BaryonUtils.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Baryon *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
class BaryonPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(BaryonPar,
std::string, q1,
std::string, q2,
std::string, q3,
std::string, gamma,
std::string, output);
};
template <typename FImpl1, typename FImpl2, typename FImpl3>
class TBaryon: public Module<BaryonPar>
{
public:
FERM_TYPE_ALIASES(FImpl1, 1);
FERM_TYPE_ALIASES(FImpl2, 2);
FERM_TYPE_ALIASES(FImpl3, 3);
class Result: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
std::vector<Complex>, corr);
};
public:
// constructor
TBaryon(const std::string name);
// destructor
virtual ~TBaryon(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
protected:
// setup
virtual void setup(void);
// execution
virtual void execute(void);
// Which gamma algebra was specified
Gamma::Algebra al;
};
MODULE_REGISTER_TMP(Baryon, ARG(TBaryon<FIMPL, FIMPL, FIMPL>), MContraction);
/******************************************************************************
* TBaryon implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2, typename FImpl3>
TBaryon<FImpl1, FImpl2, FImpl3>::TBaryon(const std::string name)
: Module<BaryonPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2, typename FImpl3>
std::vector<std::string> TBaryon<FImpl1, FImpl2, FImpl3>::getInput(void)
{
std::vector<std::string> input = {par().q1, par().q2, par().q3};
return input;
}
template <typename FImpl1, typename FImpl2, typename FImpl3>
std::vector<std::string> TBaryon<FImpl1, FImpl2, FImpl3>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2, typename FImpl3>
void TBaryon<FImpl1, FImpl2, FImpl3>::setup(void)
{
envTmpLat(LatticeComplex, "c");
envTmpLat(LatticeComplex, "c1");
envTmpLat(LatticeComplex, "c2");
envTmpLat(LatticeComplex, "c3");
envTmpLat(LatticeComplex, "c4");
envTmpLat(LatticeComplex, "c5");
envTmpLat(LatticeComplex, "c6");
envTmpLat(LatticeComplex, "diquark");
// Translate the full string naming the desired gamma structure into the one we need to use
const std::string gamma{ par().gamma };
int iGamma = 0;
do
{
const char * pGammaName = Gamma::name[iGamma];
int iLen = 0;
while( pGammaName[iLen] && pGammaName[iLen] != ' ' )
iLen++;
if( !gamma.compare( 0, gamma.size(), pGammaName, iLen ) )
break;
}
while( ++iGamma < Gamma::nGamma );
if( iGamma >= Gamma::nGamma ) {
LOG(Message) << "Unrecognised gamma structure \"" << gamma << "\"" << std::endl;
assert( 0 && "Invalid gamma structure specified" );
}
switch( iGamma ) {
case Gamma::Algebra::GammaX:
std::cout << "using interpolator C gamma_X" << std::endl;
al = Gamma::Algebra::GammaZGamma5; //Still hardcoded CgX = i gamma_3 gamma_5
break;
case Gamma::Algebra::GammaY:
std::cout << "using interpolator C gamma_Y" << std::endl;
al = Gamma::Algebra::GammaT; //Still hardcoded CgX = - gamma_4
break;
case Gamma::Algebra::GammaZ:
std::cout << "using interpolator C gamma_Z" << std::endl;
al = Gamma::Algebra::GammaXGamma5; //Still hardcoded CgX = i gamma_1 gamma_5
break;
default:
{
LOG(Message) << "Unsupported gamma structure " << gamma << " = " << iGamma << std::endl;
assert( 0 && "Unsupported gamma structure" );
// or you could do something like
al = static_cast<Gamma::Algebra>( iGamma );
break;
}
}
LOG(Message) << "Gamma structure " << gamma << " = " << iGamma
<< " translated to " << Gamma::name[al] << std::endl;
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2, typename FImpl3>
void TBaryon<FImpl1, FImpl2, FImpl3>::execute(void)
{
LOG(Message) << "Computing baryon contractions '" << getName() << "' using"
<< " quarks '" << par().q1 << "', and a diquark formed of ('" << par().q2 << "', and '"
<< par().q3 << "')" << std::endl;
auto &q1 = envGet(PropagatorField1, par().q1);
auto &q2 = envGet(PropagatorField2, par().q2);
auto &q3 = envGet(PropagatorField3, par().q3);
envGetTmp(LatticeComplex, c);
envGetTmp(LatticeComplex, c1);
envGetTmp(LatticeComplex, c2);
envGetTmp(LatticeComplex, c3);
envGetTmp(LatticeComplex, c4);
envGetTmp(LatticeComplex, c5);
envGetTmp(LatticeComplex, c6);
envGetTmp(LatticeComplex, diquark);
Result result;
int nt = env().getDim(Tp);
result.corr.resize(nt);
const std::string gamma{ par().gamma };
std::vector<TComplex> buf;
Result result1;
Result result2;
Result result3;
Result result4;
Result result5;
Result result6;
result1.corr.resize(nt);
result2.corr.resize(nt);
result3.corr.resize(nt);
result4.corr.resize(nt);
result5.corr.resize(nt);
result6.corr.resize(nt);
std::vector<TComplex> buf1;
std::vector<TComplex> buf2;
std::vector<TComplex> buf3;
std::vector<TComplex> buf4;
std::vector<TComplex> buf5;
std::vector<TComplex> buf6;
const Gamma GammaA{ Gamma::Algebra::Identity };
const Gamma GammaB{ al };
//BaryonUtils<FIMPL>::ContractBaryons(q1,q2,q3,GammaA,GammaB,c);
BaryonUtils<FIMPL>::ContractBaryons_debug(q1,q2,q3,GammaA,GammaB,c1,c2,c3,c4,c5,c6,c);
sliceSum(c,buf,Tp);
sliceSum(c1,buf1,Tp);
sliceSum(c2,buf2,Tp);
sliceSum(c3,buf3,Tp);
sliceSum(c4,buf4,Tp);
sliceSum(c5,buf5,Tp);
sliceSum(c6,buf6,Tp);
for (unsigned int t = 0; t < buf.size(); ++t)
{
result.corr[t] = TensorRemove(buf[t]);
result1.corr[t] = TensorRemove(buf1[t]);
result2.corr[t] = TensorRemove(buf2[t]);
result3.corr[t] = TensorRemove(buf3[t]);
result4.corr[t] = TensorRemove(buf4[t]);
result5.corr[t] = TensorRemove(buf5[t]);
result6.corr[t] = TensorRemove(buf6[t]);
}
std::string ostr1{ par().output + "_1"};
std::string ostr2{ par().output + "_2"};
std::string ostr3{ par().output + "_3"};
std::string ostr4{ par().output + "_4"};
std::string ostr5{ par().output + "_5"};
std::string ostr6{ par().output + "_6"};
saveResult(par().output, "baryon", result);
saveResult(ostr1, "baryon1", result1);
saveResult(ostr2, "baryon2", result2);
saveResult(ostr3, "baryon3", result3);
saveResult(ostr4, "baryon4", result4);
saveResult(ostr5, "baryon5", result5);
saveResult(ostr6, "baryon6", result6);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_Baryon_hpp_

View File

@ -64,7 +64,7 @@ BEGIN_HADRONS_NAMESPACE
*/
/******************************************************************************
* TWeakMesonDecayKl2 *
* TWeakMesonDecayKl2 *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
@ -75,7 +75,7 @@ public:
std::string, q1,
std::string, q2,
std::string, lepton,
std::string, output);
std::string, output);
};
template <typename FImpl>
@ -83,14 +83,13 @@ class TWeakMesonDecayKl2: public Module<WeakMesonDecayKl2Par>
{
public:
FERM_TYPE_ALIASES(FImpl,);
class Metadata: Serializable
typedef typename SpinMatrixField::vector_object::scalar_object SpinMatrix;
class Result: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Metadata,
int, spinidx1,
int, spinidx2);
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
std::vector<SpinMatrix>, corr);
};
typedef Correlator<Metadata> Result;
public:
// constructor
TWeakMesonDecayKl2(const std::string name);
@ -138,10 +137,10 @@ std::vector<std::string> TWeakMesonDecayKl2<FImpl>::getOutput(void)
template <typename FImpl>
void TWeakMesonDecayKl2<FImpl>::setup(void)
{
envTmpLat(LatticeComplex, "c");
envTmpLat(ComplexField, "c");
envTmpLat(PropagatorField, "prop_buf");
envCreateLat(PropagatorField, getName());
envTmpLat(LatticeComplex, "buf");
envTmpLat(SpinMatrixField, "buf");
}
// execution ///////////////////////////////////////////////////////////////////
@ -150,57 +149,33 @@ void TWeakMesonDecayKl2<FImpl>::execute(void)
{
LOG(Message) << "Computing QED Kl2 contractions '" << getName() << "' using"
<< " quarks '" << par().q1 << "' and '" << par().q2 << "' and"
<< "lepton '" << par().lepton << "'" << std::endl;
<< "lepton '" << par().lepton << "'" << std::endl;
Gamma g5(Gamma::Algebra::Gamma5);
int nt = env().getDim(Tp);
std::vector<SpinMatrix> res_summed;
Result r;
auto &res = envGet(PropagatorField, getName()); res = zero;
Gamma g5(Gamma::Algebra::Gamma5);
int nt = env().getDim(Tp);
auto &q1 = envGet(PropagatorField, par().q1);
auto &q2 = envGet(PropagatorField, par().q2);
auto &res = envGet(PropagatorField, getName()); res = Zero();
auto &q1 = envGet(PropagatorField, par().q1);
auto &q2 = envGet(PropagatorField, par().q2);
auto &lepton = envGet(PropagatorField, par().lepton);
envGetTmp(LatticeComplex, buf);
std::vector<TComplex> res_summed;
envGetTmp(LatticeComplex, c);
envGetTmp(SpinMatrixField, buf);
envGetTmp(ComplexField, c);
envGetTmp(PropagatorField, prop_buf);
std::vector<Result> result;
Result r;
for (unsigned int mu = 0; mu < 4; ++mu)
{
c = zero;
//hadronic part: trace(q1*adj(q2)*g5*gL[mu])
c = trace(q1*adj(q2)*g5*GammaL(Gamma::gmu[mu]));
prop_buf = 1.;
//multiply lepton part
res += c * prop_buf * GammaL(Gamma::gmu[mu]) * lepton;
c = Zero();
//hadronic part: trace(q1*adj(q2)*g5*gL[mu])
c = trace(q1*adj(q2)*g5*GammaL(Gamma::gmu[mu]));
prop_buf = 1.;
//multiply lepton part
res += c * prop_buf * GammaL(Gamma::gmu[mu]) * lepton;
}
//loop over spinor index of lepton part
unsigned int i = 0;
for (unsigned int s1 = 0; s1 < Ns ; ++s1)
for (unsigned int s2 = 0; s2 < Ns ; ++s2)
{
buf = peekColour(peekSpin(res,s1,s2),0,0);
sliceSum(buf, res_summed, Tp);
r.corr.clear();
for (unsigned int t = 0; t < nt; ++t)
{
r.corr.push_back(TensorRemove(res_summed[t]));
}
r.info.spinidx1 = s1;
r.info.spinidx2 = s2;
result.push_back(r);
i+=1;
}
saveResult(par().output, "weakdecay", result);
buf = peekColour(res, 0, 0);
sliceSum(buf, r.corr, Tp);
saveResult(par().output, "weakdecay", r);
}
END_MODULE_NAMESPACE
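Spelled out, the contraction the rewritten execute() performs (per the in-code comments; S_q1 and S_q2 are the quark propagators, S_l the lepton propagator, and Gamma_mu^L the left-handed current structure implemented by GammaL, whose exact normalisation is not restated here) is

  R(x) = \sum_{\mu=0}^{3} \mathrm{Tr}\!\left[ S_{q_1}(x)\, S_{q_2}^{\dagger}(x)\, \gamma_5\, \Gamma_\mu^{L} \right]\, \Gamma_\mu^{L}\, S_\ell(x) ,

after which the colour (0,0) spin matrix of R is summed over timeslices and saved as the correlator, replacing the previous per-spin-index output.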

View File

@ -76,6 +76,7 @@ template <typename FImpl>
class TBContraction: public Module<BContractionPar>
{
public:
using W = WilsonImplR; // Debug so I can see type info for default FImpl
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
@ -174,15 +175,25 @@ void TBContraction<FImpl>::execute(void)
int Bindex;
int Nc=3; //Num colours
FermionField tmp1(grid3d);
FermionField tmp2(grid3d);
FermionField tmp3(grid3d);
FermionField ftmp1(grid3d);
FermionField ftmp2(grid3d);
FermionField ftmp3(grid3d);
LatticeView<typename FImpl::SiteSpinor> tmp1{ ftmp1 };
LatticeView<typename FImpl::SiteSpinor> tmp2{ ftmp2 };
LatticeView<typename FImpl::SiteSpinor> tmp3{ ftmp3 };
//std::complex<double> * tmp33 = reinterpret_cast<std::complex<double> *>(&(tmp3[0]()(0)(0)));
#ifdef THIS_IS_NAUGHTY_TODO_FIXME
// The reinterpret_cast gets rid of SIMD attributes
// Plus other badness - e.g. we perhaps shouldn't explicitly say SpinColourVector
// ... but rather use some correct FIMPL types
// REVIEW WITH PETER
#endif
SpinColourVector * tmp11 = reinterpret_cast<SpinColourVector *>(&(tmp1[0]()(0)(0)));
SpinColourVector * tmp22 = reinterpret_cast<SpinColourVector *>(&(tmp2[0]()(0)(0)));
SpinColourVector * tmp33 = reinterpret_cast<SpinColourVector *>(&(tmp3[0]()(0)(0)));
SpinVector tmp11s;
SpinVector tmp22s;
SpinVector tmp33s;
@ -225,10 +236,10 @@ void TBContraction<FImpl>::execute(void)
for (int imom=0 ; imom < Nmom ; imom++){
for (int t=0 ; t < Nt ; t++){
Bindex = i1 + N_1*(i2 + N_2*(i3 + N_3*(imom+Nmom*t)));
ExtractSliceLocal(tmp1,one[i1],0,t,3);
ExtractSliceLocal(tmp2,two[i2],0,t,3);
ExtractSliceLocal(tmp3,three[i3],0,t,3);
parallel_for (unsigned int sU = 0; sU < grid3d->oSites(); ++sU)
ExtractSliceLocal(ftmp1,one[i1],0,t,3);
ExtractSliceLocal(ftmp2,two[i2],0,t,3);
ExtractSliceLocal(ftmp3,three[i3],0,t,3);
accelerator_for(sU, grid3d->oSites(), grid3d->Nsimd(),
{
for (int ie=0 ; ie < 6 ; ie++){
// Why does peekColour not work????
@ -248,7 +259,7 @@ void TBContraction<FImpl>::execute(void)
}
}
}
}
} );
}
}
}

View File

@ -187,10 +187,10 @@ void TDistilVectors<FImpl>::setup(void)
envCreate(std::vector<FermionField>, SinkName, 1, nnoise*LI*SI*Nt_inv, envGetGrid(FermionField));
grid4d = env().getGrid();
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> simd_layout_3 = GridDefaultSimd(Nd-1, vComplex::Nsimd());
Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd, vComplex::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();
Coordinate simd_layout_3 = GridDefaultSimd(Nd-1, vComplex::Nsimd());
latt_size[Nd-1] = 1;
simd_layout_3.push_back( 1 );
mpi_layout[Nd-1] = 1;
@ -233,7 +233,7 @@ void TDistilVectors<FImpl>::execute(void)
const int Ntlocal{ grid4d->LocalDimensions()[3] };
const int Ntfirst{ grid4d->LocalStarts()[3] };
const int Ns{ Grid::QCD::Ns };
const int Ns{ Ns };
const int Nt{ env().getDim(Tdir) };
const int TI{ Hadrons::MDistil::DistilParameters::ParameterDefault( par().TI, Nt, false ) };
const int LI{ static_cast<int>( perambulator.tensor.dimension(2) ) };
@ -254,20 +254,20 @@ void TDistilVectors<FImpl>::execute(void)
for( int dt = 0; dt < Nt_inv; dt++ ) {
for( int ds = 0; ds < SI; ds++ ) {
vecindex = inoise + nnoise * dk + nnoise * LI * ds + nnoise *LI * SI*dt;
rho[vecindex] = zero;
tmp3d_nospin = zero;
rho[vecindex] = 0;
tmp3d_nospin = 0;
for (int it = dt; it < Nt; it += TI){
if (full_tdil) t_inv = tsrc; else t_inv = it;
if( t_inv >= Ntfirst && t_inv < Ntfirst + Ntlocal ) {
for (int ik = dk; ik < nvec; ik += LI){
for (int is = ds; is < Ns; is += SI){
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv-Ntfirst,Tdir);
//tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
tmp3d_nospin = evec3d * noise(inoise, t_inv, ik, is);
tmp3d=zero;
tmp3d=0;
pokeSpin(tmp3d,tmp3d_nospin,is);
tmp2=zero;
InsertSliceLocal(tmp3d,tmp2,0,t_inv-Ntfirst,Grid::QCD::Tdir);
tmp2=0;
InsertSliceLocal(tmp3d,tmp2,0,t_inv-Ntfirst,Tdir);
rho[vecindex] += tmp2;
}
}
@ -285,14 +285,14 @@ void TDistilVectors<FImpl>::execute(void)
for( int dt = 0; dt < Nt_inv; dt++ ) {
for( int ds = 0; ds < SI; ds++ ) {
vecindex = inoise + nnoise * dk + nnoise * LI * ds + nnoise *LI * SI*dt;
phi[vecindex] = zero;
phi[vecindex] = 0;
for (int t = Ntfirst; t < Ntfirst + Ntlocal; t++) {
sink_tslice=zero;
sink_tslice=0;
for (int ivec = 0; ivec < nvec; ivec++) {
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Tdir);
sink_tslice += evec3d * perambulator(t, ivec, dk, inoise,dt,ds);
}
InsertSliceLocal(sink_tslice,phi[vecindex],0,t-Ntfirst,Grid::QCD::Tdir);
InsertSliceLocal(sink_tslice,phi[vecindex],0,t-Ntfirst,Tdir);
}
}
}

View File

@ -236,7 +236,7 @@ void TLapEvec<GImpl>::execute(void)
eig[t].resize(LPar.Nk+LPar.Np,gridLD);
// Construct smearing operator
ExtractSliceLocal(UmuNoTime,Umu_smear,0,t,Grid::QCD::Tdir); // switch to 3d/4d objects
ExtractSliceLocal(UmuNoTime,Umu_smear,0,t,Tdir); // switch to 3d/4d objects
LinOpPeardonNabla<LatticeColourVector> PeardonNabla(UmuNoTime);
LOG(Debug) << "Chebyshev preconditioning to order " << ChebPar.PolyOrder
<< " with parameters (alpha,beta) = (" << ChebPar.alpha << "," << ChebPar.beta << ")" << std::endl;
@ -263,7 +263,7 @@ void TLapEvec<GImpl>::execute(void)
RotateEigen( eig[t].evec ); // Rotate the eigenvectors into our phase convention
for (int i=0;i<LPar.Nvec;i++){
InsertSliceLocal(eig[t].evec[i],eig4d.evec[i],0,t,Grid::QCD::Tdir);
InsertSliceLocal(eig[t].evec[i],eig4d.evec[i],0,t,Tdir);
if(t==0 && Ntfirst==0)
eig4d.eval[i] = eig[t].eval[i]; // TODO: Discuss: is this needed? Is there a better way?
}

View File

@ -110,7 +110,7 @@ template <typename FImpl>
void TNoises<FImpl>::setup(void)
{
const int Nt{env().getDim(Tdir)};
const int Ns{Grid::QCD::Ns};
//const int Ns{Grid::Ns};
const int nnoise{par().nnoise};
const int nvec{par().nvec};
const int TI{ Hadrons::MDistil::DistilParameters::ParameterDefault( par().TI, Nt, true) };
@ -123,7 +123,7 @@ template <typename FImpl>
void TNoises<FImpl>::execute(void)
{
const int Nt{env().getDim(Tdir)};
const int Ns{Grid::QCD::Ns};
//const int Ns{Grid::Ns};
const int nnoise{par().nnoise};
const int nvec{par().nvec};
const int TI{ Hadrons::MDistil::DistilParameters::ParameterDefault( par().TI, Nt, false) };

View File

@ -169,9 +169,9 @@ void TPerambFromSolve<FImpl>::execute(void)
for (int is = 0; is < Ns; is++) {
result_nospin = peekSpin(solve[inoise+nnoise*(dk+LI*(dt+Nt_inv*ds))],is);
for (int t = Ntfirst; t < Ntfirst + Ntlocal; t++) {
ExtractSliceLocal(result_3d,result_nospin,0,t-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(result_3d,result_nospin,0,t-Ntfirst,Tdir);
for (int ivec = 0; ivec < nvec_reduced; ivec++) {
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Tdir);
pokeSpin(perambulator(t, ivec, dk, inoise,dt,ds),static_cast<Complex>(innerProduct(evec3d, result_3d)),is);
std::cout << "perambulator(t, ivec, dk, inoise,dt,ds)(is) = (" << t << "," << ivec << "," << dk << "," << inoise << "," << dt << "," << ds << ")(" << is << ") = " << perambulator(t, ivec, dk, inoise,dt,ds)()(is)() << std::endl;
}

View File

@ -203,27 +203,27 @@ void TPerambulator<FImpl>::execute(void)
for (int dt = 0; dt < Nt_inv; dt++) {
for (int ds = 0; ds < SI; ds++) {
std::cout << "LapH source vector from noise " << inoise << " and dilution component (d_k,d_t,d_alpha) : (" << dk << ","<< dt << "," << ds << ")" << std::endl;
dist_source = zero;
tmp3d_nospin = zero;
evec3d = zero;
dist_source = 0;
tmp3d_nospin = 0;
evec3d = 0;
for (int it = dt; it < Nt; it += TI){
if (full_tdil) t_inv = tsrc; else t_inv = it;
if( t_inv >= Ntfirst && t_inv < Ntfirst + Ntlocal ) {
for (int ik = dk; ik < nvec; ik += LI){
for (int is = ds; is < Ns; is += SI){
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(evec3d,epack.evec[ik],0,t_inv-Ntfirst,Tdir);
//tmp3d_nospin = evec3d * noise[inoise + nnoise*(t_inv + Nt*(ik+nvec*is))];
tmp3d_nospin = evec3d * noise(inoise, t_inv, ik, is);
tmp3d=zero;
tmp3d=0;
pokeSpin(tmp3d,tmp3d_nospin,is);
tmp2=zero;
InsertSliceLocal(tmp3d,tmp2,0,t_inv-Ntfirst,Grid::QCD::Tdir);
tmp2=0;
InsertSliceLocal(tmp3d,tmp2,0,t_inv-Ntfirst,Tdir);
dist_source += tmp2;
}
}
}
}
result=zero;
result=0;
v4dtmp = dist_source;
if (Ls_ == 1){
solver(result, v4dtmp);
@ -238,9 +238,9 @@ void TPerambulator<FImpl>::execute(void)
for (int is = 0; is < Ns; is++) {
result_nospin = peekSpin(result,is);
for (int t = Ntfirst; t < Ntfirst + Ntlocal; t++) {
ExtractSliceLocal(result_3d,result_nospin,0,t-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(result_3d,result_nospin,0,t-Ntfirst,Tdir);
for (int ivec = 0; ivec < nvec; ivec++) {
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Grid::QCD::Tdir);
ExtractSliceLocal(evec3d,epack.evec[ivec],0,t-Ntfirst,Tdir);
pokeSpin(perambulator(t, ivec, dk, inoise,dt,ds),static_cast<Complex>(innerProduct(evec3d, result_3d)),is);
}
}

View File

@ -54,8 +54,9 @@ BEGIN_HADRONS_NAMESPACE
* - action: fermion action used for propagator (string)
* - emField: photon field A_mu (string)
* - mass: input mass for the lepton propagator
* - boundary: boundary conditions for the lepton propagator, e.g. "1 1 1 -1"
* - twist: twisted boundary for lepton propagator, e.g. "0.0 0.0 0.0 0.5"
* - deltat: source-sink separation
* - deltat: list of source-sink separations
*
*******************************************************************************/
@ -74,7 +75,7 @@ public:
double, mass,
std::string , boundary,
std::string, twist,
unsigned int, deltat);
std::vector<unsigned int>, deltat);
};
template <typename FImpl>
@ -124,7 +125,12 @@ std::vector<std::string> TEMLepton<FImpl>::getInput(void)
template <typename FImpl>
std::vector<std::string> TEMLepton<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName(), getName() + "_free"};
std::vector<std::string> out = {};
for(int i=0; i<par().deltat.size(); i++)
{
out.push_back(std::to_string(par().deltat[i]) + "_" + getName() + "_free");
out.push_back(std::to_string(par().deltat[i]) + "_" + getName());
}
return out;
}
@ -134,8 +140,11 @@ template <typename FImpl>
void TEMLepton<FImpl>::setup(void)
{
Ls_ = env().getObjectLs(par().action);
envCreateLat(PropagatorField, getName());
envCreateLat(PropagatorField, getName() + "_free");
for(int i=0; i<par().deltat.size(); i++)
{
envCreateLat(PropagatorField, std::to_string(par().deltat[i]) + "_" + getName() + "_free");
envCreateLat(PropagatorField, std::to_string(par().deltat[i]) + "_" + getName());
}
envTmpLat(FermionField, "source", Ls_);
envTmpLat(FermionField, "sol", Ls_);
envTmpLat(FermionField, "tmp");
@ -156,9 +165,6 @@ void TEMLepton<FImpl>::execute(void)
auto &mat = envGet(FMat, par().action);
RealD mass = par().mass;
Complex ci(0.0,1.0);
PropagatorField &Aslashlep = envGet(PropagatorField, getName());
PropagatorField &lep = envGet(PropagatorField, getName() + "_free");
envGetTmp(FermionField, source);
envGetTmp(FermionField, sol);
@ -213,7 +219,7 @@ void TEMLepton<FImpl>::execute(void)
// 5D source if action is 5d
mat.ImportPhysicalFermionSource(tmp, source);
}
sol = zero;
sol = Zero();
mat.FreePropagator(source,sol,mass,boundary,twist);
if (Ls_ == 1)
{
@ -227,6 +233,22 @@ void TEMLepton<FImpl>::execute(void)
}
}
for(unsigned int dt=0;dt<par().deltat.size();dt++){
PropagatorField &lep = envGet(PropagatorField, std::to_string(par().deltat[dt]) + "_" + getName() + "_free");
for(tl=0;tl<nt;tl++){
//shift free propagator to different source positions
//account for possible anti-periodic boundary in time
proptmp = Cshift(freetmp,Tp, -tl);
proptmp = where( tlat < tl, boundary[Tp]*proptmp, proptmp);
// free propagator for fixed source-sink separation
lep = where(tlat == (tl-par().deltat[dt]+nt)%nt, proptmp, lep);
}
//account for possible anti-periodic boundary in time
lep = where( tlat >= nt-par().deltat[dt], boundary[Tp]*lep, lep);
}
for(tl=0;tl<nt;tl++){
//shift free propagator to different source positions
@ -234,18 +256,15 @@ void TEMLepton<FImpl>::execute(void)
proptmp = Cshift(freetmp,Tp, -tl);
proptmp = where( tlat < tl, boundary[Tp]*proptmp, proptmp);
// free propagator for fixed source-sink separation
lep = where(tlat == (tl-par().deltat+nt)%nt, proptmp, lep);
// i*A_mu*gamma_mu
sourcetmp = zero;
sourcetmp = Zero();
for(unsigned int mu=0;mu<=3;mu++)
{
Gamma gmu(Gamma::gmu[mu]);
sourcetmp += ci * PeekIndex<LorentzIndex>(stoch_photon, mu) * (gmu * proptmp );
}
proptmp = zero;
proptmp = Zero();
//sequential propagator from i*Aslash*S
LOG(Message) << "Sequential propagator for t= " << tl << std::endl;
@ -262,7 +281,7 @@ void TEMLepton<FImpl>::execute(void)
// 5D source if action is 5d
mat.ImportPhysicalFermionSource(tmp, source);
}
sol = zero;
sol = Zero();
mat.FreePropagator(source,sol,mass,boundary,twist);
if (Ls_ == 1)
{
@ -276,13 +295,17 @@ void TEMLepton<FImpl>::execute(void)
}
}
// keep the result for the desired delta t
Aslashlep = where(tlat == (tl-par().deltat+nt)%nt, proptmp, Aslashlep);
for(unsigned int dt=0;dt<par().deltat.size();dt++){
PropagatorField &Aslashlep = envGet(PropagatorField, std::to_string(par().deltat[dt]) + "_" + getName());
Aslashlep = where(tlat == (tl-par().deltat[dt]+nt)%nt, proptmp, Aslashlep);
}
}
//account for possible anti-periodic boundary in time
Aslashlep = where( tlat >= nt-par().deltat, boundary[Tp]*Aslashlep, Aslashlep);
lep = where( tlat >= nt-par().deltat, boundary[Tp]*lep, lep);
for(unsigned int dt=0;dt<par().deltat.size();dt++){
PropagatorField &Aslashlep = envGet(PropagatorField, std::to_string(par().deltat[dt]) + "_" + getName());
Aslashlep = where( tlat >= nt-par().deltat[dt], boundary[Tp]*Aslashlep, Aslashlep);
}
}
END_MODULE_NAMESPACE


@ -167,7 +167,7 @@ void TFreeProp<FImpl>::execute(void)
PropToFerm<FImpl>(source, fullSrc, s, c);
}
}
sol = zero;
sol = Zero();
std::vector<double> twist = strToVec<double>(par().twist);
if(twist.size() != Nd)
{


@ -174,8 +174,8 @@ void TGaugeProp<FImpl>::execute(void)
PropToFerm<FImpl>(source, fullSrc, s, c);
}
}
sol = Zero();
LOG(Message) << "Solve" << std::endl;
sol = zero;
solver(sol, source);
LOG(Message) << "Export solution" << std::endl;
FermToProp<FImpl>(prop, sol, s, c);


@ -1,7 +0,0 @@
#include <Hadrons/Modules/MGauge/StoutSmearing3D.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MGauge;
template class Grid::Hadrons::MGauge::TStoutSmearing3D<GIMPL>;


@ -1,137 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/StoutSmearing3D.hpp
Copyright (C) 2015-2019
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MGauge_StoutSmearing3D_hpp_
#define Hadrons_MGauge_StoutSmearing3D_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Stout smearing *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MGauge)
class StoutSmearing3DPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(StoutSmearing3DPar,
std::string, gauge,
unsigned int, steps,
double, rho,
unsigned int, orthogdim);
};
template <typename GImpl>
class TStoutSmearing3D: public Module<StoutSmearing3DPar>
{
public:
GAUGE_TYPE_ALIASES(GImpl,);
public:
// constructor
TStoutSmearing3D(const std::string name);
// destructor
virtual ~TStoutSmearing3D(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(StoutSmearing3D, TStoutSmearing3D<GIMPL>, MGauge);
/******************************************************************************
* TStoutSmearing3D implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename GImpl>
TStoutSmearing3D<GImpl>::TStoutSmearing3D(const std::string name)
: Module<StoutSmearing3DPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename GImpl>
std::vector<std::string> TStoutSmearing3D<GImpl>::getInput(void)
{
std::vector<std::string> in = {par().gauge};
return in;
}
template <typename GImpl>
std::vector<std::string> TStoutSmearing3D<GImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename GImpl>
void TStoutSmearing3D<GImpl>::setup(void)
{
envCreateLat(GaugeField, getName());
envTmpLat(GaugeField, "buf");
}
// execution ///////////////////////////////////////////////////////////////////
template <typename GImpl>
void TStoutSmearing3D<GImpl>::execute(void)
{
LOG(Message) << "Smearing '" << par().gauge
<< "' with " << par().steps << " step" << ((par().steps > 1) ? "s" : "")
<< " of 3D-stout smearing and rho=" << par().rho
<< " orthogonal to dimension " << par().orthogdim << std::endl;
Smear_Stout<GImpl> smearer(par().rho, par().orthogdim);
auto &U = envGet(GaugeField, par().gauge);
auto &Usmr = envGet(GaugeField, getName());
envGetTmp(GaugeField, buf);
buf = U;
LOG(Message) << "plaquette= " << WilsonLoops<GImpl>::avgPlaquette(U)
<< std::endl;
for (unsigned int n = 0; n < par().steps; ++n)
{
smearer.smear(Usmr, buf);
buf = Usmr;
LOG(Message) << "plaquette= " << WilsonLoops<GImpl>::avgPlaquette(Usmr)
<< std::endl;
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MGauge_StoutSmearing3D_hpp_
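For reference, the removed module iterated the 3d smearing kernel a fixed number of times, logging the average plaquette after each step; schematically,

\[
U^{(0)} = U, \qquad U^{(k+1)} = \mathcal{S}^{3d}_{\rho,\,\hat{o}}\big[U^{(k)}\big],
\qquad k = 0, \dots, \mathrm{steps}-1,
\]

with \(\hat{o}\) the direction excluded by orthogdim.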


@ -177,7 +177,7 @@ void TLoadEigenPack<Pack, GImpl>::execute(void)
for (unsigned int i = 0; i < par().size; i++)
{
LOG(Message) << "Applying gauge transformation to eigenvector i = " << i << "/" << par().size << std::endl;
epack.evec[i].checkerboard = Odd;
epack.evec[i].Checkerboard() = Odd;
epack.evec[i] = tmpXformOdd * epack.evec[i];
}
stopTimer("Transform application");


@ -109,8 +109,6 @@ template <typename FImpl1, typename FImpl2>
std::vector<std::string> TAmputate<FImpl1, FImpl2>::getOutput(void)
{
std::vector<std::string> output = {getName()};
return output;
}
@ -118,15 +116,16 @@ std::vector<std::string> TAmputate<FImpl1, FImpl2>::getOutput(void)
template <typename Fimpl1, typename Fimpl2>
SpinColourMatrix TAmputate<Fimpl1, Fimpl2>::invertspincolmat(SpinColourMatrix &scmat)
{
Eigen::MatrixXcf scmat_2d(Ns*Nc,Ns*Nc);
Eigen::MatrixXcd scmat_2d(Ns*Nc,Ns*Nc);
for(int ic=0; ic<Nc; ic++){
for(int jc=0; jc<Nc; jc++){
for(int is=0; is<Ns; is++){
for(int js=0; js<Ns; js++){
scmat_2d(Ns*ic+is,Ns*jc+js) = scmat()(is,js)(ic,jc);
auto z = scmat()(is,js)(ic,jc);
scmat_2d(Ns*ic+is,Ns*jc+js) = std::complex<double>(real(z),imag(z));
}}
}}
Eigen::MatrixXcf scmat_2d_inv = scmat_2d.inverse();
Eigen::MatrixXcd scmat_2d_inv = scmat_2d.inverse();
SpinColourMatrix scmat_inv;
for(int ic=0; ic<Nc; ic++){
for(int jc=0; jc<Nc; jc++){
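The flattening used here packs spin and colour into a single Eigen index so that the full spin-colour matrix can be inverted and written back through the same map:

\[
M_{IJ} \;=\; S_{(i_s j_s)(i_c j_c)}, \qquad I = N_s\, i_c + i_s, \quad J = N_s\, j_c + j_s,
\]

with \(N_s = 4\) and \(N_c = 3\), so \(I, J\) run over \(N_s N_c = 12\) values.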
@ -163,8 +162,8 @@ void TAmputate<FImpl1, FImpl2>::execute(void)
read(reader,"vertex", vertex);
LOG(Message) << "vertex read" << std::endl;
pdotxin=zero;
pdotxout=zero;
pdotxin=Zero();
pdotxout=Zero();
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];


@ -129,7 +129,7 @@ LatticeSpinColourMatrix TBilinear<FImpl1, FImpl2>::PhaseProps(LatticeSpinColourM
LatticeComplex pdotx(grid), coor(grid);
std::vector<int> latt_size = grid->_fdimensions;
Complex Ci(0.0,1.0);
pdotx=zero;
pdotx=Zero();
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
@ -187,8 +187,8 @@ q = (p1-p2)
//
pdotxin=zero;
pdotxout=zero;
pdotxin=Zero();
pdotxout=Zero();
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];


@ -116,41 +116,30 @@ std::vector<std::string> TFourQuark<FImpl1, FImpl2>::getOutput(void)
template <typename FImpl1, typename FImpl2>
void TFourQuark<FImpl1, FImpl2>::tensorprod(LatticeSpinColourSpinColourMatrix &lret, LatticeSpinColourMatrix a, LatticeSpinColourMatrix b)
{
#if 0
parallel_for(auto site=lret.begin();site<lret.end();site++) {
for (int si; si < 4; ++si){
for(int sj; sj <4; ++sj){
for (int ci; ci < 3; ++ci){
for (int cj; cj < 3; ++cj){
for (int sk; sk < 4; ++sk){
for(int sl; sl <4; ++sl){
for (int ck; ck < 3; ++ck){
for (int cl; cl < 3; ++cl){
lret[site]()(si,sj)(ci,cj)(sk,sl)(ck,cl)=a[site]()(si,sj)(ci,cj)*b[site]()(sk,sl)(ck,cl);
}}
}}
}}
}}
}
#else
// FIXME: is there a general need for this construct? If so, we should encapsulate the
// loops below in a helper function.
//LOG(Message) << "sp co mat a is - " << a << std::endl;
//LOG(Message) << "sp co mat b is - " << b << std::endl;
parallel_for(auto site=lret.begin();site<lret.end();site++) {
vTComplex left;
auto lret_v = lret.View();
auto a_v = a.View();
auto b_v = b.View();
#ifdef GRID_NVCC
#warning "NVCC problem: Removed impossibly slow compile of simple NPR host code in FourQuark.hpp"
#else
thread_foreach( site,lret_v,{
vTComplex left;
for(int si=0; si < Ns; ++si){
for(int sj=0; sj < Ns; ++sj){
for (int ci=0; ci < Nc; ++ci){
for (int cj=0; cj < Nc; ++cj){
//LOG(Message) << "si, sj, ci, cj - " << si << ", " << sj << ", "<< ci << ", "<< cj << std::endl;
left()()() = a[site]()(si,sj)(ci,cj);
left()()() = a_v[site]()(si,sj)(ci,cj);
//LOG(Message) << left << std::endl;
lret[site]()(si,sj)(ci,cj)=left()*b[site]();
lret_v[site]()(si,sj)(ci,cj)=left()*b_v[site]();
}}
}}
}
#endif
});
#endif
}
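For reference, independent of the threading macro used, tensorprod builds the site-local outer product (this is a restatement of the loop above, not an addition to it):

\[
L(x)_{(s_i s_j)(c_i c_j)(s_k s_l)(c_k c_l)}
 \;=\; A(x)_{(s_i s_j)(c_i c_j)}\; B(x)_{(s_k s_l)(c_k c_l)} .
\]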
@ -168,6 +157,7 @@ void TFourQuark<FImpl1, FImpl2>::setup(void)
template <typename FImpl1, typename FImpl2>
void TFourQuark<FImpl1, FImpl2>::execute(void)
{
#ifndef GRID_NVCC
/*********************************************************************************
@ -219,8 +209,8 @@ We have up to 256 of these including the offdiag (G1 != G2).
//Sout = Grid::QCD::PropUtils::PhaseProps(Sout,pout);
//find p.x for in and out so phase can be accounted for in propagators
pdotxin=zero;
pdotxout=zero;
pdotxin=Zero();
pdotxout=Zero();
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
@ -231,7 +221,6 @@ We have up to 256 of these including the offdiag (G1 != G2).
Sin = Sin*exp(-Ci*pdotxin); //phase corrections
Sout = Sout*exp(-Ci*pdotxout);
//Set up Gammas
std::vector<Gamma> gammavector;
for( int i=1; i<Gamma::nGamma; i+=2){
@ -239,7 +228,7 @@ We have up to 256 of these including the offdiag (G1 != G2).
gammavector.push_back(Gamma(gam));
}
lret = zero;
lret = Zero();
if (fullbasis == true){ // all combinations of mu and nu
result.fourquark.resize(Gamma::nGamma/2*Gamma::nGamma/2);
for( int mu=0; mu<Gamma::nGamma/2; mu++){
@ -248,7 +237,7 @@ We have up to 256 of these including the offdiag (G1 != G2).
LatticeSpinColourMatrix bilinear_nu(env().getGrid());
bilinear_nu = g5*adj(Sout)*g5*gammavector[nu]*Sin;
LOG(Message) << "bilinear_nu for nu = " << nu << " is - " << bilinear_mu << std::endl;
result.fourquark[mu*Gamma::nGamma/2 + nu] = zero;
result.fourquark[mu*Gamma::nGamma/2 + nu] = Zero();
tensorprod(lret,bilinear_mu,bilinear_nu);
result.fourquark[mu*Gamma::nGamma/2 + nu] = sum(lret);
}
@ -259,12 +248,13 @@ We have up to 256 of these including the offdiag (G1 != G2).
//for( int mu=0; mu<Gamma::nGamma/2; mu++ ){
bilinear_mu = g5*adj(Sout)*g5*gammavector[mu]*Sin;
//LOG(Message) << "bilinear_mu for mu = " << mu << " is - " << bilinear_mu << std::endl;
result.fourquark[mu] = zero;
result.fourquark[mu] = Zero();
tensorprod(lret,bilinear_mu,bilinear_mu); //tensor outer product
result.fourquark[mu] = sum(lret);
}
}
write(writer, "fourquark", result.fourquark);
#endif
}
END_MODULE_NAMESPACE


@ -149,7 +149,7 @@ void TChargedProp::execute(void)
<< RESULT_FILE_NAME(par().output, vm().getTrajectory()) << "'..."
<< std::endl;
result.projection.resize(par().outputMom.size());
result.lattice_size = env().getGrid()->_fdimensions;
result.lattice_size = env().getGrid()->FullDimensions().toVector();
result.mass = par().mass;
result.charge = q;
siteCoor.resize(env().getNd());
@ -160,11 +160,11 @@ void TChargedProp::execute(void)
LOG(Message) << "Calculating (" << par().outputMom[i_p]
<< ") momentum projection" << std::endl;
result.projection[i_p].corr_0.resize(env().getGrid()->_fdimensions[env().getNd()-1]);
result.projection[i_p].corr.resize(env().getGrid()->_fdimensions[env().getNd()-1]);
result.projection[i_p].corr_Q.resize(env().getGrid()->_fdimensions[env().getNd()-1]);
result.projection[i_p].corr_Sun.resize(env().getGrid()->_fdimensions[env().getNd()-1]);
result.projection[i_p].corr_Tad.resize(env().getGrid()->_fdimensions[env().getNd()-1]);
result.projection[i_p].corr_0.resize(env().getGrid()->FullDimensions()[env().getNd()-1]);
result.projection[i_p].corr.resize(env().getGrid()->FullDimensions()[env().getNd()-1]);
result.projection[i_p].corr_Q.resize(env().getGrid()->FullDimensions()[env().getNd()-1]);
result.projection[i_p].corr_Sun.resize(env().getGrid()->FullDimensions()[env().getNd()-1]);
result.projection[i_p].corr_Tad.resize(env().getGrid()->FullDimensions()[env().getNd()-1]);
for (unsigned int j = 0; j < env().getNd()-1; ++j)
{
@ -226,7 +226,7 @@ void TChargedProp::makeCaches(void)
}
if (!phasesDone_)
{
std::vector<int> &l = env().getGrid()->_fdimensions;
auto l = env().getGrid()->FullDimensions();
Complex ci(0.0,1.0);
LOG(Message) << "Caching shift phases..." << std::endl;
@ -259,7 +259,7 @@ void TChargedProp::momD1(ScalarField &s, FFT &fft)
envGetTmp(ScalarField, result);
envGetTmp(ScalarField, Amu);
result = zero;
result = Zero();
for (unsigned int mu = 0; mu < env().getNd(); ++mu)
{
Amu = peekLorentz(A, mu);
@ -289,7 +289,7 @@ void TChargedProp::momD2(ScalarField &s, FFT &fft)
envGetTmp(ScalarField, result);
envGetTmp(ScalarField, Amu);
result = zero;
result = Zero();
for (unsigned int mu = 0; mu < env().getNd(); ++mu)
{
Amu = peekLorentz(A, mu);


@ -132,7 +132,7 @@ void TDiv<SImpl>::execute(void)
std::cout << std::endl;
auto &div = envGet(ComplexField, getName());
div = zero;
div = Zero();
for (unsigned int mu = 0; mu < nd; ++mu)
{
auto &op = envGet(ComplexField, par().op[mu]);


@ -154,7 +154,7 @@ void TStochFreeField<SImpl>::execute(void)
LOG(Message) << "Generating random momentum-space field" << std::endl;
envGetTmp(Field, phift);
envGetTmp(ComplexField, ca);
phift = zero;
phift = Zero();
for (int a = 0; a < Nadj; ++a)
{
Site ta;

View File

@ -142,7 +142,7 @@ void TTrKinetic<SImpl>::execute(void)
auto &sumkin = envGet(ComplexField, varName(getName(), "sum"));
envGetTmp(std::vector<Field>, der);
sumkin = zero;
sumkin = Zero();
if (!par().output.empty())
{
result.type = par().type;


@ -37,3 +37,4 @@ template class Grid::Hadrons::MScalarSUN::TTransProj<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MScalarSUN::TTransProj<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MScalarSUN::TTransProj<ScalarNxNAdjImplR<6>>;


@ -145,7 +145,7 @@ void TTransProj<SImpl>::execute(void)
envGetTmp(ComplexField, buf1);
envGetTmp(ComplexField, buf2);
envGetTmp(ComplexField, lap);
lap = zero;
lap = Zero();
if (!par().output.empty())
{
result.type = par().type;


@ -128,7 +128,7 @@ void TPoint<FImpl>::execute(void)
envGetTmp(LatticeComplex, coor);
p = strToVec<Real>(par().mom);
ph = zero;
ph = Zero();
for(unsigned int mu = 0; mu < p.size(); mu++)
{
LatticeCoordinate(coor, mu);


@ -156,7 +156,7 @@ void TA2AAslashVectors<FImpl>::execute(void)
<< par().vector << " and the photon field " << par().emField << std::endl;
for(unsigned int i=0; i<Nmodes; i++)
{
v4dtmp = zero;
v4dtmp = Zero();
startTimer("Multiply Aslash");
for(unsigned int mu=0;mu<=3;mu++)
{


@ -68,6 +68,7 @@ private:
class OperatorFunctionWrapper: public OperatorFunction<Field>
{
public:
using OperatorFunction<Field>::operator();
OperatorFunctionWrapper(LinearFunction<Field> &fn): fn_(fn) {};
virtual ~OperatorFunctionWrapper(void) = default;
virtual void operator()(LinearOperatorBase<Field> &op,


@ -0,0 +1,7 @@
#include <Hadrons/Modules/MSource/Convolution.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MSource;
template class Grid::Hadrons::MSource::TConvolution<FIMPL>;


@ -0,0 +1,130 @@
#ifndef Hadrons_MSource_Convolution_hpp_
#define Hadrons_MSource_Convolution_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Convolution *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MSource)
class ConvolutionPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(ConvolutionPar,
std::string, field,
std::string, filter,
std::string, mom);
};
template <typename FImpl>
class TConvolution: public Module<ConvolutionPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
TConvolution(const std::string name);
// destructor
virtual ~TConvolution(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
private:
std::vector<int> mom_;
};
MODULE_REGISTER_TMP(Convolution, TConvolution<FIMPL>, MSource);
/******************************************************************************
* TConvolution implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TConvolution<FImpl>::TConvolution(const std::string name)
: Module<ConvolutionPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TConvolution<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().field, par().filter};
return in;
}
template <typename FImpl>
std::vector<std::string> TConvolution<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TConvolution<FImpl>::setup(void)
{
mom_ = strToVec<int>(par().mom);
if(mom_.size() != env().getNd()) {
HADRONS_ERROR(Size, std::string("momentum has ")
+ std::to_string(mom_.size()) + " instead of "
+ std::to_string(env().getNd()) + " components");
}
envCreateLat(PropagatorField, getName());
envTmpLat(ComplexField, "momfield");
envTmp(FFT, "fft", 1, env().getGrid());
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TConvolution<FImpl>::execute(void)
{
auto &filter = envGet(ComplexField, par().filter);
auto &field = envGet(PropagatorField, par().field);
auto &out = envGet(PropagatorField, getName());
envGetTmp(ComplexField, momfield);
envGetTmp(FFT, fft);
std::vector<int> mask(env().getNd(), 1);
mask.back()=0; //transform only the spatial dimensions
startTimer("Fourier transform");
fft.FFT_dim_mask(momfield, filter, mask, FFT::forward);
fft.FFT_dim_mask(out, field, mask, FFT::forward);
stopTimer("Fourier transform");
startTimer("momentum-space multiplication");
out=momfield*out;
stopTimer("momentum-space multiplication");
startTimer("inserting momentum");
for(int mu=0; mu<env().getNd(); mu++)
{
if(mom_[mu]!=0)
{
out=Cshift(out, mu, -mom_[mu]);
}
}
stopTimer("inserting momentum");
startTimer("Fourier transform");
fft.FFT_dim_mask(out, out, mask, FFT::backward);
stopTimer("Fourier transform");
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MSource_Convolution_hpp_
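The module above relies on the standard convolution theorem: transforming the filter and the field over the spatial directions only (the time direction is masked out), multiplying in momentum space and transforming back is equivalent to a spatial convolution; schematically,

\[
(\phi * f)(\vec{x}, t) \;=\; \sum_{\vec{y}} f(\vec{x}-\vec{y})\, \phi(\vec{y}, t)
 \;=\; \mathcal{F}^{-1}\!\big[\tilde{f}(\vec{p})\, \tilde{\phi}(\vec{p}, t)\big](\vec{x}),
\]

with the Cshift calls inserting the requested momentum before the backward transform.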


@ -0,0 +1,8 @@
#include <Hadrons/Modules/MSource/Gauss.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MSource;
template class Grid::Hadrons::MSource::TGauss<FIMPL>;
template class Grid::Hadrons::MSource::TGauss<ScalarImplCR>;


@ -0,0 +1,173 @@
#ifndef Hadrons_MSource_Gauss_hpp_
#define Hadrons_MSource_Gauss_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Gauss *
* result[n] = 1/(sqrt(2*pi)*width)^dim *
* * exp(-|n-position|^2/(2*width^2)) *
* * exp(i*2*pi/L*mom*n) *
* where: *
* n=(n[0],n[1],...,n[dim-1]) (lattice coordinate) *
* dim=Nd-1 *
******************************************************************************/
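The same profile in display form (a direct transcription of the comment above):

\[
\rho(n) \;=\; \frac{1}{\big(\sqrt{2\pi}\,w\big)^{\dim}}
 \exp\!\left(-\frac{|n - x_0|^2}{2 w^2}\right)
 \exp\!\left(i\,\frac{2\pi}{L}\, p\cdot n\right),
 \qquad \dim = N_d - 1,
\]

with \(x_0\) the position parameter, \(w\) the width and \(p\) the momentum.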
BEGIN_MODULE_NAMESPACE(MSource)
class GaussPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(GaussPar,
std::string, position,
std::string, mom,
Integer, tA,
Integer, tB,
double, width);
};
template <typename FImpl>
class TGauss: public Module<GaussPar>
{
BASIC_TYPE_ALIASES(FImpl,);
public:
// constructor
TGauss(const std::string name);
// destructor
virtual ~TGauss(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
private:
std::vector<int> position_;
std::vector<int> mom_;
};
MODULE_REGISTER_TMP(Gauss, TGauss<FIMPL>, MSource);
MODULE_REGISTER_TMP(ScalarGauss, TGauss<ScalarImplCR>, MSource);
/******************************************************************************
* TGauss implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TGauss<FImpl>::TGauss(const std::string name)
: Module<GaussPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TGauss<FImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImpl>
std::vector<std::string> TGauss<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TGauss<FImpl>::setup(void)
{
auto parse_vector = [](const std::string &vec, int dim,
const std::string &desc)
{
std::vector<int> res = strToVec<int>(vec);
if(res.size() != dim) {
HADRONS_ERROR(Size, desc + " has "
+ std::to_string(res.size()) + " instead of "
+ std::to_string(dim) + " components");
}
return res;
};
position_ = parse_vector(par().position, env().getNd()-1, "position");
mom_ = parse_vector(par().mom, env().getNd(), "momentum");
envCreateLat(PropagatorField, getName());
envTmpLat(ComplexField, "component");
envTmpLat(ComplexField, "ScalarRho");
envTmp(LatticeInteger, "compHelper", 1, envGetGrid(ComplexField));
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TGauss<FImpl>::execute(void)
{
auto &rho = envGet(PropagatorField, getName());
envGetTmp(ComplexField, component);
envGetTmp(ComplexField, ScalarRho);
envGetTmp(LatticeInteger, compHelper);
const int dim=env().getNd()-1;
const Real fact=-0.5/std::pow(par().width,2);
const Complex i(0.0, 1.0);
const Real Pi(M_PI);
const SitePropagator idMat=[](){ SitePropagator s; s=1.; return s; }();
ScalarRho=Zero();
for(int mu=0; mu<dim; mu++) {
assert(env().getDim(mu)%2==0);
assert(position_[mu]>=0 && position_[mu]<env().getDim(mu));
const int Lmu=env().getDim(mu);
const int LmuHalf=Lmu/2;
const int posMu=position_[mu];
LatticeCoordinate(component, mu);
LatticeCoordinate(compHelper, mu);
//spatial dimensions of momentum phase
ScalarRho+=(i*(mom_[mu]*2*Pi/Lmu))*component;
//Gauss distribution
component-=Complex(posMu);
if(posMu<LmuHalf)
{
component=where((compHelper>Integer(posMu+LmuHalf)),
component-Complex(Lmu),
component);
}
else
{
component=where((compHelper<=Integer(posMu-LmuHalf)),
component+Complex(Lmu),
component);
}
ScalarRho+=component*component*fact;
}
//time component of momentum phase
LatticeCoordinate(component, dim);
ScalarRho+=(i*(mom_.at(dim)*2*Pi/env().getDim(dim)))*component;
//compute scalar result
ScalarRho=exp(ScalarRho)*Complex(std::pow(sqrt(2*Pi)*par().width,-dim));
//select time slices
LatticeCoordinate(compHelper, dim);
ScalarRho=where((compHelper>=par().tA && compHelper<=par().tB),
ScalarRho,
0.*ScalarRho);
//compute output field rho
rho=ScalarRho*idMat;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MSource_Gauss_hpp_
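A scalar sketch of the periodic wrapping performed with the where() calls in TGauss::execute above, assuming an even extent L and 0 <= pos < L (illustration only, not Hadrons code):

    // displacement n - pos, wrapped so the Gaussian stays centred on pos
    // across the periodic boundary, as in TGauss<FImpl>::execute
    int wrapDistance(int n, int pos, int L)
    {
        int d = n - pos;
        if (pos < L/2) { if (n >  pos + L/2) d -= L; }  // wrap sites far to the right
        else           { if (n <= pos - L/2) d += L; }  // wrap sites far to the left
        return d;
    }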


@ -126,7 +126,7 @@ void TMomentum<FImpl>::execute(void)
LOG(Message) << " " << std::endl;
//get the momentum from parameters
p = strToVec<Real>(par().mom);
C = zero;
C = Zero();
LOG(Message) << "momentum converted from string - " << std::to_string(p[0]) <<std::to_string(p[1]) <<std::to_string(p[2]) << std::to_string(p[3]) << std::endl;
for(int mu=0;mu<4;mu++){
Real TwoPiL = M_PI * 2.0/ latt_size[mu];


@ -132,7 +132,7 @@ void TPoint<FImpl>::execute(void)
+ " components (must have " + std::to_string(env().getNd()) + ")");
}
id = 1.;
src = zero;
src = Zero();
pokeSite(id, src, position);
}


@ -148,7 +148,7 @@ void TSeqAslash<FImpl>::execute(void)
<< par().tA << " <= t <= " << par().tB
<< " using the photon field " << par().emField << std::endl;
}
auto &src = envGet(PropagatorField, getName()); src=zero;
auto &src = envGet(PropagatorField, getName()); src=Zero();
auto &q = envGet(PropagatorField, par().q);
auto &ph = envGet(LatticeComplex, momphName_);
auto &t = envGet(Lattice<iScalar<vInteger>>, tName_);
@ -160,7 +160,7 @@ void TSeqAslash<FImpl>::execute(void)
envGetTmp(LatticeComplex, coor);
p = strToVec<Real>(par().mom);
ph = zero;
ph = Zero();
for(unsigned int mu = 0; mu < env().getNd(); mu++)
{
LatticeCoordinate(coor, mu);


@ -141,7 +141,7 @@ void TSeqConserved<FImpl>::setup(void)
{
auto Ls_ = env().getObjectLs(par().action);
envCreateLat(PropagatorField, getName(), Ls_);
envTmpLat(PropagatorField, "src_tmp");
envTmpLat(PropagatorField, "src_tmp",Ls_);
envCacheLat(LatticeComplex, SeqmomphName_);
envTmpLat(LatticeComplex, "coor");
envTmpLat(LatticeComplex, "latt_compl");
@ -175,14 +175,14 @@ void TSeqConserved<FImpl>::execute(void)
auto &mat = envGet(FMat, par().action);
envGetTmp(LatticeComplex, latt_compl);
src = zero;
src = Zero();
//exp(ipx)
auto &mom_phase = envGet(LatticeComplex, SeqmomphName_);
if (!SeqhasPhase_)
{
std::vector<Real> mom = strToVec<Real>(par().mom);
mom_phase = zero;
mom_phase = Zero();
Complex i(0.0,1.0);
envGetTmp(LatticeComplex, coor);
for(unsigned int mu = 0; mu < env().getNd(); mu++)


@ -159,7 +159,7 @@ void TSeqGamma<FImpl>::execute(void)
envGetTmp(LatticeComplex, coor);
p = strToVec<Real>(par().mom);
ph = zero;
ph = Zero();
for(unsigned int mu = 0; mu < env().getNd(); mu++)
{
LatticeCoordinate(coor, mu);


@ -142,7 +142,7 @@ void TWall<FImpl>::execute(void)
envGetTmp(LatticeComplex, coor);
p = strToVec<Real>(par().mom);
ph = zero;
ph = Zero();
for(unsigned int mu = 0; mu < env().getNd(); mu++)
{
LatticeCoordinate(coor, mu);


@ -28,7 +28,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/TimerArray.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
void TimerArray::startTimer(const std::string &name)


@ -347,11 +347,12 @@ int main(int argc, char* argv[])
tAr.startTimer("Transpose caching");
lastTerm[t].resize(ref.rows(), ref.cols());
parallel_for (unsigned int j = 0; j < ref.cols(); ++j)
for (unsigned int i = 0; i < ref.rows(); ++i)
{
lastTerm[t](i, j) = ref(i, j);
}
thread_for( j,ref.cols(),{
for (unsigned int i = 0; i < ref.rows(); ++i)
{
lastTerm[t](i, j) = ref(i, j);
}
});
tAr.stopTimer("Transpose caching");
}
bytes = par.global.nt*lastTerm[0].rows()*lastTerm[0].cols()*sizeof(ComplexD);


@ -34,16 +34,20 @@ See the full license in the file "LICENSE" in the top level distribution directo
using namespace Grid;
using namespace Hadrons;
const int RowMajor = Eigen::RowMajor;
const int ColMajor = Eigen::ColMajor;
#ifdef GRID_COMMS_MPI3
#define GET_RANK(rank, nMpi) \
MPI_Comm_size(MPI_COMM_WORLD, &(nMpi));\
MPI_Comm_rank(MPI_COMM_WORLD, &(rank))
MPI_Comm_rank(MPI_COMM_WORLD, &(rank));\
assert(rank<nMpi)
#define BARRIER() MPI_Barrier(MPI_COMM_WORLD)
#define INIT() MPI_Init(NULL, NULL)
#define FINALIZE() MPI_Finalize()
#else
#define GET_RANK(rank, nMpi) (nMpi) = 1; (rank) = 0
#define GET_RANK(rank, nMpi) (nMpi) = 1; (rank) = 0 ; assert(rank<nMpi)
#define BARRIER()
#define INIT()
#define FINALIZE()
@ -74,7 +78,7 @@ inline void trBenchmark(const std::string name, const MatLeft &left,
if (rank == 0)
{
std::cout << std::setw(34) << name << ": diff= "
<< std::setw(12) << std::norm(buf-ref)
<< std::setw(12) << abs(buf-ref)
<< std::setw(10) << t/1.0e6 << " sec "
<< std::setw(10) << flops/t/1.0e3 << " GFlop/s "
<< std::setw(10) << bytes/t*1.0e6/1024/1024/1024 << " GB/s "
@ -88,7 +92,8 @@ inline void mulBenchmark(const std::string name, const MatV &left,
const MatV &right, const Mat &ref, Function fn)
{
double t, flops, bytes;
double nr = left[0].rows(), nc = left[0].cols(), n = nr*nc;
double nr = left[0].rows(), nc = left[0].cols();
// double n = nr*nc;
unsigned int nMat = left.size();
int nMpi, rank;
Mat buf(left[0].rows(), left[0].rows());
@ -126,22 +131,22 @@ static inline void zdotuRow(ComplexD &res, const unsigned int aRow,
const ComplexD *aPt, *bPt;
unsigned int aInc, bInc;
if (MatLeft::Options == Eigen::RowMajor)
if (MatLeft::Options == RowMajor)
{
aPt = a.data() + aRow*a.cols();
aInc = 1;
}
else if (MatLeft::Options == Eigen::ColMajor)
else if (MatLeft::Options == ColMajor)
{
aPt = a.data() + aRow;
aInc = a.rows();
}
if (MatRight::Options == Eigen::RowMajor)
if (MatRight::Options == RowMajor)
{
bPt = b.data() + aRow;
bInc = b.cols();
}
else if (MatRight::Options == Eigen::ColMajor)
else if (MatRight::Options == ColMajor)
{
bPt = b.data() + aRow*b.rows();
bInc = 1;
@ -156,22 +161,22 @@ static inline void zdotuCol(ComplexD &res, const unsigned int aCol,
const ComplexD *aPt, *bPt;
unsigned int aInc, bInc;
if (MatLeft::Options == Eigen::RowMajor)
if (MatLeft::Options == RowMajor)
{
aPt = a.data() + aCol;
aInc = a.cols();
}
else if (MatLeft::Options == Eigen::ColMajor)
else if (MatLeft::Options == ColMajor)
{
aPt = a.data() + aCol*a.rows();
aInc = 1;
}
if (MatRight::Options == Eigen::RowMajor)
if (MatRight::Options == RowMajor)
{
bPt = b.data() + aCol*b.cols();
bInc = 1;
}
else if (MatRight::Options == Eigen::ColMajor)
else if (MatRight::Options == ColMajor)
{
bPt = b.data() + aCol;
bInc = b.rows();
@ -196,20 +201,20 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
{
std::cout << "==== tr(A*B) benchmarks" << std::endl;
std::cout << "A matrices use ";
if (MatLeft::Options == Eigen::RowMajor)
if (MatLeft::Options == RowMajor)
{
std::cout << "row-major ordering" << std::endl;
}
else if (MatLeft::Options == Eigen::ColMajor)
else if (MatLeft::Options == ColMajor)
{
std::cout << "col-major ordering" << std::endl;
}
std::cout << "B matrices use ";
if (MatRight::Options == Eigen::RowMajor)
if (MatRight::Options == RowMajor)
{
std::cout << "row-major ordering" << std::endl;
}
else if (MatRight::Options == Eigen::ColMajor)
else if (MatRight::Options == ColMajor)
{
std::cout << "col-major ordering" << std::endl;
}
@ -229,7 +234,7 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
auto nr = a.rows(), nc = a.cols();
res = 0.;
parallel_for (unsigned int i = 0; i < nr; ++i)
thread_for(i,nr,
{
ComplexD tmp = 0.;
@ -237,11 +242,11 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
{
tmp += a(i, j)*b(j, i);
}
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
trBenchmark("Naive loop cols first", left, right, ref,
[](ComplexD &res, const MatLeft &a, const MatRight &b)
@ -249,7 +254,7 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
auto nr = a.rows(), nc = a.cols();
res = 0.;
parallel_for (unsigned int j = 0; j < nc; ++j)
thread_for(j,nc,
{
ComplexD tmp = 0.;
@ -257,11 +262,11 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
{
tmp += a(i, j)*b(j, i);
}
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
trBenchmark("Eigen tr(A*B)", left, right, ref,
[](ComplexD &res, const MatLeft &a, const MatRight &b)
@ -272,31 +277,31 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
[](ComplexD &res, const MatLeft &a, const MatRight &b)
{
res = 0.;
parallel_for (unsigned int r = 0; r < a.rows(); ++r)
thread_for(r,a.rows(),
{
ComplexD tmp;
tmp = a.row(r).conjugate().dot(b.col(r));
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
trBenchmark("Eigen col-wise dot", left, right, ref,
[](ComplexD &res, const MatLeft &a, const MatRight &b)
{
res = 0.;
parallel_for (unsigned int c = 0; c < a.cols(); ++c)
thread_for(c,a.cols(),
{
ComplexD tmp;
tmp = a.col(c).conjugate().dot(b.row(c));
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
trBenchmark("Eigen Hadamard", left, right, ref,
[](ComplexD &res, const MatLeft &a, const MatRight &b)
@ -308,31 +313,31 @@ void fullTrBenchmark(const unsigned int ni, const unsigned int nj, const unsigne
[](ComplexD &res, const MatLeft &a, const MatRight &b)
{
res = 0.;
parallel_for (unsigned int r = 0; r < a.rows(); ++r)
thread_for(r,a.rows(),
{
ComplexD tmp;
zdotuRow(tmp, r, a, b);
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
trBenchmark("MKL col-wise zdotu", left, right, ref,
[](ComplexD &res, const MatLeft &a, const MatRight &b)
{
res = 0.;
parallel_for (unsigned int c = 0; c < a.cols(); ++c)
thread_for(c,a.cols(),
{
ComplexD tmp;
zdotuCol(tmp, c, a, b);
parallel_critical
thread_critical
{
res += tmp;
}
}
});
});
#endif
BARRIER();
@ -356,11 +361,11 @@ void fullMulBenchmark(const unsigned int ni, const unsigned int nj, const unsign
{
std::cout << "==== A*B benchmarks" << std::endl;
std::cout << "all matrices use ";
if (Mat::Options == Eigen::RowMajor)
if (Mat::Options == RowMajor)
{
std::cout << "row-major ordering" << std::endl;
}
else if (Mat::Options == Eigen::ColMajor)
else if (Mat::Options == ColMajor)
{
std::cout << "col-major ordering" << std::endl;
}
@ -383,13 +388,13 @@ void fullMulBenchmark(const unsigned int ni, const unsigned int nj, const unsign
[](Mat &res, const Mat &a, const Mat &b)
{
const ComplexD one(1., 0.), zero(0., 0.);
if (Mat::Options == Eigen::RowMajor)
if (Mat::Options == RowMajor)
{
cblas_zgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.cols(), b.data(), b.cols(), &zero,
res.data(), res.cols());
}
else if (Mat::Options == Eigen::ColMajor)
else if (Mat::Options == ColMajor)
{
cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, a.rows(), b.cols(),
a.cols(), &one, a.data(), a.rows(), b.data(), b.rows(), &zero,
@ -430,11 +435,7 @@ int main(int argc, char *argv[])
std::cout << nMpi << " MPI processes" << std::endl;
#ifdef GRID_OMP
#pragma omp parallel
{
#pragma omp single
std::cout << omp_get_num_threads() << " threads\n" << std::endl;
}
std::cout << omp_get_max_threads() << " threads\n" << std::endl;
#else
std::cout << "Single-threaded\n" << std::endl;
#endif


@ -29,7 +29,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Environment.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
template <typename FOut, typename FIn>


@ -29,7 +29,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Application.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
int main(int argc, char *argv[])


@ -31,7 +31,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/ModuleFactory.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
/******************************************************************************
@ -586,7 +586,7 @@ VirtualMachine::Program VirtualMachine::schedule(const GeneticPar &par)
LOG(Message) << " max. cst. generation= " << par.maxCstGen << std::endl;
LOG(Message) << " mutation rate= " << par.mutationRate << std::endl;
unsigned int k = 0, gen, prevPeak, nCstPeak = 0;
unsigned int gen, prevPeak, nCstPeak = 0;
std::random_device rd;
Scheduler::Parameters gpar;


@ -1,163 +1,163 @@
modules_cc =\
Modules/MScalarSUN/Grad.cc \
Modules/MScalarSUN/TwoPointNPR.cc \
Modules/MScalarSUN/Div.cc \
Modules/MScalarSUN/TrMag.cc \
Modules/MScalarSUN/TransProj.cc \
Modules/MSource/SeqConserved.cc \
Modules/MSource/Convolution.cc \
Modules/MSource/SeqAslash.cc \
Modules/MSource/Wall.cc \
Modules/MSource/Point.cc \
Modules/MSource/Z2.cc \
Modules/MSource/Gauss.cc \
Modules/MSource/SeqGamma.cc \
Modules/MSource/Momentum.cc \
Modules/MScalarSUN/TwoPoint.cc \
Modules/MScalarSUN/TransProj.cc \
Modules/MScalarSUN/TwoPointNPR.cc \
Modules/MScalarSUN/EMT.cc \
Modules/MScalarSUN/TrKinetic.cc \
Modules/MScalarSUN/TrPhi.cc \
Modules/MScalarSUN/EMT.cc \
Modules/MScalarSUN/Div.cc \
Modules/MScalarSUN/Grad.cc \
Modules/MScalarSUN/StochFreeField.cc \
Modules/MScalarSUN/TrMag.cc \
Modules/MNoise/FullVolumeSpinColorDiagonal.cc \
Modules/MNoise/TimeDilutedSpinColorDiagonal.cc \
Modules/MScalar/FreeProp.cc \
Modules/MScalar/ChargedProp.cc \
Modules/MIO/LoadBinary.cc \
Modules/MIO/LoadCosmHol.cc \
Modules/MIO/LoadCoarseEigenPack.cc \
Modules/MIO/LoadNersc.cc \
Modules/MIO/LoadEigenPack.cc \
Modules/MAction/Wilson.cc \
Modules/MAction/DWF.cc \
Modules/MAction/MobiusDWF.cc \
Modules/MAction/ScaledDWF.cc \
Modules/MAction/WilsonClover.cc \
Modules/MAction/ZMobiusDWF.cc \
Modules/MGauge/FundtoHirep.cc \
Modules/MGauge/StochEm.cc \
Modules/MGauge/Unit.cc \
Modules/MGauge/StoutSmearing.cc \
Modules/MGauge/Electrify.cc \
Modules/MGauge/Random.cc \
Modules/MGauge/UnitEm.cc \
Modules/MGauge/GaugeFix.cc \
Modules/MUtilities/PrecisionCast.cc \
Modules/MUtilities/RandomVectors.cc \
Modules/MIO/LoadA2AVectors.cc \
Modules/MIO/LoadNersc.cc \
Modules/MIO/LoadBinary.cc \
Modules/MIO/LoadCoarseEigenPack.cc \
Modules/MIO/LoadEigenPack.cc \
Modules/MIO/LoadCosmHol.cc \
Modules/MIO/LoadPerambulator.cc \
Modules/MSink/Smear.cc \
Modules/MSink/Point.cc \
Modules/MContraction/A2ALoop.cc \
Modules/MContraction/WeakNonEye3pt.cc \
Modules/MContraction/WeakMesonDecayKl2.cc \
Modules/MContraction/A2AMesonField.cc \
Modules/MContraction/Gamma3pt.cc \
Modules/MContraction/A2AAslashField.cc \
Modules/MContraction/DiscLoop.cc \
Modules/MContraction/Meson.cc \
Modules/MContraction/Nucleon.cc \
Modules/MContraction/SelfContract.cc \
Modules/MContraction/WeakEye3pt.cc \
Modules/MDistil/BContraction.cc \
Modules/MDistil/Baryon2pt.cc \
Modules/MDistil/DistilVectors.cc \
Modules/MDistil/LapEvec.cc \
Modules/MDistil/Noises.cc \
Modules/MDistil/PerambFromSolve.cc \
Modules/MDistil/Perambulator.cc \
Modules/MDistil/g5_multiply.cc \
Modules/MNPR/Bilinear.cc \
Modules/MNPR/FourQuark.cc \
Modules/MNPR/Amputate.cc \
Modules/MSolver/A2AAslashVectors.cc \
Modules/MSolver/MixedPrecisionRBPrecCG.cc \
Modules/MSolver/RBPrecCG.cc \
Modules/MSolver/LocalCoherenceLanczos.cc \
Modules/MSolver/A2AVectors.cc \
Modules/MFermion/FreeProp.cc \
Modules/MFermion/GaugeProp.cc \
Modules/MFermion/EMLepton.cc \
Modules/MGauge/Random.cc \
Modules/MGauge/StoutSmearing3D.cc \
Modules/MGauge/StochEm.cc \
Modules/MGauge/StoutSmearing.cc \
Modules/MGauge/Unit.cc \
Modules/MGauge/Electrify.cc \
Modules/MGauge/UnitEm.cc \
Modules/MGauge/FundtoHirep.cc \
Modules/MGauge/GaugeFix.cc \
Modules/MUtilities/RandomVectors.cc \
Modules/MUtilities/PrecisionCast.cc \
Modules/MDistil/PerambFromSolve.cc \
Modules/MDistil/g5_multiply.cc \
Modules/MDistil/LapEvec.cc \
Modules/MDistil/Perambulator.cc \
Modules/MDistil/Noises.cc \
Modules/MDistil/DistilVectors.cc \
Modules/MDistil/BContraction.cc \
Modules/MDistil/Baryon2pt.cc \
Modules/MSource/Momentum.cc \
Modules/MSource/SeqAslash.cc \
Modules/MSource/Z2.cc \
Modules/MSource/Point.cc \
Modules/MSource/SeqGamma.cc \
Modules/MSource/Wall.cc \
Modules/MSource/SeqConserved.cc \
Modules/MContraction/WeakEye3pt.cc \
Modules/MContraction/Meson.cc \
Modules/MContraction/A2AAslashField.cc \
Modules/MContraction/SelfContract.cc \
Modules/MContraction/Baryon.cc \
Modules/MContraction/Nucleon.cc \
Modules/MContraction/WeakNonEye3pt.cc \
Modules/MContraction/DiscLoop.cc \
Modules/MContraction/WeakMesonDecayKl2.cc \
Modules/MContraction/A2AMesonField.cc \
Modules/MContraction/A2ALoop.cc \
Modules/MContraction/Gamma3pt.cc \
Modules/MAction/MobiusDWF.cc \
Modules/MAction/WilsonClover.cc \
Modules/MAction/Wilson.cc \
Modules/MAction/DWF.cc \
Modules/MAction/ScaledDWF.cc \
Modules/MAction/ZMobiusDWF.cc \
Modules/MSolver/A2AVectors.cc \
Modules/MSolver/RBPrecCG.cc \
Modules/MSolver/LocalCoherenceLanczos.cc \
Modules/MSolver/MixedPrecisionRBPrecCG.cc \
Modules/MSolver/A2AAslashVectors.cc \
Modules/MNPR/Bilinear.cc \
Modules/MNPR/FourQuark.cc \
Modules/MNPR/Amputate.cc
Modules/MSink/Smear.cc \
Modules/MSink/Point.cc
modules_hpp =\
Modules/MScalarSUN/TrKinetic.hpp \
Modules/MScalarSUN/StochFreeField.hpp \
Modules/MScalarSUN/TwoPointNPR.hpp \
Modules/MScalarSUN/Grad.hpp \
Modules/MScalarSUN/TransProj.hpp \
Modules/MSource/Gauss.hpp \
Modules/MSource/Momentum.hpp \
Modules/MSource/SeqAslash.hpp \
Modules/MSource/Z2.hpp \
Modules/MSource/Point.hpp \
Modules/MSource/SeqGamma.hpp \
Modules/MSource/Convolution.hpp \
Modules/MSource/Wall.hpp \
Modules/MSource/SeqConserved.hpp \
Modules/MScalarSUN/Div.hpp \
Modules/MScalarSUN/TrMag.hpp \
Modules/MScalarSUN/Utils.hpp \
Modules/MScalarSUN/EMT.hpp \
Modules/MScalarSUN/TwoPoint.hpp \
Modules/MScalarSUN/TrKinetic.hpp \
Modules/MScalarSUN/TrPhi.hpp \
Modules/MNoise/FullVolumeSpinColorDiagonal.hpp \
Modules/MScalarSUN/TwoPoint.hpp \
Modules/MScalarSUN/Grad.hpp \
Modules/MScalarSUN/Utils.hpp \
Modules/MScalarSUN/StochFreeField.hpp \
Modules/MScalarSUN/EMT.hpp \
Modules/MScalarSUN/TrMag.hpp \
Modules/MScalarSUN/TwoPointNPR.hpp \
Modules/MScalarSUN/TransProj.hpp \
Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp \
Modules/MNoise/FullVolumeSpinColorDiagonal.hpp \
Modules/MScalar/FreeProp.hpp \
Modules/MScalar/Scalar.hpp \
Modules/MScalar/ChargedProp.hpp \
Modules/MIO/LoadPerambulator.hpp \
Modules/MIO/LoadEigenPack.hpp \
Modules/MIO/LoadA2AVectors.hpp \
Modules/MIO/LoadCoarseEigenPack.hpp \
Modules/MIO/LoadCosmHol.hpp \
Modules/MIO/LoadBinary.hpp \
Modules/MIO/LoadNersc.hpp \
Modules/MSink/Smear.hpp \
Modules/MSink/Point.hpp \
Modules/MFermion/FreeProp.hpp \
Modules/MFermion/GaugeProp.hpp \
Modules/MFermion/EMLepton.hpp \
Modules/MGauge/FundtoHirep.hpp \
Modules/MGauge/Random.hpp \
Modules/MGauge/StoutSmearing.hpp \
Modules/MGauge/Unit.hpp \
Modules/MGauge/GaugeFix.hpp \
Modules/MGauge/StoutSmearing3D.hpp \
Modules/MGauge/StochEm.hpp \
Modules/MGauge/Electrify.hpp \
Modules/MGauge/UnitEm.hpp \
Modules/MUtilities/RandomVectors.hpp \
Modules/MUtilities/PrecisionCast.hpp \
Modules/MDistil/Noises.hpp \
Modules/MDistil/Perambulator.hpp \
Modules/MDistil/g5_multiply.hpp \
Modules/MDistil/PerambFromSolve.hpp \
Modules/MDistil/Baryon2pt.hpp \
Modules/MDistil/LapEvec.hpp \
Modules/MDistil/BContraction.hpp \
Modules/MDistil/DistilVectors.hpp \
Modules/MSource/SeqConserved.hpp \
Modules/MSource/SeqAslash.hpp \
Modules/MSource/Z2.hpp \
Modules/MSource/Wall.hpp \
Modules/MSource/SeqGamma.hpp \
Modules/MSource/Point.hpp \
Modules/MSource/Momentum.hpp \
Modules/MContraction/WeakMesonDecayKl2.hpp \
Modules/MContraction/Nucleon.hpp \
Modules/MContraction/A2AAslashField.hpp \
Modules/MContraction/WeakEye3pt.hpp \
Modules/MContraction/WeakNonEye3pt.hpp \
Modules/MContraction/Baryon.hpp \
Modules/MContraction/Meson.hpp \
Modules/MContraction/A2ALoop.hpp \
Modules/MContraction/Gamma3pt.hpp \
Modules/MContraction/DiscLoop.hpp \
Modules/MContraction/SelfContract.hpp \
Modules/MContraction/A2AMesonField.hpp \
Modules/MAction/WilsonClover.hpp \
Modules/MAction/Wilson.hpp \
Modules/MAction/ScaledDWF.hpp \
Modules/MAction/MobiusDWF.hpp \
Modules/MAction/Wilson.hpp \
Modules/MAction/DWF.hpp \
Modules/MAction/WilsonClover.hpp \
Modules/MAction/ZMobiusDWF.hpp \
Modules/MAction/DWF.hpp \
Modules/MGauge/UnitEm.hpp \
Modules/MGauge/Electrify.hpp \
Modules/MGauge/StoutSmearing.hpp \
Modules/MGauge/Random.hpp \
Modules/MGauge/FundtoHirep.hpp \
Modules/MGauge/GaugeFix.hpp \
Modules/MGauge/Unit.hpp \
Modules/MGauge/StochEm.hpp \
Modules/MUtilities/RandomVectors.hpp \
Modules/MUtilities/PrecisionCast.hpp \
Modules/MIO/LoadCosmHol.hpp \
Modules/MIO/LoadA2AVectors.hpp \
Modules/MIO/LoadEigenPack.hpp \
Modules/MIO/LoadNersc.hpp \
Modules/MIO/LoadBinary.hpp \
Modules/MIO/LoadCoarseEigenPack.hpp \
Modules/MIO/LoadPerambulator.hpp \
Modules/MContraction/WeakEye3pt.hpp \
Modules/MContraction/WeakMesonDecayKl2.hpp \
Modules/MContraction/Gamma3pt.hpp \
Modules/MContraction/A2AMesonField.hpp \
Modules/MContraction/A2ALoop.hpp \
Modules/MContraction/WeakNonEye3pt.hpp \
Modules/MContraction/DiscLoop.hpp \
Modules/MContraction/A2AAslashField.hpp \
Modules/MContraction/Meson.hpp \
Modules/MContraction/Nucleon.hpp \
Modules/MContraction/SelfContract.hpp \
Modules/MDistil/BContraction.hpp \
Modules/MDistil/Baryon2pt.hpp \
Modules/MDistil/DistilVectors.hpp \
Modules/MDistil/LapEvec.hpp \
Modules/MDistil/Noises.hpp \
Modules/MDistil/PerambFromSolve.hpp \
Modules/MDistil/Perambulator.hpp \
Modules/MDistil/g5_multiply.hpp \
Modules/MNPR/FourQuark.hpp \
Modules/MNPR/Bilinear.hpp \
Modules/MNPR/Amputate.hpp \
Modules/MSolver/A2AAslashVectors.hpp \
Modules/MSolver/RBPrecCG.hpp \
Modules/MSolver/Guesser.hpp \
Modules/MSolver/LocalCoherenceLanczos.hpp \
Modules/MSolver/A2AVectors.hpp \
Modules/MSolver/MixedPrecisionRBPrecCG.hpp \
Modules/MSolver/Guesser.hpp \
Modules/MSolver/A2AAslashVectors.hpp \
Modules/MNPR/FourQuark.hpp \
Modules/MNPR/Bilinear.hpp \
Modules/MNPR/Amputate.hpp
Modules/MFermion/FreeProp.hpp \
Modules/MFermion/GaugeProp.hpp \
Modules/MFermion/EMLepton.hpp \
Modules/MSink/Smear.hpp \
Modules/MSink/Point.hpp