Mirror of https://github.com/paboyle/Grid.git (synced 2025-11-02 21:14:32 +00:00)

Merge branch 'develop' into feature/dwf-multirhs
@@ -10,8 +10,8 @@ if BUILD_COMMS_MPI3
extra_sources+=communicator/Communicator_base.cc
endif

if BUILD_COMMS_MPI3L
extra_sources+=communicator/Communicator_mpi3_leader.cc
if BUILD_COMMS_MPIT
extra_sources+=communicator/Communicator_mpit.cc
extra_sources+=communicator/Communicator_base.cc
endif
@@ -1,6 +1,6 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/Algorithms.h

@@ -37,6 +37,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/approx/Chebyshev.h>
#include <Grid/algorithms/approx/Remez.h>
#include <Grid/algorithms/approx/MultiShiftFunction.h>
#include <Grid/algorithms/approx/Forecast.h>

#include <Grid/algorithms/iterative/ConjugateGradient.h>
#include <Grid/algorithms/iterative/ConjugateResidual.h>
@@ -45,6 +46,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/iterative/ConjugateGradientMultiShift.h>
#include <Grid/algorithms/iterative/ConjugateGradientMixedPrec.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
#include <Grid/algorithms/iterative/ConjugateGradientReliableUpdate.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>
@@ -230,6 +230,7 @@ namespace Grid {
// Barrel shift and collect global pencil
std::vector<int> lcoor(Nd), gcoor(Nd);
result = source;
int pc = processor_coor[dim];
for(int p=0;p<processors[dim];p++) {
PARALLEL_REGION
{
@@ -240,7 +241,8 @@ namespace Grid {
for(int idx=0;idx<sgrid->lSites();idx++) {
sgrid->LocalIndexToLocalCoor(idx,cbuf);
peekLocalSite(s,result,cbuf);
cbuf[dim]+=p*L;
cbuf[dim]+=((pc+p) % processors[dim])*L;
// cbuf[dim]+=p*L;
pokeLocalSite(s,pgbuf,cbuf);
}
}
@@ -278,7 +280,6 @@ namespace Grid {
flops+= flops_call*NN;

// writing out result
int pc = processor_coor[dim];
PARALLEL_REGION
{
std::vector<int> clbuf(Nd), cgbuf(Nd);
lib/algorithms/approx/Forecast.h (new file, 152 lines)
@@ -0,0 +1,152 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/approx/Forecast.h

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#ifndef INCLUDED_FORECAST_H
#define INCLUDED_FORECAST_H

namespace Grid {

  // Abstract base class.
  // Takes a matrix (Mat), a source (phi), and a vector of Fields (chi)
  // and returns a forecasted solution to the system D*psi = phi (psi).
  template<class Matrix, class Field>
  class Forecast
  {
  public:
    virtual Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& chi) = 0;
  };

  // Implementation of Brower et al.'s chronological inverter (arXiv:hep-lat/9509012),
  // used to forecast solutions across poles of the EOFA heatbath.
  //
  // Modified from CPS (cps_pp/src/util/dirac_op/d_op_base/comsrc/minresext.C)
  template<class Matrix, class Field>
  class ChronoForecast : public Forecast<Matrix,Field>
  {
  public:
    Field operator()(Matrix &Mat, const Field& phi, const std::vector<Field>& prev_solns)
    {
      int degree = prev_solns.size();
      Field chi(phi); // forecasted solution

      // Trivial cases
      if(degree == 0){ chi = zero; return chi; }
      else if(degree == 1){ return prev_solns[0]; }

      RealD dot;
      ComplexD xp;
      Field r(phi); // residual
      Field Mv(phi);
      std::vector<Field> v(prev_solns); // orthonormalized previous solutions
      std::vector<Field> MdagMv(degree,phi);

      // Array to hold the matrix elements
      std::vector<std::vector<ComplexD>> G(degree, std::vector<ComplexD>(degree));

      // Solution and source vectors
      std::vector<ComplexD> a(degree);
      std::vector<ComplexD> b(degree);

      // Orthonormalize the vector basis
      for(int i=0; i<degree; i++){
        v[i] *= 1.0/std::sqrt(norm2(v[i]));
        for(int j=i+1; j<degree; j++){ v[j] -= innerProduct(v[i],v[j]) * v[i]; }
      }

      // Perform sparse matrix multiplication and construct rhs
      for(int i=0; i<degree; i++){
        b[i] = innerProduct(v[i],phi);
        Mat.M(v[i],Mv);
        Mat.Mdag(Mv,MdagMv[i]);
        G[i][i] = innerProduct(v[i],MdagMv[i]);
      }

      // Construct the matrix
      for(int j=0; j<degree; j++){
      for(int k=j+1; k<degree; k++){
        G[j][k] = innerProduct(v[j],MdagMv[k]);
        G[k][j] = std::conj(G[j][k]);
      }}

      // Gauss-Jordan elimination with partial pivoting
      for(int i=0; i<degree; i++){

        // Perform partial pivoting
        int k = i;
        for(int j=i+1; j<degree; j++){ if(std::abs(G[j][j]) > std::abs(G[k][k])){ k = j; } }
        if(k != i){
          xp = b[k];
          b[k] = b[i];
          b[i] = xp;
          for(int j=0; j<degree; j++){
            xp = G[k][j];
            G[k][j] = G[i][j];
            G[i][j] = xp;
          }
        }

        // Convert matrix to upper triangular form
        for(int j=i+1; j<degree; j++){
          xp = G[j][i]/G[i][i];
          b[j] -= xp * b[i];
          for(int k=0; k<degree; k++){ G[j][k] -= xp*G[i][k]; }
        }
      }

      // Use Gaussian elimination to solve equations and calculate initial guess
      chi = zero;
      r = phi;
      for(int i=degree-1; i>=0; i--){
        a[i] = 0.0;
        for(int j=i+1; j<degree; j++){ a[i] += G[i][j] * a[j]; }
        a[i] = (b[i]-a[i])/G[i][i];
        chi += a[i]*v[i];
        r -= a[i]*MdagMv[i];
      }

      RealD true_r(0.0);
      ComplexD tmp;
      for(int i=0; i<degree; i++){
        tmp = -b[i];
        for(int j=0; j<degree; j++){ tmp += G[i][j]*a[j]; }
        tmp = std::conj(tmp)*tmp;
        true_r += std::sqrt(tmp.real());
      }

      RealD error = std::sqrt(norm2(r)/norm2(phi));
      std::cout << GridLogMessage << "ChronoForecast: |res|/|src| = " << error << std::endl;

      return chi;
    };
  };

}

#endif
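Although not part of this commit, a minimal sketch of a call site may help orient the reader. The matrix and field types below are illustrative assumptions, not Grid API; ChronoForecast only requires that the matrix type expose M(in,out) and Mdag(in,out):

// Hypothetical call site (names are assumptions for illustration only).
ChronoForecast<DiracMatrix,LatticeFermion> forecast;
std::vector<LatticeFermion> prev_solns;      // solutions kept from earlier poles
// ... append each converged solution as the heatbath proceeds ...
LatticeFermion guess = forecast(Mat, phi, prev_solns);
// `guess` minimizes |phi - Mdag M psi| over span{prev_solns}; use it as the
// starting vector for the solve at the next pole to cut the iteration count.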
@@ -87,15 +87,22 @@ void ThinQRfact (Eigen::MatrixXcd &m_rr,
////////////////////////////////////////////////////////////////////////////////////////////////////
sliceInnerProductMatrix(m_rr,R,R,Orthog);

////////////////////////////////////////////////////////////////////////////////////////////////////
// Cholesky from Eigen
// There exists a ldlt that is documented as more stable
////////////////////////////////////////////////////////////////////////////////////////////////////
Eigen::MatrixXcd L = m_rr.llt().matrixL();
// Force manifest hermitian to avoid rounding related
m_rr = 0.5*(m_rr+m_rr.adjoint());

#if 0
std::cout << " Calling Cholesky ldlt on m_rr " << m_rr <<std::endl;
Eigen::MatrixXcd L_ldlt = m_rr.ldlt().matrixL();
std::cout << " Called Cholesky ldlt on m_rr " << L_ldlt <<std::endl;
auto D_ldlt = m_rr.ldlt().vectorD();
std::cout << " Called Cholesky ldlt on m_rr " << D_ldlt <<std::endl;
#endif

// std::cout << " Calling Cholesky llt on m_rr " <<std::endl;
Eigen::MatrixXcd L = m_rr.llt().matrixL();
// std::cout << " Called Cholesky llt on m_rr " << L <<std::endl;
C = L.adjoint();
Cinv = C.inverse();

////////////////////////////////////////////////////////////////////////////////////////////////////
// Q = R C^{-1}
//
@@ -103,7 +110,6 @@ void ThinQRfact (Eigen::MatrixXcd &m_rr,
//
// NB maddMatrix conventions are Right multiplication X[j] a[j,i] already
////////////////////////////////////////////////////////////////////////////////////////////////////
// FIXME:: make a sliceMulMatrix to avoid zero vector
sliceMulMatrix(Q,Cinv,R,Orthog);
}
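As a standalone cross-check of the algebra here: with m_rr = R^dag R Hermitian positive-definite, the Cholesky factor L (m_rr = L L^dag) gives C = L^dag, and Q = R C^{-1} has orthonormal columns. A self-contained sketch in plain Eigen, independent of Grid's slice primitives (matrix sizes are illustrative):

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int nrhs = 4, dim = 100;
  Eigen::MatrixXcd R = Eigen::MatrixXcd::Random(dim, nrhs); // "fields" as columns

  Eigen::MatrixXcd m_rr = R.adjoint() * R;      // Gram matrix, Hermitian pos-def
  m_rr = 0.5 * (m_rr + m_rr.adjoint());         // force manifest Hermiticity

  Eigen::MatrixXcd L    = m_rr.llt().matrixL(); // m_rr = L L^dag
  Eigen::MatrixXcd C    = L.adjoint();          // R = Q C
  Eigen::MatrixXcd Cinv = C.inverse();
  Eigen::MatrixXcd Q    = R * Cinv;             // thin-QR Q factor

  // Orthonormality check: Q^dag Q should equal the identity to rounding.
  std::cout << (Q.adjoint()*Q - Eigen::MatrixXcd::Identity(nrhs,nrhs)).norm() << std::endl;
  return 0;
}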
////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -199,7 +205,12 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)

Linop.HermOp(X, AD);
tmp = B - AD;
//std::cout << GridLogMessage << " initial tmp " << norm2(tmp)<< std::endl;
ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
//std::cout << GridLogMessage << " initial Q " << norm2(Q)<< std::endl;
//std::cout << GridLogMessage << " m_rr " << m_rr<<std::endl;
//std::cout << GridLogMessage << " m_C " << m_C<<std::endl;
//std::cout << GridLogMessage << " m_Cinv " << m_Cinv<<std::endl;
D=Q;

std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl;
@@ -221,13 +232,15 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
MatrixTimer.Start();
Linop.HermOp(D, Z);
MatrixTimer.Stop();
//std::cout << GridLogMessage << " norm2 Z " <<norm2(Z)<<std::endl;

//4. M = [D^dag Z]^{-1}
sliceInnerTimer.Start();
sliceInnerProductMatrix(m_DZ,D,Z,Orthog);
sliceInnerTimer.Stop();
m_M = m_DZ.inverse();

//std::cout << GridLogMessage << " m_DZ " <<m_DZ<<std::endl;

//5. X = X + D MC
m_tmp = m_M * m_C;
sliceMaddTimer.Start();
lib/algorithms/iterative/ConjugateGradientReliableUpdate.h (new file, 256 lines)
@@ -0,0 +1,256 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/algorithms/iterative/ConjugateGradientReliableUpdate.h

Copyright (C) 2015

Author: Christopher Kelly <ckelly@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H
#define GRID_CONJUGATE_GRADIENT_RELIABLE_UPDATE_H

namespace Grid {

  template<class FieldD,class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
  class ConjugateGradientReliableUpdate : public LinearFunction<FieldD> {
  public:
    bool ErrorOnNoConverge; // throw an assert when the CG fails to converge.
                            // Defaults true.
    RealD Tolerance;
    Integer MaxIterations;
    Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
    Integer ReliableUpdatesPerformed;

    bool DoFinalCleanup; //Final DP cleanup, defaults to true
    Integer IterationsToCleanup; //Final DP cleanup step iterations

    LinearOperatorBase<FieldF> &Linop_f;
    LinearOperatorBase<FieldD> &Linop_d;
    GridBase* SinglePrecGrid;
    RealD Delta; //reliable update parameter

    //Optional ability to switch to a different linear operator once the tolerance reaches a certain point. Useful for single/half -> single/single
    LinearOperatorBase<FieldF> *Linop_fallback;
    RealD fallback_transition_tol;

    ConjugateGradientReliableUpdate(RealD tol, Integer maxit, RealD _delta, GridBase* _sp_grid, LinearOperatorBase<FieldF> &_Linop_f, LinearOperatorBase<FieldD> &_Linop_d, bool err_on_no_conv = true)
      : Tolerance(tol),
        MaxIterations(maxit),
        Delta(_delta),
        Linop_f(_Linop_f),
        Linop_d(_Linop_d),
        SinglePrecGrid(_sp_grid),
        ErrorOnNoConverge(err_on_no_conv),
        DoFinalCleanup(true),
        Linop_fallback(NULL)
    {};

    void setFallbackLinop(LinearOperatorBase<FieldF> &_Linop_fallback, const RealD _fallback_transition_tol){
      Linop_fallback = &_Linop_fallback;
      fallback_transition_tol = _fallback_transition_tol;
    }

    void operator()(const FieldD &src, FieldD &psi) {
      LinearOperatorBase<FieldF> *Linop_f_use = &Linop_f;
      bool using_fallback = false;

      psi.checkerboard = src.checkerboard;
      conformable(psi, src);

      RealD cp, c, a, d, b, ssq, qq, b_pred;

      FieldD p(src);
      FieldD mmp(src);
      FieldD r(src);

      // Initial residual computation & set up
      RealD guess = norm2(psi);
      assert(std::isnan(guess) == 0);

      Linop_d.HermOpAndNorm(psi, mmp, d, b);

      r = src - mmp;
      p = r;

      a = norm2(p);
      cp = a;
      ssq = norm2(src);

      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: guess " << guess << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: src " << ssq << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: mp " << d << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: mmp " << b << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: cp,r " << cp << std::endl;
      std::cout << GridLogIterative << std::setprecision(4) << "ConjugateGradientReliableUpdate: p " << a << std::endl;

      RealD rsq = Tolerance * Tolerance * ssq;

      // Check if guess is really REALLY good :)
      if (cp <= rsq) {
        std::cout << GridLogMessage << "ConjugateGradientReliableUpdate guess was REALLY good\n";
        std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl;
        return;
      }

      //Single prec initialization
      FieldF r_f(SinglePrecGrid);
      r_f.checkerboard = r.checkerboard;
      precisionChange(r_f, r);

      FieldF psi_f(r_f);
      psi_f = zero;

      FieldF p_f(r_f);
      FieldF mmp_f(r_f);

      RealD MaxResidSinceLastRelUp = cp; //initial residual

      std::cout << GridLogIterative << std::setprecision(4)
                << "ConjugateGradient: k=0 residual " << cp << " target " << rsq << std::endl;

      GridStopWatch LinalgTimer;
      GridStopWatch MatrixTimer;
      GridStopWatch SolverTimer;

      SolverTimer.Start();
      int k = 0;
      int l = 0;

      for (k = 1; k <= MaxIterations; k++) {
        c = cp;

        MatrixTimer.Start();
        Linop_f_use->HermOpAndNorm(p_f, mmp_f, d, qq);
        MatrixTimer.Stop();

        LinalgTimer.Start();

        a = c / d;
        b_pred = a * (a * qq - d) / c;

        cp = axpy_norm(r_f, -a, mmp_f, r_f);
        b = cp / c;

        // Fuse these loops ; should be really easy
        psi_f = a * p_f + psi_f;
        //p_f = p_f * b + r_f;

        LinalgTimer.Stop();

        std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: Iteration " << k
                  << " residual " << cp << " target " << rsq << std::endl;
        std::cout << GridLogDebug << "a = "<< a << " b_pred = "<< b_pred << " b = "<< b << std::endl;
        std::cout << GridLogDebug << "qq = "<< qq << " d = "<< d << " c = "<< c << std::endl;

        if(cp > MaxResidSinceLastRelUp){
          std::cout << GridLogIterative << "ConjugateGradientReliableUpdate: updating MaxResidSinceLastRelUp : " << MaxResidSinceLastRelUp << " -> " << cp << std::endl;
          MaxResidSinceLastRelUp = cp;
        }

        // Stopping condition
        if (cp <= rsq) {
          //Although not written in the paper, I assume that I have to add on the final solution
          precisionChange(mmp, psi_f);
          psi = psi + mmp;

          SolverTimer.Stop();
          Linop_d.HermOpAndNorm(psi, mmp, d, qq);
          p = mmp - src;

          RealD srcnorm = sqrt(norm2(src));
          RealD resnorm = sqrt(norm2(p));
          RealD true_residual = resnorm / srcnorm;

          std::cout << GridLogMessage << "ConjugateGradientReliableUpdate Converged on iteration " << k << " after " << l << " reliable updates" << std::endl;
          std::cout << GridLogMessage << "\tComputed residual " << sqrt(cp / ssq)<<std::endl;
          std::cout << GridLogMessage << "\tTrue residual " << true_residual<<std::endl;
          std::cout << GridLogMessage << "\tTarget " << Tolerance << std::endl;

          std::cout << GridLogMessage << "Time breakdown "<<std::endl;
          std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
          std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
          std::cout << GridLogMessage << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;

          IterationsToComplete = k;
          ReliableUpdatesPerformed = l;

          if(DoFinalCleanup){
            //Do a final CG to cleanup
            std::cout << GridLogMessage << "ConjugateGradientReliableUpdate performing final cleanup.\n";
            ConjugateGradient<FieldD> CG(Tolerance,MaxIterations);
            CG.ErrorOnNoConverge = ErrorOnNoConverge;
            CG(Linop_d,src,psi);
            IterationsToCleanup = CG.IterationsToComplete;
          }
          else if (ErrorOnNoConverge) assert(true_residual / Tolerance < 10000.0);

          std::cout << GridLogMessage << "ConjugateGradientReliableUpdate complete.\n";
          return;
        }
        else if(cp < Delta * MaxResidSinceLastRelUp) { //reliable update
          std::cout << GridLogMessage << "ConjugateGradientReliableUpdate "
                    << cp << "(residual) < " << Delta << "(Delta) * " << MaxResidSinceLastRelUp << "(MaxResidSinceLastRelUp) on iteration " << k << " : performing reliable update\n";
          precisionChange(mmp, psi_f);
          psi = psi + mmp;

          Linop_d.HermOpAndNorm(psi, mmp, d, qq);
          r = src - mmp;

          psi_f = zero;
          precisionChange(r_f, r);
          cp = norm2(r);
          MaxResidSinceLastRelUp = cp;

          b = cp/c;

          std::cout << GridLogMessage << "ConjugateGradientReliableUpdate new residual " << cp << std::endl;

          l = l+1;
        }

        p_f = p_f * b + r_f; //update search vector after reliable update appears to help convergence

        if(!using_fallback && Linop_fallback != NULL && cp < fallback_transition_tol){
          std::cout << GridLogMessage << "ConjugateGradientReliableUpdate switching to fallback linear operator on iteration " << k << " at residual " << cp << std::endl;
          Linop_f_use = Linop_fallback;
          using_fallback = true;
        }

      }
      std::cout << GridLogMessage << "ConjugateGradientReliableUpdate did NOT converge"
                << std::endl;

      if (ErrorOnNoConverge) assert(0);
      IterationsToComplete = k;
      ReliableUpdatesPerformed = l;
    }
  };

};

#endif
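A sketch of how the new solver might be wired up. The operator and grid objects below are illustrative assumptions; only the constructor signature is taken from the header above:

// Assumed setup (not in this commit): matching double- and single-precision
// Hermitian operators for the same Dirac matrix, plus the single-precision
// grid the solver needs for its internal low-precision fields.
ConjugateGradientReliableUpdate<LatticeFermionD,LatticeFermionF>
  CGru(1.0e-8,    // Tolerance
       10000,     // MaxIterations
       0.1,       // Delta: reliable update fires when the single-precision
                  // residual falls below Delta * max residual since last update
       sp_grid,   // GridBase* for single precision
       HermOpF,   // LinearOperatorBase<LatticeFermionF>
       HermOpD);  // LinearOperatorBase<LatticeFermionD>
CGru(src_d, sol_d); // iterate in single precision; correct drift in double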
@@ -1,7 +1,5 @@
#include <Grid/GridCore.h>
#include <fcntl.h>

namespace Grid {

@@ -11,7 +9,7 @@ int PointerCache::victim;

void *PointerCache::Insert(void *ptr,size_t bytes) {

if (bytes < 4096 ) return NULL;
if (bytes < 4096 ) return ptr;

#ifdef GRID_OMP
assert(omp_in_parallel()==0);
@@ -63,4 +61,37 @@ void *PointerCache::Lookup(size_t bytes) {
return NULL;
}

void check_huge_pages(void *Buf,uint64_t BYTES)
{
#ifdef __linux__
int fd = open("/proc/self/pagemap", O_RDONLY);
assert(fd >= 0);
const int page_size = 4096;
uint64_t virt_pfn = (uint64_t)Buf / page_size;
off_t offset = sizeof(uint64_t) * virt_pfn;
uint64_t npages = (BYTES + page_size-1) / page_size;
uint64_t pagedata[npages];
uint64_t ret = lseek(fd, offset, SEEK_SET);
assert(ret == offset);
ret = ::read(fd, pagedata, sizeof(uint64_t)*npages);
assert(ret == sizeof(uint64_t) * npages);
int nhugepages = npages / 512;
int n4ktotal, nnothuge;
n4ktotal = 0;
nnothuge = 0;
for (int i = 0; i < nhugepages; ++i) {
uint64_t baseaddr = (pagedata[i*512] & 0x7fffffffffffffULL) * page_size;
for (int j = 0; j < 512; ++j) {
uint64_t pageaddr = (pagedata[i*512+j] & 0x7fffffffffffffULL) * page_size;
++n4ktotal;
if (pageaddr != baseaddr + j * page_size)
++nnothuge;
}
}
int rank = CartesianCommunicator::RankWorld();
printf("rank %d Allocated %d 4k pages, %d not in huge pages\n", rank, n4ktotal, nnothuge);
#endif
}

}
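For reference, the decode in check_huge_pages follows the documented Linux pagemap format: each 8-byte /proc/self/pagemap entry carries the physical frame number in its low 55 bits (hence the 0x7fffffffffffff mask), and a 2MB huge page spans 512 contiguous 4KB frames, which is exactly the contiguity the inner loop tests for.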
@@ -64,6 +64,8 @@ namespace Grid {

};

void check_huge_pages(void *Buf,uint64_t BYTES);

////////////////////////////////////////////////////////////////////
// A lattice of something, but assume the something is SIMDized.
////////////////////////////////////////////////////////////////////
@@ -92,18 +94,34 @@ public:
size_type bytes = __n*sizeof(_Tp);

_Tp *ptr = (_Tp *) PointerCache::Lookup(bytes);

#ifdef HAVE_MM_MALLOC_H
if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) _mm_malloc(bytes,128);
#else
if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) memalign(128,bytes);
#endif
// if ( ptr != NULL )
// std::cout << "alignedAllocator "<<__n << " cache hit "<< std::hex << ptr <<std::dec <<std::endl;

//////////////////
// Hack 2MB align; could make option probably doesn't need configurability
//////////////////
//define GRID_ALLOC_ALIGN (128)
#define GRID_ALLOC_ALIGN (2*1024*1024)
#ifdef HAVE_MM_MALLOC_H
if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) _mm_malloc(bytes,GRID_ALLOC_ALIGN);
#else
if ( ptr == (_Tp *) NULL ) ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,bytes);
#endif
// std::cout << "alignedAllocator " << std::hex << ptr <<std::dec <<std::endl;
// First touch optimise in threaded loop
uint8_t *cp = (uint8_t *)ptr;
#ifdef GRID_OMP
#pragma omp parallel for
#endif
for(size_type n=0;n<bytes;n+=4096){
cp[n]=0;
}
return ptr;
}

void deallocate(pointer __p, size_type __n) {
size_type bytes = __n * sizeof(_Tp);

pointer __freeme = (pointer)PointerCache::Insert((void *)__p,bytes);

#ifdef HAVE_MM_MALLOC_H
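The touch loop exists for NUMA first-touch placement: a page is physically allocated on the memory node of the thread that first writes it, so touching pages in the same static OpenMP schedule that later compute loops use keeps data local. A standalone sketch of the idiom (assumes an OpenMP build; the function name is illustrative):

#include <cstdint>
#include <cstddef>

// Touch one byte per 4KB page from a static OpenMP schedule so each page
// faults in on the NUMA node of the thread that will later process it.
void first_touch(void *buf, std::size_t bytes) {
  uint8_t *cp = (uint8_t *)buf;
#pragma omp parallel for schedule(static)
  for (std::size_t n = 0; n < bytes; n += 4096) {
    cp[n] = 0;
  }
}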
@@ -182,10 +200,19 @@ public:
pointer allocate(size_type __n, const void* _p= 0)
{
#ifdef HAVE_MM_MALLOC_H
_Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),128);
_Tp * ptr = (_Tp *) _mm_malloc(__n*sizeof(_Tp),GRID_ALLOC_ALIGN);
#else
_Tp * ptr = (_Tp *) memalign(128,__n*sizeof(_Tp));
_Tp * ptr = (_Tp *) memalign(GRID_ALLOC_ALIGN,__n*sizeof(_Tp));
#endif
size_type bytes = __n*sizeof(_Tp);
uint8_t *cp = (uint8_t *)ptr;
if ( ptr ) {
// One touch per 4k page, static OMP loop to catch same loop order
#pragma omp parallel for schedule(static)
for(size_type n=0;n<bytes;n+=4096){
cp[n]=0;
}
}
return ptr;
}
void deallocate(pointer __p, size_type) {
@@ -187,17 +187,18 @@ public:
////////////////////////////////////////////////////////////////

void show_decomposition(){
std::cout << GridLogMessage << "Full Dimensions : " << _fdimensions << std::endl;
std::cout << GridLogMessage << "Global Dimensions : " << _gdimensions << std::endl;
std::cout << GridLogMessage << "Local Dimensions : " << _ldimensions << std::endl;
std::cout << GridLogMessage << "Reduced Dimensions : " << _rdimensions << std::endl;
std::cout << GridLogMessage << "Outer strides : " << _ostride << std::endl;
std::cout << GridLogMessage << "Inner strides : " << _istride << std::endl;
std::cout << GridLogMessage << "iSites : " << _isites << std::endl;
std::cout << GridLogMessage << "oSites : " << _osites << std::endl;
std::cout << GridLogMessage << "lSites : " << lSites() << std::endl;
std::cout << GridLogMessage << "gSites : " << gSites() << std::endl;
std::cout << GridLogMessage << "Nd : " << _ndimension << std::endl;
std::cout << GridLogMessage << "\tFull Dimensions : " << _fdimensions << std::endl;
std::cout << GridLogMessage << "\tSIMD layout : " << _simd_layout << std::endl;
std::cout << GridLogMessage << "\tGlobal Dimensions : " << _gdimensions << std::endl;
std::cout << GridLogMessage << "\tLocal Dimensions : " << _ldimensions << std::endl;
std::cout << GridLogMessage << "\tReduced Dimensions : " << _rdimensions << std::endl;
std::cout << GridLogMessage << "\tOuter strides : " << _ostride << std::endl;
std::cout << GridLogMessage << "\tInner strides : " << _istride << std::endl;
std::cout << GridLogMessage << "\tiSites : " << _isites << std::endl;
std::cout << GridLogMessage << "\toSites : " << _osites << std::endl;
std::cout << GridLogMessage << "\tlSites : " << lSites() << std::endl;
std::cout << GridLogMessage << "\tgSites : " << gSites() << std::endl;
std::cout << GridLogMessage << "\tNd : " << _ndimension << std::endl;
}

////////////////////////////////////////////////////////////////
@@ -85,73 +85,78 @@ public:
const std::vector<int> &simd_layout,
const std::vector<int> &processor_grid)
{
///////////////////////
// Grid information
///////////////////////
_ndimension = dimensions.size();

_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);

_ostride.resize(_ndimension);
_istride.resize(_ndimension);

_fsites = _gsites = _osites = _isites = 1;
///////////////////////
// Grid information
///////////////////////
_ndimension = dimensions.size();

for(int d=0;d<_ndimension;d++){
_fdimensions[d] = dimensions[d]; // Global dimensions
_gdimensions[d] = _fdimensions[d]; // Global dimensions
_simd_layout[d] = simd_layout[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];
_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);
_ldimensions.resize(_ndimension);
_rdimensions.resize(_ndimension);
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);

//FIXME check for exact division
_ostride.resize(_ndimension);
_istride.resize(_ndimension);

// Use a reduced simd grid
_ldimensions[d]= _gdimensions[d]/_processors[d]; //local dimensions
_rdimensions[d]= _ldimensions[d]/_simd_layout[d]; //overdecomposition
_lstart[d] = _processor_coor[d]*_ldimensions[d];
_lend[d] = _processor_coor[d]*_ldimensions[d]+_ldimensions[d]-1;
_osites *= _rdimensions[d];
_isites *= _simd_layout[d];

// Addressing support
if ( d==0 ) {
_ostride[d] = 1;
_istride[d] = 1;
} else {
_ostride[d] = _ostride[d-1]*_rdimensions[d-1];
_istride[d] = _istride[d-1]*_simd_layout[d-1];
}
_fsites = _gsites = _osites = _isites = 1;

for (int d = 0; d < _ndimension; d++)
{
_fdimensions[d] = dimensions[d]; // Global dimensions
_gdimensions[d] = _fdimensions[d]; // Global dimensions
_simd_layout[d] = simd_layout[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];

// Use a reduced simd grid
_ldimensions[d] = _gdimensions[d] / _processors[d]; //local dimensions
assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);

_rdimensions[d] = _ldimensions[d] / _simd_layout[d]; //overdecomposition
assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]);

_lstart[d] = _processor_coor[d] * _ldimensions[d];
_lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1;
_osites *= _rdimensions[d];
_isites *= _simd_layout[d];

// Addressing support
if (d == 0)
{
_ostride[d] = 1;
_istride[d] = 1;
}

///////////////////////
// subplane information
///////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);

int block =1;
int nblock=1;
for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d];

for(int d=0;d<_ndimension;d++){
nblock/=_rdimensions[d];
_slice_block[d] =block;
_slice_stride[d]=_ostride[d]*_rdimensions[d];
_slice_nblock[d]=nblock;
block = block*_rdimensions[d];
else
{
_ostride[d] = _ostride[d - 1] * _rdimensions[d - 1];
_istride[d] = _istride[d - 1] * _simd_layout[d - 1];
}
}

///////////////////////
// subplane information
///////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);

int block = 1;
int nblock = 1;
for (int d = 0; d < _ndimension; d++)
nblock *= _rdimensions[d];

for (int d = 0; d < _ndimension; d++)
{
nblock /= _rdimensions[d];
_slice_block[d] = block;
_slice_stride[d] = _ostride[d] * _rdimensions[d];
_slice_nblock[d] = nblock;
block = block * _rdimensions[d];
}
};
};

}
#endif
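The new exact-division asserts protect the lexicographic indexing built here, where index = sum over d of stride[d] * coor[d] with dimension 0 running fastest. A minimal standalone check of the stride construction (values are illustrative):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> rdims = {4, 4, 4, 8};   // reduced local dimensions
  std::vector<int> ostride(rdims.size());
  // Same recurrence as the _ostride construction above: 1, 4, 16, 64
  for (size_t d = 0; d < rdims.size(); d++)
    ostride[d] = (d == 0) ? 1 : ostride[d-1] * rdims[d-1];

  std::vector<int> coor = {1, 2, 3, 5};
  int idx = 0;
  for (size_t d = 0; d < rdims.size(); d++)
    idx += ostride[d] * (coor[d] % rdims[d]);
  assert(idx == 1 + 2*4 + 3*16 + 5*64); // 377
  return 0;
}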
@@ -164,21 +164,21 @@ public:
#endif

void Init(const std::vector<int> &dimensions,
const std::vector<int> &simd_layout,
const std::vector<int> &processor_grid,
const std::vector<int> &checker_dim_mask,
int checker_dim)
const std::vector<int> &simd_layout,
const std::vector<int> &processor_grid,
const std::vector<int> &checker_dim_mask,
int checker_dim)
{
///////////////////////
// Grid information
///////////////////////
///////////////////////
// Grid information
///////////////////////
_checker_dim = checker_dim;
assert(checker_dim_mask[checker_dim]==1);
assert(checker_dim_mask[checker_dim] == 1);
_ndimension = dimensions.size();
assert(checker_dim_mask.size()==_ndimension);
assert(processor_grid.size()==_ndimension);
assert(simd_layout.size()==_ndimension);

assert(checker_dim_mask.size() == _ndimension);
assert(processor_grid.size() == _ndimension);
assert(simd_layout.size() == _ndimension);

_fdimensions.resize(_ndimension);
_gdimensions.resize(_ndimension);
_ldimensions.resize(_ndimension);
@@ -186,114 +186,133 @@ public:
_simd_layout.resize(_ndimension);
_lstart.resize(_ndimension);
_lend.resize(_ndimension);

_ostride.resize(_ndimension);
_istride.resize(_ndimension);

_fsites = _gsites = _osites = _isites = 1;

_checker_dim_mask=checker_dim_mask;

for(int d=0;d<_ndimension;d++){
_fdimensions[d] = dimensions[d];
_gdimensions[d] = _fdimensions[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];

if (d==_checker_dim) {
_gdimensions[d] = _gdimensions[d]/2; // Remove a checkerboard
}
_ldimensions[d] = _gdimensions[d]/_processors[d];
_lstart[d] = _processor_coor[d]*_ldimensions[d];
_lend[d] = _processor_coor[d]*_ldimensions[d]+_ldimensions[d]-1;
_checker_dim_mask = checker_dim_mask;

// Use a reduced simd grid
_simd_layout[d] = simd_layout[d];
_rdimensions[d]= _ldimensions[d]/_simd_layout[d];
assert(_rdimensions[d]>0);
for (int d = 0; d < _ndimension; d++)
{
_fdimensions[d] = dimensions[d];
_gdimensions[d] = _fdimensions[d];
_fsites = _fsites * _fdimensions[d];
_gsites = _gsites * _gdimensions[d];

// all elements of a simd vector must have same checkerboard.
// If Ls vectorised, this must still be the case; e.g. dwf rb5d
if ( _simd_layout[d]>1 ) {
if ( checker_dim_mask[d] ) {
assert( (_rdimensions[d]&0x1) == 0 );
}
}
if (d == _checker_dim)
{
assert((_gdimensions[d] & 0x1) == 0);
_gdimensions[d] = _gdimensions[d] / 2; // Remove a checkerboard
}
_ldimensions[d] = _gdimensions[d] / _processors[d];
assert(_ldimensions[d] * _processors[d] == _gdimensions[d]);
_lstart[d] = _processor_coor[d] * _ldimensions[d];
_lend[d] = _processor_coor[d] * _ldimensions[d] + _ldimensions[d] - 1;

_osites *= _rdimensions[d];
_isites *= _simd_layout[d];

// Addressing support
if ( d==0 ) {
_ostride[d] = 1;
_istride[d] = 1;
} else {
_ostride[d] = _ostride[d-1]*_rdimensions[d-1];
_istride[d] = _istride[d-1]*_simd_layout[d-1];
}
// Use a reduced simd grid
_simd_layout[d] = simd_layout[d];
_rdimensions[d] = _ldimensions[d] / _simd_layout[d]; // this is not checking if this is integer
assert(_rdimensions[d] * _simd_layout[d] == _ldimensions[d]);
assert(_rdimensions[d] > 0);

// all elements of a simd vector must have same checkerboard.
// If Ls vectorised, this must still be the case; e.g. dwf rb5d
if (_simd_layout[d] > 1)
{
if (checker_dim_mask[d])
{
assert((_rdimensions[d] & 0x1) == 0);
}
}

_osites *= _rdimensions[d];
_isites *= _simd_layout[d];

// Addressing support
if (d == 0)
{
_ostride[d] = 1;
_istride[d] = 1;
}
else
{
_ostride[d] = _ostride[d - 1] * _rdimensions[d - 1];
_istride[d] = _istride[d - 1] * _simd_layout[d - 1];
}
}

////////////////////////////////////////////////////////////////////////////////////////////
// subplane information
////////////////////////////////////////////////////////////////////////////////////////////
_slice_block.resize(_ndimension);
_slice_stride.resize(_ndimension);
_slice_nblock.resize(_ndimension);

int block =1;
int nblock=1;
for(int d=0;d<_ndimension;d++) nblock*=_rdimensions[d];

for(int d=0;d<_ndimension;d++){
nblock/=_rdimensions[d];
_slice_block[d] =block;
_slice_stride[d]=_ostride[d]*_rdimensions[d];
_slice_nblock[d]=nblock;
block = block*_rdimensions[d];

int block = 1;
int nblock = 1;
for (int d = 0; d < _ndimension; d++)
nblock *= _rdimensions[d];

for (int d = 0; d < _ndimension; d++)
{
nblock /= _rdimensions[d];
_slice_block[d] = block;
_slice_stride[d] = _ostride[d] * _rdimensions[d];
_slice_nblock[d] = nblock;
block = block * _rdimensions[d];
}

////////////////////////////////////////////////
// Create a checkerboard lookup table
////////////////////////////////////////////////
int rvol = 1;
for(int d=0;d<_ndimension;d++){
rvol=rvol * _rdimensions[d];
for (int d = 0; d < _ndimension; d++)
{
rvol = rvol * _rdimensions[d];
}
_checker_board.resize(rvol);
for(int osite=0;osite<_osites;osite++){
_checker_board[osite] = CheckerBoardFromOindex (osite);
for (int osite = 0; osite < _osites; osite++)
{
_checker_board[osite] = CheckerBoardFromOindex(osite);
}

};
protected:

protected:
virtual int oIndex(std::vector<int> &coor)
{
int idx=0;
for(int d=0;d<_ndimension;d++) {
if( d==_checker_dim ) {
idx+=_ostride[d]*((coor[d]/2)%_rdimensions[d]);
} else {
idx+=_ostride[d]*(coor[d]%_rdimensions[d]);
}
int idx = 0;
for (int d = 0; d < _ndimension; d++)
{
if (d == _checker_dim)
{
idx += _ostride[d] * ((coor[d] / 2) % _rdimensions[d]);
}
else
{
idx += _ostride[d] * (coor[d] % _rdimensions[d]);
}
}
return idx;
};

virtual int iIndex(std::vector<int> &lcoor)
{
int idx=0;
for(int d=0;d<_ndimension;d++) {
if( d==_checker_dim ) {
idx+=_istride[d]*(lcoor[d]/(2*_rdimensions[d]));
} else {
idx+=_istride[d]*(lcoor[d]/_rdimensions[d]);
}
}
return idx;
int idx = 0;
for (int d = 0; d < _ndimension; d++)
{
if (d == _checker_dim)
{
idx += _istride[d] * (lcoor[d] / (2 * _rdimensions[d]));
}
else
{
idx += _istride[d] * (lcoor[d] / _rdimensions[d]);
}
}
return idx;
}
};

}
#endif
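To see what the checkerboarded indexing does: only the checkerboard dimension is halved, so its coordinate is divided by two before the usual lexicographic fold, and two sites of opposite parity share an outer index. A toy version of the idea (flat std::vector inputs; not Grid API):

#include <cassert>
#include <vector>

// Toy red-black oIndex: dimension cbdim stores only half the sites.
int rb_oindex(const std::vector<int> &coor, const std::vector<int> &rdims,
              const std::vector<int> &ostride, int cbdim) {
  int idx = 0;
  for (size_t d = 0; d < rdims.size(); d++) {
    int c = (d == (size_t)cbdim) ? coor[d] / 2 : coor[d];
    idx += ostride[d] * (c % rdims[d]);
  }
  return idx;
}

int main() {
  std::vector<int> rdims   = {2, 4};  // checkerboard dim 0 already halved
  std::vector<int> ostride = {1, 2};
  // (0,1) and (1,1) differ only in parity and share an outer index.
  assert(rb_oindex({0,1}, rdims, ostride, 0) == rb_oindex({1,1}, rdims, ostride, 0));
  return 0;
}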
@@ -26,6 +26,10 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <sys/mman.h>

namespace Grid {
@@ -33,8 +37,11 @@ namespace Grid {
// Info that is set up once and independent of cartesian layout
///////////////////////////////////////////////////////////////
void * CartesianCommunicator::ShmCommBuf;
uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 128*1024*1024;
CartesianCommunicator::CommunicatorPolicy_t CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
uint64_t CartesianCommunicator::MAX_MPI_SHM_BYTES = 1024LL*1024LL*1024LL;
CartesianCommunicator::CommunicatorPolicy_t
CartesianCommunicator::CommunicatorPolicy= CartesianCommunicator::CommunicatorPolicyConcurrent;
int CartesianCommunicator::nCommThreads = -1;
int CartesianCommunicator::Hugepages = 0;

/////////////////////////////////
// Alloc, free shmem region
@@ -89,25 +96,43 @@ void CartesianCommunicator::GlobalSumVector(ComplexD *c,int N)
GlobalSumVector((double *)c,2*N);
}

#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPI3L)
#if !defined( GRID_COMMS_MPI3)

int CartesianCommunicator::NodeCount(void) { return ProcessorCount();};
int CartesianCommunicator::RankCount(void) { return ProcessorCount();};

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes)
#endif
#if !defined( GRID_COMMS_MPI3) && !defined (GRID_COMMS_MPIT)
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes, int dir)
{
std::vector<CommsRequest_t> list;
// Discard the "dir"
SendToRecvFromBegin (list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
SendToRecvFromComplete(list);
return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes, int dir)
{
// Discard the "dir"
SendToRecvFromBegin(list,xmit,xmit_to_rank,recv,recv_from_rank,bytes);
return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
SendToRecvFromComplete(waitall);
}
#endif
#if !defined( GRID_COMMS_MPI3)

void CartesianCommunicator::StencilBarrier(void){};

commVector<uint8_t> CartesianCommunicator::ShmBufStorageVector;
@@ -121,8 +146,25 @@ void *CartesianCommunicator::ShmBufferTranslate(int rank,void * local_p) {
return NULL;
}
void CartesianCommunicator::ShmInitGeneric(void){
#if 1
int mmap_flag = MAP_SHARED | MAP_ANONYMOUS;
#ifdef MAP_HUGETLB
if ( Hugepages ) mmap_flag |= MAP_HUGETLB;
#endif
ShmCommBuf =(void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag, -1, 0);
if (ShmCommBuf == (void *)MAP_FAILED) {
perror("mmap failed ");
exit(EXIT_FAILURE);
}
#ifdef MADV_HUGEPAGE
if (!Hugepages ) madvise(ShmCommBuf,MAX_MPI_SHM_BYTES,MADV_HUGEPAGE);
#endif
#else
ShmBufStorageVector.resize(MAX_MPI_SHM_BYTES);
ShmCommBuf=(void *)&ShmBufStorageVector[0];
#endif
bzero(ShmCommBuf,MAX_MPI_SHM_BYTES);
}

#endif
@@ -38,7 +38,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifdef GRID_COMMS_MPI3
#include <mpi.h>
#endif
#ifdef GRID_COMMS_MPI3L
#ifdef GRID_COMMS_MPIT
#include <mpi.h>
#endif
#ifdef GRID_COMMS_SHMEM
@@ -50,12 +50,24 @@ namespace Grid {
class CartesianCommunicator {
public:

// 65536 ranks per node adequate for now

////////////////////////////////////////////
// Isend/Irecv/Wait, or Sendrecv blocking
////////////////////////////////////////////
enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
static CommunicatorPolicy_t CommunicatorPolicy;
static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }

///////////////////////////////////////////
// Up to 65536 ranks per node adequate for now
// 128MB shared memory for comms enough for 48^4 local vol comms
// Give external control (command line override?) of this

static const int MAXLOG2RANKSPERNODE = 16;
static uint64_t MAX_MPI_SHM_BYTES;
///////////////////////////////////////////
static const int MAXLOG2RANKSPERNODE = 16;
static uint64_t MAX_MPI_SHM_BYTES;
static int nCommThreads;
// use explicit huge pages
static int Hugepages;

// Communicator should know nothing of the physics grid, only processor grid.
int _Nprocessors; // How many in all
@@ -64,15 +76,19 @@ class CartesianCommunicator {
std::vector<int> _processor_coor; // linear processor coordinate
unsigned long _ndimension;

#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPI3L)
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT)
static MPI_Comm communicator_world;
MPI_Comm communicator;

MPI_Comm communicator;
std::vector<MPI_Comm> communicator_halo;

typedef MPI_Request CommsRequest_t;

#else
typedef int CommsRequest_t;
#endif

////////////////////////////////////////////////////////////////////
// Helper functionality for SHM Windows common to all other impls
////////////////////////////////////////////////////////////////////
@@ -118,11 +134,7 @@ class CartesianCommunicator {
/////////////////////////////////
static void * ShmCommBuf;

// Isend/Irecv/Wait, or Sendrecv blocking
enum CommunicatorPolicy_t { CommunicatorPolicyConcurrent, CommunicatorPolicySequential };
static CommunicatorPolicy_t CommunicatorPolicy;
static void SetCommunicatorPolicy(CommunicatorPolicy_t policy ) { CommunicatorPolicy = policy; }

size_t heap_top;
size_t heap_bytes;
@@ -225,14 +237,21 @@ class CartesianCommunicator {

void SendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);

double StencilSendToRecvFrom(void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir);

double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes);
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir);

void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall);
void StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int i);
void StencilBarrier(void);

////////////////////////////////////////////////////////////
@@ -37,11 +37,12 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
//#include <zlib.h>
#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000
#include <zlib.h>
#ifdef HAVE_NUMAIF_H
#include <numaif.h>
#endif

namespace Grid {

///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -197,7 +198,46 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
ShmCommBuf = 0;
ShmCommBufs.resize(ShmSize);

#if 1
////////////////////////////////////////////////////////////////////////////////////////////
// Hugetlbf and others map filesystems as mappable huge pages
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMMMAP
char shm_name [NAME_MAX];
for(int r=0;r<ShmSize;r++){

size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
sprintf(shm_name,GRID_SHM_PATH "/Grid_mpi3_shm_%d_%d",GroupRank,r);
//sprintf(shm_name,"/var/lib/hugetlbfs/group/wheel/pagesize-2MB/" "Grid_mpi3_shm_%d_%d",GroupRank,r);
// printf("Opening file %s \n",shm_name);
int fd=open(shm_name,O_RDWR|O_CREAT,0666);
if ( fd == -1) {
printf("open %s failed\n",shm_name);
perror("open hugetlbfs");
exit(0);
}
int mmap_flag = MAP_SHARED ;
#ifdef MAP_POPULATE
mmap_flag|=MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
if ( Hugepages ) mmap_flag |= MAP_HUGETLB;
#endif
void *ptr = (void *) mmap(NULL, MAX_MPI_SHM_BYTES, PROT_READ | PROT_WRITE, mmap_flag,fd, 0);
if ( ptr == (void *)MAP_FAILED ) {
printf("mmap %s failed\n",shm_name);
perror("failed mmap"); assert(0);
}
assert(((uint64_t)ptr&0x3F)==0);
ShmCommBufs[r] =ptr;

}
#endif
////////////////////////////////////////////////////////////////////////////////////////////
// POSIX SHMOPEN ; as far as I know Linux does not allow EXPLICIT HugePages with this case
// tmpfs (Larry Meadows says) does not support explicit huge page, and this is used for
// the posix shm virtual file system
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMOPEN
char shm_name [NAME_MAX];
if ( ShmRank == 0 ) {
for(int r=0;r<ShmSize;r++){
@@ -210,11 +250,39 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
int fd=shm_open(shm_name,O_RDWR|O_CREAT,0666);
if ( fd < 0 ) { perror("failed shm_open"); assert(0); }
ftruncate(fd, size);

int mmap_flag = MAP_SHARED;
#ifdef MAP_POPULATE
mmap_flag |= MAP_POPULATE;
#endif
#ifdef MAP_HUGETLB
if (Hugepages) mmap_flag |= MAP_HUGETLB;
#endif
void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, mmap_flag, fd, 0);

void * ptr = mmap(NULL,size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if ( ptr == MAP_FAILED ) { perror("failed mmap"); assert(0); }
if ( ptr == (void * )MAP_FAILED ) { perror("failed mmap"); assert(0); }
assert(((uint64_t)ptr&0x3F)==0);
ShmCommBufs[r] =ptr;

// Experiments; Experiments; Try to force numa domain on the shm segment if we have numaif.h
#if 0
//#ifdef HAVE_NUMAIF_H
int status;
int flags=MPOL_MF_MOVE;
#ifdef KNL
int nodes=1; // numa domain == MCDRAM
// Find out if in SNC2,SNC4 mode ?
#else
int nodes=r; // numa domain == MPI ID
#endif
unsigned long count=1;
for(uint64_t page=0;page<size;page+=4096){
void *pages = (void *) ( page + (uint64_t)ptr );
uint64_t *cow_it = (uint64_t *)pages; *cow_it = 1;
ierr= move_pages(0,count, &pages,&nodes,&status,flags);
if (ierr && (page==0)) perror("numa relocate command failed");
}
#endif
ShmCommBufs[r] =ptr;

}
}
@@ -236,21 +304,32 @@ void CartesianCommunicator::Init(int *argc, char ***argv) {
ShmCommBufs[r] =ptr;
}
}

#else
#endif
////////////////////////////////////////////////////////////////////////////////////////////
// SHMGET SHMAT and SHM_HUGETLB flag
////////////////////////////////////////////////////////////////////////////////////////////
#ifdef GRID_MPI3_SHMGET
std::vector<int> shmids(ShmSize);

if ( ShmRank == 0 ) {
for(int r=0;r<ShmSize;r++){
size_t size = CartesianCommunicator::MAX_MPI_SHM_BYTES;
key_t key = 0x4545 + r;
if ((shmids[r]= shmget(key,size, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
key_t key = IPC_PRIVATE;
int flags = IPC_CREAT | SHM_R | SHM_W;
#ifdef SHM_HUGETLB
if (Hugepages) flags|=SHM_HUGETLB;
#endif
if ((shmids[r]= shmget(key,size, flags)) ==-1) {
int errsv = errno;
printf("Errno %d\n",errsv);
printf("key %d\n",key);
printf("size %lld\n",size);
printf("flags %d\n",flags);
perror("shmget");
exit(1);
} else {
printf("shmid: 0x%x\n", shmids[r]);
}
printf("shmid: 0x%x\n", shmids[r]);
}
}
MPI_Barrier(ShmComm);
@@ -384,8 +463,14 @@ CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
int ierr;
communicator=communicator_world;

_ndimension = processors.size();

communicator_halo.resize (2*_ndimension);
for(int i=0;i<_ndimension*2;i++){
MPI_Comm_dup(communicator,&communicator_halo[i]);
}

////////////////////////////////////////////////////////////////
// Assert power of two shm_size.
////////////////////////////////////////////////////////////////
@@ -608,13 +693,27 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &lis
}
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes)
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
std::vector<CommsRequest_t> list;
double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
StencilSendToRecvFromComplete(list,dir);
return offbytes;
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
assert(dir < communicator_halo.size());

MPI_Request xrq;
MPI_Request rrq;

@@ -633,26 +732,26 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
gfrom = MPI_UNDEFINED;
#endif
if ( gfrom ==MPI_UNDEFINED) {
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator_halo[dir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=bytes;
}

if ( gdest == MPI_UNDEFINED ) {
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator_halo[dir],&xrq);
assert(ierr==0);
list.push_back(xrq);
off_node_bytes+=bytes;
}

if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
this->StencilSendToRecvFromComplete(list);
this->StencilSendToRecvFromComplete(list,dir);
}

return off_node_bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall)
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
SendToRecvFromComplete(waitall);
}
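The per-direction communicator_halo duplicates used above exist so that concurrent halo exchanges in different directions run in isolated message-matching contexts, even when tags collide. A stripped-down sketch of the pattern in plain MPI (standalone program; the direction count and payload are illustrative):

#include <mpi.h>
#include <vector>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  const int ndir = 8;                       // e.g. 2*Nd halo directions
  std::vector<MPI_Comm> halo(ndir);
  for (int d = 0; d < ndir; d++) MPI_Comm_dup(MPI_COMM_WORLD, &halo[d]);

  int right = (rank + 1) % size, left = (rank + size - 1) % size;
  double sendbuf = rank, recvbuf = -1;
  MPI_Request reqs[2];
  // The direction-0 exchange proceeds on its own communicator; identical
  // tags used by other directions cannot match these messages.
  MPI_Irecv(&recvbuf, 1, MPI_DOUBLE, left,  0, halo[0], &reqs[0]);
  MPI_Isend(&sendbuf, 1, MPI_DOUBLE, right, 0, halo[0], &reqs[1]);
  MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);

  for (int d = 0; d < ndir; d++) MPI_Comm_free(&halo[d]);
  MPI_Finalize();
  return 0;
}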
286
lib/communicator/Communicator_mpit.cc
Normal file
@@ -0,0 +1,286 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/communicator/Communicator_mpit.cc

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <Grid/GridQCDcore.h>
#include <Grid/qcd/action/ActionCore.h>
#include <mpi.h>

namespace Grid {

///////////////////////////////////////////////////////////////////////////////////////////////////
// Info that is setup once and independent of cartesian layout
///////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Comm CartesianCommunicator::communicator_world;

// Should error check all MPI calls.
void CartesianCommunicator::Init(int *argc, char ***argv) {
int flag;
int provided;
MPI_Initialized(&flag); // needed to coexist with other libs apparently
if ( !flag ) {
MPI_Init_thread(argc,argv,MPI_THREAD_MULTIPLE,&provided);
if ( provided != MPI_THREAD_MULTIPLE ) {
QCD::WilsonKernelsStatic::Comms = QCD::WilsonKernelsStatic::CommsThenCompute;
}
}
MPI_Comm_dup (MPI_COMM_WORLD,&communicator_world);
ShmInitGeneric();
}
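Init() above tolerates a host application that has already initialised MPI, and degrades to a comms-then-compute kernel schedule when the library cannot grant MPI_THREAD_MULTIPLE. The bare MPI pattern, as a sketch:

int flag = 0, provided = 0;
MPI_Initialized(&flag);          // coexist with a caller that already ran MPI_Init
if (!flag) {
  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
  if (provided != MPI_THREAD_MULTIPLE) {
    // fall back: never issue MPI calls from concurrent threads
  }
}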

CartesianCommunicator::CartesianCommunicator(const std::vector<int> &processors)
{
_ndimension = processors.size();
std::vector<int> periodic(_ndimension,1);

_Nprocessors=1;
_processors = processors;
_processor_coor.resize(_ndimension);

MPI_Cart_create(communicator_world, _ndimension,&_processors[0],&periodic[0],1,&communicator);
MPI_Comm_rank(communicator,&_processor);
MPI_Cart_coords(communicator,_processor,_ndimension,&_processor_coor[0]);

for(int i=0;i<_ndimension;i++){
_Nprocessors*=_processors[i];
}

communicator_halo.resize (2*_ndimension);
for(int i=0;i<_ndimension*2;i++){
MPI_Comm_dup(communicator,&communicator_halo[i]);
}

int Size;
MPI_Comm_size(communicator,&Size);

assert(Size==_Nprocessors);
}
void CartesianCommunicator::GlobalSum(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint32_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT32_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalXOR(uint64_t &u){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&u,1,MPI_UINT64_T,MPI_BXOR,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(float &f){
int ierr=MPI_Allreduce(MPI_IN_PLACE,&f,1,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(float *f,int N)
{
int ierr=MPI_Allreduce(MPI_IN_PLACE,f,N,MPI_FLOAT,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSum(double &d)
{
int ierr = MPI_Allreduce(MPI_IN_PLACE,&d,1,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::GlobalSumVector(double *d,int N)
{
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
{
int ierr=MPI_Cart_shift(communicator,dim,shift,&source,&dest);
assert(ierr==0);
}
int CartesianCommunicator::RankFromProcessorCoor(std::vector<int> &coor)
{
int rank;
int ierr=MPI_Cart_rank (communicator, &coor[0], &rank);
assert(ierr==0);
return rank;
}
void CartesianCommunicator::ProcessorCoorFromRank(int rank, std::vector<int> &coor)
{
coor.resize(_ndimension);
int ierr=MPI_Cart_coords (communicator, rank, _ndimension,&coor[0]);
assert(ierr==0);
}

// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
int dest,
void *recv,
int from,
int bytes)
{
std::vector<CommsRequest_t> reqs(0);
SendToRecvFromBegin(reqs,xmit,dest,recv,from,bytes);
SendToRecvFromComplete(reqs);
}

void CartesianCommunicator::SendRecvPacket(void *xmit,
void *recv,
int sender,
int receiver,
int bytes)
{
MPI_Status stat;
assert(sender != receiver);
int tag = sender;
if ( _processor == sender ) {
MPI_Send(xmit, bytes, MPI_CHAR,receiver,tag,communicator);
}
if ( _processor == receiver ) {
MPI_Recv(recv, bytes, MPI_CHAR,sender,tag,communicator,&stat);
}
}

// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes)
{
int myrank = _processor;
int ierr;
if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
MPI_Request xrq;
MPI_Request rrq;

ierr =MPI_Irecv(recv, bytes, MPI_CHAR,from,from,communicator,&rrq);
ierr|=MPI_Isend(xmit, bytes, MPI_CHAR,dest,_processor,communicator,&xrq);

assert(ierr==0);
list.push_back(xrq);
list.push_back(rrq);
} else {
// Give the CPU to MPI immediately; can use threads to overlap optionally
ierr=MPI_Sendrecv(xmit,bytes,MPI_CHAR,dest,myrank,
recv,bytes,MPI_CHAR,from, from,
communicator,MPI_STATUS_IGNORE);
assert(ierr==0);
}
}
void CartesianCommunicator::SendToRecvFromComplete(std::vector<CommsRequest_t> &list)
{
if ( CommunicatorPolicy == CommunicatorPolicyConcurrent ) {
int nreq=list.size();
std::vector<MPI_Status> status(nreq);
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
assert(ierr==0);
}
}
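Two completion policies appear above: the Concurrent branch posts non-blocking pairs and drains them later, while the sequential branch hands the CPU to MPI at once with a blocking MPI_Sendrecv. A sketch of the concurrent pattern:

MPI_Request reqs[2];
MPI_Irecv(recv, bytes, MPI_CHAR, from, from,   comm, &reqs[0]);
MPI_Isend(xmit, bytes, MPI_CHAR, dest, myrank, comm, &reqs[1]);
// ... unrelated work may be interleaved here ...
MPI_Waitall(2, reqs, MPI_STATUSES_IGNORE);   // completes both transfers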

void CartesianCommunicator::Barrier(void)
{
int ierr = MPI_Barrier(communicator);
assert(ierr==0);
}

void CartesianCommunicator::Broadcast(int root,void* data, int bytes)
{
int ierr=MPI_Bcast(data,
bytes,
MPI_BYTE,
root,
communicator);
assert(ierr==0);
}
///////////////////////////////////////////////////////
// Should only be used prior to Grid Init finished.
// Check for this?
///////////////////////////////////////////////////////
int CartesianCommunicator::RankWorld(void){
int r;
MPI_Comm_rank(communicator_world,&r);
return r;
}
void CartesianCommunicator::BroadcastWorld(int root,void* data, int bytes)
{
int ierr= MPI_Bcast(data,
bytes,
MPI_BYTE,
root,
communicator_world);
assert(ierr==0);
}

double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir)
{
int myrank = _processor;
int ierr;
assert(dir < communicator_halo.size());

// std::cout << " sending on communicator "<<dir<<" " <<communicator_halo[dir]<<std::endl;
// Give the CPU to MPI immediately; can use threads to overlap optionally
MPI_Request req[2];
MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[dir],&req[1]);
MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank ,myrank , communicator_halo[dir],&req[0]);

list.push_back(req[0]);
list.push_back(req[1]);
return 2.0*bytes;
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &waitall,int dir)
{
int nreq=waitall.size();
MPI_Waitall(nreq, &waitall[0], MPI_STATUSES_IGNORE);
};
double CartesianCommunicator::StencilSendToRecvFrom(void *xmit,
int xmit_to_rank,
void *recv,
int recv_from_rank,
int bytes,int dir)
{
int myrank = _processor;
int ierr;
assert(dir < communicator_halo.size());

// std::cout << " sending on communicator "<<dir<<" " <<communicator_halo[dir]<<std::endl;
// Give the CPU to MPI immediately; can use threads to overlap optionally
MPI_Request req[2];
MPI_Irecv(recv,bytes,MPI_CHAR,recv_from_rank,recv_from_rank, communicator_halo[dir],&req[1]);
MPI_Isend(xmit,bytes,MPI_CHAR,xmit_to_rank ,myrank , communicator_halo[dir],&req[0]);
MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
return 2.0*bytes;
}

}

@@ -42,7 +42,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/cshift/Cshift_mpi.h>
#endif

#ifdef GRID_COMMS_MPI3L
#ifdef GRID_COMMS_MPIT
#include <Grid/cshift/Cshift_mpi.h>
#endif

16252
lib/json/json.hpp
File diff suppressed because it is too large
@@ -369,6 +369,7 @@ static void sliceMaddVector(Lattice<vobj> &R,std::vector<RealD> &a,const Lattice
}
};

/*
inline GridBase *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Orthog)
{
int NN = BlockSolverGrid->_ndimension;
@@ -387,6 +388,7 @@ inline GridBase *makeSubSliceGrid(const GridBase *BlockSolverGrid,int Or
}
return (GridBase *)new GridCartesian(latt_phys,simd_phys,mpi_phys);
}
*/

template<class vobj>
static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<vobj> &X,const Lattice<vobj> &Y,int Orthog,RealD scale=1.0)
@@ -398,14 +400,15 @@ static void sliceMaddMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice
int Nblock = X._grid->GlobalDimensions()[Orthog];

GridBase *FullGrid = X._grid;
GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
// GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);

Lattice<vobj> Xslice(SliceGrid);
Lattice<vobj> Rslice(SliceGrid);
// Lattice<vobj> Xslice(SliceGrid);
// Lattice<vobj> Rslice(SliceGrid);

assert( FullGrid->_simd_layout[Orthog]==1);
int nh = FullGrid->_ndimension;
int nl = SliceGrid->_ndimension;
// int nl = SliceGrid->_ndimension;
int nl = nh-1;

//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
@@ -448,14 +451,14 @@ static void sliceMulMatrix (Lattice<vobj> &R,Eigen::MatrixXcd &aa,const Lattice<
int Nblock = X._grid->GlobalDimensions()[Orthog];

GridBase *FullGrid = X._grid;
GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);

Lattice<vobj> Xslice(SliceGrid);
Lattice<vobj> Rslice(SliceGrid);
// GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
// Lattice<vobj> Xslice(SliceGrid);
// Lattice<vobj> Rslice(SliceGrid);

assert( FullGrid->_simd_layout[Orthog]==1);
int nh = FullGrid->_ndimension;
int nl = SliceGrid->_ndimension;
// int nl = SliceGrid->_ndimension;
int nl=1;

//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
@@ -498,18 +501,19 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>
typedef typename vobj::vector_type vector_type;

GridBase *FullGrid = lhs._grid;
GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);
// GridBase *SliceGrid = makeSubSliceGrid(FullGrid,Orthog);

int Nblock = FullGrid->GlobalDimensions()[Orthog];

Lattice<vobj> Lslice(SliceGrid);
Lattice<vobj> Rslice(SliceGrid);
// Lattice<vobj> Lslice(SliceGrid);
// Lattice<vobj> Rslice(SliceGrid);

mat = Eigen::MatrixXcd::Zero(Nblock,Nblock);

assert( FullGrid->_simd_layout[Orthog]==1);
int nh = FullGrid->_ndimension;
int nl = SliceGrid->_ndimension;
// int nl = SliceGrid->_ndimension;
int nl = nh-1;

//FIXME package in a convenient iterator
//Should loop over a plane orthogonal to direction "Orthog"
@@ -549,6 +553,14 @@ static void sliceInnerProductMatrix( Eigen::MatrixXcd &mat, const Lattice<vobj>
mat += mat_thread;
}
}

for(int i=0;i<Nblock;i++){
for(int j=0;j<Nblock;j++){
ComplexD sum = mat(i,j);
FullGrid->GlobalSum(sum);
mat(i,j)=sum;
}}

return;
}

@@ -95,7 +95,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
////////////////////////////////////////////////////////////
void Grid_quiesce_nodes(void) {
int me = 0;
#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPI3L)
#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPIT)
MPI_Comm_rank(MPI_COMM_WORLD, &me);
#endif
#ifdef GRID_COMMS_SHMEM
@@ -29,7 +29,7 @@
#ifndef GRID_BINARY_IO_H
#define GRID_BINARY_IO_H

#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3)
#if defined(GRID_COMMS_MPI) || defined(GRID_COMMS_MPI3) || defined(GRID_COMMS_MPIT)
#define USE_MPI_IO
#else
#undef USE_MPI_IO
@@ -98,35 +98,39 @@ class BinaryIO {

NerscChecksum(grid,scalardata,nersc_csum);
}

template<class fobj> static inline void NerscChecksum(GridBase *grid,std::vector<fobj> &fbuf,uint32_t &nersc_csum)

template <class fobj>
static inline void NerscChecksum(GridBase *grid, std::vector<fobj> &fbuf, uint32_t &nersc_csum)
{
const uint64_t size32 = sizeof(fobj)/sizeof(uint32_t);
const uint64_t size32 = sizeof(fobj) / sizeof(uint32_t);

uint64_t lsites =grid->lSites();
if (fbuf.size()==1) {
lsites=1;
uint64_t lsites = grid->lSites();
if (fbuf.size() == 1)
{
lsites = 1;
}

#pragma omp parallel
{
uint32_t nersc_csum_thr=0;
#pragma omp parallel
{
uint32_t nersc_csum_thr = 0;

#pragma omp for
for(uint64_t local_site=0;local_site<lsites;local_site++){
uint32_t * site_buf = (uint32_t *)&fbuf[local_site];
for(uint64_t j=0;j<size32;j++){
nersc_csum_thr=nersc_csum_thr+site_buf[j];
}
#pragma omp for
for (uint64_t local_site = 0; local_site < lsites; local_site++)
{
uint32_t *site_buf = (uint32_t *)&fbuf[local_site];
for (uint64_t j = 0; j < size32; j++)
{
nersc_csum_thr = nersc_csum_thr + site_buf[j];
}
}

#pragma omp critical
#pragma omp critical
{
nersc_csum += nersc_csum_thr;
nersc_csum += nersc_csum_thr;
}
}
}

template<class fobj> static inline void ScidacChecksum(GridBase *grid,std::vector<fobj> &fbuf,uint32_t &scidac_csuma,uint32_t &scidac_csumb)
{
const uint64_t size32 = sizeof(fobj)/sizeof(uint32_t);
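The reformatted NerscChecksum above accumulates a per-thread partial sum over 32-bit words and merges the partials in an omp critical section. A self-contained sketch of the same reduction pattern (hypothetical buffer, not Grid code):

#include <cstdint>
#include <vector>

uint32_t word_checksum(const std::vector<uint32_t>& words) {
  uint32_t csum = 0;
#pragma omp parallel
  {
    uint32_t csum_thr = 0;               // thread-private partial sum
#pragma omp for
    for (int64_t i = 0; i < (int64_t)words.size(); i++) csum_thr += words[i];
#pragma omp critical
    { csum += csum_thr; }                // serialised merge of the partials
  }
  return csum;
}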
@@ -266,7 +270,7 @@ class BinaryIO {
grid->Barrier();
GridStopWatch timer;
GridStopWatch bstimer;

nersc_csum=0;
scidac_csuma=0;
scidac_csumb=0;
@@ -362,18 +366,22 @@ class BinaryIO {
#else
assert(0);
#endif
} else {
std::cout<< GridLogMessage<< "C++ read I/O "<< file<<" : "
<< iodata.size()*sizeof(fobj)<<" bytes"<<std::endl;
std::ifstream fin;
fin.open(file,std::ios::binary|std::ios::in);
if ( control & BINARYIO_MASTER_APPEND ) {
fin.seekg(-sizeof(fobj),fin.end);
} else {
fin.seekg(offset+myrank*lsites*sizeof(fobj));
}
fin.read((char *)&iodata[0],iodata.size()*sizeof(fobj));assert( fin.fail()==0);
fin.close();
} else {
std::cout << GridLogMessage << "C++ read I/O " << file << " : "
<< iodata.size() * sizeof(fobj) << " bytes" << std::endl;
std::ifstream fin;
fin.open(file, std::ios::binary | std::ios::in);
if (control & BINARYIO_MASTER_APPEND)
{
fin.seekg(-sizeof(fobj), fin.end);
}
else
{
fin.seekg(offset + myrank * lsites * sizeof(fobj));
}
fin.read((char *)&iodata[0], iodata.size() * sizeof(fobj));
assert(fin.fail() == 0);
fin.close();
}
timer.Stop();

@@ -405,30 +413,78 @@ class BinaryIO {
timer.Start();
if ( (control & BINARYIO_LEXICOGRAPHIC) && (nrank > 1) ) {
#ifdef USE_MPI_IO
std::cout<< GridLogMessage<< "MPI write I/O "<< file<< std::endl;
ierr=MPI_File_open(grid->communicator,(char *) file.c_str(), MPI_MODE_RDWR|MPI_MODE_CREATE,MPI_INFO_NULL, &fh); assert(ierr==0);
ierr=MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL); assert(ierr==0);
ierr=MPI_File_write_all(fh, &iodata[0], 1, localArray, &status); assert(ierr==0);
MPI_File_close(&fh);
MPI_Type_free(&fileArray);
MPI_Type_free(&localArray);
std::cout << GridLogMessage << "MPI write I/O " << file << std::endl;
ierr = MPI_File_open(grid->communicator, (char *)file.c_str(), MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh);
std::cout << GridLogMessage << "Checking for errors" << std::endl;
if (ierr != MPI_SUCCESS)
{
char error_string[BUFSIZ];
int length_of_error_string, error_class;

MPI_Error_class(ierr, &error_class);
MPI_Error_string(error_class, error_string, &length_of_error_string);
fprintf(stderr, "%3d: %s\n", myrank, error_string);
MPI_Error_string(ierr, error_string, &length_of_error_string);
fprintf(stderr, "%3d: %s\n", myrank, error_string);
MPI_Abort(MPI_COMM_WORLD, 1); //assert(ierr == 0);
}

std::cout << GridLogDebug << "MPI read I/O set view " << file << std::endl;
ierr = MPI_File_set_view(fh, disp, mpiObject, fileArray, "native", MPI_INFO_NULL);
assert(ierr == 0);

std::cout << GridLogDebug << "MPI read I/O write all " << file << std::endl;
ierr = MPI_File_write_all(fh, &iodata[0], 1, localArray, &status);
assert(ierr == 0);

MPI_File_close(&fh);
MPI_Type_free(&fileArray);
MPI_Type_free(&localArray);
#else
assert(0);
#endif
} else {
std::ofstream fout; fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
std::cout<< GridLogMessage<< "C++ write I/O "<< file<<" : "
<< iodata.size()*sizeof(fobj)<<" bytes"<<std::endl;
if ( control & BINARYIO_MASTER_APPEND ) {

std::ofstream fout;
fout.exceptions ( std::fstream::failbit | std::fstream::badbit );
try {
fout.open(file,std::ios::binary|std::ios::out|std::ios::in);
} catch (const std::fstream::failure& exc) {
std::cout << GridLogError << "Error in opening the file " << file << " for output" <<std::endl;
std::cout << GridLogError << "Exception description: " << exc.what() << std::endl;
std::cout << GridLogError << "Probable cause: wrong path, inaccessible location "<< std::endl;
#ifdef USE_MPI_IO
MPI_Abort(MPI_COMM_WORLD,1);
#else
exit(1);
#endif
}
std::cout << GridLogMessage<< "C++ write I/O "<< file<<" : "
<< iodata.size()*sizeof(fobj)<<" bytes"<<std::endl;

if ( control & BINARYIO_MASTER_APPEND ) {
fout.seekp(0,fout.end);
} else {
fout.seekp(offset+myrank*lsites*sizeof(fobj));
}
fout.write((char *)&iodata[0],iodata.size()*sizeof(fobj));assert( fout.fail()==0);

try {
fout.write((char *)&iodata[0],iodata.size()*sizeof(fobj));//assert( fout.fail()==0);
}
catch (const std::fstream::failure& exc) {
std::cout << "Exception in writing file " << file << std::endl;
std::cout << GridLogError << "Exception description: "<< exc.what() << std::endl;
#ifdef USE_MPI_IO
MPI_Abort(MPI_COMM_WORLD,1);
#else
exit(1);
#endif
}

fout.close();
}
timer.Stop();
}
}
timer.Stop();
}

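The MPI_File_open error path above decodes both the error class and the implementation-specific error string before aborting. The same pattern as a reusable sketch (hypothetical helper name):

void report_mpi_error(int ierr, int rank) {
  if (ierr == MPI_SUCCESS) return;
  char msg[MPI_MAX_ERROR_STRING];
  int len, eclass;
  MPI_Error_class(ierr, &eclass);        // portable error category
  MPI_Error_string(eclass, msg, &len);
  fprintf(stderr, "%3d: %s\n", rank, msg);
  MPI_Error_string(ierr, msg, &len);     // implementation-specific detail
  fprintf(stderr, "%3d: %s\n", rank, msg);
  MPI_Abort(MPI_COMM_WORLD, 1);
}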
std::cout<<GridLogMessage<<"IOobject: ";
if ( control & BINARYIO_READ) std::cout << " read ";
@@ -442,11 +498,14 @@ class BinaryIO {
//////////////////////////////////////////////////////////////////////////////
// Safety check
//////////////////////////////////////////////////////////////////////////////
grid->Barrier();
grid->GlobalSum(nersc_csum);
grid->GlobalXOR(scidac_csuma);
grid->GlobalXOR(scidac_csumb);
grid->Barrier();
// if the data size is 1 we do not want to sum over the MPI ranks
if (iodata.size() != 1){
grid->Barrier();
grid->GlobalSum(nersc_csum);
grid->GlobalXOR(scidac_csuma);
grid->GlobalXOR(scidac_csumb);
grid->Barrier();
}
}

/////////////////////////////////////////////////////////////////////////////
@@ -546,9 +605,9 @@ class BinaryIO {
int gsites = grid->gSites();
int lsites = grid->lSites();

uint32_t nersc_csum_tmp;
uint32_t scidac_csuma_tmp;
uint32_t scidac_csumb_tmp;
uint32_t nersc_csum_tmp = 0;
uint32_t scidac_csuma_tmp = 0;
uint32_t scidac_csumb_tmp = 0;

GridStopWatch timer;

@@ -40,7 +40,7 @@ const PerformanceCounter::PerformanceCounterConfig PerformanceCounter::Performan
{ PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES , "CPUCYCLES.........." , INSTRUCTIONS},
{ PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS , "INSTRUCTIONS......." , CPUCYCLES },
// 4
#ifdef AVX512
#ifdef KNL
{ PERF_TYPE_RAW, RawConfig(0x40,0x04), "ALL_LOADS..........", CPUCYCLES },
{ PERF_TYPE_RAW, RawConfig(0x01,0x04), "L1_MISS_LOADS......", L1D_READ_ACCESS },
{ PERF_TYPE_RAW, RawConfig(0x40,0x04), "ALL_LOADS..........", L1D_READ_ACCESS },

100
lib/qcd/action/fermion/AbstractEOFAFermion.h
Normal file
@@ -0,0 +1,100 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/AbstractEOFAFermion.h

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_ABSTRACT_EOFA_FERMION_H
#define GRID_QCD_ABSTRACT_EOFA_FERMION_H

#include <Grid/qcd/action/fermion/CayleyFermion5D.h>

namespace Grid {
namespace QCD {

// DJM: Abstract base class for EOFA fermion types.
// Defines layout of additional EOFA-specific parameters and operators.
// Use to construct EOFA pseudofermion actions that are agnostic to
// Shamir / Mobius / etc., and ensure that no one can construct EOFA
// pseudofermion action with non-EOFA fermion type.
template<class Impl>
class AbstractEOFAFermion : public CayleyFermion5D<Impl> {
public:
INHERIT_IMPL_TYPES(Impl);

public:
// Fermion operator: D(mq1) + shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm}
RealD mq1;
RealD mq2;
RealD mq3;
RealD shift;
int pm;

RealD alpha; // Mobius scale
RealD k; // EOFA normalization constant

virtual void Instantiatable(void) = 0;

// EOFA-specific operations
// Force user to implement in derived classes
virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag) = 0;
virtual void Dtilde (const FermionField& in, FermionField& out) = 0;
virtual void DtildeInv(const FermionField& in, FermionField& out) = 0;

// Implement derivatives in base class:
// for EOFA both DWF and Mobius just need d(Dw)/dU
virtual void MDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){
this->DhopDeriv(mat, U, V, dag);
};
virtual void MoeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){
this->DhopDerivOE(mat, U, V, dag);
};
virtual void MeoDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag){
this->DhopDerivEO(mat, U, V, dag);
};

// Recompute 5D coefficients for different value of shift constant
// (needed for heatbath loop over poles)
virtual void RefreshShiftCoefficients(RealD new_shift) = 0;

// Constructors
AbstractEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid,
GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid,
RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int _pm,
RealD _M5, RealD _b, RealD _c, const ImplParams& p=ImplParams())
: CayleyFermion5D<Impl>(_Umu, FiveDimGrid, FiveDimRedBlackGrid, FourDimGrid, FourDimRedBlackGrid,
_mq1, _M5, p), mq1(_mq1), mq2(_mq2), mq3(_mq3), shift(_shift), pm(_pm)
{
int Ls = this->Ls;
this->alpha = _b + _c;
this->k = this->alpha * (_mq3-_mq2) * std::pow(this->alpha+1.0,2*Ls) /
( std::pow(this->alpha+1.0,Ls) + _mq2*std::pow(this->alpha-1.0,Ls) ) /
( std::pow(this->alpha+1.0,Ls) + _mq3*std::pow(this->alpha-1.0,Ls) );
};
};
}}

#endif
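Written out, the normalisation constant k set in the constructor above is

k = \frac{\alpha\,(m_{q3}-m_{q2})\,(\alpha+1)^{2L_s}}
    {\left[(\alpha+1)^{L_s}+m_{q2}(\alpha-1)^{L_s}\right]
     \left[(\alpha+1)^{L_s}+m_{q3}(\alpha-1)^{L_s}\right]},
\qquad \alpha = b + c,

with \alpha = 1 (b = 1, c = 0) being the values the Shamir domain-wall variant passes in below.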
@@ -414,7 +414,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
for(int i=0; i < Ls; i++){
as[i] = 1.0;
omega[i] = gamma[i]*zolo_hi; //NB reciprocal relative to Chroma NEF code
// assert(fabs(omega[i])>0.0);
assert(omega[i]!=Coeff_t(0.0));
bs[i] = 0.5*(bpc/omega[i] + bmc);
cs[i] = 0.5*(bpc/omega[i] - bmc);
}
@@ -429,7 +429,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co

for(int i=0;i<Ls;i++){
bee[i]=as[i]*(bs[i]*(4.0-this->M5) +1.0);
// assert(fabs(bee[i])>0.0);
assert(bee[i]!=Coeff_t(0.0));
cee[i]=as[i]*(1.0-cs[i]*(4.0-this->M5));
beo[i]=as[i]*bs[i];
ceo[i]=-as[i]*cs[i];
@@ -455,11 +455,17 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
dee[i] = bee[i];

if ( i < Ls-1 ) {

assert(bee[i]!=Coeff_t(0.0));
assert(bee[0]!=Coeff_t(0.0));

lee[i] =-cee[i+1]/bee[i]; // sub-diag entry on the ith column

leem[i]=mass*cee[Ls-1]/bee[0];
for(int j=0;j<i;j++) leem[i]*= aee[j]/bee[j+1];
for(int j=0;j<i;j++) {
assert(bee[j+1]!=Coeff_t(0.0));
leem[i]*= aee[j]/bee[j+1];
}

uee[i] =-aee[i]/bee[i]; // up-diag entry on the ith row

@@ -478,7 +484,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
{
Coeff_t delta_d=mass*cee[Ls-1];
for(int j=0;j<Ls-1;j++) {
// assert(fabs(bee[j])>0.0);
assert(bee[j] != Coeff_t(0.0));
delta_d *= cee[j]/bee[j];
}
dee[Ls-1] += delta_d;

@@ -1,6 +1,6 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid
Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/CayleyFermion5D.h

@@ -35,24 +35,24 @@ namespace Grid {

namespace QCD {

template<typename T> struct switcheroo {
static inline int iscomplex() { return 0; }
template<typename T> struct switcheroo {
static inline int iscomplex() { return 0; }

template<class vec>
static inline vec mult(vec a, vec b) {
return real_mult(a,b);
}
};
template<> struct switcheroo<ComplexD> {
static inline int iscomplex() { return 1; }
template<> struct switcheroo<ComplexD> {
static inline int iscomplex() { return 1; }

template<class vec>
static inline vec mult(vec a, vec b) {
return a*b;
}
};
template<> struct switcheroo<ComplexF> {
static inline int iscomplex() { return 1; }
template<> struct switcheroo<ComplexF> {
static inline int iscomplex() { return 1; }
template<class vec>
static inline vec mult(vec a, vec b) {
return a*b;
@@ -90,14 +90,14 @@ namespace Grid {
// Instantiate different versions depending on Impl
/////////////////////////////////////////////////////
void M5D(const FermionField &psi,
const FermionField &phi,
const FermionField &phi,
FermionField &chi,
std::vector<Coeff_t> &lower,
std::vector<Coeff_t> &diag,
std::vector<Coeff_t> &upper);

void M5Ddag(const FermionField &psi,
const FermionField &phi,
const FermionField &phi,
FermionField &chi,
std::vector<Coeff_t> &lower,
std::vector<Coeff_t> &diag,
@@ -125,7 +125,7 @@ namespace Grid {

// Efficient support for multigrid coarsening
virtual void Mdir (const FermionField &in, FermionField &out,int dir,int disp);


void Meooe5D (const FermionField &in, FermionField &out);
void MeooeDag5D (const FermionField &in, FermionField &out);

@@ -133,23 +133,23 @@ namespace Grid {
RealD mass;

// Cayley form Moebius (tanh and zolotarev)
std::vector<Coeff_t> omega;
std::vector<Coeff_t> omega;
std::vector<Coeff_t> bs; // S dependent coeffs
std::vector<Coeff_t> cs;
std::vector<Coeff_t> as;
std::vector<Coeff_t> cs;
std::vector<Coeff_t> as;
// For preconditioning Cayley form
std::vector<Coeff_t> bee;
std::vector<Coeff_t> cee;
std::vector<Coeff_t> aee;
std::vector<Coeff_t> beo;
std::vector<Coeff_t> ceo;
std::vector<Coeff_t> aeo;
std::vector<Coeff_t> bee;
std::vector<Coeff_t> cee;
std::vector<Coeff_t> aee;
std::vector<Coeff_t> beo;
std::vector<Coeff_t> ceo;
std::vector<Coeff_t> aeo;
// LDU factorisation of the eeoo matrix
std::vector<Coeff_t> lee;
std::vector<Coeff_t> leem;
std::vector<Coeff_t> uee;
std::vector<Coeff_t> ueem;
std::vector<Coeff_t> dee;
std::vector<Coeff_t> lee;
std::vector<Coeff_t> leem;
std::vector<Coeff_t> uee;
std::vector<Coeff_t> ueem;
std::vector<Coeff_t> dee;

// Matrices of 5d ee inverse params
Vector<iSinglet<Simd> > MatpInv;
@@ -165,7 +165,7 @@ namespace Grid {
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mass,RealD _M5,const ImplParams &p= ImplParams());


void CayleyReport(void);
void CayleyZeroCounters(void);
@@ -179,9 +179,9 @@ namespace Grid {
double MooeeInvTime;

protected:
void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c);
void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c);
void SetCoefficientsInternal(RealD zolo_hi,std::vector<Coeff_t> & gamma,RealD b,RealD c);
virtual void SetCoefficientsZolotarev(RealD zolohi,Approx::zolotarev_data *zdata,RealD b,RealD c);
virtual void SetCoefficientsTanh(Approx::zolotarev_data *zdata,RealD b,RealD c);
virtual void SetCoefficientsInternal(RealD zolo_hi,std::vector<Coeff_t> & gamma,RealD b,RealD c);
};

}

438
lib/qcd/action/fermion/DomainWallEOFAFermion.cc
Normal file
@@ -0,0 +1,438 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermion.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid_Eigen_Dense.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>

namespace Grid {
namespace QCD {

template<class Impl>
DomainWallEOFAFermion<Impl>::DomainWallEOFAFermion(
GaugeField &_Umu,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mq1, RealD _mq2, RealD _mq3,
RealD _shift, int _pm, RealD _M5, const ImplParams &p) :
AbstractEOFAFermion<Impl>(_Umu, FiveDimGrid, FiveDimRedBlackGrid,
FourDimGrid, FourDimRedBlackGrid, _mq1, _mq2, _mq3,
_shift, _pm, _M5, 1.0, 0.0, p)
{
RealD eps = 1.0;
Approx::zolotarev_data *zdata = Approx::higham(eps,this->Ls);
assert(zdata->n == this->Ls);

std::cout << GridLogMessage << "DomainWallEOFAFermion with Ls=" << this->Ls << std::endl;
this->SetCoefficientsTanh(zdata, 1.0, 0.0);

Approx::zolotarev_free(zdata);
}

/***************************************************************
/* Additional EOFA operators only called outside the inverter.
/* Since speed is not essential, simple axpby-style
/* implementations should be fine.
/***************************************************************/
template<class Impl>
void DomainWallEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
{
int Ls = this->Ls;

Din = zero;
if((sign == 1) && (dag == 0)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, Ls-1, 0); }
else if((sign == -1) && (dag == 0)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, 0); }
else if((sign == 1 ) && (dag == 1)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, Ls-1); }
else if((sign == -1) && (dag == 1)){ axpby_ssp(Din, 0.0, psi, 1.0, psi, 0, 0); }
}

// This is just the identity for DWF
template<class Impl>
void DomainWallEOFAFermion<Impl>::Dtilde(const FermionField& psi, FermionField& chi){ chi = psi; }

// This is just the identity for DWF
template<class Impl>
void DomainWallEOFAFermion<Impl>::DtildeInv(const FermionField& psi, FermionField& chi){ chi = psi; }

/*****************************************************************************************************/

template<class Impl>
RealD DomainWallEOFAFermion<Impl>::M(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

FermionField Din(psi._grid);

this->Meooe5D(psi, Din);
this->DW(Din, chi, DaggerNo);
axpby(chi, 1.0, 1.0, chi, psi);
this->M5D(psi, chi);
return(norm2(chi));
}

template<class Impl>
RealD DomainWallEOFAFermion<Impl>::Mdag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

FermionField Din(psi._grid);

this->DW(psi, Din, DaggerYes);
this->MeooeDag5D(Din, chi);
this->M5Ddag(psi, chi);
axpby(chi, 1.0, 1.0, chi, psi);
return(norm2(chi));
}

/********************************************************************
/* Performance critical fermion operators called inside the inverter
/********************************************************************/

template<class Impl>
void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;
int pm = this->pm;
RealD shift = this->shift;
RealD mq1 = this->mq1;
RealD mq2 = this->mq2;
RealD mq3 = this->mq3;

// coefficients for shift operator ( = shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm} )
Coeff_t shiftp(0.0), shiftm(0.0);
if(shift != 0.0){
if(pm == 1){ shiftp = shift*(mq3-mq2); }
else{ shiftm = -shift*(mq3-mq2); }
}

std::vector<Coeff_t> diag(Ls,1.0);
std::vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1] = mq1 + shiftm;
std::vector<Coeff_t> lower(Ls,-1.0); lower[0] = mq1 + shiftp;

#if(0)
std::cout << GridLogMessage << "DomainWallEOFAFermion::M5D(FF&,FF&):" << std::endl;
for(int i=0; i<diag.size(); ++i){
std::cout << GridLogMessage << "diag[" << i << "] =" << diag[i] << std::endl;
}
for(int i=0; i<upper.size(); ++i){
std::cout << GridLogMessage << "upper[" << i << "] =" << upper[i] << std::endl;
}
for(int i=0; i<lower.size(); ++i){
std::cout << GridLogMessage << "lower[" << i << "] =" << lower[i] << std::endl;
}
#endif

this->M5D(psi, chi, chi, lower, diag, upper);
}

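For orientation, the tridiagonal-with-corners action applied by the M5D call above (implemented in DomainWallEOFAFermioncache.cc later in this commit) is, slice by slice in the fifth dimension,

\chi_s = d_s\,\phi_s + u_s\,P_-\,\psi_{(s+1)\bmod L_s} + l_s\,P_+\,\psi_{(s-1)\bmod L_s},

with d_s = 1 and u_s = l_s = -1 in the bulk, while the wrap-around entries u_{L_s-1} and l_0 carry the mass mq1 plus the shift contributions shiftm and shiftp set above.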

template<class Impl>
void DomainWallEOFAFermion<Impl>::M5Ddag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;
int pm = this->pm;
RealD shift = this->shift;
RealD mq1 = this->mq1;
RealD mq2 = this->mq2;
RealD mq3 = this->mq3;

// coefficients for shift operator ( = shift*\gamma_{5}*R_{5}*\Delta_{\pm}(mq2,mq3)*P_{\pm} )
Coeff_t shiftp(0.0), shiftm(0.0);
if(shift != 0.0){
if(pm == 1){ shiftp = shift*(mq3-mq2); }
else{ shiftm = -shift*(mq3-mq2); }
}

std::vector<Coeff_t> diag(Ls,1.0);
std::vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1] = mq1 + shiftp;
std::vector<Coeff_t> lower(Ls,-1.0); lower[0] = mq1 + shiftm;

#if(0)
std::cout << GridLogMessage << "DomainWallEOFAFermion::M5Ddag(FF&,FF&):" << std::endl;
for(int i=0; i<diag.size(); ++i){
std::cout << GridLogMessage << "diag[" << i << "] =" << diag[i] << std::endl;
}
for(int i=0; i<upper.size(); ++i){
std::cout << GridLogMessage << "upper[" << i << "] =" << upper[i] << std::endl;
}
for(int i=0; i<lower.size(); ++i){
std::cout << GridLogMessage << "lower[" << i << "] =" << lower[i] << std::endl;
}
#endif

this->M5Ddag(psi, chi, chi, lower, diag, upper);
}

// half checkerboard operations
template<class Impl>
void DomainWallEOFAFermion<Impl>::Mooee(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

std::vector<Coeff_t> diag = this->bee;
std::vector<Coeff_t> upper(Ls);
std::vector<Coeff_t> lower(Ls);

for(int s=0; s<Ls; s++){
upper[s] = -this->cee[s];
lower[s] = -this->cee[s];
}
upper[Ls-1] = this->dm;
lower[0] = this->dp;

this->M5D(psi, psi, chi, lower, diag, upper);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeDag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

std::vector<Coeff_t> diag = this->bee;
std::vector<Coeff_t> upper(Ls);
std::vector<Coeff_t> lower(Ls);

for(int s=0; s<Ls; s++){
upper[s] = -this->cee[s];
lower[s] = -this->cee[s];
}
upper[Ls-1] = this->dp;
lower[0] = this->dm;

this->M5Ddag(psi, psi, chi, lower, diag, upper);
}

/****************************************************************************************/

//Zolo
template<class Impl>
void DomainWallEOFAFermion<Impl>::SetCoefficientsInternal(RealD zolo_hi, std::vector<Coeff_t>& gamma, RealD b, RealD c)
{
int Ls = this->Ls;
int pm = this->pm;
RealD mq1 = this->mq1;
RealD mq2 = this->mq2;
RealD mq3 = this->mq3;
RealD shift = this->shift;

////////////////////////////////////////////////////////
// Constants for the preconditioned matrix Cayley form
////////////////////////////////////////////////////////
this->bs.resize(Ls);
this->cs.resize(Ls);
this->aee.resize(Ls);
this->aeo.resize(Ls);
this->bee.resize(Ls);
this->beo.resize(Ls);
this->cee.resize(Ls);
this->ceo.resize(Ls);

for(int i=0; i<Ls; ++i){
this->bee[i] = 4.0 - this->M5 + 1.0;
this->cee[i] = 1.0;
}

for(int i=0; i<Ls; ++i){
this->aee[i] = this->cee[i];
this->bs[i] = this->beo[i] = 1.0;
this->cs[i] = this->ceo[i] = 0.0;
}

//////////////////////////////////////////
// EOFA shift terms
//////////////////////////////////////////
if(pm == 1){
this->dp = mq1*this->cee[0] + shift*(mq3-mq2);
this->dm = mq1*this->cee[Ls-1];
} else if(this->pm == -1) {
this->dp = mq1*this->cee[0];
this->dm = mq1*this->cee[Ls-1] - shift*(mq3-mq2);
} else {
this->dp = mq1*this->cee[0];
this->dm = mq1*this->cee[Ls-1];
}

//////////////////////////////////////////
// LDU decomposition of eeoo
//////////////////////////////////////////
this->dee.resize(Ls+1);
this->lee.resize(Ls);
this->leem.resize(Ls);
this->uee.resize(Ls);
this->ueem.resize(Ls);

for(int i=0; i<Ls; ++i){

if(i < Ls-1){

this->lee[i] = -this->cee[i+1]/this->bee[i]; // sub-diag entry on the ith column

this->leem[i] = this->dm/this->bee[i];
for(int j=0; j<i; j++){ this->leem[i] *= this->aee[j]/this->bee[j]; }

this->dee[i] = this->bee[i];

this->uee[i] = -this->aee[i]/this->bee[i]; // up-diag entry on the ith row

this->ueem[i] = this->dp / this->bee[0];
for(int j=1; j<=i; j++){ this->ueem[i] *= this->cee[j]/this->bee[j]; }

} else {

this->lee[i] = 0.0;
this->leem[i] = 0.0;
this->uee[i] = 0.0;
this->ueem[i] = 0.0;

}
}

{
Coeff_t delta_d = 1.0 / this->bee[0];
for(int j=1; j<Ls-1; j++){ delta_d *= this->cee[j] / this->bee[j]; }
this->dee[Ls-1] = this->bee[Ls-1] + this->cee[0] * this->dm * delta_d;
this->dee[Ls] = this->bee[Ls-1] + this->cee[Ls-1] * this->dp * delta_d;
}
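The arrays filled above encode a bordered LDU factorisation of the even-even matrix: lee and uee hold the sub- and super-diagonal ratios, leem and ueem the dense last row and column generated by the corner couplings dm and dp, and dee the diagonal pivots, whose final entries absorb the corner contribution. Schematically, and as a sketch only (index placement follows the code above),

M_{ee} = L\,D\,U, \qquad D = \mathrm{diag}(d_0,\dots),

with L unit lower bidiagonal apart from a filled last row (the leem entries) and U unit upper bidiagonal apart from a filled last column (the ueem entries), so each application of M_{ee}^{-1} costs O(L_s) per site via forward and backward substitution.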

int inv = 1;
this->MooeeInternalCompute(0, inv, this->MatpInv, this->MatmInv);
this->MooeeInternalCompute(1, inv, this->MatpInvDag, this->MatmInvDag);
}

// Recompute Cayley-form coefficients for different shift
template<class Impl>
void DomainWallEOFAFermion<Impl>::RefreshShiftCoefficients(RealD new_shift)
{
this->shift = new_shift;
Approx::zolotarev_data *zdata = Approx::higham(1.0, this->Ls);
this->SetCoefficientsTanh(zdata, 1.0, 0.0);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInternalCompute(int dag, int inv,
Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
int Ls = this->Ls;

GridBase* grid = this->FermionRedBlackGrid();
int LLs = grid->_rdimensions[0];

if(LLs == Ls){ return; } // Not vectorised in 5th direction

Eigen::MatrixXcd Pplus = Eigen::MatrixXcd::Zero(Ls,Ls);
Eigen::MatrixXcd Pminus = Eigen::MatrixXcd::Zero(Ls,Ls);

for(int s=0; s<Ls; s++){
Pplus(s,s) = this->bee[s];
Pminus(s,s) = this->bee[s];
}

for(int s=0; s<Ls-1; s++){
Pminus(s,s+1) = -this->cee[s];
}

for(int s=0; s<Ls-1; s++){
Pplus(s+1,s) = -this->cee[s+1];
}

Pplus (0,Ls-1) = this->dp;
Pminus(Ls-1,0) = this->dm;

Eigen::MatrixXcd PplusMat ;
Eigen::MatrixXcd PminusMat;

#if(0)
std::cout << GridLogMessage << "Pplus:" << std::endl;
for(int s=0; s<Ls; ++s){
for(int ss=0; ss<Ls; ++ss){
std::cout << Pplus(s,ss) << "\t";
}
std::cout << std::endl;
}
std::cout << GridLogMessage << "Pminus:" << std::endl;
for(int s=0; s<Ls; ++s){
for(int ss=0; ss<Ls; ++ss){
std::cout << Pminus(s,ss) << "\t";
}
std::cout << std::endl;
}
#endif

if(inv) {
PplusMat = Pplus.inverse();
PminusMat = Pminus.inverse();
} else {
PplusMat = Pplus;
PminusMat = Pminus;
}

if(dag){
PplusMat.adjointInPlace();
PminusMat.adjointInPlace();
}

typedef typename SiteHalfSpinor::scalar_type scalar_type;
const int Nsimd = Simd::Nsimd();
Matp.resize(Ls*LLs);
Matm.resize(Ls*LLs);

for(int s2=0; s2<Ls; s2++){
for(int s1=0; s1<LLs; s1++){
int istride = LLs;
int ostride = 1;
Simd Vp;
Simd Vm;
scalar_type *sp = (scalar_type*) &Vp;
scalar_type *sm = (scalar_type*) &Vm;
for(int l=0; l<Nsimd; l++){
if(switcheroo<Coeff_t>::iscomplex()) {
sp[l] = PplusMat (l*istride+s1*ostride,s2);
sm[l] = PminusMat(l*istride+s1*ostride,s2);
} else {
// if real
scalar_type tmp;
tmp = PplusMat (l*istride+s1*ostride,s2);
sp[l] = scalar_type(tmp.real(),tmp.real());
tmp = PminusMat(l*istride+s1*ostride,s2);
sm[l] = scalar_type(tmp.real(),tmp.real());
}
}
Matp[LLs*s2+s1] = Vp;
Matm[LLs*s2+s1] = Vm;
}}
}

FermOpTemplateInstantiate(DomainWallEOFAFermion);
GparityFermOpTemplateInstantiate(DomainWallEOFAFermion);

}}
115
lib/qcd/action/fermion/DomainWallEOFAFermion.h
Normal file
@@ -0,0 +1,115 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermion.h

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_DOMAIN_WALL_EOFA_FERMION_H
#define GRID_QCD_DOMAIN_WALL_EOFA_FERMION_H

#include <Grid/qcd/action/fermion/AbstractEOFAFermion.h>

namespace Grid {
namespace QCD {

template<class Impl>
class DomainWallEOFAFermion : public AbstractEOFAFermion<Impl>
{
public:
INHERIT_IMPL_TYPES(Impl);

public:
// Modified (0,Ls-1) and (Ls-1,0) elements of Mooee
// for red-black preconditioned Shamir EOFA
Coeff_t dm;
Coeff_t dp;

virtual void Instantiatable(void) {};

// EOFA-specific operations
virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag);
virtual void Dtilde (const FermionField& in, FermionField& out);
virtual void DtildeInv (const FermionField& in, FermionField& out);

// override multiply
virtual RealD M (const FermionField& in, FermionField& out);
virtual RealD Mdag (const FermionField& in, FermionField& out);

// half checkerboard operations
virtual void Mooee (const FermionField& in, FermionField& out);
virtual void MooeeDag (const FermionField& in, FermionField& out);
virtual void MooeeInv (const FermionField& in, FermionField& out);
virtual void MooeeInvDag(const FermionField& in, FermionField& out);

virtual void M5D (const FermionField& psi, FermionField& chi);
virtual void M5Ddag (const FermionField& psi, FermionField& chi);

/////////////////////////////////////////////////////
// Instantiate different versions depending on Impl
/////////////////////////////////////////////////////
void M5D(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper);

void M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper);

void MooeeInternal(const FermionField& in, FermionField& out, int dag, int inv);

void MooeeInternalCompute(int dag, int inv, Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

void MooeeInternalAsm(const FermionField& in, FermionField& out, int LLs, int site,
Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

void MooeeInternalZAsm(const FermionField& in, FermionField& out, int LLs, int site,
Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

virtual void RefreshShiftCoefficients(RealD new_shift);

// Constructors
DomainWallEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid,
GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid,
RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int pm,
RealD _M5, const ImplParams& p=ImplParams());

protected:
void SetCoefficientsInternal(RealD zolo_hi, std::vector<Coeff_t>& gamma, RealD b, RealD c);
};
}}

#define INSTANTIATE_DPERP_DWF_EOFA(A)\
template void DomainWallEOFAFermion<A>::M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper); \
template void DomainWallEOFAFermion<A>::M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper); \
template void DomainWallEOFAFermion<A>::MooeeInv(const FermionField& psi, FermionField& chi); \
template void DomainWallEOFAFermion<A>::MooeeInvDag(const FermionField& psi, FermionField& chi);

#undef DOMAIN_WALL_EOFA_DPERP_DENSE
#define DOMAIN_WALL_EOFA_DPERP_CACHE
#undef DOMAIN_WALL_EOFA_DPERP_LINALG
#define DOMAIN_WALL_EOFA_DPERP_VEC

#endif
248
lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc
Normal file
@@ -0,0 +1,248 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermioncache.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>

namespace Grid {
namespace QCD {

// FIXME -- make a version of these routines with site loop outermost for cache reuse.

// Pminus forwards
// Pplus backwards
template<class Impl>
void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  int Ls = this->Ls;
  GridBase* grid = psi._grid;

  assert(phi.checkerboard == psi.checkerboard);
  chi.checkerboard = psi.checkerboard;
  // Flops = 6.0*(Nc*Ns)*Ls*vol
  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){ // adds Ls
    for(int s=0; s<Ls; s++){
      auto tmp = psi._odata[0];
      if(s==0) {
        spProj5m(tmp, psi._odata[ss+s+1]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5p(tmp, psi._odata[ss+Ls-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      } else if(s==(Ls-1)) {
        spProj5m(tmp, psi._odata[ss+0]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5p(tmp, psi._odata[ss+s-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      } else {
        spProj5m(tmp, psi._odata[ss+s+1]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5p(tmp, psi._odata[ss+s-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      }
    }
  }

  this->M5Dtime += usecond();
}
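namespace {
  // Hedged illustration only -- not part of Grid. Per 4d site, the M5D loop
  // above applies an Ls x Ls tridiagonal-with-corners matrix along s: P_-
  // couples s to s+1 ("Pminus forwards"), P_+ couples s to s-1 ("Pplus
  // backwards"). A minimal scalar model with the chiral projectors dropped;
  // assumes <vector> and <complex> arrive via the Grid headers above.
  inline void m5d_scalar_sketch(const std::vector<std::complex<double> >& psi,
                                const std::vector<std::complex<double> >& phi,
                                std::vector<std::complex<double> >&       chi,
                                const std::vector<std::complex<double> >& lower,
                                const std::vector<std::complex<double> >& diag,
                                const std::vector<std::complex<double> >& upper)
  {
    const int Ls = (int)psi.size();
    chi.resize(Ls);
    for(int s=0; s<Ls; s++){
      int sp = (s+1) % Ls;    // forward neighbour, wraps at the s boundary
      int sm = (s+Ls-1) % Ls; // backward neighbour
      chi[s] = diag[s]*phi[s] + upper[s]*psi[sp] + lower[s]*psi[sm];
    }
  }
}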
template<class Impl>
void DomainWallEOFAFermion<Impl>::M5Ddag(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  int Ls = this->Ls;
  GridBase* grid = psi._grid;
  assert(phi.checkerboard == psi.checkerboard);
  chi.checkerboard = psi.checkerboard;

  // Flops = 6.0*(Nc*Ns)*Ls*vol
  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){ // adds Ls
    auto tmp = psi._odata[0];
    for(int s=0; s<Ls; s++){
      if(s==0) {
        spProj5p(tmp, psi._odata[ss+s+1]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5m(tmp, psi._odata[ss+Ls-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      } else if(s==(Ls-1)) {
        spProj5p(tmp, psi._odata[ss+0]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5m(tmp, psi._odata[ss+s-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      } else {
        spProj5p(tmp, psi._odata[ss+s+1]);
        chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
        spProj5m(tmp, psi._odata[ss+s-1]);
        chi[ss+s] = chi[ss+s] + lower[s]*tmp;
      }
    }
  }

  this->M5Dtime += usecond();
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  GridBase* grid = psi._grid;
  int Ls = this->Ls;

  chi.checkerboard = psi.checkerboard;

  this->MooeeInvCalls++;
  this->MooeeInvTime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){ // adds Ls

    auto tmp1 = psi._odata[0];
    auto tmp2 = psi._odata[0];

    // flops = 12*2*Ls + 12*2*Ls + 3*12*Ls + 12*2*Ls = 12*Ls*(9) = 108*Ls flops
    // Apply (L^{\prime})^{-1}
    chi[ss] = psi[ss]; // chi[0]=psi[0]
    for(int s=1; s<Ls; s++){
      spProj5p(tmp1, chi[ss+s-1]);
      chi[ss+s] = psi[ss+s] - this->lee[s-1]*tmp1;
    }

    // L_m^{-1}
    for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
      spProj5m(tmp1, chi[ss+s]);
      chi[ss+Ls-1] = chi[ss+Ls-1] - this->leem[s]*tmp1;
    }

    // U_m^{-1} D^{-1}
    for(int s=0; s<Ls-1; s++){ // Chi[s] + 1/d chi[s]
      spProj5p(tmp1, chi[ss+Ls-1]);
      chi[ss+s] = (1.0/this->dee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls])*tmp1;
    }
    spProj5m(tmp2, chi[ss+Ls-1]);
    chi[ss+Ls-1] = (1.0/this->dee[Ls])*tmp1 + (1.0/this->dee[Ls-1])*tmp2;

    // Apply U^{-1}
    for(int s=Ls-2; s>=0; s--){
      spProj5m(tmp1, chi[ss+s+1]);
      chi[ss+s] = chi[ss+s] - this->uee[s]*tmp1;
    }
  }

  this->MooeeInvTime += usecond();
}
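namespace {
  // Hedged illustration only -- not part of Grid. MooeeInv above is an LDU
  // solve specialised to the bidiagonal-with-corner-row structure: forward
  // substitution with lee (plus the leem corner row), a diagonal solve with
  // dee, then back substitution with uee/ueem. The same forward-substitution
  // step for a scalar unit-lower-bidiagonal system L x = b, sub-diagonal l[s]:
  inline void forward_subst_sketch(const std::vector<std::complex<double> >& l,
                                   const std::vector<std::complex<double> >& b,
                                   std::vector<std::complex<double> >&       x)
  {
    const int Ls = (int)b.size();
    x.resize(Ls);
    x[0] = b[0];                   // first row is trivial: x[0] = b[0]
    for(int s=1; s<Ls; s++){
      x[s] = b[s] - l[s-1]*x[s-1]; // subtract the already-solved entry
    }
  }
}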
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  GridBase* grid = psi._grid;
  int Ls = this->Ls;

  chi.checkerboard = psi.checkerboard;

  std::vector<Coeff_t> ueec(Ls);
  std::vector<Coeff_t> deec(Ls+1);
  std::vector<Coeff_t> leec(Ls);
  std::vector<Coeff_t> ueemc(Ls);
  std::vector<Coeff_t> leemc(Ls);

  for(int s=0; s<ueec.size(); s++){
    ueec[s]  = conjugate(this->uee[s]);
    deec[s]  = conjugate(this->dee[s]);
    leec[s]  = conjugate(this->lee[s]);
    ueemc[s] = conjugate(this->ueem[s]);
    leemc[s] = conjugate(this->leem[s]);
  }
  deec[Ls] = conjugate(this->dee[Ls]);

  this->MooeeInvCalls++;
  this->MooeeInvTime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){ // adds Ls

    auto tmp1 = psi._odata[0];
    auto tmp2 = psi._odata[0];

    // Apply (U^{\prime})^{-dagger}
    chi[ss] = psi[ss];
    for(int s=1; s<Ls; s++){
      spProj5m(tmp1, chi[ss+s-1]);
      chi[ss+s] = psi[ss+s] - ueec[s-1]*tmp1;
    }

    // U_m^{-\dagger}
    for(int s=0; s<Ls-1; s++){
      spProj5p(tmp1, chi[ss+s]);
      chi[ss+Ls-1] = chi[ss+Ls-1] - ueemc[s]*tmp1;
    }

    // L_m^{-\dagger} D^{-dagger}
    for(int s=0; s<Ls-1; s++){
      spProj5m(tmp1, chi[ss+Ls-1]);
      chi[ss+s] = (1.0/deec[s])*chi[ss+s] - (leemc[s]/deec[Ls-1])*tmp1;
    }
    spProj5p(tmp2, chi[ss+Ls-1]);
    chi[ss+Ls-1] = (1.0/deec[Ls-1])*tmp1 + (1.0/deec[Ls])*tmp2;

    // Apply L^{-dagger}
    for(int s=Ls-2; s>=0; s--){
      spProj5p(tmp1, chi[ss+s+1]);
      chi[ss+s] = chi[ss+s] - leec[s]*tmp1;
    }
  }

  this->MooeeInvTime += usecond();
}

#ifdef DOMAIN_WALL_EOFA_DPERP_CACHE

INSTANTIATE_DPERP_DWF_EOFA(WilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplD);

INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF);

#endif

}}
159
lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc
Normal file
@@ -0,0 +1,159 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermiondense.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid_Eigen_Dense.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>

namespace Grid {
namespace QCD {

/*
 * Dense matrix versions of routines
 */
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv)
{
  int Ls  = this->Ls;
  int LLs = psi._grid->_rdimensions[0];
  int vol = psi._grid->oSites()/LLs;

  chi.checkerboard = psi.checkerboard;

  assert(Ls==LLs);

  Eigen::MatrixXd Pplus  = Eigen::MatrixXd::Zero(Ls,Ls);
  Eigen::MatrixXd Pminus = Eigen::MatrixXd::Zero(Ls,Ls);

  for(int s=0; s<Ls; s++){
    Pplus(s,s)  = this->bee[s];
    Pminus(s,s) = this->bee[s];
  }

  for(int s=0; s<Ls-1; s++){
    Pminus(s,s+1) = -this->cee[s];
  }

  for(int s=0; s<Ls-1; s++){
    Pplus(s+1,s) = -this->cee[s+1];
  }

  Pplus (0,Ls-1) = this->dp;
  Pminus(Ls-1,0) = this->dm;

  Eigen::MatrixXd PplusMat;
  Eigen::MatrixXd PminusMat;

  if(inv) {
    PplusMat  = Pplus.inverse();
    PminusMat = Pminus.inverse();
  } else {
    PplusMat  = Pplus;
    PminusMat = Pminus;
  }

  if(dag){
    PplusMat.adjointInPlace();
    PminusMat.adjointInPlace();
  }

  // For the non-vectorised s-direction this is simple

  for(auto site=0; site<vol; site++){

    SiteSpinor     SiteChi;
    SiteHalfSpinor SitePplus;
    SiteHalfSpinor SitePminus;

    for(int s1=0; s1<Ls; s1++){
      SiteChi = zero;
      for(int s2=0; s2<Ls; s2++){
        int lex2 = s2 + Ls*site;
        if(PplusMat(s1,s2) != 0.0){
          spProj5p(SitePplus, psi[lex2]);
          accumRecon5p(SiteChi, PplusMat(s1,s2)*SitePplus);
        }
        if(PminusMat(s1,s2) != 0.0){
          spProj5m(SitePminus, psi[lex2]);
          accumRecon5m(SiteChi, PminusMat(s1,s2)*SitePminus);
        }
      }
      chi[s1+Ls*site] = SiteChi*0.5;
    }
  }
}
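namespace {
  // Hedged illustration only -- not part of Grid. In isolation, the dense
  // path above builds two Ls x Ls chirality matrices (tridiagonal plus one
  // EOFA corner entry each) and inverts them once with Eigen. Self-contained
  // sketch with illustrative inputs b, c, dp, dm mirroring the members used
  // in MooeeInternal:
  inline void build_and_invert_sketch(int Ls,
                                      const std::vector<double>& b,
                                      const std::vector<double>& c,
                                      double dp, double dm,
                                      Eigen::MatrixXd& PplusInv,
                                      Eigen::MatrixXd& PminusInv)
  {
    Eigen::MatrixXd Pplus  = Eigen::MatrixXd::Zero(Ls,Ls);
    Eigen::MatrixXd Pminus = Eigen::MatrixXd::Zero(Ls,Ls);
    for(int s=0; s<Ls;   s++){ Pplus(s,s)    =  b[s];  Pminus(s,s)  =  b[s];   }
    for(int s=0; s<Ls-1; s++){ Pminus(s,s+1) = -c[s];  Pplus(s+1,s) = -c[s+1]; }
    Pplus(0,Ls-1)  = dp;  // EOFA corner terms close the s direction
    Pminus(Ls-1,0) = dm;
    PplusInv  = Pplus.inverse();  // dense O(Ls^3) inverse, computed once
    PminusInv = Pminus.inverse();
  }
}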
#ifdef DOMAIN_WALL_EOFA_DPERP_DENSE

INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplD);

template void DomainWallEOFAFermion<GparityWilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<GparityWilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<WilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<WilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZWilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZWilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF);

template void DomainWallEOFAFermion<GparityWilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<GparityWilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<WilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<WilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZWilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZWilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

#endif

}}
168
lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc
Normal file
@@ -0,0 +1,168 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermionssp.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>

namespace Grid {
namespace QCD {

// FIXME -- make a version of these routines with site loop outermost for cache reuse.
// Pminus forwards
// Pplus backwards
template<class Impl>
void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  Coeff_t one(1.0);
  int Ls = this->Ls;
  for(int s=0; s<Ls; s++){
    if(s==0) {
      axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
      axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, Ls-1);
    } else if (s==(Ls-1)) {
      axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, 0);
      axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, s-1);
    } else {
      axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
      axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, s-1);
    }
  }
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::M5Ddag(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  Coeff_t one(1.0);
  int Ls = this->Ls;
  for(int s=0; s<Ls; s++){
    if(s==0) {
      axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
      axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, Ls-1);
    } else if (s==(Ls-1)) {
      axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, 0);
      axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
    } else {
      axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
      axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
    }
  }
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  FermionField tmp(psi._grid);

  // Apply (L^{\prime})^{-1}
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  for(int s=1; s<Ls; s++){
    axpby_ssp_pplus(chi, one, psi, -this->lee[s-1], chi, s, s-1); // recursion Psi[s] - lee P_+ chi[s-1]
  }

  // L_m^{-1}
  for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
    axpby_ssp_pminus(chi, one, chi, -this->leem[s], chi, Ls-1, s);
  }

  // U_m^{-1} D^{-1}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one/this->dee[s], chi, -this->ueem[s]/this->dee[Ls], chi, s, Ls-1);
  }
  axpby_ssp_pminus(tmp, czero, chi, one/this->dee[Ls-1], chi, Ls-1, Ls-1);
  axpby_ssp_pplus(chi, one, tmp, one/this->dee[Ls], chi, Ls-1, Ls-1);

  // Apply U^{-1}
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pminus(chi, one, chi, -this->uee[s], chi, s, s+1); // chi[Ls]
  }
}
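namespace {
  // Hedged illustration only -- not part of Grid. Every step above is one
  // fused call of the form chi_s = a*x_s + b*P_{+/-} y_{s'}, acting on single
  // s-slices of the 5d field. Scalar model with the chiral projector replaced
  // by the identity; names here are illustrative:
  inline void axpby_ssp_sketch(std::vector<std::complex<double> >&       chi,
                               std::complex<double>                      a,
                               const std::vector<std::complex<double> >& x,
                               std::complex<double>                      b,
                               const std::vector<std::complex<double> >& y,
                               int s, int sp)
  {
    chi[s] = a*x[s] + b*y[sp]; // read slice sp, write slice s
  }
}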
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  FermionField tmp(psi._grid);

  // Apply (U^{\prime})^{-dagger}
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  for(int s=1; s<Ls; s++){
    axpby_ssp_pminus(chi, one, psi, -conjugate(this->uee[s-1]), chi, s, s-1);
  }

  // U_m^{-\dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->ueem[s]), chi, Ls-1, s);
  }

  // L_m^{-\dagger} D^{-dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pminus(chi, one/conjugate(this->dee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1);
  }
  axpby_ssp_pminus(tmp, czero, chi, one/conjugate(this->dee[Ls-1]), chi, Ls-1, Ls-1);
  axpby_ssp_pplus(chi, one, tmp, one/conjugate(this->dee[Ls]), chi, Ls-1, Ls-1);

  // Apply L^{-dagger}
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->lee[s]), chi, s, s+1); // chi[Ls]
  }
}

#ifdef DOMAIN_WALL_EOFA_DPERP_LINALG

INSTANTIATE_DPERP_DWF_EOFA(WilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplD);

INSTANTIATE_DPERP_DWF_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_DWF_EOFA(ZWilsonImplDF);

#endif

}}
605
lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc
Normal file
@@ -0,0 +1,605 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/DomainWallEOFAFermionvec.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>

namespace Grid {
namespace QCD {

/*
 * Vectorised (Ls spread across SIMD lanes) versions of routines
 */
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::M5D(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  GridBase* grid = psi._grid;
  int Ls  = this->Ls;
  int LLs = grid->_rdimensions[0];
  const int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd> > u(LLs);
  Vector<iSinglet<Simd> > l(LLs);
  Vector<iSinglet<Simd> > d(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];

  for(int o=0; o<LLs; o++){   // outer
  for(int i=0; i<nsimd; i++){ // inner
    int s  = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  assert(Nc == 3);

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

#if 0

    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
    alignas(64) SiteSpinor fp;
    alignas(64) SiteSpinor fm;

    for(int v=0; v<LLs; v++){

      int vp = (v+1)%LLs;
      int vm = (v+LLs-1)%LLs;

      spProj5m(hp, psi[ss+vp]);
      spProj5p(hm, psi[ss+vm]);

      if (vp <= v){ rotate(hp, hp, 1); }
      if (vm >= v){ rotate(hm, hm, nsimd-1); }

      hp = 0.5*hp;
      hm = 0.5*hm;

      spRecon5m(fp, hp);
      spRecon5p(fm, hm);

      chi[ss+v] = d[v]*phi[ss+v];
      chi[ss+v] = chi[ss+v] + u[v]*fp;
      chi[ss+v] = chi[ss+v] + l[v]*fm;

    }

#else

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v==LLs-1) ? 0     : v+1;
      int vm = (v==0)     ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(2)(0);
      Simd hp_01 = psi[ss+vp]()(2)(1);
      Simd hp_02 = psi[ss+vp]()(2)(2);
      Simd hp_10 = psi[ss+vp]()(3)(0);
      Simd hp_11 = psi[ss+vp]()(3)(1);
      Simd hp_12 = psi[ss+vp]()(3)(2);

      Simd hm_00 = psi[ss+vm]()(0)(0);
      Simd hm_01 = psi[ss+vm]()(0)(1);
      Simd hm_02 = psi[ss+vm]()(0)(2);
      Simd hm_10 = psi[ss+vm]()(1)(0);
      Simd hm_11 = psi[ss+vm]()(1)(1);
      Simd hm_12 = psi[ss+vm]()(1)(2);

      if(vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      // Can force these to real arithmetic and save 2x.
      Simd p_00 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00);
      Simd p_01 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01);
      Simd p_02 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02);
      Simd p_10 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10);
      Simd p_11 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11);
      Simd p_12 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12);
      Simd p_20 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_21 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_22 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_30 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_31 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_32 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);
    }

#endif
  }

  this->M5Dtime += usecond();
}
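namespace {
  // Hedged illustration only -- not part of Grid. With Ls spread across SIMD
  // lanes, the s+1 / s-1 neighbour of a lane lives in an adjacent lane of the
  // same register, so the kernel above rotates lanes instead of loading:
  // tRotate<2> moves each complex (two reals) down one lane, and
  // tRotate<2*Nsimd()-2> moves it up one. Scalar model of that lane rotation
  // on a plain array standing in for the vector register:
  inline void lane_rotate_sketch(const std::complex<double>* in,
                                 std::complex<double>*       out,
                                 int nsimd, int shift)
  {
    for(int l=0; l<nsimd; l++){
      out[l] = in[(l+shift) % nsimd]; // shift=1 fetches s+1; shift=nsimd-1 fetches s-1
    }
  }
}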
template<class Impl>
void DomainWallEOFAFermion<Impl>::M5Ddag(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  GridBase* grid = psi._grid;
  int Ls  = this->Ls;
  int LLs = grid->_rdimensions[0];
  int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd> > u(LLs);
  Vector<iSinglet<Simd> > l(LLs);
  Vector<iSinglet<Simd> > d(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];

  for(int o=0; o<LLs; o++){   // outer
  for(int i=0; i<nsimd; i++){ // inner
    int s  = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

#if 0

    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
    alignas(64) SiteSpinor fp;
    alignas(64) SiteSpinor fm;

    for(int v=0; v<LLs; v++){

      int vp = (v+1)%LLs;
      int vm = (v+LLs-1)%LLs;

      spProj5p(hp, psi[ss+vp]);
      spProj5m(hm, psi[ss+vm]);

      if(vp <= v){ rotate(hp, hp, 1); }
      if(vm >= v){ rotate(hm, hm, nsimd-1); }

      hp = hp*0.5;
      hm = hm*0.5;
      spRecon5p(fp, hp);
      spRecon5m(fm, hm);

      chi[ss+v] = d[v]*phi[ss+v] + u[v]*fp;
      chi[ss+v] = chi[ss+v]      + l[v]*fm;
    }

#else

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v == LLs-1) ? 0     : v+1;
      int vm = (v == 0    ) ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(0)(0);
      Simd hp_01 = psi[ss+vp]()(0)(1);
      Simd hp_02 = psi[ss+vp]()(0)(2);
      Simd hp_10 = psi[ss+vp]()(1)(0);
      Simd hp_11 = psi[ss+vp]()(1)(1);
      Simd hp_12 = psi[ss+vp]()(1)(2);

      Simd hm_00 = psi[ss+vm]()(2)(0);
      Simd hm_01 = psi[ss+vm]()(2)(1);
      Simd hm_02 = psi[ss+vm]()(2)(2);
      Simd hm_10 = psi[ss+vm]()(3)(0);
      Simd hm_11 = psi[ss+vm]()(3)(1);
      Simd hm_12 = psi[ss+vm]()(3)(2);

      if (vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      Simd p_00 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_01 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_02 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_10 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_11 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_12 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);
      Simd p_20 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00);
      Simd p_21 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01);
      Simd p_22 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02);
      Simd p_30 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10);
      Simd p_31 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11);
      Simd p_32 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);
    }
#endif

  }

  this->M5Dtime += usecond();
}

#ifdef AVX512
#include<simd/Intel512common.h>
#include<simd/Intel512avx.h>
#include<simd/Intel512single.h>
#endif
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInternalAsm(const FermionField& psi, FermionField& chi,
  int LLs, int site, Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
#ifndef AVX512
  {
    SiteHalfSpinor BcastP;
    SiteHalfSpinor BcastM;
    SiteHalfSpinor SiteChiP;
    SiteHalfSpinor SiteChiM;

    // Ls*Ls * 2 * 12 * vol flops
    for(int s1=0; s1<LLs; s1++){

      for(int s2=0; s2<LLs; s2++){
      for(int l=0; l<Simd::Nsimd(); l++){ // simd lane

        int s   = s2 + l*LLs;
        int lex = s2 + LLs*site;

        if( s2==0 && l==0 ){
          SiteChiP=zero;
          SiteChiM=zero;
        }

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vbroadcast(BcastP()(sp)(co), psi[lex]()(sp)(co), l);
        }}

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vbroadcast(BcastM()(sp)(co), psi[lex]()(sp+2)(co), l);
        }}

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          SiteChiP()(sp)(co) = real_madd(Matp[LLs*s+s1]()()(), BcastP()(sp)(co), SiteChiP()(sp)(co)); // 1100 us.
          SiteChiM()(sp)(co) = real_madd(Matm[LLs*s+s1]()()(), BcastM()(sp)(co), SiteChiM()(sp)(co)); // each found by commenting out
        }}
      }}

      {
        int lex = s1 + LLs*site;
        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vstream(chi[lex]()(sp)(co),   SiteChiP()(sp)(co));
          vstream(chi[lex]()(sp+2)(co), SiteChiM()(sp)(co));
        }}
      }
    }

  }
#else
  {
    // pointers
    // MASK_REGS;
#define Chi_00 %%zmm1
#define Chi_01 %%zmm2
#define Chi_02 %%zmm3
#define Chi_10 %%zmm4
#define Chi_11 %%zmm5
#define Chi_12 %%zmm6
#define Chi_20 %%zmm7
#define Chi_21 %%zmm8
#define Chi_22 %%zmm9
#define Chi_30 %%zmm10
#define Chi_31 %%zmm11
#define Chi_32 %%zmm12

#define BCAST0  %%zmm13
#define BCAST1  %%zmm14
#define BCAST2  %%zmm15
#define BCAST3  %%zmm16
#define BCAST4  %%zmm17
#define BCAST5  %%zmm18
#define BCAST6  %%zmm19
#define BCAST7  %%zmm20
#define BCAST8  %%zmm21
#define BCAST9  %%zmm22
#define BCAST10 %%zmm23
#define BCAST11 %%zmm24

    int incr = LLs*LLs*sizeof(iSinglet<Simd>);
    for(int s1=0; s1<LLs; s1++){

      for(int s2=0; s2<LLs; s2++){

        int lex = s2 + LLs*site;
        uint64_t a0 = (uint64_t) &Matp[LLs*s2+s1]; // should be cacheable
        uint64_t a1 = (uint64_t) &Matm[LLs*s2+s1];
        uint64_t a2 = (uint64_t) &psi[lex];

        for(int l=0; l<Simd::Nsimd(); l++){ // simd lane
          if((s2+l)==0) {
            asm(
              VPREFETCH1(0,%2)  VPREFETCH1(0,%1)
              VPREFETCH1(12,%2) VPREFETCH1(13,%2)
              VPREFETCH1(14,%2) VPREFETCH1(15,%2)
              VBCASTCDUP(0,%2,BCAST0)
              VBCASTCDUP(1,%2,BCAST1)
              VBCASTCDUP(2,%2,BCAST2)
              VBCASTCDUP(3,%2,BCAST3)
              VBCASTCDUP(4,%2,BCAST4)   VMULMEM(0,%0,BCAST0,Chi_00)
              VBCASTCDUP(5,%2,BCAST5)   VMULMEM(0,%0,BCAST1,Chi_01)
              VBCASTCDUP(6,%2,BCAST6)   VMULMEM(0,%0,BCAST2,Chi_02)
              VBCASTCDUP(7,%2,BCAST7)   VMULMEM(0,%0,BCAST3,Chi_10)
              VBCASTCDUP(8,%2,BCAST8)   VMULMEM(0,%0,BCAST4,Chi_11)
              VBCASTCDUP(9,%2,BCAST9)   VMULMEM(0,%0,BCAST5,Chi_12)
              VBCASTCDUP(10,%2,BCAST10) VMULMEM(0,%1,BCAST6,Chi_20)
              VBCASTCDUP(11,%2,BCAST11) VMULMEM(0,%1,BCAST7,Chi_21)
              VMULMEM(0,%1,BCAST8,Chi_22)
              VMULMEM(0,%1,BCAST9,Chi_30)
              VMULMEM(0,%1,BCAST10,Chi_31)
              VMULMEM(0,%1,BCAST11,Chi_32)
              : : "r" (a0), "r" (a1), "r" (a2) );
          } else {
            asm(
              VBCASTCDUP(0,%2,BCAST0)   VMADDMEM(0,%0,BCAST0,Chi_00)
              VBCASTCDUP(1,%2,BCAST1)   VMADDMEM(0,%0,BCAST1,Chi_01)
              VBCASTCDUP(2,%2,BCAST2)   VMADDMEM(0,%0,BCAST2,Chi_02)
              VBCASTCDUP(3,%2,BCAST3)   VMADDMEM(0,%0,BCAST3,Chi_10)
              VBCASTCDUP(4,%2,BCAST4)   VMADDMEM(0,%0,BCAST4,Chi_11)
              VBCASTCDUP(5,%2,BCAST5)   VMADDMEM(0,%0,BCAST5,Chi_12)
              VBCASTCDUP(6,%2,BCAST6)   VMADDMEM(0,%1,BCAST6,Chi_20)
              VBCASTCDUP(7,%2,BCAST7)   VMADDMEM(0,%1,BCAST7,Chi_21)
              VBCASTCDUP(8,%2,BCAST8)   VMADDMEM(0,%1,BCAST8,Chi_22)
              VBCASTCDUP(9,%2,BCAST9)   VMADDMEM(0,%1,BCAST9,Chi_30)
              VBCASTCDUP(10,%2,BCAST10) VMADDMEM(0,%1,BCAST10,Chi_31)
              VBCASTCDUP(11,%2,BCAST11) VMADDMEM(0,%1,BCAST11,Chi_32)
              : : "r" (a0), "r" (a1), "r" (a2) );
          }
          a0 = a0 + incr;
          a1 = a1 + incr;
          a2 = a2 + sizeof(Simd::scalar_type);
        }
      }

      {
        int lexa = s1+LLs*site;
        asm (
          VSTORE(0,%0,Chi_00) VSTORE(1 ,%0,Chi_01) VSTORE(2 ,%0,Chi_02)
          VSTORE(3,%0,Chi_10) VSTORE(4 ,%0,Chi_11) VSTORE(5 ,%0,Chi_12)
          VSTORE(6,%0,Chi_20) VSTORE(7 ,%0,Chi_21) VSTORE(8 ,%0,Chi_22)
          VSTORE(9,%0,Chi_30) VSTORE(10,%0,Chi_31) VSTORE(11,%0,Chi_32)
          : : "r" ((uint64_t)&chi[lexa]) : "memory" );

      }
    }
  }

#undef Chi_00
#undef Chi_01
#undef Chi_02
#undef Chi_10
#undef Chi_11
#undef Chi_12
#undef Chi_20
#undef Chi_21
#undef Chi_22
#undef Chi_30
#undef Chi_31
#undef Chi_32

#undef BCAST0
#undef BCAST1
#undef BCAST2
#undef BCAST3
#undef BCAST4
#undef BCAST5
#undef BCAST6
#undef BCAST7
#undef BCAST8
#undef BCAST9
#undef BCAST10
#undef BCAST11
#endif
}
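namespace {
  // Hedged illustration only -- not part of Grid. Stripped of the per-lane
  // broadcasts and fused multiply-adds, the kernel above is a dense
  // matrix-vector product along s for each chirality,
  //   chi[s1] = sum_s Mat[LLs*s + s1] * psi[s],
  // with psi entries pulled out of individual SIMD lanes via vbroadcast.
  // Scalar model using the same table indexing as Matp/Matm:
  inline void matvec_sketch(const std::vector<double>& Mat, // LLs x LLs table
                            const std::vector<double>& psi,
                            std::vector<double>&       chi, int LLs)
  {
    chi.resize(LLs);
    for(int s1=0; s1<LLs; s1++){
      double acc = 0.0;
      for(int s=0; s<LLs; s++) acc += Mat[LLs*s + s1] * psi[s];
      chi[s1] = acc;
    }
  }
}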
// Z-mobius version
template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInternalZAsm(const FermionField& psi, FermionField& chi,
  int LLs, int site, Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
  std::cout << "Error: zMobius not implemented for EOFA" << std::endl;
  exit(-1);
}

template<class Impl>
void DomainWallEOFAFermion<Impl>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv)
{
  int Ls  = this->Ls;
  int LLs = psi._grid->_rdimensions[0];
  int vol = psi._grid->oSites()/LLs;

  chi.checkerboard = psi.checkerboard;

  Vector<iSinglet<Simd> > Matp;
  Vector<iSinglet<Simd> > Matm;
  Vector<iSinglet<Simd> >* _Matp;
  Vector<iSinglet<Simd> >* _Matm;

  // MooeeInternalCompute(dag,inv,Matp,Matm);
  if(inv && dag){
    _Matp = &this->MatpInvDag;
    _Matm = &this->MatmInvDag;
  }

  if(inv && (!dag)){
    _Matp = &this->MatpInv;
    _Matm = &this->MatmInv;
  }

  if(!inv){
    MooeeInternalCompute(dag, inv, Matp, Matm);
    _Matp = &Matp;
    _Matm = &Matm;
  }

  assert(_Matp->size() == Ls*LLs);

  this->MooeeInvCalls++;
  this->MooeeInvTime -= usecond();

  if(switcheroo<Coeff_t>::iscomplex()){
    parallel_for(auto site=0; site<vol; site++){
      MooeeInternalZAsm(psi, chi, LLs, site, *_Matp, *_Matm);
    }
  } else {
    parallel_for(auto site=0; site<vol; site++){
      MooeeInternalAsm(psi, chi, LLs, site, *_Matp, *_Matm);
    }
  }

  this->MooeeInvTime += usecond();
}
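// Call-site view of the dispatch above (hedged sketch; "Deofa", "psi" and
// "chi" are illustrative names, not objects defined in Grid): the inverse
// tables MatpInv/MatmInv and their daggered partners are precomputed, so the
// even-even inverse is table-driven, while the forward (inv==0) application
// rebuilds Matp/Matm through MooeeInternalCompute.
//
//   DomainWallEOFAFermionVec5dD Deofa(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid,
//                                     mq1, mq2, mq3, shift, pm, M5);
//   Deofa.MooeeInv(psi, chi);    // MooeeInternal(psi, chi, DaggerNo,  InverseYes)
//   Deofa.MooeeInvDag(psi, chi); // MooeeInternal(psi, chi, DaggerYes, InverseYes)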
#ifdef DOMAIN_WALL_EOFA_DPERP_VEC

INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplD);
INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplF);
INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplD);
INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplF);

INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplDF);
INSTANTIATE_DPERP_DWF_EOFA(DomainWallVec5dImplFH);
INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplDF);
INSTANTIATE_DPERP_DWF_EOFA(ZDomainWallVec5dImplFH);

template void DomainWallEOFAFermion<DomainWallVec5dImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<DomainWallVec5dImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZDomainWallVec5dImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZDomainWallVec5dImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

template void DomainWallEOFAFermion<DomainWallVec5dImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<DomainWallVec5dImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZDomainWallVec5dImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void DomainWallEOFAFermion<ZDomainWallVec5dImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

#endif

}}
@@ -1,6 +1,6 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/Fermion_base_aggregate.h
|
||||
|
||||
@@ -38,6 +38,8 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
// - ContinuedFractionFermion5D.cc
|
||||
// - WilsonFermion.cc
|
||||
// - WilsonKernels.cc
|
||||
// - DomainWallEOFAFermion.cc
|
||||
// - MobiusEOFAFermion.cc
|
||||
//
|
||||
// The explicit instantiation is only avoidable if we move this source to headers and end up with include/parse/recompile
|
||||
// for EVERY .cc file. This define centralises the list and restores global push of impl cases
|
||||
@@ -55,8 +57,9 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
#include <Grid/qcd/action/fermion/ImprovedStaggeredFermion5D.h>
|
||||
#include <Grid/qcd/action/fermion/CayleyFermion5D.h> // Cayley types
|
||||
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
|
||||
#include <Grid/qcd/action/fermion/DomainWallFermion.h>
|
||||
#include <Grid/qcd/action/fermion/DomainWallEOFAFermion.h>
|
||||
#include <Grid/qcd/action/fermion/MobiusFermion.h>
|
||||
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>
|
||||
#include <Grid/qcd/action/fermion/ZMobiusFermion.h>
|
||||
#include <Grid/qcd/action/fermion/SchurDiagTwoKappa.h>
|
||||
#include <Grid/qcd/action/fermion/ScaledShamirFermion.h>
|
||||
@@ -113,6 +116,14 @@ typedef DomainWallFermion<WilsonImplRL> DomainWallFermionRL;
|
||||
typedef DomainWallFermion<WilsonImplFH> DomainWallFermionFH;
|
||||
typedef DomainWallFermion<WilsonImplDF> DomainWallFermionDF;
|
||||
|
||||
typedef DomainWallEOFAFermion<WilsonImplR> DomainWallEOFAFermionR;
|
||||
typedef DomainWallEOFAFermion<WilsonImplF> DomainWallEOFAFermionF;
|
||||
typedef DomainWallEOFAFermion<WilsonImplD> DomainWallEOFAFermionD;
|
||||
|
||||
typedef DomainWallEOFAFermion<WilsonImplRL> DomainWallEOFAFermionRL;
|
||||
typedef DomainWallEOFAFermion<WilsonImplFH> DomainWallEOFAFermionFH;
|
||||
typedef DomainWallEOFAFermion<WilsonImplDF> DomainWallEOFAFermionDF;
|
||||
|
||||
typedef MobiusFermion<WilsonImplR> MobiusFermionR;
|
||||
typedef MobiusFermion<WilsonImplF> MobiusFermionF;
|
||||
typedef MobiusFermion<WilsonImplD> MobiusFermionD;
|
||||
@@ -121,6 +132,14 @@ typedef MobiusFermion<WilsonImplRL> MobiusFermionRL;
|
||||
typedef MobiusFermion<WilsonImplFH> MobiusFermionFH;
|
||||
typedef MobiusFermion<WilsonImplDF> MobiusFermionDF;
|
||||
|
||||
typedef MobiusEOFAFermion<WilsonImplR> MobiusEOFAFermionR;
|
||||
typedef MobiusEOFAFermion<WilsonImplF> MobiusEOFAFermionF;
|
||||
typedef MobiusEOFAFermion<WilsonImplD> MobiusEOFAFermionD;
|
||||
|
||||
typedef MobiusEOFAFermion<WilsonImplRL> MobiusEOFAFermionRL;
|
||||
typedef MobiusEOFAFermion<WilsonImplFH> MobiusEOFAFermionFH;
|
||||
typedef MobiusEOFAFermion<WilsonImplDF> MobiusEOFAFermionDF;
|
||||
|
||||
typedef ZMobiusFermion<ZWilsonImplR> ZMobiusFermionR;
|
||||
typedef ZMobiusFermion<ZWilsonImplF> ZMobiusFermionF;
|
||||
typedef ZMobiusFermion<ZWilsonImplD> ZMobiusFermionD;
|
||||
@@ -129,7 +148,7 @@ typedef ZMobiusFermion<ZWilsonImplRL> ZMobiusFermionRL;
|
||||
typedef ZMobiusFermion<ZWilsonImplFH> ZMobiusFermionFH;
|
||||
typedef ZMobiusFermion<ZWilsonImplDF> ZMobiusFermionDF;
|
||||
|
||||
// Ls vectorised
|
||||
// Ls vectorised
|
||||
typedef DomainWallFermion<DomainWallVec5dImplR> DomainWallFermionVec5dR;
|
||||
typedef DomainWallFermion<DomainWallVec5dImplF> DomainWallFermionVec5dF;
|
||||
typedef DomainWallFermion<DomainWallVec5dImplD> DomainWallFermionVec5dD;
|
||||
@@ -138,6 +157,14 @@ typedef DomainWallFermion<DomainWallVec5dImplRL> DomainWallFermionVec5dRL;
|
||||
typedef DomainWallFermion<DomainWallVec5dImplFH> DomainWallFermionVec5dFH;
|
||||
typedef DomainWallFermion<DomainWallVec5dImplDF> DomainWallFermionVec5dDF;
|
||||
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplR> DomainWallEOFAFermionVec5dR;
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplF> DomainWallEOFAFermionVec5dF;
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplD> DomainWallEOFAFermionVec5dD;
|
||||
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplRL> DomainWallEOFAFermionVec5dRL;
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplFH> DomainWallEOFAFermionVec5dFH;
|
||||
typedef DomainWallEOFAFermion<DomainWallVec5dImplDF> DomainWallEOFAFermionVec5dDF;
|
||||
|
||||
typedef MobiusFermion<DomainWallVec5dImplR> MobiusFermionVec5dR;
|
||||
typedef MobiusFermion<DomainWallVec5dImplF> MobiusFermionVec5dF;
|
||||
typedef MobiusFermion<DomainWallVec5dImplD> MobiusFermionVec5dD;
|
||||
@@ -146,6 +173,14 @@ typedef MobiusFermion<DomainWallVec5dImplRL> MobiusFermionVec5dRL;
|
||||
typedef MobiusFermion<DomainWallVec5dImplFH> MobiusFermionVec5dFH;
|
||||
typedef MobiusFermion<DomainWallVec5dImplDF> MobiusFermionVec5dDF;
|
||||
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplR> MobiusEOFAFermionVec5dR;
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplF> MobiusEOFAFermionVec5dF;
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplD> MobiusEOFAFermionVec5dD;
|
||||
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplRL> MobiusEOFAFermionVec5dRL;
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplFH> MobiusEOFAFermionVec5dFH;
|
||||
typedef MobiusEOFAFermion<DomainWallVec5dImplDF> MobiusEOFAFermionVec5dDF;
|
||||
|
||||
typedef ZMobiusFermion<ZDomainWallVec5dImplR> ZMobiusFermionVec5dR;
|
||||
typedef ZMobiusFermion<ZDomainWallVec5dImplF> ZMobiusFermionVec5dF;
|
||||
typedef ZMobiusFermion<ZDomainWallVec5dImplD> ZMobiusFermionVec5dD;
|
||||
@@ -206,6 +241,14 @@ typedef DomainWallFermion<GparityWilsonImplRL> GparityDomainWallFermionRL;
|
||||
typedef DomainWallFermion<GparityWilsonImplFH> GparityDomainWallFermionFH;
|
||||
typedef DomainWallFermion<GparityWilsonImplDF> GparityDomainWallFermionDF;
|
||||
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplR> GparityDomainWallEOFAFermionR;
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplF> GparityDomainWallEOFAFermionF;
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplD> GparityDomainWallEOFAFermionD;
|
||||
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplRL> GparityDomainWallEOFAFermionRL;
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplFH> GparityDomainWallEOFAFermionFH;
|
||||
typedef DomainWallEOFAFermion<GparityWilsonImplDF> GparityDomainWallEOFAFermionDF;
|
||||
|
||||
typedef WilsonTMFermion<GparityWilsonImplR> GparityWilsonTMFermionR;
|
||||
typedef WilsonTMFermion<GparityWilsonImplF> GparityWilsonTMFermionF;
|
||||
typedef WilsonTMFermion<GparityWilsonImplD> GparityWilsonTMFermionD;
|
||||
@@ -222,6 +265,14 @@ typedef MobiusFermion<GparityWilsonImplRL> GparityMobiusFermionRL;
|
||||
typedef MobiusFermion<GparityWilsonImplFH> GparityMobiusFermionFH;
|
||||
typedef MobiusFermion<GparityWilsonImplDF> GparityMobiusFermionDF;
|
||||
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplR> GparityMobiusEOFAFermionR;
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplF> GparityMobiusEOFAFermionF;
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplD> GparityMobiusEOFAFermionD;
|
||||
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplRL> GparityMobiusEOFAFermionRL;
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplFH> GparityMobiusEOFAFermionFH;
|
||||
typedef MobiusEOFAFermion<GparityWilsonImplDF> GparityMobiusEOFAFermionDF;
|
||||
|
||||
typedef ImprovedStaggeredFermion<StaggeredImplR> ImprovedStaggeredFermionR;
|
||||
typedef ImprovedStaggeredFermion<StaggeredImplF> ImprovedStaggeredFermionF;
|
||||
typedef ImprovedStaggeredFermion<StaggeredImplD> ImprovedStaggeredFermionD;
|
||||
@@ -237,4 +288,11 @@ typedef ImprovedStaggeredFermion5D<StaggeredVec5dImplD> ImprovedStaggeredFermion
|
||||
|
||||
}}
|
||||
|
||||
////////////////////
|
||||
// Scalar QED actions
|
||||
// TODO: this needs to move to another header after rename to Fermion.h
|
||||
////////////////////
|
||||
#include <Grid/qcd/action/scalar/Scalar.h>
|
||||
#include <Grid/qcd/action/gauge/Photon.h>
|
||||
|
||||
#endif
|
||||
|
||||
@@ -538,6 +538,12 @@ class GparityWilsonImpl : public ConjugateGaugeImpl<GaugeImplTypes<S, Nrepresent
|
||||
|
||||
}
|
||||
|
||||
|
||||
template <class ref>
|
||||
inline void loadLinkElement(Simd ®, ref &memory) {
|
||||
reg = memory;
|
||||
}
|
||||
|
||||
inline void DoubleStore(GridBase *GaugeGrid,DoubledGaugeField &Uds,const GaugeField &Umu)
|
||||
{
|
||||
conformable(Uds._grid,GaugeGrid);
|
||||
|
||||
@@ -230,8 +230,15 @@ void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOr
|
||||
{
|
||||
Compressor compressor;
|
||||
int LLs = in._grid->_rdimensions[0];
|
||||
|
||||
|
||||
|
||||
DhopTotalTime -= usecond();
|
||||
DhopCommTime -= usecond();
st.HaloExchange(in,compressor);
DhopCommTime += usecond();

DhopComputeTime -= usecond();
// Dhop takes the 4d grid from U, and makes a 5d index for fermion
if (dag == DaggerYes) {
parallel_for (int ss = 0; ss < U._grid->oSites(); ss++) {
@@ -244,12 +251,15 @@ void ImprovedStaggeredFermion5D<Impl>::DhopInternal(StencilImpl & st, LebesgueOr
Kernels::DhopSite(st,lo,U,UUU,st.CommBuf(),LLs,sU,in,out);
}
}
DhopComputeTime += usecond();
DhopTotalTime += usecond();
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionField &out,int dag)
{
DhopCalls+=1;
conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
conformable(in._grid,out._grid); // drops the cb check

@@ -261,6 +271,7 @@ void ImprovedStaggeredFermion5D<Impl>::DhopOE(const FermionField &in, FermionFie
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionField &out,int dag)
{
DhopCalls+=1;
conformable(in._grid,FermionRedBlackGrid()); // verifies half grid
conformable(in._grid,out._grid); // drops the cb check

@@ -272,6 +283,7 @@ void ImprovedStaggeredFermion5D<Impl>::DhopEO(const FermionField &in, FermionFie
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField &out,int dag)
{
DhopCalls+=2;
conformable(in._grid,FermionGrid()); // verifies full grid
conformable(in._grid,out._grid);

@@ -280,6 +292,54 @@ void ImprovedStaggeredFermion5D<Impl>::Dhop(const FermionField &in, FermionField
DhopInternal(Stencil,Lebesgue,Umu,UUUmu,in,out,dag);
}

template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::Report(void)
{
std::vector<int> latt = GridDefaultLatt();
RealD volume = Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
RealD NP = _FourDimGrid->_Nprocessors;
RealD NN = _FourDimGrid->NodeCount();

std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;

std::cout << GridLogMessage << "ImprovedStaggeredFermion5D Number of DhopEO Calls : "
<< DhopCalls << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion5D TotalTime /Calls : "
<< DhopTotalTime / DhopCalls << " us" << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion5D CommTime /Calls : "
<< DhopCommTime / DhopCalls << " us" << std::endl;
std::cout << GridLogMessage << "ImprovedStaggeredFermion5D ComputeTime/Calls : "
<< DhopComputeTime / DhopCalls << " us" << std::endl;

// Average the compute time
_FourDimGrid->GlobalSum(DhopComputeTime);
DhopComputeTime/=NP;

RealD mflops = 1154*volume*DhopCalls/DhopComputeTime/2; // 2 for red black counting
std::cout << GridLogMessage << "Average mflops/s per call : " << mflops << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per rank : " << mflops/NP << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per node : " << mflops/NN << std::endl;

RealD Fullmflops = 1154*volume*DhopCalls/(DhopTotalTime)/2; // 2 for red black counting
std::cout << GridLogMessage << "Average mflops/s per call (full) : " << Fullmflops << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per rank (full): " << Fullmflops/NP << std::endl;
std::cout << GridLogMessage << "Average mflops/s per call per node (full): " << Fullmflops/NN << std::endl;

std::cout << GridLogMessage << "ImprovedStaggeredFermion5D Stencil" <<std::endl; Stencil.Report();
std::cout << GridLogMessage << "ImprovedStaggeredFermion5D StencilEven"<<std::endl; StencilEven.Report();
std::cout << GridLogMessage << "ImprovedStaggeredFermion5D StencilOdd" <<std::endl; StencilOdd.Report();
}
template<class Impl>
void ImprovedStaggeredFermion5D<Impl>::ZeroCounters(void)
{
DhopCalls = 0;
DhopTotalTime = 0;
DhopCommTime = 0;
DhopComputeTime = 0;
Stencil.ZeroCounters();
StencilEven.ZeroCounters();
StencilOdd.ZeroCounters();
}
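
The counters above are designed to bracket a timing run; a minimal usage sketch (the action instance Ds, the fields src and res, and the count ncall are assumed to exist and are illustrative only):

Ds.ZeroCounters();                // reset call counts and timers
for(int i=0; i<ncall; i++){
  Ds.Dhop(src, res, DaggerNo);    // each call accumulates DhopCalls and the timers
}
Ds.Report();                      // prints the per-call timings and flop rates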

/////////////////////////////////////////////////////////////////////////
// Implement the general interface. Here we use SAME mass on all slices

@@ -55,6 +55,16 @@ namespace QCD {
FermionField _tmp;
FermionField &tmp(void) { return _tmp; }

////////////////////////////////////////
// Performance monitoring
////////////////////////////////////////
void Report(void);
void ZeroCounters(void);
double DhopTotalTime;
double DhopCalls;
double DhopCommTime;
double DhopComputeTime;

///////////////////////////////////////////////////////////////
// Implement the abstract base
///////////////////////////////////////////////////////////////

502 lib/qcd/action/fermion/MobiusEOFAFermion.cc Normal file
@@ -0,0 +1,502 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermion.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid_Eigen_Dense.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>

namespace Grid {
namespace QCD {

template<class Impl>
MobiusEOFAFermion<Impl>::MobiusEOFAFermion(
GaugeField &_Umu,
GridCartesian &FiveDimGrid,
GridRedBlackCartesian &FiveDimRedBlackGrid,
GridCartesian &FourDimGrid,
GridRedBlackCartesian &FourDimRedBlackGrid,
RealD _mq1, RealD _mq2, RealD _mq3,
RealD _shift, int _pm, RealD _M5,
RealD _b, RealD _c, const ImplParams &p) :
AbstractEOFAFermion<Impl>(_Umu, FiveDimGrid, FiveDimRedBlackGrid,
FourDimGrid, FourDimRedBlackGrid, _mq1, _mq2, _mq3,
_shift, _pm, _M5, _b, _c, p)
{
int Ls = this->Ls;

RealD eps = 1.0;
Approx::zolotarev_data *zdata = Approx::higham(eps, this->Ls);
assert(zdata->n == this->Ls);

std::cout << GridLogMessage << "MobiusEOFAFermion (b=" << _b <<
",c=" << _c << ") with Ls=" << Ls << std::endl;
this->SetCoefficientsTanh(zdata, _b, _c);
std::cout << GridLogMessage << "EOFA parameters: (mq1=" << _mq1 <<
",mq2=" << _mq2 << ",mq3=" << _mq3 << ",shift=" << _shift <<
",pm=" << _pm << ")" << std::endl;

Approx::zolotarev_free(zdata);

if(_shift != 0.0){
SetCoefficientsPrecondShiftOps();
} else {
Mooee_shift.resize(Ls, 0.0);
MooeeInv_shift_lc.resize(Ls, 0.0);
MooeeInv_shift_norm.resize(Ls, 0.0);
MooeeInvDag_shift_lc.resize(Ls, 0.0);
MooeeInvDag_shift_norm.resize(Ls, 0.0);
}
}
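
A hedged construction sketch for this class (the gauge field Umu and the four grid pointers follow the usual Grid setup and are assumed to exist already; WilsonImplR is the standard real Wilson implementation, and the parameter values are illustrative only):

MobiusEOFAFermion<WilsonImplR> Deofa(Umu, *FGrid, *FrbGrid, *UGrid, *UrbGrid,
                                     0.01, 0.05, 1.0,   // mq1, mq2, mq3
                                     0.0,  1,           // shift, pm
                                     1.8,  1.5, 0.5);   // M5, b, c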

/***************************************************************
 * Additional EOFA operators only called outside the inverter.
 * Since speed is not essential, simple axpby-style
 * implementations should be fine.
 ***************************************************************/
template<class Impl>
void MobiusEOFAFermion<Impl>::Omega(const FermionField& psi, FermionField& Din, int sign, int dag)
{
int Ls = this->Ls;
RealD alpha = this->alpha;

Din = zero;
if((sign == 1) && (dag == 0)) { // \Omega_{+}
for(int s=0; s<Ls; ++s){
axpby_ssp(Din, 0.0, psi, 2.0*std::pow(1.0-alpha,Ls-s-1)/std::pow(1.0+alpha,Ls-s), psi, s, 0);
}
} else if((sign == -1) && (dag == 0)) { // \Omega_{-}
for(int s=0; s<Ls; ++s){
axpby_ssp(Din, 0.0, psi, 2.0*std::pow(1.0-alpha,s)/std::pow(1.0+alpha,s+1), psi, s, 0);
}
} else if((sign == 1 ) && (dag == 1)) { // \Omega_{+}^{\dagger}
for(int sp=0; sp<Ls; ++sp){
axpby_ssp(Din, 1.0, Din, 2.0*std::pow(1.0-alpha,Ls-sp-1)/std::pow(1.0+alpha,Ls-sp), psi, 0, sp);
}
} else if((sign == -1) && (dag == 1)) { // \Omega_{-}^{\dagger}
for(int sp=0; sp<Ls; ++sp){
axpby_ssp(Din, 1.0, Din, 2.0*std::pow(1.0-alpha,sp)/std::pow(1.0+alpha,sp+1), psi, 0, sp);
}
}
}
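
Reading off the axpby_ssp coefficients, each undaggered branch fills a single column of \Omega_{\pm} (the daggered branches fill the corresponding row (0,s') with the same expressions in s'):

  (\Omega_{+})_{s,0} = 2 (1-\alpha)^{Ls-1-s} / (1+\alpha)^{Ls-s}
  (\Omega_{-})_{s,0} = 2 (1-\alpha)^{s}      / (1+\alpha)^{s+1}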

// This is the operator relating the usual Ddwf to TWQCD's EOFA Dirac operator (arXiv:1706.05843, Eqn. 6).
// It also relates the preconditioned and unpreconditioned systems described in Appendix B.2.
template<class Impl>
void MobiusEOFAFermion<Impl>::Dtilde(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;
RealD b = 0.5 * ( 1.0 + this->alpha );
RealD c = 0.5 * ( 1.0 - this->alpha );
RealD mq1 = this->mq1;

for(int s=0; s<Ls; ++s){
if(s == 0) {
axpby_ssp_pminus(chi, b, psi, -c, psi, s, s+1);
axpby_ssp_pplus (chi, 1.0, chi, mq1*c, psi, s, Ls-1);
} else if(s == (Ls-1)) {
axpby_ssp_pminus(chi, b, psi, mq1*c, psi, s, 0);
axpby_ssp_pplus (chi, 1.0, chi, -c, psi, s, s-1);
} else {
axpby_ssp_pminus(chi, b, psi, -c, psi, s, s+1);
axpby_ssp_pplus (chi, 1.0, chi, -c, psi, s, s-1);
}
}
}

template<class Impl>
void MobiusEOFAFermion<Impl>::DtildeInv(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;
RealD m = this->mq1;
RealD c = 0.5 * this->alpha;
RealD d = 0.5;

RealD DtInv_p(0.0), DtInv_m(0.0);
RealD N = std::pow(c+d,Ls) + m*std::pow(c-d,Ls);
FermionField tmp(this->FermionGrid());

for(int s=0; s<Ls; ++s){
for(int sp=0; sp<Ls; ++sp){

DtInv_p = m * std::pow(-1.0,s-sp+1) * std::pow(c-d,Ls+s-sp) / std::pow(c+d,s-sp+1) / N;
DtInv_p += (s < sp) ? 0.0 : std::pow(-1.0,s-sp) * std::pow(c-d,s-sp) / std::pow(c+d,s-sp+1);
DtInv_m = m * std::pow(-1.0,sp-s+1) * std::pow(c-d,Ls+sp-s) / std::pow(c+d,sp-s+1) / N;
DtInv_m += (s > sp) ? 0.0 : std::pow(-1.0,sp-s) * std::pow(c-d,sp-s) / std::pow(c+d,sp-s+1);

if(sp == 0){
axpby_ssp_pplus (tmp, 0.0, tmp, DtInv_p, psi, s, sp);
axpby_ssp_pminus(tmp, 0.0, tmp, DtInv_m, psi, s, sp);
} else {
axpby_ssp_pplus (tmp, 1.0, tmp, DtInv_p, psi, s, sp);
axpby_ssp_pminus(tmp, 1.0, tmp, DtInv_m, psi, s, sp);
}

}}
}
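
Written out, the coefficients above are the closed-form matrix elements of \tilde{D}^{-1} in the chiral basis, with c = \alpha/2, d = 1/2 and N = (c+d)^{Ls} + m (c-d)^{Ls} as in the code:

  (\tilde{D}^{-1})^{+}_{s,s'} = [s >= s'] (-1)^{s-s'} (c-d)^{s-s'} / (c+d)^{s-s'+1}
                              -  m (-1)^{s-s'} (c-d)^{Ls+s-s'} / ( (c+d)^{s-s'+1} N )

and the P_- piece is the same expression with s and s' interchanged.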

/*****************************************************************************************************/

template<class Impl>
RealD MobiusEOFAFermion<Impl>::M(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

FermionField Din(psi._grid);

this->Meooe5D(psi, Din);
this->DW(Din, chi, DaggerNo);
axpby(chi, 1.0, 1.0, chi, psi);
this->M5D(psi, chi);
return(norm2(chi));
}

template<class Impl>
RealD MobiusEOFAFermion<Impl>::Mdag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

FermionField Din(psi._grid);

this->DW(psi, Din, DaggerYes);
this->MeooeDag5D(Din, chi);
this->M5Ddag(psi, chi);
axpby(chi, 1.0, 1.0, chi, psi);
return(norm2(chi));
}

/********************************************************************
 * Performance critical fermion operators called inside the inverter
 ********************************************************************/

template<class Impl>
void MobiusEOFAFermion<Impl>::M5D(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

std::vector<Coeff_t> diag(Ls,1.0);
std::vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1] = this->mq1;
std::vector<Coeff_t> lower(Ls,-1.0); lower[0] = this->mq1;

// no shift term
if(this->shift == 0.0){ this->M5D(psi, chi, chi, lower, diag, upper); }

// fused M + shift operation
else{ this->M5D_shift(psi, chi, chi, lower, diag, upper, Mooee_shift); }
}

template<class Impl>
void MobiusEOFAFermion<Impl>::M5Ddag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

std::vector<Coeff_t> diag(Ls,1.0);
std::vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1] = this->mq1;
std::vector<Coeff_t> lower(Ls,-1.0); lower[0] = this->mq1;

// no shift term
if(this->shift == 0.0){ this->M5Ddag(psi, chi, chi, lower, diag, upper); }

// fused M + shift operation
else{ this->M5Ddag_shift(psi, chi, chi, lower, diag, upper, Mooee_shift); }
}

// half checkerboard operations
template<class Impl>
void MobiusEOFAFermion<Impl>::Mooee(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

// coefficients of Mooee
std::vector<Coeff_t> diag = this->bee;
std::vector<Coeff_t> upper(Ls);
std::vector<Coeff_t> lower(Ls);
for(int s=0; s<Ls; s++){
upper[s] = -this->cee[s];
lower[s] = -this->cee[s];
}
upper[Ls-1] *= -this->mq1;
lower[0] *= -this->mq1;

// no shift term
if(this->shift == 0.0){ this->M5D(psi, psi, chi, lower, diag, upper); }

// fused M + shift operation
else { this->M5D_shift(psi, psi, chi, lower, diag, upper, Mooee_shift); }
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeDag(const FermionField& psi, FermionField& chi)
{
int Ls = this->Ls;

// coefficients of MooeeDag
std::vector<Coeff_t> diag = this->bee;
std::vector<Coeff_t> upper(Ls);
std::vector<Coeff_t> lower(Ls);
for(int s=0; s<Ls; s++){
if(s==0) {
upper[s] = -this->cee[s+1];
lower[s] = this->mq1*this->cee[Ls-1];
} else if(s==(Ls-1)) {
upper[s] = this->mq1*this->cee[0];
lower[s] = -this->cee[s-1];
} else {
upper[s] = -this->cee[s+1];
lower[s] = -this->cee[s-1];
}
}

// no shift term
if(this->shift == 0.0){ this->M5Ddag(psi, psi, chi, lower, diag, upper); }

// fused M + shift operation
else{ this->M5Ddag_shift(psi, psi, chi, lower, diag, upper, Mooee_shift); }
}

/****************************************************************************************/

// Computes coefficients for applying Cayley preconditioned shift operators
//  (Mooee + \Delta) --> Mooee_shift
//  (Mooee + \Delta)^{-1} --> MooeeInv_shift_lc, MooeeInv_shift_norm
//  (Mooee + \Delta)^{-dag} --> MooeeInvDag_shift_lc, MooeeInvDag_shift_norm
// For the latter two cases, the operation takes the form
//  [ (Mooee + \Delta)^{-1} \psi ]_{i} = (Mooee^{-1})_{ij} \psi_{j} +
//    ( MooeeInv_shift_norm )_{i} ( \sum_{j} [ MooeeInv_shift_lc ]_{j} P_{pm} \psi_{j} )
template<class Impl>
void MobiusEOFAFermion<Impl>::SetCoefficientsPrecondShiftOps()
{
int Ls = this->Ls;
int pm = this->pm;
RealD alpha = this->alpha;
RealD k = this->k;
RealD mq1 = this->mq1;
RealD shift = this->shift;

// Initialize
Mooee_shift.resize(Ls);
MooeeInv_shift_lc.resize(Ls);
MooeeInv_shift_norm.resize(Ls);
MooeeInvDag_shift_lc.resize(Ls);
MooeeInvDag_shift_norm.resize(Ls);

// Construct Mooee_shift
int idx(0);
Coeff_t N = ( (pm == 1) ? 1.0 : -1.0 ) * (2.0*shift*k) *
( std::pow(alpha+1.0,Ls) + mq1*std::pow(alpha-1.0,Ls) );
for(int s=0; s<Ls; ++s){
idx = (pm == 1) ? (s) : (Ls-1-s);
Mooee_shift[idx] = N * std::pow(-1.0,s) * std::pow(alpha-1.0,s) / std::pow(alpha+1.0,Ls+s+1);
}

// Tridiagonal solve for MooeeInvDag_shift_lc
{
Coeff_t m(0.0);
std::vector<Coeff_t> d = Mooee_shift;
std::vector<Coeff_t> u(Ls,0.0);
std::vector<Coeff_t> y(Ls,0.0);
std::vector<Coeff_t> q(Ls,0.0);
if(pm == 1){ u[0] = 1.0; }
else{ u[Ls-1] = 1.0; }

// Tridiagonal matrix algorithm + Sherman-Morrison formula
//
// We solve
//  ( Mooee' + u \otimes v ) MooeeInvDag_shift_lc = Mooee_shift
// where Mooee' is the tridiagonal part of Mooee_{+}, and
// u = (1,0,...,0) and v = (0,...,0,mq1*cee[0]) are chosen
// so that the outer-product u \otimes v gives the (0,Ls-1)
// entry of Mooee_{+}.
//
// We do this as two solves: Mooee'*y = d and Mooee'*q = u,
// and then construct the solution to the original system
//  MooeeInvDag_shift_lc = y - <v,y> / ( 1 + <v,q> ) q
if(pm == 1){
for(int s=1; s<Ls; ++s){
m = -this->cee[s] / this->bee[s-1];
d[s] -= m*d[s-1];
u[s] -= m*u[s-1];
}
}
y[Ls-1] = d[Ls-1] / this->bee[Ls-1];
q[Ls-1] = u[Ls-1] / this->bee[Ls-1];
for(int s=Ls-2; s>=0; --s){
if(pm == 1){
y[s] = d[s] / this->bee[s];
q[s] = u[s] / this->bee[s];
} else {
y[s] = ( d[s] + this->cee[s]*y[s+1] ) / this->bee[s];
q[s] = ( u[s] + this->cee[s]*q[s+1] ) / this->bee[s];
}
}

// Construct MooeeInvDag_shift_lc
for(int s=0; s<Ls; ++s){
if(pm == 1){
MooeeInvDag_shift_lc[s] = y[s] - mq1*this->cee[0]*y[Ls-1] /
(1.0+mq1*this->cee[0]*q[Ls-1]) * q[s];
} else {
MooeeInvDag_shift_lc[s] = y[s] - mq1*this->cee[Ls-1]*y[0] /
(1.0+mq1*this->cee[Ls-1]*q[0]) * q[s];
}
}

// Compute remaining coefficients
N = (pm == 1) ? (1.0 + MooeeInvDag_shift_lc[Ls-1]) : (1.0 + MooeeInvDag_shift_lc[0]);
for(int s=0; s<Ls; ++s){

// MooeeInv_shift_lc
if(pm == 1){ MooeeInv_shift_lc[s] = std::pow(this->bee[s],s) * std::pow(this->cee[s],Ls-1-s); }
else{ MooeeInv_shift_lc[s] = std::pow(this->bee[s],Ls-1-s) * std::pow(this->cee[s],s); }

// MooeeInv_shift_norm
MooeeInv_shift_norm[s] = -MooeeInvDag_shift_lc[s] /
( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N;

// MooeeInvDag_shift_norm
if(pm == 1){ MooeeInvDag_shift_norm[s] = -std::pow(this->bee[s],s) * std::pow(this->cee[s],Ls-1-s) /
( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N; }
else{ MooeeInvDag_shift_norm[s] = -std::pow(this->bee[s],Ls-1-s) * std::pow(this->cee[s],s) /
( std::pow(this->bee[s],Ls) + mq1*std::pow(this->cee[s],Ls) ) / N; }
}
}
}
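
The comment block inside the solve describes the trick in words; the same two-solve construction in isolation, for a generic lower-bidiagonal matrix T (diagonal b, subdiagonal c) plus a rank-one update u v^T, might look like the following sketch (all names here are local to the sketch, not part of the patch):

#include <vector>

// Forward substitution for T x = d with T lower bidiagonal:
// T(i,i) = b[i], T(i,i-1) = c[i].
std::vector<double> SolveBidiag(const std::vector<double> &b,
                                const std::vector<double> &c,
                                const std::vector<double> &d)
{
  int n = d.size();
  std::vector<double> x(n);
  x[0] = d[0] / b[0];
  for(int i=1; i<n; i++){ x[i] = ( d[i] - c[i]*x[i-1] ) / b[i]; }
  return x;
}

// Sherman-Morrison: solve (T + u v^T) x = d via two bidiagonal solves,
// T y = d and T q = u, then x = y - <v,y> / (1 + <v,q>) q.
std::vector<double> ShermanMorrison(const std::vector<double> &b, const std::vector<double> &c,
                                    const std::vector<double> &u, const std::vector<double> &v,
                                    const std::vector<double> &d)
{
  std::vector<double> y = SolveBidiag(b, c, d);
  std::vector<double> q = SolveBidiag(b, c, u);
  double vy = 0.0, vq = 0.0;
  for(std::size_t i=0; i<v.size(); i++){ vy += v[i]*y[i]; vq += v[i]*q[i]; }
  std::vector<double> x(y);
  for(std::size_t i=0; i<x.size(); i++){ x[i] = y[i] - vy/(1.0+vq) * q[i]; }
  return x;
}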

// Recompute coefficients for a different value of shift constant
template<class Impl>
void MobiusEOFAFermion<Impl>::RefreshShiftCoefficients(RealD new_shift)
{
this->shift = new_shift;
if(new_shift != 0.0){
SetCoefficientsPrecondShiftOps();
} else {
int Ls = this->Ls;
Mooee_shift.resize(Ls,0.0);
MooeeInv_shift_lc.resize(Ls,0.0);
MooeeInv_shift_norm.resize(Ls,0.0);
MooeeInvDag_shift_lc.resize(Ls,0.0);
MooeeInvDag_shift_norm.resize(Ls,0.0);
}
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInternalCompute(int dag, int inv,
Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
int Ls = this->Ls;

GridBase* grid = this->FermionRedBlackGrid();
int LLs = grid->_rdimensions[0];

if(LLs == Ls){ return; } // Not vectorised in 5th direction

Eigen::MatrixXcd Pplus = Eigen::MatrixXcd::Zero(Ls,Ls);
Eigen::MatrixXcd Pminus = Eigen::MatrixXcd::Zero(Ls,Ls);

for(int s=0; s<Ls; s++){
Pplus(s,s) = this->bee[s];
Pminus(s,s) = this->bee[s];
}

for(int s=0; s<Ls-1; s++){
Pminus(s,s+1) = -this->cee[s];
Pplus(s+1,s) = -this->cee[s+1];
}

Pplus (0,Ls-1) = this->mq1*this->cee[0];
Pminus(Ls-1,0) = this->mq1*this->cee[Ls-1];

if(this->shift != 0.0){
RealD c = 0.5 * this->alpha;
RealD d = 0.5;
RealD N = this->shift * this->k * ( std::pow(c+d,Ls) + this->mq1*std::pow(c-d,Ls) );
if(this->pm == 1) {
for(int s=0; s<Ls; ++s){
Pplus(s,Ls-1) += N * std::pow(-1.0,s) * std::pow(c-d,s) / std::pow(c+d,Ls+s+1);
}
} else {
for(int s=0; s<Ls; ++s){
Pminus(s,0) += N * std::pow(-1.0,s+1) * std::pow(c-d,Ls-1-s) / std::pow(c+d,2*Ls-s);
}
}
}

Eigen::MatrixXcd PplusMat ;
Eigen::MatrixXcd PminusMat;

if(inv) {
PplusMat = Pplus.inverse();
PminusMat = Pminus.inverse();
} else {
PplusMat = Pplus;
PminusMat = Pminus;
}

if(dag){
PplusMat.adjointInPlace();
PminusMat.adjointInPlace();
}

typedef typename SiteHalfSpinor::scalar_type scalar_type;
const int Nsimd = Simd::Nsimd();
Matp.resize(Ls*LLs);
Matm.resize(Ls*LLs);

for(int s2=0; s2<Ls; s2++){
for(int s1=0; s1<LLs; s1++){
int istride = LLs;
int ostride = 1;
Simd Vp;
Simd Vm;
scalar_type *sp = (scalar_type*) &Vp;
scalar_type *sm = (scalar_type*) &Vm;
for(int l=0; l<Nsimd; l++){
if(switcheroo<Coeff_t>::iscomplex()) {
sp[l] = PplusMat (l*istride+s1*ostride,s2);
sm[l] = PminusMat(l*istride+s1*ostride,s2);
} else {
// if real
scalar_type tmp;
tmp = PplusMat (l*istride+s1*ostride,s2);
sp[l] = scalar_type(tmp.real(),tmp.real());
tmp = PminusMat(l*istride+s1*ostride,s2);
sm[l] = scalar_type(tmp.real(),tmp.real());
}
}
Matp[LLs*s2+s1] = Vp;
Matm[LLs*s2+s1] = Vm;
}}
}
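
A note on the packing loop above: with istride = LLs and ostride = 1, SIMD lane l of the vector stored at Matp[LLs*s2+s1] holds PplusMat(l*LLs+s1, s2); that is, the logical fifth coordinate decomposes across lanes as s = l*LLs + s1. This reading of the layout is inferred from the indexing, not stated elsewhere in the patch.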

FermOpTemplateInstantiate(MobiusEOFAFermion);
GparityFermOpTemplateInstantiate(MobiusEOFAFermion);

}}

133 lib/qcd/action/fermion/MobiusEOFAFermion.h Normal file
@@ -0,0 +1,133 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermion.h

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef GRID_QCD_MOBIUS_EOFA_FERMION_H
#define GRID_QCD_MOBIUS_EOFA_FERMION_H

#include <Grid/qcd/action/fermion/AbstractEOFAFermion.h>

namespace Grid {
namespace QCD {

template<class Impl>
class MobiusEOFAFermion : public AbstractEOFAFermion<Impl>
{
public:
INHERIT_IMPL_TYPES(Impl);

public:
// Shift operator coefficients for red-black preconditioned Mobius EOFA
std::vector<Coeff_t> Mooee_shift;
std::vector<Coeff_t> MooeeInv_shift_lc;
std::vector<Coeff_t> MooeeInv_shift_norm;
std::vector<Coeff_t> MooeeInvDag_shift_lc;
std::vector<Coeff_t> MooeeInvDag_shift_norm;

virtual void Instantiatable(void) {};

// EOFA-specific operations
virtual void Omega (const FermionField& in, FermionField& out, int sign, int dag);
virtual void Dtilde (const FermionField& in, FermionField& out);
virtual void DtildeInv (const FermionField& in, FermionField& out);

// override multiply
virtual RealD M (const FermionField& in, FermionField& out);
virtual RealD Mdag (const FermionField& in, FermionField& out);

// half checkerboard operations
virtual void Mooee (const FermionField& in, FermionField& out);
virtual void MooeeDag (const FermionField& in, FermionField& out);
virtual void MooeeInv (const FermionField& in, FermionField& out);
virtual void MooeeInv_shift (const FermionField& in, FermionField& out);
virtual void MooeeInvDag (const FermionField& in, FermionField& out);
virtual void MooeeInvDag_shift(const FermionField& in, FermionField& out);

virtual void M5D (const FermionField& psi, FermionField& chi);
virtual void M5Ddag (const FermionField& psi, FermionField& chi);

/////////////////////////////////////////////////////
// Instantiate different versions depending on Impl
/////////////////////////////////////////////////////
void M5D(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper);

void M5D_shift(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
std::vector<Coeff_t>& shift_coeffs);

void M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper);

void M5Ddag_shift(const FermionField& psi, const FermionField& phi, FermionField& chi,
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
std::vector<Coeff_t>& shift_coeffs);

void MooeeInternal(const FermionField& in, FermionField& out, int dag, int inv);

void MooeeInternalCompute(int dag, int inv, Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

void MooeeInternalAsm(const FermionField& in, FermionField& out, int LLs, int site,
Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

void MooeeInternalZAsm(const FermionField& in, FermionField& out, int LLs, int site,
Vector<iSinglet<Simd>>& Matp, Vector<iSinglet<Simd>>& Matm);

virtual void RefreshShiftCoefficients(RealD new_shift);

// Constructors
MobiusEOFAFermion(GaugeField& _Umu, GridCartesian& FiveDimGrid, GridRedBlackCartesian& FiveDimRedBlackGrid,
GridCartesian& FourDimGrid, GridRedBlackCartesian& FourDimRedBlackGrid,
RealD _mq1, RealD _mq2, RealD _mq3, RealD _shift, int pm,
RealD _M5, RealD _b, RealD _c, const ImplParams& p=ImplParams());

protected:
void SetCoefficientsPrecondShiftOps(void);
};
}}

#define INSTANTIATE_DPERP_MOBIUS_EOFA(A)\
template void MobiusEOFAFermion<A>::M5D(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper); \
template void MobiusEOFAFermion<A>::M5D_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper, std::vector<Coeff_t>& shift_coeffs); \
template void MobiusEOFAFermion<A>::M5Ddag(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper); \
template void MobiusEOFAFermion<A>::M5Ddag_shift(const FermionField& psi, const FermionField& phi, FermionField& chi, \
std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper, std::vector<Coeff_t>& shift_coeffs); \
template void MobiusEOFAFermion<A>::MooeeInv(const FermionField& psi, FermionField& chi); \
template void MobiusEOFAFermion<A>::MooeeInv_shift(const FermionField& psi, FermionField& chi); \
template void MobiusEOFAFermion<A>::MooeeInvDag(const FermionField& psi, FermionField& chi); \
template void MobiusEOFAFermion<A>::MooeeInvDag_shift(const FermionField& psi, FermionField& chi);

#undef MOBIUS_EOFA_DPERP_DENSE
#define MOBIUS_EOFA_DPERP_CACHE
#undef MOBIUS_EOFA_DPERP_LINALG
#define MOBIUS_EOFA_DPERP_VEC

#endif

429 lib/qcd/action/fermion/MobiusEOFAFermioncache.cc Normal file
@@ -0,0 +1,429 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermioncache.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>

namespace Grid {
namespace QCD {

// FIXME -- make a version of these routines with site loop outermost for cache reuse.

template<class Impl>
void MobiusEOFAFermion<Impl>::M5D(const FermionField &psi, const FermionField &phi, FermionField &chi,
std::vector<Coeff_t> &lower, std::vector<Coeff_t> &diag, std::vector<Coeff_t> &upper)
{
int Ls = this->Ls;
GridBase *grid = psi._grid;

assert(phi.checkerboard == psi.checkerboard);
chi.checkerboard = psi.checkerboard;

// Flops = 6.0*(Nc*Ns) *Ls*vol
this->M5Dcalls++;
this->M5Dtime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){
for(int s=0; s<Ls; s++){
auto tmp = psi._odata[0];
if(s==0){
spProj5m(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+Ls-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else if(s==(Ls-1)) {
spProj5m(tmp, psi._odata[ss+0]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else {
spProj5m(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
}
}
}

this->M5Dtime += usecond();
}
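
In operator form, each pass of the loop above applies the fifth-dimension tridiagonal action (the cyclic corners carry the mass terms)

  \chi_s = diag_s \phi_s + upper_s P_- \psi_{s+1} + lower_s P_+ \psi_{s-1}

with \psi_{Ls} -> \psi_0 and \psi_{-1} -> \psi_{Ls-1}; when invoked from the single-argument M5D above, the EOFA mass enters via upper_{Ls-1} = lower_0 = mq1.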

template<class Impl>
void MobiusEOFAFermion<Impl>::M5D_shift(const FermionField &psi, const FermionField &phi, FermionField &chi,
std::vector<Coeff_t> &lower, std::vector<Coeff_t> &diag, std::vector<Coeff_t> &upper,
std::vector<Coeff_t> &shift_coeffs)
{
int Ls = this->Ls;
int shift_s = (this->pm == 1) ? (Ls-1) : 0; // s-component modified by shift operator
GridBase *grid = psi._grid;

assert(phi.checkerboard == psi.checkerboard);
chi.checkerboard = psi.checkerboard;

// Flops = 6.0*(Nc*Ns) *Ls*vol
this->M5Dcalls++;
this->M5Dtime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){
for(int s=0; s<Ls; s++){
auto tmp = psi._odata[0];
if(s==0){
spProj5m(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+Ls-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else if(s==(Ls-1)) {
spProj5m(tmp, psi._odata[ss+0]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else {
spProj5m(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5p(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
}
if(this->pm == 1){ spProj5p(tmp, psi._odata[ss+shift_s]); }
else{ spProj5m(tmp, psi._odata[ss+shift_s]); }
chi[ss+s] = chi[ss+s] + shift_coeffs[s]*tmp;
}
}

this->M5Dtime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::M5Ddag(const FermionField &psi, const FermionField &phi, FermionField &chi,
std::vector<Coeff_t> &lower, std::vector<Coeff_t> &diag, std::vector<Coeff_t> &upper)
{
int Ls = this->Ls;
GridBase *grid = psi._grid;

assert(phi.checkerboard == psi.checkerboard);
chi.checkerboard = psi.checkerboard;

// Flops = 6.0*(Nc*Ns) *Ls*vol
this->M5Dcalls++;
this->M5Dtime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){
auto tmp = psi._odata[0];
for(int s=0; s<Ls; s++){
if(s==0) {
spProj5p(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+Ls-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else if(s==(Ls-1)) {
spProj5p(tmp, psi._odata[ss+0]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else {
spProj5p(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
}
}
}

this->M5Dtime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::M5Ddag_shift(const FermionField &psi, const FermionField &phi, FermionField &chi,
std::vector<Coeff_t> &lower, std::vector<Coeff_t> &diag, std::vector<Coeff_t> &upper,
std::vector<Coeff_t> &shift_coeffs)
{
int Ls = this->Ls;
int shift_s = (this->pm == 1) ? (Ls-1) : 0; // s-component modified by shift operator
GridBase *grid = psi._grid;

assert(phi.checkerboard == psi.checkerboard);
chi.checkerboard = psi.checkerboard;

// Flops = 6.0*(Nc*Ns) *Ls*vol
this->M5Dcalls++;
this->M5Dtime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){
chi[ss+Ls-1] = zero;
auto tmp = psi._odata[0];
for(int s=0; s<Ls; s++){
if(s==0) {
spProj5p(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+Ls-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else if(s==(Ls-1)) {
spProj5p(tmp, psi._odata[ss+0]);
chi[ss+s] = chi[ss+s] + diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
} else {
spProj5p(tmp, psi._odata[ss+s+1]);
chi[ss+s] = diag[s]*phi[ss+s] + upper[s]*tmp;
spProj5m(tmp, psi._odata[ss+s-1]);
chi[ss+s] = chi[ss+s] + lower[s]*tmp;
}
if(this->pm == 1){ spProj5p(tmp, psi._odata[ss+s]); }
else{ spProj5m(tmp, psi._odata[ss+s]); }
chi[ss+shift_s] = chi[ss+shift_s] + shift_coeffs[s]*tmp;
}
}

this->M5Dtime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv(const FermionField &psi, FermionField &chi)
{
if(this->shift != 0.0){ MooeeInv_shift(psi,chi); return; }

GridBase *grid = psi._grid;
int Ls = this->Ls;

chi.checkerboard = psi.checkerboard;

this->MooeeInvCalls++;
this->MooeeInvTime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){

auto tmp = psi._odata[0];

// Apply (L^{\prime})^{-1}
chi[ss] = psi[ss]; // chi[0]=psi[0]
for(int s=1; s<Ls; s++){
spProj5p(tmp, chi[ss+s-1]);
chi[ss+s] = psi[ss+s] - this->lee[s-1]*tmp;
}

// L_m^{-1}
for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
spProj5m(tmp, chi[ss+s]);
chi[ss+Ls-1] = chi[ss+Ls-1] - this->leem[s]*tmp;
}

// U_m^{-1} D^{-1}
for(int s=0; s<Ls-1; s++){ // Chi[s] + 1/d chi[s]
spProj5p(tmp, chi[ss+Ls-1]);
chi[ss+s] = (1.0/this->dee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls-1])*tmp;
}
chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1];

// Apply U^{-1}
for(int s=Ls-2; s>=0; s--){
spProj5m(tmp, chi[ss+s+1]);
chi[ss+s] = chi[ss+s] - this->uee[s]*tmp;
}
}

this->MooeeInvTime += usecond();
}
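
Reading the four sweeps in order, this routine applies the inverse of an LDU-style factorisation of the even-even block,

  Mooee^{-1} = U^{-1} U_m^{-1} D^{-1} L_m^{-1} (L^{\prime})^{-1},

where L^{\prime} and U are the bidiagonal sweeps, L_m and U_m carry the mass corner terms, and D is the diagonal; each factor costs O(Ls) per site, so the inverse is applied without ever forming the dense matrix.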

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv_shift(const FermionField &psi, FermionField &chi)
{
GridBase *grid = psi._grid;
int Ls = this->Ls;

chi.checkerboard = psi.checkerboard;

this->MooeeInvCalls++;
this->MooeeInvTime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){

auto tmp1 = psi._odata[0];
auto tmp2 = psi._odata[0];
auto tmp2_spProj = psi._odata[0];

// Apply (L^{\prime})^{-1} and accumulate MooeeInv_shift_lc[j]*psi[j] in tmp2
chi[ss] = psi[ss]; // chi[0]=psi[0]
tmp2 = MooeeInv_shift_lc[0]*psi[ss];
for(int s=1; s<Ls; s++){
spProj5p(tmp1, chi[ss+s-1]);
chi[ss+s] = psi[ss+s] - this->lee[s-1]*tmp1;
tmp2 = tmp2 + MooeeInv_shift_lc[s]*psi[ss+s];
}
if(this->pm == 1){ spProj5p(tmp2_spProj, tmp2);}
else{ spProj5m(tmp2_spProj, tmp2); }

// L_m^{-1}
for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
spProj5m(tmp1, chi[ss+s]);
chi[ss+Ls-1] = chi[ss+Ls-1] - this->leem[s]*tmp1;
}

// U_m^{-1} D^{-1}
for(int s=0; s<Ls-1; s++){ // Chi[s] + 1/d chi[s]
spProj5p(tmp1, chi[ss+Ls-1]);
chi[ss+s] = (1.0/this->dee[s])*chi[ss+s] - (this->ueem[s]/this->dee[Ls-1])*tmp1;
}
// chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1] + MooeeInv_shift_norm[Ls-1]*tmp2_spProj;
chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1];
spProj5m(tmp1, chi[ss+Ls-1]);
chi[ss+Ls-1] = chi[ss+Ls-1] + MooeeInv_shift_norm[Ls-1]*tmp2_spProj;

// Apply U^{-1} and add shift term
for(int s=Ls-2; s>=0; s--){
chi[ss+s] = chi[ss+s] - this->uee[s]*tmp1;
spProj5m(tmp1, chi[ss+s]);
chi[ss+s] = chi[ss+s] + MooeeInv_shift_norm[s]*tmp2_spProj;
}
}

this->MooeeInvTime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag(const FermionField &psi, FermionField &chi)
{
if(this->shift != 0.0){ MooeeInvDag_shift(psi,chi); return; }

GridBase *grid = psi._grid;
int Ls = this->Ls;

chi.checkerboard = psi.checkerboard;

this->MooeeInvCalls++;
this->MooeeInvTime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){

auto tmp = psi._odata[0];

// Apply (U^{\prime})^{-dag}
chi[ss] = psi[ss];
for(int s=1; s<Ls; s++){
spProj5m(tmp, chi[ss+s-1]);
chi[ss+s] = psi[ss+s] - this->uee[s-1]*tmp;
}

// U_m^{-\dag}
for(int s=0; s<Ls-1; s++){
spProj5p(tmp, chi[ss+s]);
chi[ss+Ls-1] = chi[ss+Ls-1] - this->ueem[s]*tmp;
}

// L_m^{-\dag} D^{-dag}
for(int s=0; s<Ls-1; s++){
spProj5m(tmp, chi[ss+Ls-1]);
chi[ss+s] = (1.0/this->dee[s])*chi[ss+s] - (this->leem[s]/this->dee[Ls-1])*tmp;
}
chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1];

// Apply L^{-dag}
for(int s=Ls-2; s>=0; s--){
spProj5p(tmp, chi[ss+s+1]);
chi[ss+s] = chi[ss+s] - this->lee[s]*tmp;
}
}

this->MooeeInvTime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag_shift(const FermionField &psi, FermionField &chi)
{
GridBase *grid = psi._grid;
int Ls = this->Ls;

chi.checkerboard = psi.checkerboard;

this->MooeeInvCalls++;
this->MooeeInvTime -= usecond();

parallel_for(int ss=0; ss<grid->oSites(); ss+=Ls){

auto tmp1 = psi._odata[0];
auto tmp2 = psi._odata[0];
auto tmp2_spProj = psi._odata[0];

// Apply (U^{\prime})^{-dag} and accumulate MooeeInvDag_shift_lc[j]*psi[j] in tmp2
chi[ss] = psi[ss];
tmp2 = MooeeInvDag_shift_lc[0]*psi[ss];
for(int s=1; s<Ls; s++){
spProj5m(tmp1, chi[ss+s-1]);
chi[ss+s] = psi[ss+s] - this->uee[s-1]*tmp1;
tmp2 = tmp2 + MooeeInvDag_shift_lc[s]*psi[ss+s];
}
if(this->pm == 1){ spProj5p(tmp2_spProj, tmp2);}
else{ spProj5m(tmp2_spProj, tmp2); }

// U_m^{-\dag}
for(int s=0; s<Ls-1; s++){
spProj5p(tmp1, chi[ss+s]);
chi[ss+Ls-1] = chi[ss+Ls-1] - this->ueem[s]*tmp1;
}

// L_m^{-\dag} D^{-dag}
for(int s=0; s<Ls-1; s++){
spProj5m(tmp1, chi[ss+Ls-1]);
chi[ss+s] = (1.0/this->dee[s])*chi[ss+s] - (this->leem[s]/this->dee[Ls-1])*tmp1;
}
chi[ss+Ls-1] = (1.0/this->dee[Ls-1])*chi[ss+Ls-1];
spProj5p(tmp1, chi[ss+Ls-1]);
chi[ss+Ls-1] = chi[ss+Ls-1] + MooeeInvDag_shift_norm[Ls-1]*tmp2_spProj;

// Apply L^{-dag}
for(int s=Ls-2; s>=0; s--){
chi[ss+s] = chi[ss+s] - this->lee[s]*tmp1;
spProj5p(tmp1, chi[ss+s]);
chi[ss+s] = chi[ss+s] + MooeeInvDag_shift_norm[s]*tmp2_spProj;
}
}

this->MooeeInvTime += usecond();
}

#ifdef MOBIUS_EOFA_DPERP_CACHE

INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplD);

INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF);

#endif

}}

184 lib/qcd/action/fermion/MobiusEOFAFermiondense.cc Normal file
@@ -0,0 +1,184 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermiondense.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid_Eigen_Dense.h>
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>

namespace Grid {
namespace QCD {

/*
 * Dense matrix versions of routines
 */
template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv_shift(const FermionField& psi, FermionField& chi)
{
this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag_shift(const FermionField& psi, FermionField& chi)
{
this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv)
{
int Ls = this->Ls;
int LLs = psi._grid->_rdimensions[0];
int vol = psi._grid->oSites()/LLs;

int pm = this->pm;
RealD shift = this->shift;
RealD alpha = this->alpha;
RealD k = this->k;
RealD mq1 = this->mq1;

chi.checkerboard = psi.checkerboard;

assert(Ls==LLs);

Eigen::MatrixXd Pplus = Eigen::MatrixXd::Zero(Ls,Ls);
Eigen::MatrixXd Pminus = Eigen::MatrixXd::Zero(Ls,Ls);

for(int s=0;s<Ls;s++){
Pplus(s,s) = this->bee[s];
Pminus(s,s) = this->bee[s];
}

for(int s=0; s<Ls-1; s++){
Pminus(s,s+1) = -this->cee[s];
}

for(int s=0; s<Ls-1; s++){
Pplus(s+1,s) = -this->cee[s+1];
}
Pplus (0,Ls-1) = mq1*this->cee[0];
Pminus(Ls-1,0) = mq1*this->cee[Ls-1];

if(shift != 0.0){
Coeff_t N = 2.0 * ( std::pow(alpha+1.0,Ls) + mq1*std::pow(alpha-1.0,Ls) );
for(int s=0; s<Ls; ++s){
if(pm == 1){ Pplus(s,Ls-1) += shift * k * N * std::pow(-1.0,s) * std::pow(alpha-1.0,s) / std::pow(alpha+1.0,Ls+s+1); }
else{ Pminus(Ls-1-s,Ls-1) -= shift * k * N * std::pow(-1.0,s) * std::pow(alpha-1.0,s) / std::pow(alpha+1.0,Ls+s+1); }
}
}

Eigen::MatrixXd PplusMat ;
Eigen::MatrixXd PminusMat;

if(inv){
PplusMat = Pplus.inverse();
PminusMat = Pminus.inverse();
} else {
PplusMat = Pplus;
PminusMat = Pminus;
}

if(dag){
PplusMat.adjointInPlace();
PminusMat.adjointInPlace();
}

// For the non-vectorised s-direction this is simple

for(auto site=0; site<vol; site++){

SiteSpinor SiteChi;
SiteHalfSpinor SitePplus;
SiteHalfSpinor SitePminus;

for(int s1=0; s1<Ls; s1++){
SiteChi = zero;
for(int s2=0; s2<Ls; s2++){
int lex2 = s2 + Ls*site;
if(PplusMat(s1,s2) != 0.0){
spProj5p(SitePplus,psi[lex2]);
accumRecon5p(SiteChi, PplusMat(s1,s2)*SitePplus);
}
if(PminusMat(s1,s2) != 0.0){
spProj5m(SitePminus, psi[lex2]);
accumRecon5m(SiteChi, PminusMat(s1,s2)*SitePminus);
}
}
chi[s1+Ls*site] = SiteChi*0.5;
}
}
}
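
Worth noting: the double (s1,s2) loop makes this dense path O(Ls^2) spin projections per 4d site, against the O(Ls) sweeps of the cache implementation above; the dense route accepts that cost in exchange for handling the shifted Mooee matrix with a single Eigen inverse. This is an observation read off the loop structure, not a benchmarked claim.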

#ifdef MOBIUS_EOFA_DPERP_DENSE

INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplD);

template void MobiusEOFAFermion<GparityWilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<GparityWilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<WilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<WilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZWilsonImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZWilsonImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF);

template void MobiusEOFAFermion<GparityWilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<GparityWilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<WilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<WilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZWilsonImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZWilsonImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

#endif

}}

290 lib/qcd/action/fermion/MobiusEOFAFermionssp.cc Normal file
@@ -0,0 +1,290 @@

/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermionssp.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>

namespace Grid {
namespace QCD {

// FIXME -- make a version of these routines with site loop outermost for cache reuse.
// Pminus forwards
|
||||
// Pplus backwards
|
||||
template<class Impl>
|
||||
void MobiusEOFAFermion<Impl>::M5D(const FermionField& psi, const FermionField& phi,
|
||||
FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
|
||||
{
|
||||
Coeff_t one(1.0);
|
||||
int Ls = this->Ls;
|
||||
for(int s=0; s<Ls; s++){
|
||||
if(s==0) {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, Ls-1);
|
||||
} else if (s==(Ls-1)) {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, 0);
|
||||
axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, s-1);
|
||||
} else {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pplus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void MobiusEOFAFermion<Impl>::M5D_shift(const FermionField& psi, const FermionField& phi,
|
||||
FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
|
||||
std::vector<Coeff_t>& shift_coeffs)
|
||||
{
|
||||
Coeff_t one(1.0);
|
||||
int Ls = this->Ls;
|
||||
for(int s=0; s<Ls; s++){
|
||||
if(s==0) {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, Ls-1);
|
||||
} else if (s==(Ls-1)) {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, 0);
|
||||
axpby_ssp_pplus (chi, one, chi, lower[s], psi, s, s-1);
|
||||
} else {
|
||||
axpby_ssp_pminus(chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pplus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
}
|
||||
if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, s, Ls-1); }
|
||||
else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, s, 0); }
|
||||
}
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void MobiusEOFAFermion<Impl>::M5Ddag(const FermionField& psi, const FermionField& phi,
|
||||
FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
|
||||
{
|
||||
Coeff_t one(1.0);
|
||||
int Ls = this->Ls;
|
||||
for(int s=0; s<Ls; s++){
|
||||
if(s==0) {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, Ls-1);
|
||||
} else if (s==(Ls-1)) {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, 0);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
} else {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void MobiusEOFAFermion<Impl>::M5Ddag_shift(const FermionField& psi, const FermionField& phi,
|
||||
FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
|
||||
std::vector<Coeff_t>& shift_coeffs)
|
||||
{
|
||||
Coeff_t one(1.0);
|
||||
int Ls = this->Ls;
|
||||
for(int s=0; s<Ls; s++){
|
||||
if(s==0) {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, Ls-1);
|
||||
} else if (s==(Ls-1)) {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, 0);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
} else {
|
||||
axpby_ssp_pplus (chi, diag[s], phi, upper[s], psi, s, s+1);
|
||||
axpby_ssp_pminus(chi, one, chi, lower[s], psi, s, s-1);
|
||||
}
|
||||
if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, Ls-1, s); }
|
||||
else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, 0, s); }
|
||||
}
|
||||
}
|

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  if(this->shift != 0.0){ MooeeInv_shift(psi,chi); return; }

  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  // Apply (L^{\prime})^{-1}
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  for(int s=1; s<Ls; s++){
    axpby_ssp_pplus(chi, one, psi, -this->lee[s-1], chi, s, s-1); // recursion Psi[s] -lee P_+ chi[s-1]
  }

  // L_m^{-1}
  for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
    axpby_ssp_pminus(chi, one, chi, -this->leem[s], chi, Ls-1, s);
  }

  // U_m^{-1} D^{-1}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one/this->dee[s], chi, -this->ueem[s]/this->dee[Ls-1], chi, s, Ls-1);
  }
  axpby_ssp(chi, one/this->dee[Ls-1], chi, czero, chi, Ls-1, Ls-1);

  // Apply U^{-1}
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pminus(chi, one, chi, -this->uee[s], chi, s, s+1); // chi[Ls]
  }
}
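
// The even-even inverse is applied as the standard LDU solve for the
// fifth-dimension tridiagonal-plus-corners matrix: forward substitution
// through L' and the boundary row L_m, then division by the diagonal D
// together with the boundary column U_m, and finally back substitution
// through U.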

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv_shift(const FermionField& psi, FermionField& chi)
{
  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  FermionField tmp(psi._grid);

  // Apply (L^{\prime})^{-1}
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  axpby_ssp(tmp, czero, tmp, this->MooeeInv_shift_lc[0], psi, 0, 0);
  for(int s=1; s<Ls; s++){
    axpby_ssp_pplus(chi, one, psi, -this->lee[s-1], chi, s, s-1); // recursion Psi[s] -lee P_+ chi[s-1]
    axpby_ssp(tmp, one, tmp, this->MooeeInv_shift_lc[s], psi, 0, s);
  }

  // L_m^{-1}
  for(int s=0; s<Ls-1; s++){ // Chi[ee] = 1 - sum[s<Ls-1] -leem[s]P_- chi
    axpby_ssp_pminus(chi, one, chi, -this->leem[s], chi, Ls-1, s);
  }

  // U_m^{-1} D^{-1}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one/this->dee[s], chi, -this->ueem[s]/this->dee[Ls-1], chi, s, Ls-1);
  }
  axpby_ssp(chi, one/this->dee[Ls-1], chi, czero, chi, Ls-1, Ls-1);

  // Apply U^{-1} and add shift term
  if(this->pm == 1){ axpby_ssp_pplus (chi, one, chi, this->MooeeInv_shift_norm[Ls-1], tmp, Ls-1, 0); }
  else             { axpby_ssp_pminus(chi, one, chi, this->MooeeInv_shift_norm[Ls-1], tmp, Ls-1, 0); }
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pminus(chi, one, chi, -this->uee[s], chi, s, s+1); // chi[Ls]
    if(this->pm == 1){ axpby_ssp_pplus (chi, one, chi, this->MooeeInv_shift_norm[s], tmp, s, 0); }
    else             { axpby_ssp_pminus(chi, one, chi, this->MooeeInv_shift_norm[s], tmp, s, 0); }
  }
}
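
// Slice 0 of tmp accumulates the linear combination sum_s MooeeInv_shift_lc[s]*psi(s);
// during back substitution each output slice then receives that combination
// scaled by MooeeInv_shift_norm[s] and chirally projected. This looks like a
// Sherman-Morrison-style rank-one correction to the unshifted LDU solve,
// with lc and norm the precomputed factorised coefficients.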

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  if(this->shift != 0.0){ MooeeInvDag_shift(psi,chi); return; }

  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  // Apply (U^{\prime})^{-dagger}
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  for(int s=1; s<Ls; s++){
    axpby_ssp_pminus(chi, one, psi, -conjugate(this->uee[s-1]), chi, s, s-1);
  }

  // U_m^{-\dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->ueem[s]), chi, Ls-1, s);
  }

  // L_m^{-\dagger} D^{-dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pminus(chi, one/conjugate(this->dee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1);
  }
  axpby_ssp(chi, one/conjugate(this->dee[Ls-1]), chi, czero, chi, Ls-1, Ls-1);

  // Apply L^{-dagger}
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->lee[s]), chi, s, s+1); // chi[Ls]
  }
}
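
// The dagger solve reuses the same factorisation with the roles of L and U
// exchanged and every coefficient complex conjugated: forward substitution
// now runs through U'^dagger and U_m^dagger, back substitution through
// L^dagger.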

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag_shift(const FermionField& psi, FermionField& chi)
{
  Coeff_t one(1.0);
  Coeff_t czero(0.0);
  chi.checkerboard = psi.checkerboard;
  int Ls = this->Ls;

  FermionField tmp(psi._grid);

  // Apply (U^{\prime})^{-dagger} and accumulate (MooeeInvDag_shift_lc)_{j} \psi_{j} in tmp[0]
  axpby_ssp(chi, one, psi, czero, psi, 0, 0); // chi[0]=psi[0]
  axpby_ssp(tmp, czero, tmp, this->MooeeInvDag_shift_lc[0], psi, 0, 0);
  for(int s=1; s<Ls; s++){
    axpby_ssp_pminus(chi, one, psi, -conjugate(this->uee[s-1]), chi, s, s-1);
    axpby_ssp(tmp, one, tmp, this->MooeeInvDag_shift_lc[s], psi, 0, s);
  }

  // U_m^{-\dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->ueem[s]), chi, Ls-1, s);
  }

  // L_m^{-\dagger} D^{-dagger}
  for(int s=0; s<Ls-1; s++){
    axpby_ssp_pminus(chi, one/conjugate(this->dee[s]), chi, -conjugate(this->leem[s]/this->dee[Ls-1]), chi, s, Ls-1);
  }
  axpby_ssp(chi, one/conjugate(this->dee[Ls-1]), chi, czero, chi, Ls-1, Ls-1);

  // Apply L^{-dagger} and add shift
  if(this->pm == 1){ axpby_ssp_pplus (chi, one, chi, this->MooeeInvDag_shift_norm[Ls-1], tmp, Ls-1, 0); }
  else             { axpby_ssp_pminus(chi, one, chi, this->MooeeInvDag_shift_norm[Ls-1], tmp, Ls-1, 0); }
  for(int s=Ls-2; s>=0; s--){
    axpby_ssp_pplus(chi, one, chi, -conjugate(this->lee[s]), chi, s, s+1); // chi[Ls]
    if(this->pm == 1){ axpby_ssp_pplus (chi, one, chi, this->MooeeInvDag_shift_norm[s], tmp, s, 0); }
    else             { axpby_ssp_pminus(chi, one, chi, this->MooeeInvDag_shift_norm[s], tmp, s, 0); }
  }
}
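
// Same rank-one correction pattern as MooeeInv_shift, but built from the
// dagger coefficient sets MooeeInvDag_shift_lc / MooeeInvDag_shift_norm.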

#ifdef MOBIUS_EOFA_DPERP_LINALG

INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplD);

INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(WilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(GparityWilsonImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZWilsonImplDF);

#endif

}}
983 lib/qcd/action/fermion/MobiusEOFAFermionvec.cc Normal file
@@ -0,0 +1,983 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/MobiusEOFAFermionvec.cc

Copyright (C) 2017

Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/MobiusEOFAFermion.h>

namespace Grid {
namespace QCD {

/*
 * Dense matrix versions of routines
 */
template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInv_shift(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerNo, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInvDag_shift(const FermionField& psi, FermionField& chi)
{
  this->MooeeInternal(psi, chi, DaggerYes, InverseYes);
}
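
// Note that in this vectorised file the _shift variants dispatch to exactly
// the same MooeeInternal call as the unshifted ones; the shift contribution
// is presumably folded into the dense Matp/Matm matrices applied below
// rather than handled as a separate rank-one term.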

template<class Impl>
void MobiusEOFAFermion<Impl>::M5D(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  GridBase* grid = psi._grid;
  int Ls = this->Ls;
  int LLs = grid->_rdimensions[0];
  const int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd>> u(LLs);
  Vector<iSinglet<Simd>> l(LLs);
  Vector<iSinglet<Simd>> d(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];

  for(int o=0; o<LLs; o++){ // outer
  for(int i=0; i<nsimd; i++){ //inner
    int s = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  assert(Nc == 3);

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

#if 0

    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
    alignas(64) SiteSpinor fp;
    alignas(64) SiteSpinor fm;

    for(int v=0; v<LLs; v++){

      int vp = (v+1)%LLs;
      int vm = (v+LLs-1)%LLs;

      spProj5m(hp, psi[ss+vp]);
      spProj5p(hm, psi[ss+vm]);

      if (vp <= v){ rotate(hp, hp, 1); }
      if (vm >= v){ rotate(hm, hm, nsimd-1); }

      hp = 0.5*hp;
      hm = 0.5*hm;

      spRecon5m(fp, hp);
      spRecon5p(fm, hm);

      chi[ss+v] = d[v]*phi[ss+v];
      chi[ss+v] = chi[ss+v] + u[v]*fp;
      chi[ss+v] = chi[ss+v] + l[v]*fm;

    }

#else

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v == LLs-1) ? 0 : v+1;
      int vm = (v == 0) ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(2)(0);
      Simd hp_01 = psi[ss+vp]()(2)(1);
      Simd hp_02 = psi[ss+vp]()(2)(2);
      Simd hp_10 = psi[ss+vp]()(3)(0);
      Simd hp_11 = psi[ss+vp]()(3)(1);
      Simd hp_12 = psi[ss+vp]()(3)(2);

      Simd hm_00 = psi[ss+vm]()(0)(0);
      Simd hm_01 = psi[ss+vm]()(0)(1);
      Simd hm_02 = psi[ss+vm]()(0)(2);
      Simd hm_10 = psi[ss+vm]()(1)(0);
      Simd hm_11 = psi[ss+vm]()(1)(1);
      Simd hm_12 = psi[ss+vm]()(1)(2);

      if(vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      // Can force these to real arithmetic and save 2x.
      Simd p_00 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00);
      Simd p_01 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01);
      Simd p_02 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02);
      Simd p_10 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10);
      Simd p_11 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11);
      Simd p_12 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12);
      Simd p_20 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_21 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_22 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_30 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_31 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_32 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);
    }

#endif
  }

  this->M5Dtime += usecond();
}
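
// In this layout the Ls slices are spread across both the real-lattice index
// and the SIMD lanes (Ls == LLs * nsimd). The s+1 / s-1 neighbours therefore
// live one complex element over in the SIMD register whenever the hop wraps
// past v: tRotate<2> shifts by one complex number (two real slots) and
// tRotate<2*Nsimd()-2> by one complex number the other way, which is what
// the vp <= v / vm >= v conditions select.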

template<class Impl>
void MobiusEOFAFermion<Impl>::M5D_shift(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
  std::vector<Coeff_t>& shift_coeffs)
{
#if 0

  this->M5D(psi, phi, chi, lower, diag, upper);

  // FIXME: possible gain from vectorizing shift operation as well?
  Coeff_t one(1.0);
  int Ls = this->Ls;
  for(int s=0; s<Ls; s++){
    if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, s, Ls-1); }
    else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, s, 0); }
  }

#else

  GridBase* grid = psi._grid;
  int Ls = this->Ls;
  int LLs = grid->_rdimensions[0];
  const int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd>> u(LLs);
  Vector<iSinglet<Simd>> l(LLs);
  Vector<iSinglet<Simd>> d(LLs);
  Vector<iSinglet<Simd>> s(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];
  scalar_type* s_p = (scalar_type*) &s[0];

  for(int o=0; o<LLs; o++){ // outer
  for(int i=0; i<nsimd; i++){ //inner
    int s = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
    s_p[ss] = shift_coeffs[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  assert(Nc == 3);

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

    int vs = (this->pm == 1) ? LLs-1 : 0;
    Simd hs_00 = (this->pm == 1) ? psi[ss+vs]()(2)(0) : psi[ss+vs]()(0)(0);
    Simd hs_01 = (this->pm == 1) ? psi[ss+vs]()(2)(1) : psi[ss+vs]()(0)(1);
    Simd hs_02 = (this->pm == 1) ? psi[ss+vs]()(2)(2) : psi[ss+vs]()(0)(2);
    Simd hs_10 = (this->pm == 1) ? psi[ss+vs]()(3)(0) : psi[ss+vs]()(1)(0);
    Simd hs_11 = (this->pm == 1) ? psi[ss+vs]()(3)(1) : psi[ss+vs]()(1)(1);
    Simd hs_12 = (this->pm == 1) ? psi[ss+vs]()(3)(2) : psi[ss+vs]()(1)(2);

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v == LLs-1) ? 0 : v+1;
      int vm = (v == 0) ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(2)(0);
      Simd hp_01 = psi[ss+vp]()(2)(1);
      Simd hp_02 = psi[ss+vp]()(2)(2);
      Simd hp_10 = psi[ss+vp]()(3)(0);
      Simd hp_11 = psi[ss+vp]()(3)(1);
      Simd hp_12 = psi[ss+vp]()(3)(2);

      Simd hm_00 = psi[ss+vm]()(0)(0);
      Simd hm_01 = psi[ss+vm]()(0)(1);
      Simd hm_02 = psi[ss+vm]()(0)(2);
      Simd hm_10 = psi[ss+vm]()(1)(0);
      Simd hm_11 = psi[ss+vm]()(1)(1);
      Simd hm_12 = psi[ss+vm]()(1)(2);

      if(vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(this->pm == 1 && vs <= v){
        hs_00.v = Optimization::Rotate::tRotate<2>(hs_00.v);
        hs_01.v = Optimization::Rotate::tRotate<2>(hs_01.v);
        hs_02.v = Optimization::Rotate::tRotate<2>(hs_02.v);
        hs_10.v = Optimization::Rotate::tRotate<2>(hs_10.v);
        hs_11.v = Optimization::Rotate::tRotate<2>(hs_11.v);
        hs_12.v = Optimization::Rotate::tRotate<2>(hs_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      if(this->pm == -1 && vs >= v){
        hs_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_00.v);
        hs_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_01.v);
        hs_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_02.v);
        hs_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_10.v);
        hs_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_11.v);
        hs_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_12.v);
      }

      // Can force these to real arithmetic and save 2x.
      Simd p_00 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_00);
      Simd p_01 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_01);
      Simd p_02 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_02);
      Simd p_10 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_10);
      Simd p_11 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_11);
      Simd p_12 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_12);
      Simd p_20 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_00)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_21 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_01)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_22 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_02)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_30 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_10)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_31 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_11)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_32 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_12)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);
    }
  }

  this->M5Dtime += usecond();

#endif
}
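
// The shift column only touches one fifth-dimension slice, so the relevant
// boundary data (slice Ls-1 for pm == +1, slice 0 for pm == -1) is loaded
// once into the hs_* registers before the v loop; the same lane-rotation
// conditions as for hp_*/hm_* then keep hs_* pointing at the correct SIMD
// lane as v sweeps through the packed slices.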

template<class Impl>
void MobiusEOFAFermion<Impl>::M5Ddag(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper)
{
  GridBase* grid = psi._grid;
  int Ls = this->Ls;
  int LLs = grid->_rdimensions[0];
  int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd>> u(LLs);
  Vector<iSinglet<Simd>> l(LLs);
  Vector<iSinglet<Simd>> d(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];

  for(int o=0; o<LLs; o++){ // outer
  for(int i=0; i<nsimd; i++){ //inner
    int s = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

#if 0

    alignas(64) SiteHalfSpinor hp;
    alignas(64) SiteHalfSpinor hm;
    alignas(64) SiteSpinor fp;
    alignas(64) SiteSpinor fm;

    for(int v=0; v<LLs; v++){

      int vp = (v+1)%LLs;
      int vm = (v+LLs-1)%LLs;

      spProj5p(hp, psi[ss+vp]);
      spProj5m(hm, psi[ss+vm]);

      if(vp <= v){ rotate(hp, hp, 1); }
      if(vm >= v){ rotate(hm, hm, nsimd-1); }

      hp = hp*0.5;
      hm = hm*0.5;
      spRecon5p(fp, hp);
      spRecon5m(fm, hm);

      chi[ss+v] = d[v]*phi[ss+v]+u[v]*fp;
      chi[ss+v] = chi[ss+v] +l[v]*fm;

    }

#else

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v == LLs-1) ? 0 : v+1;
      int vm = (v == 0) ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(0)(0);
      Simd hp_01 = psi[ss+vp]()(0)(1);
      Simd hp_02 = psi[ss+vp]()(0)(2);
      Simd hp_10 = psi[ss+vp]()(1)(0);
      Simd hp_11 = psi[ss+vp]()(1)(1);
      Simd hp_12 = psi[ss+vp]()(1)(2);

      Simd hm_00 = psi[ss+vm]()(2)(0);
      Simd hm_01 = psi[ss+vm]()(2)(1);
      Simd hm_02 = psi[ss+vm]()(2)(2);
      Simd hm_10 = psi[ss+vm]()(3)(0);
      Simd hm_11 = psi[ss+vm]()(3)(1);
      Simd hm_12 = psi[ss+vm]()(3)(2);

      if (vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      Simd p_00 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_01 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_02 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_10 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_11 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_12 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);
      Simd p_20 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00);
      Simd p_21 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01);
      Simd p_22 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02);
      Simd p_30 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10);
      Simd p_31 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11);
      Simd p_32 = switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);

    }

#endif

  }

  this->M5Dtime += usecond();
}

template<class Impl>
void MobiusEOFAFermion<Impl>::M5Ddag_shift(const FermionField& psi, const FermionField& phi,
  FermionField& chi, std::vector<Coeff_t>& lower, std::vector<Coeff_t>& diag, std::vector<Coeff_t>& upper,
  std::vector<Coeff_t>& shift_coeffs)
{
#if 0

  this->M5Ddag(psi, phi, chi, lower, diag, upper);

  // FIXME: possible gain from vectorizing shift operation as well?
  Coeff_t one(1.0);
  int Ls = this->Ls;
  for(int s=0; s<Ls; s++){
    if(this->pm == 1){ axpby_ssp_pplus(chi, one, chi, shift_coeffs[s], psi, Ls-1, s); }
    else{ axpby_ssp_pminus(chi, one, chi, shift_coeffs[s], psi, 0, s); }
  }

#else

  GridBase* grid = psi._grid;
  int Ls = this->Ls;
  int LLs = grid->_rdimensions[0];
  int nsimd = Simd::Nsimd();

  Vector<iSinglet<Simd>> u(LLs);
  Vector<iSinglet<Simd>> l(LLs);
  Vector<iSinglet<Simd>> d(LLs);
  Vector<iSinglet<Simd>> s(LLs);

  assert(Ls/LLs == nsimd);
  assert(phi.checkerboard == psi.checkerboard);

  chi.checkerboard = psi.checkerboard;

  // just directly address via type pun
  typedef typename Simd::scalar_type scalar_type;
  scalar_type* u_p = (scalar_type*) &u[0];
  scalar_type* l_p = (scalar_type*) &l[0];
  scalar_type* d_p = (scalar_type*) &d[0];
  scalar_type* s_p = (scalar_type*) &s[0];

  for(int o=0; o<LLs; o++){ // outer
  for(int i=0; i<nsimd; i++){ //inner
    int s = o + i*LLs;
    int ss = o*nsimd + i;
    u_p[ss] = upper[s];
    l_p[ss] = lower[s];
    d_p[ss] = diag[s];
    s_p[ss] = shift_coeffs[s];
  }}

  this->M5Dcalls++;
  this->M5Dtime -= usecond();

  parallel_for(int ss=0; ss<grid->oSites(); ss+=LLs){ // adds LLs

    int vs = (this->pm == 1) ? LLs-1 : 0;
    Simd hs_00 = (this->pm == 1) ? psi[ss+vs]()(0)(0) : psi[ss+vs]()(2)(0);
    Simd hs_01 = (this->pm == 1) ? psi[ss+vs]()(0)(1) : psi[ss+vs]()(2)(1);
    Simd hs_02 = (this->pm == 1) ? psi[ss+vs]()(0)(2) : psi[ss+vs]()(2)(2);
    Simd hs_10 = (this->pm == 1) ? psi[ss+vs]()(1)(0) : psi[ss+vs]()(3)(0);
    Simd hs_11 = (this->pm == 1) ? psi[ss+vs]()(1)(1) : psi[ss+vs]()(3)(1);
    Simd hs_12 = (this->pm == 1) ? psi[ss+vs]()(1)(2) : psi[ss+vs]()(3)(2);

    for(int v=0; v<LLs; v++){

      vprefetch(psi[ss+v+LLs]);

      int vp = (v == LLs-1) ? 0 : v+1;
      int vm = (v == 0) ? LLs-1 : v-1;

      Simd hp_00 = psi[ss+vp]()(0)(0);
      Simd hp_01 = psi[ss+vp]()(0)(1);
      Simd hp_02 = psi[ss+vp]()(0)(2);
      Simd hp_10 = psi[ss+vp]()(1)(0);
      Simd hp_11 = psi[ss+vp]()(1)(1);
      Simd hp_12 = psi[ss+vp]()(1)(2);

      Simd hm_00 = psi[ss+vm]()(2)(0);
      Simd hm_01 = psi[ss+vm]()(2)(1);
      Simd hm_02 = psi[ss+vm]()(2)(2);
      Simd hm_10 = psi[ss+vm]()(3)(0);
      Simd hm_11 = psi[ss+vm]()(3)(1);
      Simd hm_12 = psi[ss+vm]()(3)(2);

      if (vp <= v){
        hp_00.v = Optimization::Rotate::tRotate<2>(hp_00.v);
        hp_01.v = Optimization::Rotate::tRotate<2>(hp_01.v);
        hp_02.v = Optimization::Rotate::tRotate<2>(hp_02.v);
        hp_10.v = Optimization::Rotate::tRotate<2>(hp_10.v);
        hp_11.v = Optimization::Rotate::tRotate<2>(hp_11.v);
        hp_12.v = Optimization::Rotate::tRotate<2>(hp_12.v);
      }

      if(this->pm == 1 && vs <= v){
        hs_00.v = Optimization::Rotate::tRotate<2>(hs_00.v);
        hs_01.v = Optimization::Rotate::tRotate<2>(hs_01.v);
        hs_02.v = Optimization::Rotate::tRotate<2>(hs_02.v);
        hs_10.v = Optimization::Rotate::tRotate<2>(hs_10.v);
        hs_11.v = Optimization::Rotate::tRotate<2>(hs_11.v);
        hs_12.v = Optimization::Rotate::tRotate<2>(hs_12.v);
      }

      if(vm >= v){
        hm_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_00.v);
        hm_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_01.v);
        hm_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_02.v);
        hm_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_10.v);
        hm_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_11.v);
        hm_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hm_12.v);
      }

      if(this->pm == -1 && vs >= v){
        hs_00.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_00.v);
        hs_01.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_01.v);
        hs_02.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_02.v);
        hs_10.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_10.v);
        hs_11.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_11.v);
        hs_12.v = Optimization::Rotate::tRotate<2*Simd::Nsimd()-2>(hs_12.v);
      }

      Simd p_00 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_00)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_00);
      Simd p_01 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_01)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_01);
      Simd p_02 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_02)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(0)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_02);
      Simd p_10 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_10)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(0)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_10);
      Simd p_11 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_11)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(1)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_11);
      Simd p_12 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_12)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(1)(2)) + switcheroo<Coeff_t>::mult(u[v]()()(), hp_12);
      Simd p_20 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_00)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_00);
      Simd p_21 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_01)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_01);
      Simd p_22 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(2)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_02)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_02);
      Simd p_30 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(0)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_10)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_10);
      Simd p_31 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(1)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_11)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_11);
      Simd p_32 = (this->pm == 1) ? switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12)
                                  : switcheroo<Coeff_t>::mult(d[v]()()(), phi[ss+v]()(3)(2)) + switcheroo<Coeff_t>::mult(l[v]()()(), hm_12)
                                    + switcheroo<Coeff_t>::mult(s[v]()()(), hs_12);

      vstream(chi[ss+v]()(0)(0), p_00);
      vstream(chi[ss+v]()(0)(1), p_01);
      vstream(chi[ss+v]()(0)(2), p_02);
      vstream(chi[ss+v]()(1)(0), p_10);
      vstream(chi[ss+v]()(1)(1), p_11);
      vstream(chi[ss+v]()(1)(2), p_12);
      vstream(chi[ss+v]()(2)(0), p_20);
      vstream(chi[ss+v]()(2)(1), p_21);
      vstream(chi[ss+v]()(2)(2), p_22);
      vstream(chi[ss+v]()(3)(0), p_30);
      vstream(chi[ss+v]()(3)(1), p_31);
      vstream(chi[ss+v]()(3)(2), p_32);

    }

  }

  this->M5Dtime += usecond();

#endif
}
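
// Mirror image of M5D_shift: under the dagger the shift row deposits into
// the opposite chirality block, so here pm == +1 adds s[v]*hs_* to the upper
// two spin components and pm == -1 to the lower two.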

#ifdef AVX512
#include<simd/Intel512common.h>
#include<simd/Intel512avx.h>
#include<simd/Intel512single.h>
#endif

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInternalAsm(const FermionField& psi, FermionField& chi,
  int LLs, int site, Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
#ifndef AVX512
  {
    SiteHalfSpinor BcastP;
    SiteHalfSpinor BcastM;
    SiteHalfSpinor SiteChiP;
    SiteHalfSpinor SiteChiM;

    // Ls*Ls * 2 * 12 * vol flops
    for(int s1=0; s1<LLs; s1++){

      for(int s2=0; s2<LLs; s2++){
      for(int l=0; l < Simd::Nsimd(); l++){ // simd lane

        int s = s2 + l*LLs;
        int lex = s2 + LLs*site;

        if( s2==0 && l==0 ){
          SiteChiP=zero;
          SiteChiM=zero;
        }

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vbroadcast(BcastP()(sp)(co), psi[lex]()(sp)(co), l);
        }}

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vbroadcast(BcastM()(sp)(co), psi[lex]()(sp+2)(co), l);
        }}

        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          SiteChiP()(sp)(co) = real_madd(Matp[LLs*s+s1]()()(), BcastP()(sp)(co), SiteChiP()(sp)(co)); // 1100 us.
          SiteChiM()(sp)(co) = real_madd(Matm[LLs*s+s1]()()(), BcastM()(sp)(co), SiteChiM()(sp)(co)); // each found by commenting out
        }}
      }}

      {
        int lex = s1 + LLs*site;
        for(int sp=0; sp<2; sp++){
        for(int co=0; co<Nc; co++){
          vstream(chi[lex]()(sp)(co), SiteChiP()(sp)(co));
          vstream(chi[lex]()(sp+2)(co), SiteChiM()(sp)(co));
        }}
      }
    }
  }
#else
  {
    // pointers
    // MASK_REGS;
#define Chi_00 %%zmm1
#define Chi_01 %%zmm2
#define Chi_02 %%zmm3
#define Chi_10 %%zmm4
#define Chi_11 %%zmm5
#define Chi_12 %%zmm6
#define Chi_20 %%zmm7
#define Chi_21 %%zmm8
#define Chi_22 %%zmm9
#define Chi_30 %%zmm10
#define Chi_31 %%zmm11
#define Chi_32 %%zmm12

#define BCAST0 %%zmm13
#define BCAST1 %%zmm14
#define BCAST2 %%zmm15
#define BCAST3 %%zmm16
#define BCAST4 %%zmm17
#define BCAST5 %%zmm18
#define BCAST6 %%zmm19
#define BCAST7 %%zmm20
#define BCAST8 %%zmm21
#define BCAST9 %%zmm22
#define BCAST10 %%zmm23
#define BCAST11 %%zmm24

    int incr = LLs*LLs*sizeof(iSinglet<Simd>);

    for(int s1=0; s1<LLs; s1++){

      for(int s2=0; s2<LLs; s2++){

        int lex = s2 + LLs*site;
        uint64_t a0 = (uint64_t) &Matp[LLs*s2+s1]; // should be cacheable
        uint64_t a1 = (uint64_t) &Matm[LLs*s2+s1];
        uint64_t a2 = (uint64_t) &psi[lex];

        for(int l=0; l<Simd::Nsimd(); l++){ // simd lane

          if((s2+l)==0) {
            asm(
              VPREFETCH1(0,%2) VPREFETCH1(0,%1)
              VPREFETCH1(12,%2) VPREFETCH1(13,%2)
              VPREFETCH1(14,%2) VPREFETCH1(15,%2)
              VBCASTCDUP(0,%2,BCAST0)
              VBCASTCDUP(1,%2,BCAST1)
              VBCASTCDUP(2,%2,BCAST2)
              VBCASTCDUP(3,%2,BCAST3)
              VBCASTCDUP(4,%2,BCAST4)   VMULMEM(0,%0,BCAST0,Chi_00)
              VBCASTCDUP(5,%2,BCAST5)   VMULMEM(0,%0,BCAST1,Chi_01)
              VBCASTCDUP(6,%2,BCAST6)   VMULMEM(0,%0,BCAST2,Chi_02)
              VBCASTCDUP(7,%2,BCAST7)   VMULMEM(0,%0,BCAST3,Chi_10)
              VBCASTCDUP(8,%2,BCAST8)   VMULMEM(0,%0,BCAST4,Chi_11)
              VBCASTCDUP(9,%2,BCAST9)   VMULMEM(0,%0,BCAST5,Chi_12)
              VBCASTCDUP(10,%2,BCAST10) VMULMEM(0,%1,BCAST6,Chi_20)
              VBCASTCDUP(11,%2,BCAST11) VMULMEM(0,%1,BCAST7,Chi_21)
              VMULMEM(0,%1,BCAST8,Chi_22)
              VMULMEM(0,%1,BCAST9,Chi_30)
              VMULMEM(0,%1,BCAST10,Chi_31)
              VMULMEM(0,%1,BCAST11,Chi_32)
              : : "r" (a0), "r" (a1), "r" (a2) );
          } else {
            asm(
              VBCASTCDUP(0,%2,BCAST0)   VMADDMEM(0,%0,BCAST0,Chi_00)
              VBCASTCDUP(1,%2,BCAST1)   VMADDMEM(0,%0,BCAST1,Chi_01)
              VBCASTCDUP(2,%2,BCAST2)   VMADDMEM(0,%0,BCAST2,Chi_02)
              VBCASTCDUP(3,%2,BCAST3)   VMADDMEM(0,%0,BCAST3,Chi_10)
              VBCASTCDUP(4,%2,BCAST4)   VMADDMEM(0,%0,BCAST4,Chi_11)
              VBCASTCDUP(5,%2,BCAST5)   VMADDMEM(0,%0,BCAST5,Chi_12)
              VBCASTCDUP(6,%2,BCAST6)   VMADDMEM(0,%1,BCAST6,Chi_20)
              VBCASTCDUP(7,%2,BCAST7)   VMADDMEM(0,%1,BCAST7,Chi_21)
              VBCASTCDUP(8,%2,BCAST8)   VMADDMEM(0,%1,BCAST8,Chi_22)
              VBCASTCDUP(9,%2,BCAST9)   VMADDMEM(0,%1,BCAST9,Chi_30)
              VBCASTCDUP(10,%2,BCAST10) VMADDMEM(0,%1,BCAST10,Chi_31)
              VBCASTCDUP(11,%2,BCAST11) VMADDMEM(0,%1,BCAST11,Chi_32)
              : : "r" (a0), "r" (a1), "r" (a2) );
          }

          a0 = a0 + incr;
          a1 = a1 + incr;
          a2 = a2 + sizeof(Simd::scalar_type);
        }
      }

      {
        int lexa = s1+LLs*site;
        asm (
          VSTORE(0,%0,Chi_00) VSTORE(1 ,%0,Chi_01) VSTORE(2 ,%0,Chi_02)
          VSTORE(3,%0,Chi_10) VSTORE(4 ,%0,Chi_11) VSTORE(5 ,%0,Chi_12)
          VSTORE(6,%0,Chi_20) VSTORE(7 ,%0,Chi_21) VSTORE(8 ,%0,Chi_22)
          VSTORE(9,%0,Chi_30) VSTORE(10,%0,Chi_31) VSTORE(11,%0,Chi_32)
          : : "r" ((uint64_t)&chi[lexa]) : "memory" );
      }
    }
  }

#undef Chi_00
#undef Chi_01
#undef Chi_02
#undef Chi_10
#undef Chi_11
#undef Chi_12
#undef Chi_20
#undef Chi_21
#undef Chi_22
#undef Chi_30
#undef Chi_31
#undef Chi_32

#undef BCAST0
#undef BCAST1
#undef BCAST2
#undef BCAST3
#undef BCAST4
#undef BCAST5
#undef BCAST6
#undef BCAST7
#undef BCAST8
#undef BCAST9
#undef BCAST10
#undef BCAST11

#endif
};
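
// Both branches apply the same dense LLs x LLs matrices Matp (acting on the
// upper spinor components) and Matm (lower components) at a single 4d site,
// i.e. roughly Ls*Ls*2*12 flops per site as the comment above counts. The
// portable branch broadcasts each SIMD lane of psi and accumulates with
// real_madd; the AVX512 branch instead keeps the twelve colour/spin
// accumulators resident in zmm registers across the whole s2 sweep.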

// Z-mobius version
template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInternalZAsm(const FermionField& psi, FermionField& chi,
  int LLs, int site, Vector<iSinglet<Simd> >& Matp, Vector<iSinglet<Simd> >& Matm)
{
  std::cout << "Error: zMobius not implemented for EOFA" << std::endl;
  exit(-1);
};

template<class Impl>
void MobiusEOFAFermion<Impl>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv)
{
  int Ls = this->Ls;
  int LLs = psi._grid->_rdimensions[0];
  int vol = psi._grid->oSites()/LLs;

  chi.checkerboard = psi.checkerboard;

  Vector<iSinglet<Simd>> Matp;
  Vector<iSinglet<Simd>> Matm;
  Vector<iSinglet<Simd>>* _Matp;
  Vector<iSinglet<Simd>>* _Matm;

  // MooeeInternalCompute(dag,inv,Matp,Matm);
  if(inv && dag){
    _Matp = &this->MatpInvDag;
    _Matm = &this->MatmInvDag;
  }

  if(inv && (!dag)){
    _Matp = &this->MatpInv;
    _Matm = &this->MatmInv;
  }

  if(!inv){
    MooeeInternalCompute(dag, inv, Matp, Matm);
    _Matp = &Matp;
    _Matm = &Matm;
  }

  assert(_Matp->size() == Ls*LLs);

  this->MooeeInvCalls++;
  this->MooeeInvTime -= usecond();

  if(switcheroo<Coeff_t>::iscomplex()){
    parallel_for(auto site=0; site<vol; site++){
      MooeeInternalZAsm(psi, chi, LLs, site, *_Matp, *_Matm);
    }
  } else {
    parallel_for(auto site=0; site<vol; site++){
      MooeeInternalAsm(psi, chi, LLs, site, *_Matp, *_Matm);
    }
  }

  this->MooeeInvTime += usecond();
}
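
// Dispatch logic: the inverse matrices (MatpInv, MatmInv and their dagger
// counterparts) are taken from precomputed members, while the forward
// (inv == 0) application rebuilds Matp/Matm on the fly via
// MooeeInternalCompute. Complex (zMobius) coefficients route to
// MooeeInternalZAsm, which currently just aborts.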

#ifdef MOBIUS_EOFA_DPERP_VEC

INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplD);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplF);

INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(DomainWallVec5dImplFH);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplDF);
INSTANTIATE_DPERP_MOBIUS_EOFA(ZDomainWallVec5dImplFH);

template void MobiusEOFAFermion<DomainWallVec5dImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<DomainWallVec5dImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZDomainWallVec5dImplF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZDomainWallVec5dImplD>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

template void MobiusEOFAFermion<DomainWallVec5dImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<DomainWallVec5dImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZDomainWallVec5dImplFH>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);
template void MobiusEOFAFermion<ZDomainWallVec5dImplDF>::MooeeInternal(const FermionField& psi, FermionField& chi, int dag, int inv);

#endif

}}
@@ -238,7 +238,33 @@ template<typename HCS,typename HS,typename S> using WilsonCompressor = WilsonCom
template<class vobj,class cobj>
class WilsonStencil : public CartesianStencil<vobj,cobj> {
public:

  double timer0;
  double timer1;
  double timer2;
  double timer3;
  double timer4;
  double timer5;
  double timer6;
  uint64_t callsi;
  void ZeroCountersi(void)
  {
    timer0=0;
    timer1=0;
    timer2=0;
    timer3=0;
    timer4=0;
    timer5=0;
    timer6=0;
    callsi=0;
  }
  void Reporti(int calls)
  {
    if ( timer0 ) std::cout << GridLogMessage << " timer0 (HaloGatherOpt) " <<timer0/calls <<std::endl;
    if ( timer1 ) std::cout << GridLogMessage << " timer1 (Communicate) " <<timer1/calls <<std::endl;
    if ( timer2 ) std::cout << GridLogMessage << " timer2 (CommsMerge ) " <<timer2/calls <<std::endl;
    if ( timer3 ) std::cout << GridLogMessage << " timer3 (commsMergeShm) " <<timer3/calls <<std::endl;
    if ( timer4 ) std::cout << GridLogMessage << " timer4 " <<timer4 <<std::endl;
  }
  typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

  std::vector<int> same_node;
@@ -252,6 +278,7 @@ public:
    : CartesianStencil<vobj,cobj> (grid,npoints,checkerboard,directions,distances) ,
      same_node(npoints)
  {
    ZeroCountersi();
    surface_list.resize(0);
  };

@@ -261,7 +288,6 @@ public:
    // Here we know the distance is 1 for WilsonStencil
    for(int point=0;point<this->_npoints;point++){
      same_node[point] = this->SameNode(point);
      // std::cout << " dir " <<point<<" same_node " <<same_node[point]<<std::endl;
    }

    for(int site = 0 ;site< vol4;site++){
@@ -282,17 +308,28 @@ public:
  {
    std::vector<std::vector<CommsRequest_t> > reqs;
    this->HaloExchangeOptGather(source,compress);
    this->CommunicateBegin(reqs);
    this->CommunicateComplete(reqs);
    double t1=usecond();
    // Asynchronous MPI calls multidirectional, Isend etc...
    // this->CommunicateBegin(reqs);
    // this->CommunicateComplete(reqs);
    // Non-overlapped directions within a thread. Asynchronous calls except MPI3, threaded up to comm threads ways.
    this->Communicate();
    double t2=usecond(); timer1 += t2-t1;
    this->CommsMerge(compress);
    double t3=usecond(); timer2 += t3-t2;
    this->CommsMergeSHM(compress);
    double t4=usecond(); timer3 += t4-t3;
  }

  template <class compressor>
  void HaloExchangeOptGather(const Lattice<vobj> &source,compressor &compress)
  {
    this->Prepare();
    double t0=usecond();
    this->HaloGatherOpt(source,compress);
    double t1=usecond();
    timer0 += t1-t0;
    callsi++;
  }

  template <class compressor>
@@ -304,7 +341,9 @@ public:
    typedef typename compressor::SiteHalfSpinor SiteHalfSpinor;
    typedef typename compressor::SiteHalfCommSpinor SiteHalfCommSpinor;

    this->mpi3synctime_g-=usecond();
    this->_grid->StencilBarrier();
    this->mpi3synctime_g+=usecond();

    assert(source._grid==this->_grid);
    this->halogtime-=usecond();
@@ -323,7 +362,6 @@ public:
    int dag = compress.dag;
    int face_idx=0;
    if ( dag ) {
      // std::cout << " Optimised Dagger compress " <<std::endl;
      assert(same_node[Xp]==this->HaloGatherDir(source,XpCompress,Xp,face_idx));
      assert(same_node[Yp]==this->HaloGatherDir(source,YpCompress,Yp,face_idx));
      assert(same_node[Zp]==this->HaloGatherDir(source,ZpCompress,Zp,face_idx));

@@ -123,22 +123,24 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
  int vol4;
  vol4=FourDimGrid.oSites();
  Stencil.BuildSurfaceList(LLs,vol4);

  vol4=FourDimRedBlackGrid.oSites();
  StencilEven.BuildSurfaceList(LLs,vol4);
  StencilOdd.BuildSurfaceList(LLs,vol4);

  std::cout << GridLogMessage << " SurfaceLists "<< Stencil.surface_list.size()
                              <<" " << StencilEven.surface_list.size()<<std::endl;
  // std::cout << GridLogMessage << " SurfaceLists "<< Stencil.surface_list.size()
  //                             <<" " << StencilEven.surface_list.size()<<std::endl;

}

template<class Impl>
void WilsonFermion5D<Impl>::Report(void)
{
  std::vector<int> latt = GridDefaultLatt();
  RealD volume = Ls; for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];
  RealD NP = _FourDimGrid->_Nprocessors;
  RealD NN = _FourDimGrid->NodeCount();
  RealD NP = _FourDimGrid->_Nprocessors;
  RealD NN = _FourDimGrid->NodeCount();
  RealD volume = Ls;
  std::vector<int> latt = _FourDimGrid->GlobalDimensions();
  for(int mu=0;mu<Nd;mu++) volume=volume*latt[mu];

  if ( DhopCalls > 0 ) {
    std::cout << GridLogMessage << "#### Dhop calls report " << std::endl;
@@ -184,6 +186,11 @@ void WilsonFermion5D<Impl>::Report(void)
    std::cout << GridLogMessage << "WilsonFermion5D StencilEven"<<std::endl; StencilEven.Report();
    std::cout << GridLogMessage << "WilsonFermion5D StencilOdd" <<std::endl; StencilOdd.Report();
  }
  if ( DhopCalls > 0){
    std::cout << GridLogMessage << "WilsonFermion5D Stencil Reporti()" <<std::endl; Stencil.Reporti(DhopCalls);
    std::cout << GridLogMessage << "WilsonFermion5D StencilEven Reporti()"<<std::endl; StencilEven.Reporti(DhopCalls);
    std::cout << GridLogMessage << "WilsonFermion5D StencilOdd Reporti()" <<std::endl; StencilOdd.Reporti(DhopCalls);
  }
}

template<class Impl>
@@ -203,6 +210,9 @@ void WilsonFermion5D<Impl>::ZeroCounters(void) {
  Stencil.ZeroCounters();
  StencilEven.ZeroCounters();
  StencilOdd.ZeroCounters();
  Stencil.ZeroCountersi();
  StencilEven.ZeroCountersi();
  StencilOdd.ZeroCountersi();
}


@@ -379,7 +389,6 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
{
#ifdef GRID_OMP
  // assert((dag==DaggerNo) ||(dag==DaggerYes));
  typedef CartesianCommunicator::CommsRequest_t CommsRequest_t;

  Compressor compressor(dag);

@@ -388,46 +397,70 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg

  DhopFaceTime-=usecond();
  st.HaloExchangeOptGather(in,compressor);
  DhopFaceTime+=usecond();
  std::vector<std::vector<CommsRequest_t> > reqs;

  // Rely on async comms; start comms before merge of local data
  DhopCommTime-=usecond();
  st.CommunicateBegin(reqs);

  DhopFaceTime-=usecond();
  st.CommsMergeSHM(compressor);
  st.CommsMergeSHM(compressor);// Could do this inside parallel region overlapped with comms
  DhopFaceTime+=usecond();

  // Perhaps use omp task and region
#pragma omp parallel
  double ctime=0;
  double ptime=0;

  //////////////////////////////////////////////////////////////////////////////////////////////////////
  // Ugly explicit thread mapping introduced for OPA reasons.
  //////////////////////////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel reduction(max:ctime) reduction(max:ptime)
  {
    int tid = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    int me = omp_get_thread_num();
    int myoff, mywork;

    GridThread::GetWork(len,me-1,mywork,myoff,nthreads-1);
    int sF = LLs * myoff;

    if ( me == 0 ) {
      st.CommunicateComplete(reqs);
      DhopCommTime+=usecond();
    } else {
      // Interior links in stencil
      if ( me==1 ) DhopComputeTime-=usecond();
      if (dag == DaggerYes) Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      else Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,myoff,LLs,mywork,in,out,1,0);
      if ( me==1 ) DhopComputeTime+=usecond();
    int ncomms = CartesianCommunicator::nCommThreads;
    if (ncomms == -1) ncomms = 1;
    assert(nthreads > ncomms);
    if (tid >= ncomms) {
      double start = usecond();
      nthreads -= ncomms;
      int ttid = tid - ncomms;
      int n = U._grid->oSites();
      int chunk = n / nthreads;
      int rem = n % nthreads;
      int myblock, myn;
      if (ttid < rem) {
        myblock = ttid * chunk + ttid;
        myn = chunk+1;
      } else {
        myblock = ttid*chunk + rem;
        myn = chunk;
      }
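
      // Block partition of the n sites over the compute threads: the first
      // rem threads take chunk+1 sites, the rest take chunk. For example,
      // n = 10 over 4 threads gives blocks of 3,3,2,2 starting at offsets
      // 0,3,6,8.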

      // do the compute
      if (dag == DaggerYes) {
        for (int ss = myblock; ss < myblock+myn; ++ss) {
          int sU = ss;
          int sF = LLs * sU;
          Kernels::DhopSiteDag(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out,1,0);
        }
      } else {
        for (int ss = myblock; ss < myblock+myn; ++ss) {
          int sU = ss;
          int sF = LLs * sU;
          Kernels::DhopSite(st,lo,U,st.CommBuf(),sF,sU,LLs,1,in,out,1,0);
        }
      }
      ptime = usecond() - start;
    }
    {
      double start = usecond();
      st.CommunicateThreaded();
      ctime = usecond() - start;
    }
  }
  DhopCommTime += ctime;
  DhopComputeTime+=ptime;

  // First to enter, last to leave timing
  st.CollateThreads();

  DhopFaceTime-=usecond();
  st.CommsMerge(compressor);
  DhopFaceTime+=usecond();

  // Load imbalance alert. Should use dynamic schedule OMP for loop
  // Perhaps create a list of only those sites with face work, and
  // load balance process the list.
  DhopComputeTime2-=usecond();
  if (dag == DaggerYes) {
    int sz=st.surface_list.size();
@@ -448,11 +481,9 @@ void WilsonFermion5D<Impl>::DhopInternalOverlappedComms(StencilImpl & st, Lebesg
#else
  assert(0);
#endif

}

||||
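// A minimal standalone sketch of the comms/compute thread split used in the
// hunk above, under stated assumptions: the hypothetical helpers do_comms()
// and do_compute() stand in for st.CommunicateThreaded() and the Kernels::
// DhopSite calls. The first ncomms threads drive communication while the
// remainder statically partition the site volume, exactly as the myblock/myn
// arithmetic above does.
#include <omp.h>
#include <cassert>

void overlapped_dhop_sketch(int n_sites, int ncomms,
                            void (*do_comms)(), void (*do_compute)(int, int))
{
#pragma omp parallel
  {
    int tid      = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    assert(nthreads > ncomms);
    if (tid < ncomms) {
      do_comms();                        // dedicated communication thread(s)
    } else {
      int workers = nthreads - ncomms;   // remaining threads share the volume
      int ttid    = tid - ncomms;
      int chunk   = n_sites / workers;
      int rem     = n_sites % workers;
      // first 'rem' workers take chunk+1 sites; note ttid*chunk+ttid == ttid*(chunk+1)
      int myblock = (ttid < rem) ? ttid * chunk + ttid : ttid * chunk + rem;
      int myn     = (ttid < rem) ? chunk + 1 : chunk;
      do_compute(myblock, myn);          // interior stencil work for this block
    }
  }
}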
template<class Impl>
void WilsonFermion5D<Impl>::DhopInternalSerialComms(StencilImpl & st, LebesgueOrder &lo,
DoubledGaugeField & U,

@@ -30,60 +30,181 @@ Author: paboyle <paboyle@ph.ed.ac.uk>

#define REGISTER

#define LOAD_CHIMU \
{const SiteSpinor & ref (in._odata[offset]); \
Chimu_00=ref()(0)(0);\
Chimu_01=ref()(0)(1);\
Chimu_02=ref()(0)(2);\
Chimu_10=ref()(1)(0);\
Chimu_11=ref()(1)(1);\
Chimu_12=ref()(1)(2);\
Chimu_20=ref()(2)(0);\
Chimu_21=ref()(2)(1);\
Chimu_22=ref()(2)(2);\
Chimu_30=ref()(3)(0);\
Chimu_31=ref()(3)(1);\
Chimu_32=ref()(3)(2);}
#define LOAD_CHIMU_BODY(F) \
Chimu_00=ref(F)(0)(0); \
Chimu_01=ref(F)(0)(1); \
Chimu_02=ref(F)(0)(2); \
Chimu_10=ref(F)(1)(0); \
Chimu_11=ref(F)(1)(1); \
Chimu_12=ref(F)(1)(2); \
Chimu_20=ref(F)(2)(0); \
Chimu_21=ref(F)(2)(1); \
Chimu_22=ref(F)(2)(2); \
Chimu_30=ref(F)(3)(0); \
Chimu_31=ref(F)(3)(1); \
Chimu_32=ref(F)(3)(2)

#define LOAD_CHI\
{const SiteHalfSpinor &ref(buf[offset]); \
Chi_00 = ref()(0)(0);\
Chi_01 = ref()(0)(1);\
Chi_02 = ref()(0)(2);\
Chi_10 = ref()(1)(0);\
Chi_11 = ref()(1)(1);\
Chi_12 = ref()(1)(2);}
#define LOAD_CHIMU(DIR,F,PERM) \
{ const SiteSpinor & ref (in._odata[offset]); LOAD_CHIMU_BODY(F); }

#define LOAD_CHI_BODY(F) \
Chi_00 = ref(F)(0)(0);\
Chi_01 = ref(F)(0)(1);\
Chi_02 = ref(F)(0)(2);\
Chi_10 = ref(F)(1)(0);\
Chi_11 = ref(F)(1)(1);\
Chi_12 = ref(F)(1)(2)

#define LOAD_CHI(DIR,F,PERM) \
{const SiteHalfSpinor &ref(buf[offset]); LOAD_CHI_BODY(F); }

//G-parity implementations using in-place intrinsic ops

//1l 1h -> 1h 1l
//0l 0h , 1h 1l -> 0l 1h 0h,1l
//0h,1l -> 1l,0h
//if( (distance == 1 && !perm_will_occur) || (distance == -1 && perm_will_occur) )
//Pulled fermion through forwards face, GPBC on upper component
//Need 0= 0l 1h 1= 1l 0h
//else if( (distance == -1 && !perm) || (distance == 1 && perm) )
//Pulled fermion through backwards face, GPBC on lower component
//Need 0= 1l 0h 1= 0l 1h

//1l 1h -> 1h 1l
//0l 0h , 1h 1l -> 0l 1h 0h,1l
#define DO_TWIST_0L_1H(INTO,S,C,F, PERM, tmp1, tmp2, tmp3) \
permute##PERM(tmp1, ref(1)(S)(C)); \
exchange##PERM(tmp2,tmp3, ref(0)(S)(C), tmp1); \
INTO = tmp2;

//0l 0h -> 0h 0l
//1l 1h, 0h 0l -> 1l 0h, 1h 0l
#define DO_TWIST_1L_0H(INTO,S,C,F, PERM, tmp1, tmp2, tmp3) \
permute##PERM(tmp1, ref(0)(S)(C)); \
exchange##PERM(tmp2,tmp3, ref(1)(S)(C), tmp1); \
INTO = tmp2;

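// Schematically (signs and charge-conjugation details suppressed), the twist
// implemented by the DO_TWIST macros swaps the two flavour components when a
// fermion is pulled through a G-parity boundary:
//
//   ( psi_0 )                 ( 0 1 ) ( psi_0 )
//   (       )(x + L mu)  -->  (     ) (       )(x)
//   ( psi_1 )                 ( 1 0 ) ( psi_1 )
//
// When the boundary direction is not SIMD-vectorised (sl == 1 in
// LOAD_CHI_SETUP below) the swap is just a flavour-index flip, g = (F+1) % 2;
// otherwise the permute/exchange intrinsics shuffle the SIMD lanes in place.
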
#define LOAD_CHI_SETUP(DIR,F) \
g = F; \
direction = st._directions[DIR]; \
distance = st._distances[DIR]; \
sl = st._grid->_simd_layout[direction]; \
inplace_twist = 0; \
if(SE->_around_the_world && this->Params.twists[DIR % 4]){ \
if(sl == 1){ \
g = (F+1) % 2; \
}else{ \
inplace_twist = 1; \
} \
}

#define LOAD_CHIMU_GPARITY_INPLACE_TWIST(DIR,F,PERM) \
{ const SiteSpinor &ref(in._odata[offset]); \
LOAD_CHI_SETUP(DIR,F); \
if(!inplace_twist){ \
LOAD_CHIMU_BODY(g); \
}else{ \
if( ( F==0 && ((distance == 1 && !perm) || (distance == -1 && perm)) ) || \
( F==1 && ((distance == -1 && !perm) || (distance == 1 && perm)) ) ){ \
DO_TWIST_0L_1H(Chimu_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_02,0,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_10,1,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_12,1,2,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_20,2,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_21,2,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_22,2,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_30,3,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chimu_31,3,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chimu_32,3,2,F,PERM, U_11,U_20,U_21); \
}else{ \
DO_TWIST_1L_0H(Chimu_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_02,0,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_10,1,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_12,1,2,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_20,2,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_21,2,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_22,2,2,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_30,3,0,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chimu_31,3,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chimu_32,3,2,F,PERM, U_11,U_20,U_21); \
} \
} \
}

#define LOAD_CHI_GPARITY_INPLACE_TWIST(DIR,F,PERM) \
{ const SiteHalfSpinor &ref(buf[offset]); \
LOAD_CHI_SETUP(DIR,F); \
if(!inplace_twist){ \
LOAD_CHI_BODY(g); \
}else{ \
if( ( F==0 && ((distance == 1 && !perm) || (distance == -1 && perm)) ) || \
( F==1 && ((distance == -1 && !perm) || (distance == 1 && perm)) ) ){ \
DO_TWIST_0L_1H(Chi_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chi_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_0L_1H(Chi_02,0,2,F,PERM, UChi_00,UChi_01,UChi_02); \
DO_TWIST_0L_1H(Chi_10,1,0,F,PERM, UChi_10,UChi_11,UChi_12); \
DO_TWIST_0L_1H(Chi_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_0L_1H(Chi_12,1,2,F,PERM, U_11,U_20,U_21); \
}else{ \
DO_TWIST_1L_0H(Chi_00,0,0,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chi_01,0,1,F,PERM, U_11,U_20,U_21); \
DO_TWIST_1L_0H(Chi_02,0,2,F,PERM, UChi_00,UChi_01,UChi_02); \
DO_TWIST_1L_0H(Chi_10,1,0,F,PERM, UChi_10,UChi_11,UChi_12); \
DO_TWIST_1L_0H(Chi_11,1,1,F,PERM, U_00,U_01,U_10); \
DO_TWIST_1L_0H(Chi_12,1,2,F,PERM, U_11,U_20,U_21); \
} \
} \
}

#define LOAD_CHI_GPARITY(DIR,F,PERM) LOAD_CHI_GPARITY_INPLACE_TWIST(DIR,F,PERM)
#define LOAD_CHIMU_GPARITY(DIR,F,PERM) LOAD_CHIMU_GPARITY_INPLACE_TWIST(DIR,F,PERM)

// To splat or not to splat depends on the implementation
#define MULT_2SPIN(A)\
{auto & ref(U._odata[sU](A)); \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
Impl::loadLinkElement(U_20,ref()(2,0)); \
Impl::loadLinkElement(U_01,ref()(0,1)); \
Impl::loadLinkElement(U_11,ref()(1,1)); \
Impl::loadLinkElement(U_21,ref()(2,1)); \
UChi_00 = U_00*Chi_00;\
UChi_10 = U_00*Chi_10;\
UChi_01 = U_10*Chi_00;\
UChi_11 = U_10*Chi_10;\
UChi_02 = U_20*Chi_00;\
UChi_12 = U_20*Chi_10;\
UChi_00+= U_01*Chi_01;\
UChi_10+= U_01*Chi_11;\
UChi_01+= U_11*Chi_01;\
UChi_11+= U_11*Chi_11;\
UChi_02+= U_21*Chi_01;\
UChi_12+= U_21*Chi_11;\
Impl::loadLinkElement(U_00,ref()(0,2)); \
Impl::loadLinkElement(U_10,ref()(1,2)); \
Impl::loadLinkElement(U_20,ref()(2,2)); \
UChi_00+= U_00*Chi_02;\
UChi_10+= U_00*Chi_12;\
UChi_01+= U_10*Chi_02;\
UChi_11+= U_10*Chi_12;\
UChi_02+= U_20*Chi_02;\
UChi_12+= U_20*Chi_12;}
#define MULT_2SPIN_BODY \
Impl::loadLinkElement(U_00,ref()(0,0)); \
Impl::loadLinkElement(U_10,ref()(1,0)); \
Impl::loadLinkElement(U_20,ref()(2,0)); \
Impl::loadLinkElement(U_01,ref()(0,1)); \
Impl::loadLinkElement(U_11,ref()(1,1)); \
Impl::loadLinkElement(U_21,ref()(2,1)); \
UChi_00 = U_00*Chi_00; \
UChi_10 = U_00*Chi_10; \
UChi_01 = U_10*Chi_00; \
UChi_11 = U_10*Chi_10; \
UChi_02 = U_20*Chi_00; \
UChi_12 = U_20*Chi_10; \
UChi_00+= U_01*Chi_01; \
UChi_10+= U_01*Chi_11; \
UChi_01+= U_11*Chi_01; \
UChi_11+= U_11*Chi_11; \
UChi_02+= U_21*Chi_01; \
UChi_12+= U_21*Chi_11; \
Impl::loadLinkElement(U_00,ref()(0,2)); \
Impl::loadLinkElement(U_10,ref()(1,2)); \
Impl::loadLinkElement(U_20,ref()(2,2)); \
UChi_00+= U_00*Chi_02; \
UChi_10+= U_00*Chi_12; \
UChi_01+= U_10*Chi_02; \
UChi_11+= U_10*Chi_12; \
UChi_02+= U_20*Chi_02; \
UChi_12+= U_20*Chi_12


#define MULT_2SPIN(A,F) \
{auto & ref(U._odata[sU](A)); MULT_2SPIN_BODY; }

#define MULT_2SPIN_GPARITY(A,F) \
{auto & ref(U._odata[sU](F)(A)); MULT_2SPIN_BODY; }

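// Both MULT_2SPIN bodies implement the same colour multiply, one SU(3) link
// acting on the two spin components of the half spinor:
//
//   (U chi)^i_s = sum_j U^{ij} chi^j_s ,   s = 0,1 ;  i,j = 0,1,2 ,
//
// unrolled column by column so only six link registers are live at a time;
// the G-parity variant differs only in indexing the flavour first,
// U._odata[sU](F)(A).
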
#define PERMUTE_DIR(dir) \
@@ -307,84 +428,87 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
result_31-= UChi_11; \
result_32-= UChi_12;

#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU; \
LOAD_CHIMU_IMPL(DIR,F,PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else { \
LOAD_CHI; \
LOAD_CHI_IMPL(DIR,F,PERM); \
} \
MULT_2SPIN(DIR); \
MULT_2SPIN_IMPL(DIR,F); \
RECON;

#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \

#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU; \
LOAD_CHIMU_IMPL(DIR,F,PERM); \
PROJ; \
if ( perm) { \
PERMUTE_DIR(PERM); \
} \
} else if ( st.same_node[DIR] ) { \
LOAD_CHI; \
LOAD_CHI_IMPL(DIR,F,PERM); \
} \
if (local || st.same_node[DIR] ) { \
MULT_2SPIN(DIR); \
MULT_2SPIN_IMPL(DIR,F); \
RECON; \
}

#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
LOAD_CHI_IMPL(DIR,F,PERM); \
MULT_2SPIN_IMPL(DIR,F); \
RECON; \
nmu++; \
}

#define HAND_RESULT(ss) \
#define HAND_RESULT(ss,F) \
{ \
SiteSpinor & ref (out._odata[ss]); \
vstream(ref()(0)(0),result_00); \
vstream(ref()(0)(1),result_01); \
vstream(ref()(0)(2),result_02); \
vstream(ref()(1)(0),result_10); \
vstream(ref()(1)(1),result_11); \
vstream(ref()(1)(2),result_12); \
vstream(ref()(2)(0),result_20); \
vstream(ref()(2)(1),result_21); \
vstream(ref()(2)(2),result_22); \
vstream(ref()(3)(0),result_30); \
vstream(ref()(3)(1),result_31); \
vstream(ref()(3)(2),result_32); \
vstream(ref(F)(0)(0),result_00); \
vstream(ref(F)(0)(1),result_01); \
vstream(ref(F)(0)(2),result_02); \
vstream(ref(F)(1)(0),result_10); \
vstream(ref(F)(1)(1),result_11); \
vstream(ref(F)(1)(2),result_12); \
vstream(ref(F)(2)(0),result_20); \
vstream(ref(F)(2)(1),result_21); \
vstream(ref(F)(2)(2),result_22); \
vstream(ref(F)(3)(0),result_30); \
vstream(ref(F)(3)(1),result_31); \
vstream(ref(F)(3)(2),result_32); \
}

#define HAND_RESULT_EXT(ss) \
#define HAND_RESULT_EXT(ss,F) \
if (nmu){ \
SiteSpinor & ref (out._odata[ss]); \
ref()(0)(0)+=result_00; \
ref()(0)(1)+=result_01; \
ref()(0)(2)+=result_02; \
ref()(1)(0)+=result_10; \
ref()(1)(1)+=result_11; \
ref()(1)(2)+=result_12; \
ref()(2)(0)+=result_20; \
ref()(2)(1)+=result_21; \
ref()(2)(2)+=result_22; \
ref()(3)(0)+=result_30; \
ref()(3)(1)+=result_31; \
ref()(3)(2)+=result_32; \
ref(F)(0)(0)+=result_00; \
ref(F)(0)(1)+=result_01; \
ref(F)(0)(2)+=result_02; \
ref(F)(1)(0)+=result_10; \
ref(F)(1)(1)+=result_11; \
ref(F)(1)(2)+=result_12; \
ref(F)(2)(0)+=result_20; \
ref(F)(2)(1)+=result_21; \
ref(F)(2)(2)+=result_22; \
ref(F)(3)(0)+=result_30; \
ref(F)(3)(1)+=result_31; \
ref(F)(3)(2)+=result_32; \
}

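// For orientation, the eight legs chained by these macros are the hopping
// terms of the Wilson operator, schematically (up to the kernels' overall
// sign and normalisation conventions)
//
//   D_hop psi(x) = sum_mu [ U_mu(x) (1 - gamma_mu) psi(x+mu)
//                         + U_mu^dag(x-mu) (1 + gamma_mu) psi(x-mu) ] ;
//
// the _INT/_EXT variants keep only the legs whose neighbour is on-node
// (st.same_node) or off-node, so the exterior contribution can be deferred
// until communications complete.
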
@@ -463,15 +587,18 @@ WilsonKernels<Impl>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGauge
int offset,local,perm, ptype;
StencilEntry *SE;

HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);
#define HAND_DOP_SITE(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)

HAND_DOP_SITE(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

template<class Impl>
@@ -485,16 +612,19 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,Doub

StencilEntry *SE;
int offset,local,perm, ptype;

HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);

#define HAND_DOP_SITE_DAG(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)

HAND_DOP_SITE_DAG(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

template<class Impl> void
@@ -509,16 +639,20 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGa

int offset,local,perm, ptype;
StencilEntry *SE;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT(ss);

#define HAND_DOP_SITE_INT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)

HAND_DOP_SITE_INT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

template<class Impl>
@@ -532,16 +666,20 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,D

StencilEntry *SE;
int offset,local,perm, ptype;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT(ss);

#define HAND_DOP_SITE_DAG_INT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_INT(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT(ss,F)

HAND_DOP_SITE_DAG_INT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

template<class Impl> void
@@ -557,16 +695,20 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGa
int offset,local,perm, ptype;
StencilEntry *SE;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM);
HAND_RESULT_EXT(ss);

#define HAND_DOP_SITE_EXT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xp,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Yp,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zp,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tp,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xm,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Ym,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zm,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tm,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT_EXT(ss,F)

HAND_DOP_SITE_EXT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

template<class Impl>
@@ -581,16 +723,20 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,D
StencilEntry *SE;
int offset,local,perm, ptype;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM);
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM);
HAND_RESULT_EXT(ss);

#define HAND_DOP_SITE_DAG_EXT(F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL) \
ZERO_RESULT; \
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YP_PROJ,2,Yp,YP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZP_PROJ,1,Zp,ZP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TP_PROJ,0,Tp,TP_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(XM_PROJ,3,Xm,XM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(YM_PROJ,2,Ym,YM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(ZM_PROJ,1,Zm,ZM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_STENCIL_LEG_EXT(TM_PROJ,0,Tm,TM_RECON_ACCUM,F,LOAD_CHI_IMPL,LOAD_CHIMU_IMPL,MULT_2SPIN_IMPL); \
HAND_RESULT_EXT(ss,F)

HAND_DOP_SITE_DAG_EXT(, LOAD_CHI,LOAD_CHIMU,MULT_2SPIN);
}

////////////////////////////////////////////////
@@ -646,11 +792,124 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,D
const FermionField &in, \
FermionField &out){ assert(0); } \

HAND_SPECIALISE_EMPTY(GparityWilsonImplF);
HAND_SPECIALISE_EMPTY(GparityWilsonImplD);
HAND_SPECIALISE_EMPTY(GparityWilsonImplFH);
HAND_SPECIALISE_EMPTY(GparityWilsonImplDF);

#define HAND_SPECIALISE_GPARITY(IMPL) \
template<> void \
WilsonKernels<IMPL>::HandDhopSite(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
HAND_DOP_SITE(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDag(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
HAND_DOP_SITE_DAG(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_DAG(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
HAND_DOP_SITE_INT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDagInt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
HAND_DOP_SITE_DAG_INT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
HAND_DOP_SITE_DAG_INT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
\
template<> void \
WilsonKernels<IMPL>::HandDhopSiteExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
StencilEntry *SE; \
int nmu=0; \
HAND_DOP_SITE_EXT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
nmu = 0; \
HAND_DOP_SITE_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
} \
template<> \
void WilsonKernels<IMPL>::HandDhopSiteDagExt(StencilImpl &st,LebesgueOrder &lo,DoubledGaugeField &U,SiteHalfSpinor *buf, \
int ss,int sU,const FermionField &in, FermionField &out) \
{ \
typedef IMPL Impl; \
typedef typename Simd::scalar_type S; \
typedef typename Simd::vector_type V; \
\
HAND_DECLARATIONS(ignore); \
\
StencilEntry *SE; \
int offset,local,perm, ptype, g, direction, distance, sl, inplace_twist; \
int nmu=0; \
HAND_DOP_SITE_DAG_EXT(0, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
nmu = 0; \
HAND_DOP_SITE_DAG_EXT(1, LOAD_CHI_GPARITY,LOAD_CHIMU_GPARITY,MULT_2SPIN_GPARITY); \
}

HAND_SPECIALISE_GPARITY(GparityWilsonImplF);
HAND_SPECIALISE_GPARITY(GparityWilsonImplD);
HAND_SPECIALISE_GPARITY(GparityWilsonImplFH);
HAND_SPECIALISE_GPARITY(GparityWilsonImplDF);

////////////// Wilson ; uses this implementation /////////////////////

#define INSTANTIATE_THEM(A) \

lib/qcd/action/gauge/Photon.h (new file, 286 lines)
@@ -0,0 +1,286 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/gauge/Photon.h

Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_PHOTON_ACTION_H
#define QCD_PHOTON_ACTION_H

namespace Grid{
namespace QCD{
template <class S>
class QedGimpl
{
public:
typedef S Simd;

template <typename vtype>
using iImplGaugeLink = iScalar<iScalar<iScalar<vtype>>>;
template <typename vtype>
using iImplGaugeField = iVector<iScalar<iScalar<vtype>>, Nd>;

typedef iImplGaugeLink<Simd> SiteLink;
typedef iImplGaugeField<Simd> SiteField;
typedef SiteField SiteComplex;

typedef Lattice<SiteLink> LinkField;
typedef Lattice<SiteField> Field;
typedef Field ComplexField;
};

typedef QedGimpl<vComplex> QedGimplR;

template<class Gimpl>
class Photon
{
public:
INHERIT_GIMPL_TYPES(Gimpl);
GRID_SERIALIZABLE_ENUM(Gauge, undef, feynman, 1, coulomb, 2, landau, 3);
GRID_SERIALIZABLE_ENUM(ZmScheme, undef, qedL, 1, qedTL, 2);
public:
Photon(Gauge gauge, ZmScheme zmScheme);
virtual ~Photon(void) = default;
void FreePropagator(const GaugeField &in, GaugeField &out);
void MomentumSpacePropagator(const GaugeField &in, GaugeField &out);
void StochasticWeight(GaugeLinkField &weight);
void StochasticField(GaugeField &out, GridParallelRNG &rng);
void StochasticField(GaugeField &out, GridParallelRNG &rng,
const GaugeLinkField &weight);
private:
void invKHatSquared(GaugeLinkField &out);
void zmSub(GaugeLinkField &out);
private:
Gauge gauge_;
ZmScheme zmScheme_;
};

typedef Photon<QedGimplR> PhotonR;

template<class Gimpl>
Photon<Gimpl>::Photon(Gauge gauge, ZmScheme zmScheme)
: gauge_(gauge), zmScheme_(zmScheme)
{}

template<class Gimpl>
void Photon<Gimpl>::FreePropagator (const GaugeField &in,GaugeField &out)
{
FFT theFFT(in._grid);

GaugeField in_k(in._grid);
GaugeField prop_k(in._grid);

theFFT.FFT_all_dim(in_k,in,FFT::forward);
MomentumSpacePropagator(prop_k,in_k);
theFFT.FFT_all_dim(out,prop_k,FFT::backward);
}

template<class Gimpl>
void Photon<Gimpl>::invKHatSquared(GaugeLinkField &out)
{
GridBase *grid = out._grid;
GaugeLinkField kmu(grid), one(grid);
const unsigned int nd = grid->_ndimension;
std::vector<int> &l = grid->_fdimensions;
std::vector<int> zm(nd,0);
TComplex Tone = Complex(1.0,0.0);
TComplex Tzero= Complex(0.0,0.0);

one = Complex(1.0,0.0);
out = zero;
for(int mu = 0; mu < nd; mu++)
{
Real twoPiL = M_PI*2./l[mu];

LatticeCoordinate(kmu,mu);
kmu = 2.*sin(.5*twoPiL*kmu);
out = out + kmu*kmu;
}
pokeSite(Tone, out, zm);
out = one/out;
pokeSite(Tzero, out, zm);
}

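// invKHatSquared builds the inverse of the standard lattice momentum squared,
//
//   khat_mu = 2 sin( pi n_mu / L_mu ),   out(k) = 1 / sum_mu khat_mu^2 ,
//
// with the k = 0 site poked to 1 before the division to avoid 1/0, then reset
// to 0 afterwards.
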
template<class Gimpl>
void Photon<Gimpl>::zmSub(GaugeLinkField &out)
{
GridBase *grid = out._grid;
const unsigned int nd = grid->_ndimension;

switch (zmScheme_)
{
case ZmScheme::qedTL:
{
std::vector<int> zm(nd,0);
TComplex Tzero = Complex(0.0,0.0);

pokeSite(Tzero, out, zm);

break;
}
case ZmScheme::qedL:
{
LatticeInteger spNrm(grid), coor(grid);
GaugeLinkField z(grid);

spNrm = zero;
for(int d = 0; d < grid->_ndimension - 1; d++)
{
LatticeCoordinate(coor,d);
spNrm = spNrm + coor*coor;
}
out = where(spNrm == Integer(0), 0.*out, out);

break;
}
default:
break;
}
}

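// The two schemes match the standard finite-volume QED zero-mode prescriptions:
//
//   QED_TL : A~_mu(k)|_{k=0} = 0 ,
//   QED_L  : A~_mu(k_0, vec k = 0) = 0  for all k_0 ,
//
// i.e. qedTL removes the single four-momentum zero mode, while qedL removes
// every mode with vanishing spatial momentum (the loop over d < nd-1 above).
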
template<class Gimpl>
void Photon<Gimpl>::MomentumSpacePropagator(const GaugeField &in,
GaugeField &out)
{
GridBase *grid = out._grid;
LatticeComplex k2Inv(grid);

invKHatSquared(k2Inv);
zmSub(k2Inv);

out = in*k2Inv;
}

template<class Gimpl>
void Photon<Gimpl>::StochasticWeight(GaugeLinkField &weight)
{
auto *grid = dynamic_cast<GridCartesian *>(weight._grid);
const unsigned int nd = grid->_ndimension;
std::vector<int> latt_size = grid->_fdimensions;

Integer vol = 1;
for(int d = 0; d < nd; d++)
{
vol = vol * latt_size[d];
}
invKHatSquared(weight);
weight = sqrt(vol*real(weight));
zmSub(weight);
}

template<class Gimpl>
void Photon<Gimpl>::StochasticField(GaugeField &out, GridParallelRNG &rng)
{
auto *grid = dynamic_cast<GridCartesian *>(out._grid);
GaugeLinkField weight(grid);

StochasticWeight(weight);
StochasticField(out, rng, weight);
}

template<class Gimpl>
void Photon<Gimpl>::StochasticField(GaugeField &out, GridParallelRNG &rng,
const GaugeLinkField &weight)
{
auto *grid = dynamic_cast<GridCartesian *>(out._grid);
const unsigned int nd = grid->_ndimension;
GaugeLinkField r(grid);
GaugeField aTilde(grid);
FFT fft(grid);

for(int mu = 0; mu < nd; mu++)
{
gaussian(rng, r);
r = weight*r;
pokeLorentz(aTilde, r, mu);
}
fft.FFT_all_dim(out, aTilde, FFT::backward);

out = real(out);
}
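// StochasticField thus realises a Gaussian photon field with the free
// two-point function: each momentum mode is drawn as (up to the FFT volume
// normalisation)
//
//   A~_mu(k) = sqrt( V / khat^2 ) eta_mu(k),   <eta_mu(k) eta_nu(k')> = delta_{mu nu} delta_{k k'} ,
//
// so <A A> reproduces the Feynman-gauge propagator 1/khat^2, with the zero
// modes removed from the weight by zmSub.
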
// template<class Gimpl>
// void Photon<Gimpl>::FeynmanGaugeMomentumSpacePropagator_L(GaugeField &out,
//                                                           const GaugeField &in)
// {
//
//   FeynmanGaugeMomentumSpacePropagator_TL(out,in);
//
//   GridBase *grid = out._grid;
//   LatticeInteger coor(grid);
//   GaugeField zz(grid); zz=zero;
//
//   // xyzt
//   for(int d = 0; d < grid->_ndimension-1;d++){
//     LatticeCoordinate(coor,d);
//     out = where(coor==Integer(0),zz,out);
//   }
// }
//
// template<class Gimpl>
// void Photon<Gimpl>::FeynmanGaugeMomentumSpacePropagator_TL(GaugeField &out,
//                                                            const GaugeField &in)
// {
//
//   // what type LatticeComplex
//   GridBase *grid = out._grid;
//   int nd = grid->_ndimension;
//
//   typedef typename GaugeField::vector_type vector_type;
//   typedef typename GaugeField::scalar_type ScalComplex;
//   typedef Lattice<iSinglet<vector_type> > LatComplex;
//
//   std::vector<int> latt_size = grid->_fdimensions;
//
//   LatComplex denom(grid); denom= zero;
//   LatComplex one(grid);   one = ScalComplex(1.0,0.0);
//   LatComplex kmu(grid);
//
//   ScalComplex ci(0.0,1.0);
//   // momphase = n * 2pi / L
//   for(int mu=0;mu<Nd;mu++) {
//
//     LatticeCoordinate(kmu,mu);
//
//     RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
//
//     kmu = TwoPiL * kmu ;
//
//     denom = denom + 4.0*sin(kmu*0.5)*sin(kmu*0.5); // Wilson term
//   }
//   std::vector<int> zero_mode(nd,0);
//   TComplexD Tone = ComplexD(1.0,0.0);
//   TComplexD Tzero= ComplexD(0.0,0.0);
//
//   pokeSite(Tone,denom,zero_mode);
//
//   denom= one/denom;
//
//   pokeSite(Tzero,denom,zero_mode);
//
//   out = zero;
//   out = in*denom;
// };

}}
#endif
lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h (new file, 264 lines)
@@ -0,0 +1,264 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/pseudofermion/ExactOneFlavourRatio.h

Copyright (C) 2017

Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: David Murphy <dmurphy@phys.columbia.edu>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

/////////////////////////////////////////////////////////////////
// Implementation of exact one flavour algorithm (EOFA)        //
// using fermion classes defined in:                           //
// Grid/qcd/action/fermion/DomainWallEOFAFermion.h (Shamir)    //
// Grid/qcd/action/fermion/MobiusEOFAFermion.h (Mobius)        //
// arXiv: 1403.1683, 1706.05843                                //
/////////////////////////////////////////////////////////////////

#ifndef QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H
#define QCD_PSEUDOFERMION_EXACT_ONE_FLAVOUR_RATIO_H

namespace Grid{
namespace QCD{

///////////////////////////////////////////////////////////////
// Exact one flavour implementation of DWF determinant ratio //
///////////////////////////////////////////////////////////////

template<class Impl>
class ExactOneFlavourRatioPseudoFermionAction : public Action<typename Impl::GaugeField>
{
public:
INHERIT_IMPL_TYPES(Impl);
typedef OneFlavourRationalParams Params;
Params param;
MultiShiftFunction PowerNegHalf;

private:
bool use_heatbath_forecasting;
AbstractEOFAFermion<Impl>& Lop; // the basic LH operator
AbstractEOFAFermion<Impl>& Rop; // the basic RH operator
SchurRedBlackDiagMooeeSolve<FermionField> Solver;
FermionField Phi; // the pseudofermion field for this trajectory

public:
ExactOneFlavourRatioPseudoFermionAction(AbstractEOFAFermion<Impl>& _Lop, AbstractEOFAFermion<Impl>& _Rop,
OperatorFunction<FermionField>& S, Params& p, bool use_fc=false) : Lop(_Lop), Rop(_Rop), Solver(S),
Phi(_Lop.FermionGrid()), param(p), use_heatbath_forecasting(use_fc)
{
AlgRemez remez(param.lo, param.hi, param.precision);

// MdagM^(+- 1/2)
std::cout << GridLogMessage << "Generating degree " << param.degree << " for x^(-1/2)" << std::endl;
remez.generateApprox(param.degree, 1, 2);
PowerNegHalf.Init(remez, param.tolerance, true);
};

virtual std::string action_name() { return "ExactOneFlavourRatioPseudoFermionAction"; }

virtual std::string LogParameters() {
std::stringstream sstream;
sstream << GridLogMessage << "[" << action_name() << "] Low            :" << param.lo << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] High           :" << param.hi << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Max iterations :" << param.MaxIter << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Tolerance      :" << param.tolerance << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Degree         :" << param.degree << std::endl;
sstream << GridLogMessage << "[" << action_name() << "] Precision      :" << param.precision << std::endl;
return sstream.str();
}

// Spin projection
void spProj(const FermionField& in, FermionField& out, int sign, int Ls)
{
if(sign == 1){ for(int s=0; s<Ls; ++s){ axpby_ssp_pplus(out, 0.0, in, 1.0, in, s, s); } }
else{ for(int s=0; s<Ls; ++s){ axpby_ssp_pminus(out, 0.0, in, 1.0, in, s, s); } }
}

// EOFA heatbath: see Eqn. (29) of arXiv:1706.05843
// We generate a Gaussian noise vector \eta, and then compute
// \Phi = M_{\rm EOFA}^{-1/2} * \eta
// using a rational approximation to the inverse square root
virtual void refresh(const GaugeField& U, GridParallelRNG& pRNG)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);

FermionField eta         (Lop.FermionGrid());
FermionField CG_src      (Lop.FermionGrid());
FermionField CG_soln     (Lop.FermionGrid());
FermionField Forecast_src(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());

// Use chronological inverter to forecast solutions across poles
std::vector<FermionField> prev_solns;
if(use_heatbath_forecasting){ prev_solns.reserve(param.degree); }
ChronoForecast<AbstractEOFAFermion<Impl>, FermionField> Forecast;

// Seed with Gaussian noise vector (var = 0.5)
RealD scale = std::sqrt(0.5);
gaussian(pRNG,eta);
eta = eta * scale;
printf("Heatbath source vector: <\\eta|\\eta> = %1.15e\n", norm2(eta));

// \Phi = ( \alpha_{0} + \sum_{k=1}^{N_{p}} \alpha_{k} * \gamma_{k} ) * \eta
RealD N(PowerNegHalf.norm);
for(int k=0; k<param.degree; ++k){ N += PowerNegHalf.residues[k] / ( 1.0 + PowerNegHalf.poles[k] ); }
Phi = eta * N;

// LH terms:
// \Phi = \Phi + k \sum_{k=1}^{N_{p}} P_{-} \Omega_{-}^{\dagger} ( H(mf)
//     - \gamma_{l} \Delta_{-}(mf,mb) P_{-} )^{-1} \Omega_{-} P_{-} \eta
RealD gamma_l(0.0);
spProj(eta, tmp[0], -1, Lop.Ls);
Lop.Omega(tmp[0], tmp[1], -1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = zero;
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Lop.RefreshShiftCoefficients(-gamma_l);
if(use_heatbath_forecasting){ // Forecast CG guess using solutions from previous poles
Lop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Lop, Forecast_src, prev_solns);
Solver(Lop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero; // Just use zero as the initial guess
Solver(Lop, CG_src, CG_soln);
}
Lop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] + ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Lop.k ) * tmp[0];
}
Lop.Omega(tmp[1], tmp[0], -1, 1);
spProj(tmp[0], tmp[1], -1, Lop.Ls);
Phi = Phi + tmp[1];

// RH terms:
// \Phi = \Phi - k \sum_{k=1}^{N_{p}} P_{+} \Omega_{+}^{\dagger} ( H(mb)
//     + \gamma_{l} \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \eta
spProj(eta, tmp[0], 1, Rop.Ls);
Rop.Omega(tmp[0], tmp[1], 1, 0);
G5R5(CG_src, tmp[1]);
tmp[1] = zero;
if(use_heatbath_forecasting){ prev_solns.clear(); } // empirically, LH solns don't help for RH solves
for(int k=0; k<param.degree; ++k){
gamma_l = 1.0 / ( 1.0 + PowerNegHalf.poles[k] );
Rop.RefreshShiftCoefficients(-gamma_l*PowerNegHalf.poles[k]);
if(use_heatbath_forecasting){
Rop.Mdag(CG_src, Forecast_src);
CG_soln = Forecast(Rop, Forecast_src, prev_solns);
Solver(Rop, CG_src, CG_soln);
prev_solns.push_back(CG_soln);
} else {
CG_soln = zero;
Solver(Rop, CG_src, CG_soln);
}
Rop.Dtilde(CG_soln, tmp[0]); // We actually solved Cayley preconditioned system: transform back
tmp[1] = tmp[1] - ( PowerNegHalf.residues[k]*gamma_l*gamma_l*Rop.k ) * tmp[0];
}
Rop.Omega(tmp[1], tmp[0], 1, 1);
spProj(tmp[0], tmp[1], 1, Rop.Ls);
Phi = Phi + tmp[1];

// Reset shift coefficients for energy and force evals
Lop.RefreshShiftCoefficients(0.0);
Rop.RefreshShiftCoefficients(-1.0);
};

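// The heatbath assembles Phi = M_EOFA^{-1/2} eta from the Remez
// partial-fraction approximation generated in the constructor,
//
//   x^{-1/2} ~= alpha_0 + sum_{k=1}^{N_p} alpha_k / (x + beta_k) ,
//
// one shifted solve per pole (alpha_k = PowerNegHalf.residues, beta_k =
// PowerNegHalf.poles); with use_heatbath_forecasting enabled, the
// chronological forecaster seeds each CG from the solutions accumulated at
// earlier poles.
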
// EOFA action: see Eqn. (10) of arXiv:1706.05843
virtual RealD S(const GaugeField& U)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);

FermionField spProj_Phi(Lop.FermionGrid());
std::vector<FermionField> tmp(2, Lop.FermionGrid());

// S = <\Phi|\Phi>
RealD action(norm2(Phi));

// LH term: S = S - k <\Phi| P_{-} \Omega_{-}^{\dagger} H(mf)^{-1} \Omega_{-} P_{-} |\Phi>
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, tmp[0], -1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
Solver(Lop, tmp[1], tmp[0]);
Lop.Dtilde(tmp[0], tmp[1]); // We actually solved Cayley preconditioned system: transform back
Lop.Omega(tmp[1], tmp[0], -1, 1);
action -= Lop.k * innerProduct(spProj_Phi, tmp[0]).real();

// RH term: S = S + k <\Phi| P_{+} \Omega_{+}^{\dagger} ( H(mb)
//     - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} |\Phi>
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, tmp[0], 1, 0);
G5R5(tmp[1], tmp[0]);
tmp[0] = zero;
Solver(Rop, tmp[1], tmp[0]);
Rop.Dtilde(tmp[0], tmp[1]);
Rop.Omega(tmp[1], tmp[0], 1, 1);
action += Rop.k * innerProduct(spProj_Phi, tmp[0]).real();

return action;
};

// EOFA pseudofermion force: see Eqns. (34)-(36) of arXiv:1706.05843
virtual void deriv(const GaugeField& U, GaugeField& dSdU)
{
Lop.ImportGauge(U);
Rop.ImportGauge(U);

FermionField spProj_Phi      (Lop.FermionGrid());
FermionField Omega_spProj_Phi(Lop.FermionGrid());
FermionField CG_src          (Lop.FermionGrid());
FermionField Chi             (Lop.FermionGrid());
FermionField g5_R5_Chi       (Lop.FermionGrid());

GaugeField force(Lop.GaugeGrid());

// LH: dSdU = k \chi_{L}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{L}
// \chi_{L} = H(mf)^{-1} \Omega_{-} P_{-} \Phi
spProj(Phi, spProj_Phi, -1, Lop.Ls);
Lop.Omega(spProj_Phi, Omega_spProj_Phi, -1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
Solver(Lop, CG_src, spProj_Phi);
Lop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Lop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo);
dSdU = Lop.k * force;

// RH: dSdU = dSdU - k \chi_{R}^{\dagger} \gamma_{5} R_{5} ( \partial_{x,\mu} D_{w} ) \chi_{R}
// \chi_{R} = ( H(mb) - \Delta_{+}(mf,mb) P_{+} )^{-1} \Omega_{+} P_{+} \Phi
spProj(Phi, spProj_Phi, 1, Rop.Ls);
Rop.Omega(spProj_Phi, Omega_spProj_Phi, 1, 0);
G5R5(CG_src, Omega_spProj_Phi);
spProj_Phi = zero;
Solver(Rop, CG_src, spProj_Phi);
Rop.Dtilde(spProj_Phi, Chi);
G5R5(g5_R5_Chi, Chi);
Rop.MDeriv(force, g5_R5_Chi, Chi, DaggerNo); // Rop, not Lop: this is the RH operator's derivative
dSdU = dSdU - Rop.k * force;
};
};
}}

#endif
@@ -38,5 +38,6 @@ directory
#include <Grid/qcd/action/pseudofermion/OneFlavourRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRational.h>
#include <Grid/qcd/action/pseudofermion/OneFlavourEvenOddRationalRatio.h>
#include <Grid/qcd/action/pseudofermion/ExactOneFlavourRatio.h>

#endif

@@ -31,6 +31,7 @@ directory

#include <Grid/qcd/action/scalar/ScalarImpl.h>
#include <Grid/qcd/action/scalar/ScalarAction.h>
#include <Grid/qcd/action/scalar/ScalarInteractionAction.h>

namespace Grid {
namespace QCD {
@@ -39,6 +40,10 @@ namespace QCD {
typedef ScalarAction<ScalarImplF> ScalarActionF;
typedef ScalarAction<ScalarImplD> ScalarActionD;

template <int Colours, int Dimensions> using ScalarAdjActionR = ScalarInteractionAction<ScalarNxNAdjImplR<Colours>, Dimensions>;
template <int Colours, int Dimensions> using ScalarAdjActionF = ScalarInteractionAction<ScalarNxNAdjImplF<Colours>, Dimensions>;
template <int Colours, int Dimensions> using ScalarAdjActionD = ScalarInteractionAction<ScalarNxNAdjImplD<Colours>, Dimensions>;

}
}

@@ -6,10 +6,10 @@

Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,50 +35,49 @@ directory

namespace Grid {
// FIXME drop the QCD namespace everywhere here

template <class Impl>
class ScalarAction : public QCD::Action<typename Impl::Field> {
public:

template <class Impl>
class ScalarAction : public QCD::Action<typename Impl::Field> {
public:
INHERIT_FIELD_TYPES(Impl);

private:

private:
RealD mass_square;
RealD lambda;

public:
ScalarAction(RealD ms, RealD l) : mass_square(ms), lambda(l){};

virtual std::string LogParameters(){
public:
ScalarAction(RealD ms, RealD l) : mass_square(ms), lambda(l) {}

virtual std::string LogParameters() {
std::stringstream sstream;
sstream << GridLogMessage << "[ScalarAction] lambda      : " << lambda << std::endl;
sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl;
return sstream.str();

}

virtual std::string action_name(){return "ScalarAction";}

virtual void refresh(const Field &U,
GridParallelRNG &pRNG){}; // noop as no pseudoferms

virtual std::string action_name() {return "ScalarAction";}

virtual void refresh(const Field &U, GridParallelRNG &pRNG) {} // noop as no pseudoferms

virtual RealD S(const Field &p) {
return (mass_square * 0.5 + QCD::Nd) * ScalarObs<Impl>::sumphisquared(p) +
(lambda / 24.) * ScalarObs<Impl>::sumphifourth(p) +
ScalarObs<Impl>::sumphider(p);
(lambda / 24.) * ScalarObs<Impl>::sumphifourth(p) +
ScalarObs<Impl>::sumphider(p);
};

virtual void deriv(const Field &p,
Field &force) {
Field &force) {
Field tmp(p._grid);
Field p2(p._grid);
ScalarObs<Impl>::phisquared(p2, p);
tmp = -(Cshift(p, 0, -1) + Cshift(p, 0, 1));
for (int mu = 1; mu < QCD::Nd; mu++) tmp -= Cshift(p, mu, -1) + Cshift(p, mu, 1);

force=+(mass_square + 2. * QCD::Nd) * p + (lambda / 6.) * p2 * p + tmp;
};
};

} // Grid

force =+(mass_square + 2. * QCD::Nd) * p + (lambda / 6.) * p2 * p + tmp;
}
};

} // namespace Grid

#endif // SCALAR_ACTION_H

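// A sketch of the quantities S() and deriv() evaluate (the nearest-neighbour
// kinetic piece is supplied by ScalarObs::sumphider, so its exact form is
// assumed here):
//
//   S[phi] = sum_x [ (m^2/2 + N_d) phi^2(x) + (lambda/24) phi^4(x) ] + kinetic term ,
//
//   dS/dphi(x) = (m^2 + 2 N_d) phi(x) + (lambda/6) phi^3(x)
//                - sum_mu [ phi(x+mu) + phi(x-mu) ] ,
//
// the force expression matching the Cshift sum built in deriv() above.
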
@@ -5,99 +5,158 @@
namespace Grid {
//namespace QCD {

template <class S>
class ScalarImplTypes {
public:
template <class S>
class ScalarImplTypes {
public:
typedef S Simd;

template <typename vtype>
using iImplField = iScalar<iScalar<iScalar<vtype> > >;

typedef iImplField<Simd> SiteField;

template <typename vtype> using iImplScalar= iScalar<iScalar<iScalar<vtype > > >;
typedef iImplScalar<Simd> ComplexField;
typedef SiteField SitePropagator;
typedef SiteField SiteComplex;

typedef Lattice<SiteField> Field;
typedef Field ComplexField;
typedef Field FermionField;
typedef Field PropagatorField;

static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){
gaussian(pRNG, P);
}

static inline Field projectForce(Field& P){return P;}

static inline void update_field(Field& P, Field& U, double ep){

static inline void update_field(Field& P, Field& U, double ep) {
U += P*ep;
}

static inline RealD FieldSquareNorm(Field& U){

static inline RealD FieldSquareNorm(Field& U) {
return (- sum(trace(U*U))/2.0);
}

static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
gaussian(pRNG, U);
}

static inline void TepidConfiguration(GridParallelRNG &pRNG, Field &U) {
gaussian(pRNG, U);
}

static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
U = 1.0;
}

static void MomentumSpacePropagator(Field &out, RealD m)
{
GridBase *grid = out._grid;
Field kmu(grid), one(grid);
const unsigned int nd = grid->_ndimension;
std::vector<int> &l = grid->_fdimensions;

one = Complex(1.0,0.0);
out = m*m;
for(int mu = 0; mu < nd; mu++)
{
Real twoPiL = M_PI*2./l[mu];

LatticeCoordinate(kmu,mu);
kmu = 2.*sin(.5*twoPiL*kmu);
out = out + kmu*kmu;
}
out = one/out;
}
|
||||
static void FreePropagator(const Field &in, Field &out,
|
||||
const Field &momKernel)
|
||||
{
|
||||
FFT fft((GridCartesian *)in._grid);
|
||||
Field inFT(in._grid);
|
||||
|
||||
fft.FFT_all_dim(inFT, in, FFT::forward);
|
||||
inFT = inFT*momKernel;
|
||||
fft.FFT_all_dim(out, inFT, FFT::backward);
|
||||
}
|
||||
|
||||
static void FreePropagator(const Field &in, Field &out, RealD m)
|
||||
{
|
||||
Field momKernel(in._grid);
|
||||
|
||||
MomentumSpacePropagator(momKernel, m);
|
||||
FreePropagator(in, out, momKernel);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
template <class S, unsigned int N>
|
||||
class ScalarMatrixImplTypes {
|
||||
class ScalarAdjMatrixImplTypes {
|
||||
public:
|
||||
typedef S Simd;
|
||||
typedef QCD::SU<N> Group;
|
||||
|
||||
template <typename vtype> using iImplField = iScalar<iScalar<iMatrix<vtype, N> > >;
|
||||
template <typename vtype>
|
||||
using iImplField = iScalar<iScalar<iMatrix<vtype, N>>>;
|
||||
template <typename vtype>
|
||||
using iImplComplex = iScalar<iScalar<iScalar<vtype>>>;
|
||||
|
||||
typedef iImplField<Simd> SiteField;
|
||||
typedef Lattice<SiteField> Field;
|
||||
typedef iImplField<Simd> SiteField;
|
||||
typedef SiteField SitePropagator;
|
||||
typedef iImplComplex<Simd> SiteComplex;
|
||||
|
||||
typedef Lattice<SiteField> Field;
|
||||
typedef Lattice<SiteComplex> ComplexField;
|
||||
typedef Field FermionField;
|
||||
typedef Field PropagatorField;
|
||||
|
||||
template <typename vtype> using iImplScalar= iScalar<iScalar<iScalar<vtype > > >;
|
||||
typedef iImplScalar<Simd> ComplexField;
|
||||
|
||||
|
||||
static inline void generate_momenta(Field& P, GridParallelRNG& pRNG){
|
||||
gaussian(pRNG, P);
|
||||
static inline void generate_momenta(Field& P, GridParallelRNG& pRNG) {
|
||||
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, P);
|
||||
}
|
||||
|
||||
static inline Field projectForce(Field& P){return P;}
|
||||
|
||||
static inline void update_field(Field& P, Field& U, double ep){
|
||||
|
||||
static inline Field projectForce(Field& P) {return P;}
|
||||
|
||||
static inline void update_field(Field& P, Field& U, double ep) {
|
||||
U += P*ep;
|
||||
}
|
||||
|
||||
static inline RealD FieldSquareNorm(Field& U){
|
||||
return (TensorRemove(- sum(trace(U*U))*0.5).real());
|
||||
|
||||
static inline RealD FieldSquareNorm(Field& U) {
|
||||
return (TensorRemove(sum(trace(U*U))).real());
|
||||
}
|
||||
|
||||
|
||||
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
|
||||
gaussian(pRNG, U);
|
||||
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U);
|
||||
}
|
||||
|
||||
|
||||
static inline void TepidConfiguration(GridParallelRNG &pRNG, Field &U) {
|
||||
gaussian(pRNG, U);
|
||||
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, U, 0.01);
|
||||
}
|
||||
|
||||
|
||||
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
|
||||
U = 1.0;
|
||||
U = zero;
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
typedef ScalarImplTypes<vReal> ScalarImplR;
|
||||
typedef ScalarImplTypes<vRealF> ScalarImplF;
|
||||
typedef ScalarImplTypes<vRealD> ScalarImplD;
|
||||
typedef ScalarImplTypes<vComplex> ScalarImplCR;
|
||||
typedef ScalarImplTypes<vComplexF> ScalarImplCF;
|
||||
typedef ScalarImplTypes<vComplexD> ScalarImplCD;
|
||||
|
||||
// Hardcoding here the size of the matrices
|
||||
typedef ScalarAdjMatrixImplTypes<vComplex, QCD::Nc> ScalarAdjImplR;
|
||||
typedef ScalarAdjMatrixImplTypes<vComplexF, QCD::Nc> ScalarAdjImplF;
|
||||
typedef ScalarAdjMatrixImplTypes<vComplexD, QCD::Nc> ScalarAdjImplD;
|
||||
|
||||
template <int Colours > using ScalarNxNAdjImplR = ScalarAdjMatrixImplTypes<vComplex, Colours >;
|
||||
template <int Colours > using ScalarNxNAdjImplF = ScalarAdjMatrixImplTypes<vComplexF, Colours >;
|
||||
template <int Colours > using ScalarNxNAdjImplD = ScalarAdjMatrixImplTypes<vComplexD, Colours >;
|
||||
|
||||
//}
|
||||
}
|
||||
//}
|
||||
}
|
||||
|
||||
#endif
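
The momentum-space kernel built above is G(k) = 1/(m^2 + sum_mu khat_mu^2)
with khat = 2 sin(k/2). A standalone sketch (plain C++, independent of
Grid) evaluating it for a single lattice momentum:

#include <cassert>
#include <cmath>
#include <vector>

double latticePropagator(double m, const std::vector<int> &n, int L) {
  double denom = m * m;
  for (int nmu : n) {
    double k    = 2. * M_PI * nmu / L;
    double khat = 2. * std::sin(0.5 * k);   // matches kmu = 2.*sin(.5*twoPiL*kmu)
    denom += khat * khat;
  }
  return 1. / denom;
}

int main() {
  // zero momentum reduces to 1/m^2
  assert(std::fabs(latticePropagator(0.5, {0, 0, 0, 0}, 8) - 4.0) < 1e-12);
  return 0;
}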

@@ -6,10 +6,7 @@

    Copyright (C) 2015

Author: Guido Cossu <guido.cossu@ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by

@@ -30,55 +27,122 @@ directory
*************************************************************************************/
/*  END LEGAL */

#ifndef SCALAR_INT_ACTION_H
#define SCALAR_INT_ACTION_H

// Note: this action can completely absorb the ScalarAction for real float fields
// use the scalarObjs to generalise the structure

namespace Grid {
  // FIXME drop the QCD namespace everywhere here

  template <class Impl, int Ndim >
  class ScalarInteractionAction : public QCD::Action<typename Impl::Field> {
  public:
    INHERIT_FIELD_TYPES(Impl);
  private:
    RealD mass_square;
    RealD lambda;

    typedef typename Field::vector_object vobj;
    typedef CartesianStencil<vobj,vobj> Stencil;

    SimpleCompressor<vobj> compressor;
    int npoint = 2*Ndim;
    std::vector<int> directions;//    = {0,1,2,3,0,1,2,3};  // forcing 4 dimensions
    std::vector<int> displacements;// = {1,1,1,1, -1,-1,-1,-1};

  public:

    ScalarInteractionAction(RealD ms, RealD l) : mass_square(ms), lambda(l), displacements(2*Ndim,0), directions(2*Ndim,0){
      for (int mu = 0 ; mu < Ndim; mu++){
        directions[mu]    = mu; directions[mu+Ndim]    = mu;
        displacements[mu] = 1;  displacements[mu+Ndim] = -1;
      }
    }

    virtual std::string LogParameters() {
      std::stringstream sstream;
      sstream << GridLogMessage << "[ScalarAction] lambda      : " << lambda      << std::endl;
      sstream << GridLogMessage << "[ScalarAction] mass_square : " << mass_square << std::endl;
      return sstream.str();
    }

    virtual std::string action_name() {return "ScalarAction";}

    virtual void refresh(const Field &U, GridParallelRNG &pRNG) {}

    virtual RealD S(const Field &p) {
      assert(p._grid->Nd() == Ndim);
      static Stencil phiStencil(p._grid, npoint, 0, directions, displacements);
      phiStencil.HaloExchange(p, compressor);
      Field action(p._grid), pshift(p._grid), phisquared(p._grid);
      phisquared = p*p;
      action = (2.0*Ndim + mass_square)*phisquared - lambda/24.*phisquared*phisquared;
      for (int mu = 0; mu < Ndim; mu++) {
        //  pshift = Cshift(p, mu, +1);  // not efficient, implement with stencils
        parallel_for (int i = 0; i < p._grid->oSites(); i++) {
          int permute_type;
          StencilEntry *SE;
          vobj temp2;
          const vobj *temp, *t_p;

          SE = phiStencil.GetEntry(permute_type, mu, i);
          t_p = &p._odata[i];
          if ( SE->_is_local ) {
            temp = &p._odata[SE->_offset];
            if ( SE->_permute ) {
              permute(temp2, *temp, permute_type);
              action._odata[i] -= temp2*(*t_p) + (*t_p)*temp2;
            } else {
              action._odata[i] -= (*temp)*(*t_p) + (*t_p)*(*temp);
            }
          } else {
            action._odata[i] -= phiStencil.CommBuf()[SE->_offset]*(*t_p) + (*t_p)*phiStencil.CommBuf()[SE->_offset];
          }
        }
        //  action -= pshift*p + p*pshift;
      }
      // NB the trace in the algebra is normalised to 1/2
      // minus sign coming from the antihermitian fields
      return -(TensorRemove(sum(trace(action)))).real();
    };

    virtual void deriv(const Field &p, Field &force) {
      assert(p._grid->Nd() == Ndim);
      force = (2.0*Ndim + mass_square)*p - lambda/12.*p*p*p;
      // move this outside
      static Stencil phiStencil(p._grid, npoint, 0, directions, displacements);
      phiStencil.HaloExchange(p, compressor);

      //for (int mu = 0; mu < QCD::Nd; mu++) force -= Cshift(p, mu, -1) + Cshift(p, mu, 1);
      for (int point = 0; point < npoint; point++) {
        parallel_for (int i = 0; i < p._grid->oSites(); i++) {
          const vobj *temp;
          vobj temp2;
          int permute_type;
          StencilEntry *SE;
          SE = phiStencil.GetEntry(permute_type, point, i);

          if ( SE->_is_local ) {
            temp = &p._odata[SE->_offset];
            if ( SE->_permute ) {
              permute(temp2, *temp, permute_type);
              force._odata[i] -= temp2;
            } else {
              force._odata[i] -= *temp;
            }
          } else {
            force._odata[i] -= phiStencil.CommBuf()[SE->_offset];
          }
        }
      }
    }
  };

}  // namespace Grid

#endif // SCALAR_INT_ACTION_H
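
The stencil loop in S() implements the hopping term that the commented-out
Cshift line would produce. A plain-C++ reference sketch (1d, periodic,
scalar field) of what each mu-iteration accumulates:

#include <vector>

// For scalar fields, action._odata[i] -= temp*t_p + t_p*temp collapses to
// -2 phi(x) phi(x+1): forward neighbour only, factor 2 from both orderings.
double hoppingTerm(const std::vector<double> &phi) {
  const int L = (int)phi.size();
  double acc = 0.;
  for (int x = 0; x < L; ++x)
    acc -= 2. * phi[x] * phi[(x + 1) % L];
  return acc;
}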

@@ -207,6 +207,12 @@ using GenericHMCRunnerTemplate = HMCWrapperTemplate<Implementation, Integrator,
typedef HMCWrapperTemplate<ScalarImplR, MinimumNorm2, ScalarFields>
    ScalarGenericHMCRunner;

typedef HMCWrapperTemplate<ScalarAdjImplR, MinimumNorm2, ScalarMatrixFields>
    ScalarAdjGenericHMCRunner;

template <int Colours>
using ScalarNxNAdjGenericHMCRunner = HMCWrapperTemplate < ScalarNxNAdjImplR<Colours>, MinimumNorm2, ScalarNxNMatrixFields<Colours> >;

}  // namespace QCD
}  // namespace Grid

@@ -76,7 +76,7 @@ struct HMCparameters: Serializable {

  template < class ReaderClass >
  void initialize(Reader<ReaderClass> &TheReader){
    std::cout << GridLogMessage << "Reading HMC\n";
    read(TheReader, "HMC", *this);
  }

@@ -165,7 +165,7 @@ class HMCResourceManager {
  // Grids
  //////////////////////////////////////////////////////////////

  void AddGrid(const std::string s, GridModule& M) {
    // Check for name clashes
    auto search = Grids.find(s);
    if (search != Grids.end()) {
@@ -174,14 +174,24 @@ class HMCResourceManager {
      exit(1);
    }
    Grids[s] = std::move(M);
    std::cout << GridLogMessage << "::::::::::::::::::::::::::::::::::::::::" << std::endl;
    std::cout << GridLogMessage << "HMCResourceManager:" << std::endl;
    std::cout << GridLogMessage << "Created grid set with name '" << s << "' and decomposition for the full cartesian " << std::endl;
    Grids[s].show_full_decomposition();
    std::cout << GridLogMessage << "::::::::::::::::::::::::::::::::::::::::" << std::endl;
  }

  // Add a named grid set, 4d shortcut
  void AddFourDimGrid(const std::string s) {
    GridFourDimModule<vComplex> Mod;
    AddGrid(s, Mod);
  }

  // Add a named grid set, 4d shortcut + tweak simd lanes
  void AddFourDimGrid(const std::string s, const std::vector<int> simd_decomposition) {
    GridFourDimModule<vComplex> Mod(simd_decomposition);
    AddGrid(s, Mod);
  }

  GridCartesian* GetCartesian(std::string s = "") {
@@ -253,6 +263,7 @@ class HMCResourceManager {
  template<class T, class... Types>
  void AddObservable(Types&&... Args){
    ObservablesList.push_back(std::unique_ptr<T>(new T(std::forward<Types>(Args)...)));
    ObservablesList.back()->print_parameters();
  }

  std::vector<HmcObservable<typename ImplementationPolicy::Field>* > GetObservables(){
@@ -297,4 +308,4 @@ private:
}
}

#endif // HMC_RESOURCE_MANAGER_H
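
Usage sketch for the additions above (the policy name and the {1,1,2,2}
lane split are placeholders, not fixed by this patch): AddObservable now
also prints the observable's parameters, and the second AddFourDimGrid
overload forwards a hand-picked SIMD decomposition.

HMCResourceManager<ImplementationPolicy> Resources;
Resources.AddFourDimGrid("gauge");                 // default SIMD layout
Resources.AddFourDimGrid("tuned", {1, 1, 2, 2});   // entries must multiply to vComplex::Nsimd()
Resources.AddObservable<PlaquetteMod<ImplementationPolicy>>();
GridCartesian *UGrid = Resources.GetCartesian("gauge");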

@@ -33,28 +33,29 @@ directory
namespace Grid {

// Resources
// Modules for grids

// Introduce another namespace HMCModules?

class GridModuleParameters: Serializable{
public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(GridModuleParameters,
                                  std::string, lattice,
                                  std::string, mpi);

  std::vector<int> getLattice() const {return strToVec<int>(lattice);}
  std::vector<int> getMpi() const    {return strToVec<int>(mpi);}

  void check() const {
    if (getLattice().size() != getMpi().size() ) {
      std::cout << GridLogError
                << "Error in GridModuleParameters: lattice and mpi dimensions "
                   "do not match"
                << std::endl;
      exit(1);
    }
  }

  template <class ReaderClass>
  GridModuleParameters(Reader<ReaderClass>& Reader, std::string n = "LatticeGrid"):name(n) {

@@ -75,51 +76,94 @@ private:
// Lower level class
class GridModule {
 public:
  GridCartesian* get_full() {
    std::cout << GridLogDebug << "Getting cartesian in module"<< std::endl;
    return grid_.get(); }
  GridRedBlackCartesian* get_rb() {
    std::cout << GridLogDebug << "Getting rb-cartesian in module"<< std::endl;
    return rbgrid_.get(); }

  void set_full(GridCartesian* grid) { grid_.reset(grid); }
  void set_rb(GridRedBlackCartesian* rbgrid) { rbgrid_.reset(rbgrid); }
  void show_full_decomposition(){ grid_->show_decomposition(); }
  void show_rb_decomposition(){ rbgrid_->show_decomposition(); }

 protected:
  std::unique_ptr<GridCartesian> grid_;
  std::unique_ptr<GridRedBlackCartesian> rbgrid_;

};

////////////////////////////////////
// Classes for the user
////////////////////////////////////
// Note: the space time grid should be out of the QCD namespace
template <class vector_type>
class GridFourDimModule : public GridModule
{
 public:
  GridFourDimModule()
  {
    using namespace QCD;
    set_full(SpaceTimeGrid::makeFourDimGrid(
        GridDefaultLatt(),
        GridDefaultSimd(4, vector_type::Nsimd()),
        GridDefaultMpi()));
    set_rb(SpaceTimeGrid::makeFourDimRedBlackGrid(grid_.get()));
  }

  GridFourDimModule(const std::vector<int> tweak_simd)
  {
    using namespace QCD;
    if (tweak_simd.size() != 4)
    {
      std::cout << GridLogError
                << "Error in GridFourDimModule: SIMD size different from 4"
                << std::endl;
      exit(1);
    }

    // Checks that the product agrees with the expectation
    int simd_sum = 1;
    for (auto &n : tweak_simd)
      simd_sum *= n;
    std::cout << GridLogDebug << "TweakSIMD: " << tweak_simd << " Sum: " << simd_sum << std::endl;

    if (simd_sum == vector_type::Nsimd())
    {
      set_full(SpaceTimeGrid::makeFourDimGrid(
          GridDefaultLatt(),
          tweak_simd,
          GridDefaultMpi()));
      set_rb(SpaceTimeGrid::makeFourDimRedBlackGrid(grid_.get()));
    }
    else
    {
      std::cout << GridLogError
                << "Error in GridFourDimModule: SIMD lanes must sum to "
                << vector_type::Nsimd()
                << std::endl;
    }
  }

  GridFourDimModule(const GridModuleParameters Params)
  {
    using namespace QCD;
    Params.check();
    std::vector<int> lattice_v = Params.getLattice();
    std::vector<int> mpi_v = Params.getMpi();
    if (lattice_v.size() == 4)
    {
      set_full(SpaceTimeGrid::makeFourDimGrid(
          lattice_v,
          GridDefaultSimd(4, vector_type::Nsimd()),
          mpi_v));
      set_rb(SpaceTimeGrid::makeFourDimRedBlackGrid(grid_.get()));
    }
    else
    {
      std::cout << GridLogError
                << "Error in GridFourDimModule: lattice dimension different from 4"
                << std::endl;
      exit(1);
    }
  }
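
Sketch of the new SIMD-tweaking constructor (assuming vComplex::Nsimd()
is 4 on the build target; adjust the lane vector to the actual width):

GridFourDimModule<vComplex> DefaultMod;              // GridDefaultSimd layout
GridFourDimModule<vComplex> TunedMod({1, 1, 2, 2});  // product must equal vComplex::Nsimd()
GridCartesian *grid = TunedMod.get_full();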

@@ -72,7 +72,7 @@ protected:
  }

  virtual unsigned int Ls(){
    return 0;
  }

  virtual void print_parameters(){
@@ -97,7 +97,7 @@ class HMC_FermionOperatorModuleFactory
    : public Factory < FermionOperatorModuleBase<QCD::FermionOperator<FermionImpl> > ,  Reader<ReaderClass> > {
 public:
  // use SINGLETON FUNCTOR MACRO HERE
  typedef Reader<ReaderClass> TheReader;

  HMC_FermionOperatorModuleFactory(const HMC_FermionOperatorModuleFactory& e) = delete;
  void operator=(const HMC_FermionOperatorModuleFactory& e) = delete;
@@ -122,7 +122,7 @@ namespace QCD{
// Modules
class WilsonFermionParameters : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(WilsonFermionParameters,
                                  RealD, mass);
};

@@ -144,7 +144,7 @@ class WilsonFermionModule: public FermionOperatorModule<WilsonFermion, FermionIm

class MobiusFermionParameters : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(MobiusFermionParameters,
                                  RealD, mass,
                                  RealD, M5,
                                  RealD, b,
@@ -166,7 +166,7 @@ class MobiusFermionModule: public FermionOperatorModule<MobiusFermion, FermionIm
      auto GridMod = this->GridRefs[0];
      auto GridMod5d = this->GridRefs[1];
      typename FermionImpl::GaugeField U(GridMod->get_full());
      this->FOPtr.reset(new MobiusFermion<FermionImpl>( U, *(GridMod->get_full()), *(GridMod->get_rb()),
                                                        *(GridMod5d->get_full()), *(GridMod5d->get_rb()),
                                                        this->Par_.mass, this->Par_.M5, this->Par_.b, this->Par_.c));
    }
@@ -175,7 +175,7 @@ class MobiusFermionModule: public FermionOperatorModule<MobiusFermion, FermionIm

class DomainWallFermionParameters : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(DomainWallFermionParameters,
                                  RealD, mass,
                                  RealD, M5,
                                  unsigned int, Ls);
@@ -195,16 +195,49 @@ class DomainWallFermionModule: public FermionOperatorModule<DomainWallFermion, F
      auto GridMod = this->GridRefs[0];
      auto GridMod5d = this->GridRefs[1];
      typename FermionImpl::GaugeField U(GridMod->get_full());
      this->FOPtr.reset(new DomainWallFermion<FermionImpl>( U, *(GridMod->get_full()), *(GridMod->get_rb()),
                                                            *(GridMod5d->get_full()), *(GridMod5d->get_rb()),
                                                            this->Par_.mass, this->Par_.M5));
    }
};

class DomainWallEOFAFermionParameters : Serializable {
 public:
  GRID_SERIALIZABLE_CLASS_MEMBERS(DomainWallEOFAFermionParameters,
                                  RealD, mq1,
                                  RealD, mq2,
                                  RealD, mq3,
                                  RealD, shift,
                                  int, pm,
                                  RealD, M5,
                                  unsigned int, Ls);
};

template <class FermionImpl >
class DomainWallEOFAFermionModule: public FermionOperatorModule<DomainWallEOFAFermion, FermionImpl, DomainWallEOFAFermionParameters> {
  typedef FermionOperatorModule<DomainWallEOFAFermion, FermionImpl, DomainWallEOFAFermionParameters> FermBase;
  using FermBase::FermBase; // for constructors

  virtual unsigned int Ls(){
    return this->Par_.Ls;
  }

  // acquire resource
  virtual void initialize(){
    auto GridMod = this->GridRefs[0];
    auto GridMod5d = this->GridRefs[1];
    typename FermionImpl::GaugeField U(GridMod->get_full());
    this->FOPtr.reset(new DomainWallEOFAFermion<FermionImpl>( U, *(GridMod->get_full()), *(GridMod->get_rb()),
                                                              *(GridMod5d->get_full()), *(GridMod5d->get_rb()),
                                                              this->Par_.mq1, this->Par_.mq2, this->Par_.mq3,
                                                              this->Par_.shift, this->Par_.pm, this->Par_.M5));
  }
};

} // QCD
} // Grid

#endif //FERMIONOPERATOR_MODULES_H
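
A hedged sketch of filling the new EOFA parameter block directly rather
than through a Reader (member names follow the serializable list above;
the numerical values are placeholders):

DomainWallEOFAFermionParameters P;
P.mq1 = 0.01; P.mq2 = 0.5; P.mq3 = 0.5;
P.shift = 0.0; P.pm = 1;
P.M5 = 1.8; P.Ls = 16;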

@@ -84,8 +84,6 @@ class PlaquetteMod: public ObservableModule<PlaquetteLogger<Impl>, NoParameters>
  typedef ObservableModule<PlaquetteLogger<Impl>, NoParameters> ObsBase;
  using ObsBase::ObsBase; // for constructors

  // acquire resource
  virtual void initialize(){
    this->ObservablePtr.reset(new PlaquetteLogger<Impl>());
@@ -94,23 +92,22 @@ class PlaquetteMod: public ObservableModule<PlaquetteLogger<Impl>, NoParameters>
  PlaquetteMod(): ObsBase(NoParameters()){}
};

template < class Impl >
class TopologicalChargeMod: public ObservableModule<TopologicalCharge<Impl>, TopologyObsParameters>{
  typedef ObservableModule<TopologicalCharge<Impl>, TopologyObsParameters> ObsBase;
  using ObsBase::ObsBase; // for constructors

  // acquire resource
  virtual void initialize(){
    this->ObservablePtr.reset(new TopologicalCharge<Impl>(this->Par_));
  }
 public:
  TopologicalChargeMod(TopologyObsParameters Par): ObsBase(Par){}
  TopologicalChargeMod(): ObsBase(){}
};

}// QCD temporarily here

@@ -33,9 +33,45 @@ directory
namespace Grid {
namespace QCD {

struct TopologySmearingParameters : Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(TopologySmearingParameters,
                                  int, steps,
                                  float, step_size,
                                  int, meas_interval,
                                  float, maxTau);

  TopologySmearingParameters(int s = 0, float ss = 0.0f, int mi = 0, float mT = 0.0f):
    steps(s), step_size(ss), meas_interval(mi), maxTau(mT){}

  template < class ReaderClass >
  TopologySmearingParameters(Reader<ReaderClass>& Reader){
    read(Reader, "Smearing", *this);
  }
};

struct TopologyObsParameters : Serializable {
  GRID_SERIALIZABLE_CLASS_MEMBERS(TopologyObsParameters,
                                  int, interval,
                                  bool, do_smearing,
                                  TopologySmearingParameters, Smearing);

  TopologyObsParameters(int interval = 1, bool smearing = false):
    interval(interval), Smearing(smearing){}

  template <class ReaderClass >
  TopologyObsParameters(Reader<ReaderClass>& Reader){
    read(Reader, "TopologyMeasurement", *this);
  }
};

// this is only defined for a gauge theory
template <class Impl>
class TopologicalCharge : public HmcObservable<typename Impl::Field> {
  TopologyObsParameters Pars;

 public:
  // here forces the Impl to be of gauge fields
  // if not the compiler will complain
@@ -44,20 +80,39 @@ class TopologicalCharge : public HmcObservable<typename Impl::Field> {
  // necessary for HmcObservable compatibility
  typedef typename Impl::Field Field;

  TopologicalCharge(int interval = 1, bool do_smearing = false):
    Pars(interval, do_smearing){}

  TopologicalCharge(TopologyObsParameters P):Pars(P){
    std::cout << GridLogDebug << "Creating TopologicalCharge " << std::endl;
  }

  void TrajectoryComplete(int traj,
                          Field &U,
                          GridSerialRNG &sRNG,
                          GridParallelRNG &pRNG) {

    if (traj%Pars.interval == 0){
      // Smearing
      Field Usmear = U;
      int def_prec = std::cout.precision();

      if (Pars.do_smearing){
        // using wilson flow by default here
        WilsonFlow<PeriodicGimplR> WF(Pars.Smearing.steps, Pars.Smearing.step_size, Pars.Smearing.meas_interval);
        WF.smear_adaptive(Usmear, U, Pars.Smearing.maxTau);
        Real T0 = WF.energyDensityPlaquette(Usmear);
        std::cout << GridLogMessage << std::setprecision(std::numeric_limits<Real>::digits10 + 1)
                  << "T0 : [ " << traj << " ] "<< T0 << std::endl;
      }

      Real q = WilsonLoops<Impl>::TopologicalCharge(Usmear);
      std::cout << GridLogMessage
                << std::setprecision(std::numeric_limits<Real>::digits10 + 1)
                << "Topological Charge: [ " << traj << " ] "<< q << std::endl;

      std::cout.precision(def_prec);
    }
  }

};
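
Usage sketch (the "TheHMC" wrapper name is an assumption borrowed from
Grid's HMC tests, not part of this hunk): measure Q every 5 trajectories
on a Wilson-flow-smeared field.

TopologySmearingParameters TopSmearPar(20 /*steps*/, 0.01f /*step_size*/,
                                       10 /*meas_interval*/, 2.0f /*maxTau*/);
TopologyObsParameters TopPar(5 /*interval*/, true /*do_smearing*/);
TopPar.Smearing = TopSmearPar;
TheHMC.Resources.AddObservable<TopologicalChargeMod<HMCWrapper::ImplPolicy>>(TopPar);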

@@ -62,7 +62,10 @@ class Representations {

typedef Representations<FundamentalRepresentation> NoHirep;
typedef Representations<EmptyRep<typename ScalarImplR::Field> > ScalarFields;
typedef Representations<EmptyRep<typename ScalarAdjImplR::Field> > ScalarMatrixFields;

template < int Colours>
using ScalarNxNMatrixFields = Representations<EmptyRep<typename ScalarNxNAdjImplR<Colours>::Field> >;

// Helper classes to access the elements
// Strips the first N parameters from the tuple

@@ -108,7 +108,7 @@ void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, Real
    if (maxTau - taus < epsilon){
        epsilon = maxTau-taus;
    }
    //std::cout << GridLogMessage << "Integration epsilon : " << epsilon << std::endl;
    GaugeField Z(U._grid);
    GaugeField Zprime(U._grid);
    GaugeField tmp(U._grid), Uprime(U._grid);
@@ -138,10 +138,10 @@ void WilsonFlow<Gimpl>::evolve_step_adaptive(typename Gimpl::GaugeField &U, Real
    // adjust integration step

    taus += epsilon;
    //std::cout << GridLogMessage << "Adjusting integration step with distance: " << diff << std::endl;

    epsilon = epsilon*0.95*std::pow(1e-4/diff,1./3.);
    //std::cout << GridLogMessage << "New epsilon : " << epsilon << std::endl;

}

@@ -166,7 +166,6 @@ void WilsonFlow<Gimpl>::smear(GaugeField& out, const GaugeField& in) const {
    out = in;
    for (unsigned int step = 1; step <= Nstep; step++) {
        auto start = std::chrono::high_resolution_clock::now();
        evolve_step(out);
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> diff = end - start;
@@ -191,7 +190,7 @@ void WilsonFlow<Gimpl>::smear_adaptive(GaugeField& out, const GaugeField& in, Re
    unsigned int step = 0;
    do{
        step++;
        //std::cout << GridLogMessage << "Evolution time :"<< taus << std::endl;
        evolve_step_adaptive(out, maxTau);
        std::cout << GridLogMessage << "[WilsonFlow] Energy density (plaq) : "
                  << step << " "

@@ -26,12 +26,14 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
/*  END LEGAL */
//#include <Grid/Grid.h>

#ifndef GRID_QCD_GAUGE_FIX_H
#define GRID_QCD_GAUGE_FIX_H
namespace Grid {
namespace QCD {

template <class Gimpl>
class FourierAcceleratedGaugeFixer : public Gimpl {
 public:
  INHERIT_GIMPL_TYPES(Gimpl);

  typedef typename Gimpl::GaugeLinkField GaugeMat;
@@ -186,3 +188,6 @@ class FourierAcceleratedGaugeFixer : public Gimpl {
  }
};

}
}
#endif

@@ -716,8 +716,7 @@ template<typename GaugeField,typename GaugeMat>

  for (int a = 0; a < AdjointDimension; a++) {
    generator(a, Ta);
    // 2.0 for the normalization of the trace in the fundamental rep
    pokeColour(h_out, - 2.0 * (trace(timesI(Ta) * in)) * scale, a);
  }
}

@@ -86,7 +86,7 @@ namespace Grid {
                              or element<T>::is_number;
  };

  // Vector flattening utility class ////////////////////////////////////////////
  // Class to flatten a multidimensional std::vector
  template <typename V>
  class Flatten

@@ -65,10 +65,12 @@ Hdf5Reader::Hdf5Reader(const std::string &fileName)
                 Hdf5Type<unsigned int>::type());
}

bool Hdf5Reader::push(const std::string &s)
{
  group_ = group_.openGroup(s);
  path_.push_back(s);

  return true;
}

void Hdf5Reader::pop(void)

@@ -54,7 +54,7 @@ namespace Grid
  public:
    Hdf5Reader(const std::string &fileName);
    virtual ~Hdf5Reader(void) = default;
    bool push(const std::string &s);
    void pop(void);
    template <typename U>
    void readDefault(const std::string &s, U &output);
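
Note on the signature change: push() now returns bool, presumably to match
the success-reporting push of the other Reader backends; this
implementation always returns true (openGroup throws if the group is
missing). The call pattern is unchanged (the file name below is a
placeholder):

Hdf5Reader reader("run.h5");
reader.push("parameters");
//  ... read entries ...
reader.pop();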

@@ -42,6 +42,7 @@ JSONWriter::~JSONWriter(void)

  // write prettified JSON to file
  std::ofstream os(fileName_);
  //std::cout << "JSONWriter::~JSONWriter" << std::endl;
  os << std::setw(2) << json::parse(ss_.str()) << std::endl;
}

@@ -56,6 +57,7 @@ void JSONWriter::push(const string &s)

void JSONWriter::pop(void)
{
  //std::cout << "JSONWriter::pop" << std::endl;
  delete_comma();
  ss_ << "},";
}
@@ -67,20 +69,22 @@ void JSONWriter::delete_comma()
  ss_.str(dlast);
}

// here we are hitting a g++ bug (Bug 56480)
// compiles fine with clang
// have to wrap in the Grid namespace
// annoying, but necessary for TravisCI
namespace Grid
{
  template<>
  void JSONWriter::writeDefault(const std::string &s, const std::string &x)
  {
    //std::cout << "JSONWriter::writeDefault(string) : " << s << std::endl;
    std::ostringstream os;
    os << std::boolalpha << x;
    if (s.size())
      ss_ << "\""<< s << "\" : \"" << os.str() << "\" ," ;
    else
      ss_ << os.str() << " ," ;
  }
}// namespace Grid

@@ -138,6 +142,7 @@ void JSONReader::pop(void)

bool JSONReader::nextElement(const std::string &s)
{
  // Work in progress
  // JSON dictionaries do not support multiple names
  // Same name objects must be packed in vectors
  ++it_;

@@ -58,10 +58,15 @@ namespace Grid
      void writeDefault(const std::string &s, const std::complex<U> &x);
      template <typename U>
      void writeDefault(const std::string &s, const std::vector<U> &x);
      template <typename U, typename P>
      void writeDefault(const std::string &s, const std::pair<U,P> &x);

      template<std::size_t N>
      void writeDefault(const std::string &s, const char(&x)[N]);

      void writeDefault(const std::string &s, const std::string &x);

    private:
      void delete_comma();
      std::string fileName_;
@@ -82,6 +87,8 @@ namespace Grid
      void readDefault(const std::string &s, std::complex<U> &output);
      template <typename U>
      void readDefault(const std::string &s, std::vector<U> &output);
      template <typename U, typename P>
      void readDefault(const std::string &s, std::pair<U,P> &output);
    private:
      json    jobject_; // main object
      json    jcur_;    // current json object
@@ -106,7 +113,7 @@ namespace Grid
  template <typename U>
  void JSONWriter::writeDefault(const std::string &s, const U &x)
  {
    //std::cout << "JSONWriter::writeDefault(U) : " << s << " " << x << std::endl;
    std::ostringstream os;
    os << std::boolalpha << x;
    if (s.size())
@@ -118,7 +125,7 @@ namespace Grid
  template <typename U>
  void JSONWriter::writeDefault(const std::string &s, const std::complex<U> &x)
  {
    //std::cout << "JSONWriter::writeDefault(complex) : " << s << " " << x << std::endl;
    std::ostringstream os;
    os << "["<< std::boolalpha << x.real() << ", " << x.imag() << "]";
    if (s.size())
@@ -127,10 +134,22 @@ namespace Grid
      ss_ << os.str() << " ," ;
  }

  template <typename U, typename P>
  void JSONWriter::writeDefault(const std::string &s, const std::pair<U,P> &x)
  {
    //std::cout << "JSONWriter::writeDefault(pair) : " << s << " " << x << std::endl;
    std::ostringstream os;
    os << "["<< std::boolalpha << "\""<< x.first << "\" , \"" << x.second << "\" ]";
    if (s.size())
      ss_ << "\""<< s << "\" : " << os.str() << " ," ;
    else
      ss_ << os.str() << " ," ;
  }

  template <typename U>
  void JSONWriter::writeDefault(const std::string &s, const std::vector<U> &x)
  {
    //std::cout << "JSONWriter::writeDefault(vec U) : " << s << std::endl;

    if (s.size())
      ss_ << " \""<<s<<"\" : [";
@@ -146,12 +165,12 @@ namespace Grid

  template<std::size_t N>
  void JSONWriter::writeDefault(const std::string &s, const char(&x)[N]){
    //std::cout << "JSONWriter::writeDefault(char U) : " << s << " " << x << std::endl;

    if (s.size())
      ss_ << "\""<< s << "\" : \"" << x << "\" ," ;
    else
      ss_ << "\"" << x << "\" ," ;
  }

  // Reader template implementation ////////////////////////////////////////////
@@ -173,11 +192,35 @@ namespace Grid

  }

  template <typename U, typename P>
  void JSONReader::readDefault(const std::string &s, std::pair<U,P> &output)
  {
    U first;
    P second;
    json j;
    if (s.size()){
      //std::cout << "JSONReader::readDefault(pair) : " << s << " | "<< jcur_[s] << std::endl;
      j = jcur_[s];
    } else {
      j = jcur_;
    }
    json::iterator it = j.begin();
    jcur_ = *it;
    read("", first);
    it++;
    jcur_ = *it;
    read("", second);
    output = std::pair<U,P>(first,second);
  }
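
A round-trip sketch for the new pair support, using Grid's generic
read/write helpers (the file name is illustrative):

{
  JSONWriter writer("pair.json");          // flushed to disk on destruction
  std::pair<int, std::string> in{42, "forty-two"};
  write(writer, "myPair", in);             // serialised as [ "42" , "forty-two" ]
}
JSONReader reader("pair.json");
std::pair<int, std::string> out;
read(reader, "myPair", out);               // out == {42, "forty-two"}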

  template <typename U>
  void JSONReader::readDefault(const std::string &s, std::complex<U> &output)
  {
    U tmp1, tmp2;
    //std::cout << "JSONReader::readDefault(complex U) : " << s << " : "<< jcur_ << std::endl;
    json j = jcur_;
    json::iterator it = j.begin();
    jcur_ = *it;

@@ -701,9 +701,28 @@ namespace Optimization {
  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, __m256i>::operator()(__m256i in){
    __m128i ret;
#if defined (AVX2)
    // AVX2 horizontal adds within upper and lower halves of register; use
    // SSE to add upper and lower halves for result.
    __m256i v1, v2;
    __m128i u1, u2;
    v1  = _mm256_hadd_epi32(in, in);
    v2  = _mm256_hadd_epi32(v1, v1);
    u1  = _mm256_castsi256_si128(v2);      // lower half
    u2  = _mm256_extracti128_si256(v2, 1); // upper half
    ret = _mm_add_epi32(u1, u2);
#else
    // No AVX horizontal add; extract upper and lower halves of register & use
    // SSE intrinsics.
    __m128i u1, u2, u3;
    u1  = _mm256_extractf128_si256(in, 0); // lower half
    u2  = _mm256_extractf128_si256(in, 1); // upper half
    u3  = _mm_add_epi32(u1, u2);
    u1  = _mm_hadd_epi32(u3, u3);
    ret = _mm_hadd_epi32(u1, u1);
#endif
    return _mm_cvtsi128_si32(ret);
  }

}
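
A standalone check sketch for the reduction above (assumes an AVX2 build
with the Grid SIMD headers available on the include path):

#include <Grid/Grid.h>   // pulls in the Optimization layer (path per the Grid build)
#include <cassert>

int main() {
  alignas(32) int v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  __m256i in = _mm256_load_si256(reinterpret_cast<const __m256i *>(v));
  int expect = 0;
  for (int i = 0; i < 8; ++i) expect += v[i];   // 36
  assert(Grid::Optimization::Reduce<Grid::Integer, __m256i>()(in) == expect);
  return 0;
}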

@@ -543,6 +543,24 @@ namespace Optimization {
    u512d conv; conv.v = v1;
    return conv.f[0];
  }

  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
    // No full vector reduce, use AVX to add upper and lower halves of register
    // and perform AVX reduction.
    __m256i v1, v2, v3;
    __m128i u1, u2, ret;
    v1  = _mm512_castsi512_si256(in);       // lower half
    v2  = _mm512_extracti32x8_epi32(in, 1); // upper half
    v3  = _mm256_add_epi32(v1, v2);
    v1  = _mm256_hadd_epi32(v3, v3);
    v2  = _mm256_hadd_epi32(v1, v1);
    u1  = _mm256_castsi256_si128(v2);       // lower half
    u2  = _mm256_extracti128_si256(v2, 1);  // upper half
    ret = _mm_add_epi32(u1, u2);
    return _mm_cvtsi128_si32(ret);
  }
#else
  //Complex float Reduce
  template<>
@@ -570,9 +588,7 @@ namespace Optimization {
  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
    return _mm512_reduce_add_epi32(in);
  }
#endif

@@ -401,9 +401,7 @@ namespace Optimization {
  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, __m512i>::operator()(__m512i in){
    return _mm512_reduce_add_epi32(in);
  }

@@ -1,13 +1,14 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/simd/Grid_neon.h

    Copyright (C) 2015

Author: Nils Meyer <nils.meyer@ur.de>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -26,19 +27,25 @@ Author: neo <cossu@post.kek.jp>
    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

/*
  ARMv8 NEON intrinsics layer by

  Nils Meyer <nils.meyer@ur.de>,
  University of Regensburg, Germany
  SFB/TRR55
*/

#ifndef GEN_SIMD_WIDTH
#define GEN_SIMD_WIDTH 16u
#endif

#include "Grid_generic_types.h"
#include <arm_neon.h>

// ARMv8 supports double precision

namespace Grid {
namespace Optimization {

  template<class vtype>
@@ -46,16 +53,20 @@ namespace Optimization {
    float32x4_t f;
    vtype v;
  };
  union u128f {
    float32x4_t v;
    float f[4];
  };
  union u128d {
    float64x2_t v;
    double f[2];
  };
  // half precision
  union u128h {
    float16x8_t v;
    uint16_t f[8];
  };

  struct Vsplat{
    //Complex float
    inline float32x4_t operator()(float a, float b){
@@ -64,31 +75,31 @@ namespace Optimization {
    }
    // Real float
    inline float32x4_t operator()(float a){
      return vdupq_n_f32(a);
    }
    //Complex double
    inline float64x2_t operator()(double a, double b){
      double tmp[2]={a,b};
      return vld1q_f64(tmp);
    }
    //Real double
    inline float64x2_t operator()(double a){
      return vdupq_n_f64(a);
    }
    //Integer
    inline uint32x4_t operator()(Integer a){
      return vdupq_n_u32(a);
    }
  };

  struct Vstore{
    //Float
    inline void operator()(float32x4_t a, float* F){
      vst1q_f32(F, a);
    }
    //Double
    inline void operator()(float64x2_t a, double* D){
      vst1q_f64(D, a);
    }
    //Integer
    inline void operator()(uint32x4_t a, Integer* I){
@@ -97,54 +108,53 @@ namespace Optimization {

  };

  struct Vstream{ // N:equivalents to _mm_stream_p* in NEON?
    //Float // N:generic
    inline void operator()(float * a, float32x4_t b){
      memcpy(a,&b,4*sizeof(float));
    }
    //Double // N:generic
    inline void operator()(double * a, float64x2_t b){
      memcpy(a,&b,2*sizeof(double));
    }

  };

  // Nils: Vset untested; not used currently in Grid at all;
  // git commit 4a8c4ccfba1d05159348d21a9698028ea847e77b
  struct Vset{
    // Complex float
    inline float32x4_t operator()(Grid::ComplexF *a){
      float tmp[4]={a[1].imag(),a[1].real(),a[0].imag(),a[0].real()};
      return vld1q_f32(tmp);
    }
    // Complex double
    inline float64x2_t operator()(Grid::ComplexD *a){
      double tmp[2]={a[0].imag(),a[0].real()};
      return vld1q_f64(tmp);
    }
    // Real float
    inline float32x4_t operator()(float *a){
      float tmp[4]={a[3],a[2],a[1],a[0]};
      return vld1q_f32(tmp);
    }
    // Real double
    inline float64x2_t operator()(double *a){
      double tmp[2]={a[1],a[0]};
      return vld1q_f64(tmp);
    }
    // Integer
    inline uint32x4_t operator()(Integer *a){
      return vld1q_dup_u32(a);
    }

  };

  template <typename Out_type, typename In_type>
  struct Reduce{
    //Need templated class to overload output type
    //General form must generate error if compiled
    inline Out_type operator()(In_type in){
      printf("Error, using wrong Reduce function\n");
      exit(1);
      return 0;
@@ -184,26 +194,98 @@ namespace Optimization {
    }
  };

  struct MultRealPart{
    inline float32x4_t operator()(float32x4_t a, float32x4_t b){
      float32x4_t re = vtrn1q_f32(a, a);
      return vmulq_f32(re, b);
    }
    inline float64x2_t operator()(float64x2_t a, float64x2_t b){
      float64x2_t re = vzip1q_f64(a, a);
      return vmulq_f64(re, b);
    }
  };

  struct MaddRealPart{
    inline float32x4_t operator()(float32x4_t a, float32x4_t b, float32x4_t c){
      float32x4_t re = vtrn1q_f32(a, a);
      return vfmaq_f32(c, re, b);
    }
    inline float64x2_t operator()(float64x2_t a, float64x2_t b, float64x2_t c){
      float64x2_t re = vzip1q_f64(a, a);
      return vfmaq_f64(c, re, b);
    }
  };

  struct Div{
    // Real float
    inline float32x4_t operator()(float32x4_t a, float32x4_t b){
      return vdivq_f32(a, b);
    }
    // Real double
    inline float64x2_t operator()(float64x2_t a, float64x2_t b){
      return vdivq_f64(a, b);
    }
  };

  struct MultComplex{
    // Complex float
    inline float32x4_t operator()(float32x4_t a, float32x4_t b){

      float32x4_t r0, r1, r2, r3, r4;

      // a = ar ai Ar Ai
      // b = br bi Br Bi
      // collect real/imag part, negate bi and Bi
      r0 = vtrn1q_f32(b, b);    // br br Br Br
      r1 = vnegq_f32(b);        // -br -bi -Br -Bi
      r2 = vtrn2q_f32(b, r1);   // bi -bi Bi -Bi

      // the fun part
      r3 = vmulq_f32(r2, a);    // bi*ar -bi*ai ...
      r4 = vrev64q_f32(r3);     // -bi*ai bi*ar ...

      // fma(a,b,c) = a+b*c
      return vfmaq_f32(r4, r0, a); // ar*br-ai*bi ai*br+ar*bi ...

      // no fma, use mul and add
      // float32x4_t r5;
      // r5 = vmulq_f32(r0, a);
      // return vaddq_f32(r4, r5);
    }
    // Complex double
    inline float64x2_t operator()(float64x2_t a, float64x2_t b){

      float64x2_t r0, r1, r2, r3, r4;

      // b = br bi
      // collect real/imag part, negate bi
      r0 = vtrn1q_f64(b, b);    // br br
      r1 = vnegq_f64(b);        // -br -bi
      r2 = vtrn2q_f64(b, r1);   // bi -bi

      // the fun part
      r3 = vmulq_f64(r2, a);    // bi*ar -bi*ai
      r4 = vextq_f64(r3,r3,1);  // -bi*ai bi*ar

      // fma(a,b,c) = a+b*c
      return vfmaq_f64(r4, r0, a); // ar*br-ai*bi ai*br+ar*bi

      // no fma, use mul and add
      // float64x2_t r5;
      // r5 = vmulq_f64(r0, a);
      // return vaddq_f64(r4, r5);
    }
  };
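
A host-side sketch of the complex-multiply lane recipe above, checked
against std::complex (plain C++, no NEON required):

#include <cassert>
#include <cmath>
#include <complex>

int main() {
  double ar = 0.3, ai = -1.2, br = 2.5, bi = 0.7;
  double r3_0 = bi * ar, r3_1 = -bi * ai;  // r3 = (bi -bi) * (ar ai)
  double r4_0 = r3_1,    r4_1 = r3_0;      // r4 = rev64(r3)
  double re = r4_0 + br * ar;              // fma(r4, r0, a), lane 0
  double im = r4_1 + br * ai;              // fma(r4, r0, a), lane 1
  std::complex<double> ref = std::complex<double>(ar, ai) * std::complex<double>(br, bi);
  assert(std::abs(re - ref.real()) < 1e-12);
  assert(std::abs(im - ref.imag()) < 1e-12);
  return 0;
}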

  struct Mult{
    // Real float
    inline float32x4_t mac(float32x4_t a, float32x4_t b, float32x4_t c){
      //return vaddq_f32(vmulq_f32(b,c),a);
      return vfmaq_f32(a, b, c);
    }
    inline float64x2_t mac(float64x2_t a, float64x2_t b, float64x2_t c){
      //return vaddq_f64(vmulq_f64(b,c),a);
      return vfmaq_f64(a, b, c);
    }
    inline float32x4_t operator()(float32x4_t a, float32x4_t b){
      return vmulq_f32(a,b);
@@ -221,89 +303,268 @@ namespace Optimization {
  struct Conj{
    // Complex single
    inline float32x4_t operator()(float32x4_t in){
      // ar ai br bi -> ar -ai br -bi
      float32x4_t r0, r1;
      r0 = vnegq_f32(in);        // -ar -ai -br -bi
      r1 = vrev64q_f32(r0);      // -ai -ar -bi -br
      return vtrn1q_f32(in, r1); // ar -ai br -bi
    }
    // Complex double
    inline float64x2_t operator()(float64x2_t in){

      float64x2_t r0, r1;
      r0 = vextq_f64(in, in, 1);   // ai ar
      r1 = vnegq_f64(r0);          // -ai -ar
      return vextq_f64(r0, r1, 1); // ar -ai
    }
    // do not define for integer input
  };

  struct TimesMinusI{
    //Complex single
    inline float32x4_t operator()(float32x4_t in, float32x4_t ret){
      // ar ai br bi -> ai -ar bi -br
      float32x4_t r0, r1;
      r0 = vnegq_f32(in);        // -ar -ai -br -bi
      r1 = vrev64q_f32(in);      // ai ar bi br
      return vtrn1q_f32(r1, r0); // ai -ar bi -br
    }
    //Complex double
    inline float64x2_t operator()(float64x2_t in, float64x2_t ret){
      // a ib -> b -ia
      float64x2_t tmp;
      tmp = vnegq_f64(in);
      return vextq_f64(in, tmp, 1);
    }
  };

  struct TimesI{
    //Complex single
    inline float32x4_t operator()(float32x4_t in, float32x4_t ret){
      // ar ai br bi -> -ai ar -bi br
      float32x4_t r0, r1;
      r0 = vnegq_f32(in);        // -ar -ai -br -bi
      r1 = vrev64q_f32(r0);      // -ai -ar -bi -br
      return vtrn1q_f32(r1, in); // -ai ar -bi br
    }
    //Complex double
    inline float64x2_t operator()(float64x2_t in, float64x2_t ret){
      // a ib -> -b ia
      float64x2_t tmp;
      tmp = vnegq_f64(in);
      return vextq_f64(tmp, in, 1);
    }
  };

  struct Permute{

    static inline float32x4_t Permute0(float32x4_t in){ // N:ok
      // AB CD -> CD AB
      return vextq_f32(in, in, 2);
    };
    static inline float32x4_t Permute1(float32x4_t in){ // N:ok
      // AB CD -> BA DC
      return vrev64q_f32(in);
    };
    static inline float32x4_t Permute2(float32x4_t in){ // N:not used by Boyle
      return in;
    };
    static inline float32x4_t Permute3(float32x4_t in){ // N:not used by Boyle
      return in;
    };

    static inline float64x2_t Permute0(float64x2_t in){ // N:ok
      // AB -> BA
      return vextq_f64(in, in, 1);
    };
    static inline float64x2_t Permute1(float64x2_t in){ // N:not used by Boyle
      return in;
    };
    static inline float64x2_t Permute2(float64x2_t in){ // N:not used by Boyle
      return in;
    };
    static inline float64x2_t Permute3(float64x2_t in){ // N:not used by Boyle
      return in;
    };

  };

  struct Rotate{

    static inline float32x4_t rotate(float32x4_t in,int n){ // N:ok
      switch(n){
      case 0: // AB CD -> AB CD
        return tRotate<0>(in);
        break;
      case 1: // AB CD -> BC DA
        return tRotate<1>(in);
        break;
      case 2: // AB CD -> CD AB
        return tRotate<2>(in);
        break;
      case 3: // AB CD -> DA BC
        return tRotate<3>(in);
        break;
      default: assert(0);
      }
    }
    static inline float64x2_t rotate(float64x2_t in,int n){ // N:ok
      switch(n){
      case 0: // AB -> AB
        return tRotate<0>(in);
        break;
      case 1: // AB -> BA
        return tRotate<1>(in);
        break;
      default: assert(0);
      }
    }

    template<int n> static inline float32x4_t tRotate(float32x4_t in){ return vextq_f32(in,in,n%4); };
    template<int n> static inline float64x2_t tRotate(float64x2_t in){ return vextq_f64(in,in,n%2); };

  };

  struct PrecisionChange {

    static inline float16x8_t StoH (const float32x4_t &a,const float32x4_t &b) {
      float16x4_t h = vcvt_f16_f32(a);
      return vcvt_high_f16_f32(h, b);
    }
    static inline void HtoS (float16x8_t h,float32x4_t &sa,float32x4_t &sb) {
      sb = vcvt_high_f32_f16(h);
      // there is no direct conversion from lower float32x4_t to float64x2_t
      // vextq_f16 not supported by clang 3.8 / 4.0 / arm clang
      // float16x8_t h1 = vextq_f16(h, h, 4); // correct, but not supported by clang
      // workaround for clang
      uint32x4_t h1u = reinterpret_cast<uint32x4_t>(h);
      float16x8_t h1 = reinterpret_cast<float16x8_t>(vextq_u32(h1u, h1u, 2));
      sa = vcvt_high_f32_f16(h1);
    }
    static inline float32x4_t DtoS (float64x2_t a,float64x2_t b) {
      float32x2_t s = vcvt_f32_f64(a);
      return vcvt_high_f32_f64(s, b);
    }
    static inline void StoD (float32x4_t s,float64x2_t &a,float64x2_t &b) {
      b = vcvt_high_f64_f32(s);
      // there is no direct conversion from lower float32x4_t to float64x2_t
      float32x4_t s1 = vextq_f32(s, s, 2);
      a = vcvt_high_f64_f32(s1);
    }
    static inline float16x8_t DtoH (float64x2_t a,float64x2_t b,float64x2_t c,float64x2_t d) {
      float32x4_t s1 = DtoS(a, b);
      float32x4_t s2 = DtoS(c, d);
      return StoH(s1, s2);
    }
    static inline void HtoD (float16x8_t h,float64x2_t &a,float64x2_t &b,float64x2_t &c,float64x2_t &d) {
      float32x4_t s1, s2;
      HtoS(h, s1, s2);
      StoD(s1, a, b);
      StoD(s2, c, d);
    }
  };
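
A round-trip sketch for PrecisionChange (compiles only on an ARMv8/NEON
target; values chosen to be exactly representable in single precision):

float64x2_t a = vdupq_n_f64(1.5), b = vdupq_n_f64(-0.25);
float32x4_t s = Optimization::PrecisionChange::DtoS(a, b);
float64x2_t a2, b2;
Optimization::PrecisionChange::StoD(s, a2, b2);
// a2 and b2 should now equal a and b lane by lane.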

//////////////////////////////////////////////
// Exchange support

struct Exchange{
  static inline void Exchange0(float32x4_t &out1,float32x4_t &out2,float32x4_t in1,float32x4_t in2){
    // in1: ABCD -> out1: ABEF
    // in2: EFGH -> out2: CDGH

    // z: CDAB
    float32x4_t z = vextq_f32(in1, in1, 2);
    // out1: ABEF
    out1 = vextq_f32(z, in2, 2);

    // z: GHEF
    z = vextq_f32(in2, in2, 2);
    // out2: CDGH
    out2 = vextq_f32(in1, z, 2);
  };

  static inline void Exchange1(float32x4_t &out1,float32x4_t &out2,float32x4_t in1,float32x4_t in2){
    // in1: ABCD -> out1: AECG
    // in2: EFGH -> out2: BFDH
    out1 = vtrn1q_f32(in1, in2);
    out2 = vtrn2q_f32(in1, in2);
  };
  static inline void Exchange2(float32x4_t &out1,float32x4_t &out2,float32x4_t in1,float32x4_t in2){
    assert(0);
    return;
  };
  static inline void Exchange3(float32x4_t &out1,float32x4_t &out2,float32x4_t in1,float32x4_t in2){
    assert(0);
    return;
  };
  // double precision
  static inline void Exchange0(float64x2_t &out1,float64x2_t &out2,float64x2_t in1,float64x2_t in2){
    // in1: AB -> out1: AC
    // in2: CD -> out2: BD
    out1 = vzip1q_f64(in1, in2);
    out2 = vzip2q_f64(in1, in2);
  };
  static inline void Exchange1(float64x2_t &out1,float64x2_t &out2,float64x2_t in1,float64x2_t in2){
    assert(0);
    return;
  };
  static inline void Exchange2(float64x2_t &out1,float64x2_t &out2,float64x2_t in1,float64x2_t in2){
    assert(0);
    return;
  };
  static inline void Exchange3(float64x2_t &out1,float64x2_t &out2,float64x2_t in1,float64x2_t in2){
    assert(0);
    return;
  };
};
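
A scalar model of the ExchangeN semantics, as I read the lane diagrams above (sketch only, not Grid API): split each input into blocks of length b = W >> (n+1); out1 interleaves the even-numbered blocks of in1 and in2, out2 the odd-numbered blocks.

    #include <cstdio>

    void exchange_model(int n, int W, const float* in1, const float* in2,
                        float* out1, float* out2) {
      int b = W >> (n + 1);     // block length swapped between the two inputs
      int o1 = 0, o2 = 0;
      for (int k = 0; k * b < W; k++) {
        float* dst = (k % 2 == 0) ? out1 : out2;
        int&   off = (k % 2 == 0) ? o1 : o2;
        for (int i = 0; i < b; i++) dst[off++] = in1[k * b + i];
        for (int i = 0; i < b; i++) dst[off++] = in2[k * b + i];
      }
    }

    int main() {
      float in1[4] = {1,2,3,4}, in2[4] = {5,6,7,8}, out1[4], out2[4];
      exchange_model(0, 4, in1, in2, out1, out2); // ABCD/EFGH -> ABEF/CDGH
      printf("%g %g %g %g | %g %g %g %g\n",
             out1[0],out1[1],out1[2],out1[3], out2[0],out2[1],out2[2],out2[3]);
    }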

//////////////////////////////////////////////
// Some Template specialization
template < typename vtype >
void permute(vtype &a, vtype b, int perm) {

};

//Complex float Reduce
template<>
inline Grid::ComplexF Reduce<Grid::ComplexF, float32x4_t>::operator()(float32x4_t in){
  return 0;
  float32x4_t v1; // two complex
  v1 = Optimization::Permute::Permute0(in);
  v1 = vaddq_f32(v1,in);
  u128f conv; conv.v=v1;
  return Grid::ComplexF(conv.f[0],conv.f[1]);
}
//Real float Reduce
template<>
inline Grid::RealF Reduce<Grid::RealF, float32x4_t>::operator()(float32x4_t in){
  float32x2_t high = vget_high_f32(in);
  float32x2_t low  = vget_low_f32(in);
  float32x2_t tmp  = vadd_f32(low, high);
  float32x2_t sum  = vpadd_f32(tmp, tmp);
  return vget_lane_f32(sum,0);
  return vaddvq_f32(in);
}

//Complex double Reduce
template<>
inline Grid::ComplexD Reduce<Grid::ComplexD, float64x2_t>::operator()(float64x2_t in){
  return 0;
  u128d conv; conv.v = in;
  return Grid::ComplexD(conv.f[0],conv.f[1]);
}

//Real double Reduce
template<>
inline Grid::RealD Reduce<Grid::RealD, float64x2_t>::operator()(float64x2_t in){
  float64x2_t sum = vpaddq_f64(in, in);
  return vgetq_lane_f64(sum,0);
  return vaddvq_f64(in);
}

//Integer Reduce
template<>
inline Integer Reduce<Integer, uint32x4_t>::operator()(uint32x4_t in){
  // FIXME unimplemented
  printf("Reduce : Missing integer implementation -> FIX\n");
  assert(0);
  return vaddvq_u32(in);
}
}
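
The pairwise fold in the RealF reduction above, written out in scalars (illustration only): cross-half add first, then a vpadd-style horizontal add.

    #include <cstdio>

    float reduce4(const float v[4]) {
      float lo = v[0] + v[2];   // vadd_f32(low, high): pair lanes across halves
      float hi = v[1] + v[3];
      return lo + hi;           // vpadd_f32(tmp, tmp), then take lane 0
    }

    int main() { float v[4] = {1, 2, 3, 4}; printf("%f\n", reduce4(v)); } // 10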

//////////////////////////////////////////////////////////////////////////////////////
// Here assign types
namespace Grid {
  // Here assign types

  // typedef Optimization::vech SIMD_Htype; // Reduced precision type
  typedef float16x8_t SIMD_Htype; // Half precision type
  typedef float32x4_t SIMD_Ftype; // Single precision type
  typedef float64x2_t SIMD_Dtype; // Double precision type
  typedef uint32x4_t  SIMD_Itype; // Integer type

@@ -312,13 +573,6 @@ namespace Grid {
  inline void prefetch_HINT_T0(const char *ptr){};

  // Gpermute function
  template < typename VectorSIMD >
  inline void Gpermute(VectorSIMD &y,const VectorSIMD &b, int perm ) {
    Optimization::permute(y.v,b.v,perm);
  }

  // Function name aliases
  typedef Optimization::Vsplat  VsplatSIMD;
  typedef Optimization::Vstore  VstoreSIMD;
@@ -326,16 +580,20 @@ namespace Grid {
  typedef Optimization::Vstream VstreamSIMD;
  template <typename S, typename T> using ReduceSIMD = Optimization::Reduce<S,T>;

  // Arithmetic operations
  typedef Optimization::Sum          SumSIMD;
  typedef Optimization::Sub          SubSIMD;
  typedef Optimization::Div          DivSIMD;
  typedef Optimization::Mult         MultSIMD;
  typedef Optimization::MultComplex  MultComplexSIMD;
  typedef Optimization::MultRealPart MultRealPartSIMD;
  typedef Optimization::MaddRealPart MaddRealPartSIMD;
  typedef Optimization::Conj         ConjSIMD;
  typedef Optimization::TimesMinusI  TimesMinusISIMD;
  typedef Optimization::TimesI       TimesISIMD;

}

@@ -374,6 +374,84 @@ namespace Optimization {
    // Complex float
    FLOAT_WRAP_2(operator(), inline)
  };
#define USE_FP16
  struct PrecisionChange {
    static inline vech StoH (const vector4float &a, const vector4float &b) {
      vech ret;
      std::cout << GridLogError << "QPX single to half precision conversion not yet supported." << std::endl;
      assert(0);
      return ret;
    }
    static inline void HtoS (vech h, vector4float &sa, vector4float &sb) {
      std::cout << GridLogError << "QPX half to single precision conversion not yet supported." << std::endl;
      assert(0);
    }
    static inline vector4float DtoS (vector4double a, vector4double b) {
      vector4float ret;
      std::cout << GridLogError << "QPX double to single precision conversion not yet supported." << std::endl;
      assert(0);
      return ret;
    }
    static inline void StoD (vector4float s, vector4double &a, vector4double &b) {
      std::cout << GridLogError << "QPX single to double precision conversion not yet supported." << std::endl;
      assert(0);
    }
    static inline vech DtoH (vector4double a, vector4double b,
                             vector4double c, vector4double d) {
      vech ret;
      std::cout << GridLogError << "QPX double to half precision conversion not yet supported." << std::endl;
      assert(0);
      return ret;
    }
    static inline void HtoD (vech h, vector4double &a, vector4double &b,
                             vector4double &c, vector4double &d) {
      std::cout << GridLogError << "QPX half to double precision conversion not yet supported." << std::endl;
      assert(0);
    }
  };

  //////////////////////////////////////////////
  // Exchange support
#define FLOAT_WRAP_EXCHANGE(fn)                                 \
  static inline void fn(vector4float &out1, vector4float &out2, \
                        vector4float in1, vector4float in2)     \
  {                                                             \
    vector4double out1d, out2d, in1d, in2d;                     \
    in1d = Vset()(in1);                                         \
    in2d = Vset()(in2);                                         \
    fn(out1d, out2d, in1d, in2d);                               \
    Vstore()(out1d, out1);                                      \
    Vstore()(out2d, out2);                                      \
  }
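
The wrapper pattern above in miniature: widen single-precision inputs, run the double-precision kernel, narrow the results. A standalone model; the kernel names are hypothetical and the widening is plain scalar conversion rather than Vset/Vstore.

    #include <cstdio>

    void kernel_d(double &o1, double &o2, double i1, double i2) {
      o1 = i1 + i2; o2 = i1 - i2;
    }
    void kernel_f(float &o1, float &o2, float i1, float i2) {
      double o1d, o2d;
      kernel_d(o1d, o2d, (double)i1, (double)i2);  // promote, compute in double
      o1 = (float)o1d; o2 = (float)o2d;            // demote back
    }

    int main() { float a, b; kernel_f(a, b, 1.5f, 2.5f); printf("%g %g\n", a, b); }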

  struct Exchange{

    // double precision
    static inline void Exchange0(vector4double &out1, vector4double &out2,
                                 vector4double in1, vector4double in2) {
      out1 = vec_perm(in1, in2, vec_gpci(0145));
      out2 = vec_perm(in1, in2, vec_gpci(02367));
    }
    static inline void Exchange1(vector4double &out1, vector4double &out2,
                                 vector4double in1, vector4double in2) {
      out1 = vec_perm(in1, in2, vec_gpci(0426));
      out2 = vec_perm(in1, in2, vec_gpci(01537));
    }
    static inline void Exchange2(vector4double &out1, vector4double &out2,
                                 vector4double in1, vector4double in2) {
      assert(0);
    }
    static inline void Exchange3(vector4double &out1, vector4double &out2,
                                 vector4double in1, vector4double in2) {
      assert(0);
    }

    // single precision
    FLOAT_WRAP_EXCHANGE(Exchange0);
    FLOAT_WRAP_EXCHANGE(Exchange1);
    FLOAT_WRAP_EXCHANGE(Exchange2);
    FLOAT_WRAP_EXCHANGE(Exchange3);
  };
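
A scalar model of the vec_perm/vec_gpci pairs above, on my reading of the QPX pattern (illustration only, not runnable on QPX hardware semantics): each octal digit of the gpci literal selects a lane, 0-3 from in1 and 4-7 from in2, so vec_gpci(0145) builds {in1[0], in1[1], in2[0], in2[1]}.

    #include <cstdio>

    void perm_model(const double* in1, const double* in2, const int sel[4], double* out) {
      for (int i = 0; i < 4; i++)
        out[i] = (sel[i] < 4) ? in1[sel[i]] : in2[sel[i] - 4];
    }

    int main() {
      double a[4] = {0,1,2,3}, b[4] = {4,5,6,7}, o[4];
      int exch0_out1[4] = {0, 1, 4, 5};          // digits of vec_gpci(0145)
      perm_model(a, b, exch0_out1, o);
      printf("%g %g %g %g\n", o[0], o[1], o[2], o[3]); // 0 1 4 5
    }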

  struct Permute{
    //Complex double
@@ -497,15 +575,19 @@ namespace Optimization {

  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, int>::operator()(int in){
    // FIXME unimplemented
    printf("Reduce : Missing integer implementation -> FIX\n");
    assert(0);
  inline Integer Reduce<Integer, veci>::operator()(veci in){
    Integer a = 0;
    for (unsigned int i = 0; i < W<Integer>::r; ++i)
    {
      a += in.v[i];
    }
    return a;
  }
}

////////////////////////////////////////////////////////////////////////////////
// Here assign types
typedef Optimization::vech         SIMD_Htype; // Half precision type
typedef Optimization::vector4float SIMD_Ftype; // Single precision type
typedef vector4double              SIMD_Dtype; // Double precision type
typedef Optimization::veci         SIMD_Itype; // Integer type

@@ -570,9 +570,9 @@ namespace Optimization {
  //Integer Reduce
  template<>
  inline Integer Reduce<Integer, __m128i>::operator()(__m128i in){
    // FIXME unimplemented
    printf("Reduce : Missing integer implementation -> FIX\n");
    assert(0);
    __m128i v1 = _mm_hadd_epi32(in, in);
    __m128i v2 = _mm_hadd_epi32(v1, v1);
    return _mm_cvtsi128_si32(v2);
  }
}
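
A quick standalone check of the horizontal-add reduction above (assumption: an x86 host with SSSE3):

    #include <tmmintrin.h>
    #include <cstdio>

    int main() {
      __m128i in = _mm_setr_epi32(1, 2, 3, 4);
      __m128i v1 = _mm_hadd_epi32(in, in);    // {3, 7, 3, 7}
      __m128i v2 = _mm_hadd_epi32(v1, v1);    // {10, 10, 10, 10}
      printf("%d\n", _mm_cvtsi128_si32(v2));  // 10
      return 0;
    }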

@@ -53,7 +53,7 @@ directory
#if defined IMCI
#include "Grid_imci.h"
#endif
#ifdef NEONv8
#ifdef NEONV8
#include "Grid_neon.h"
#endif
#if defined QPX
@@ -376,7 +376,18 @@ class Grid_simd {
      Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
    }
  }

  friend inline void exchange0(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange0(out1.v,out2.v,in1.v,in2.v);
  }
  friend inline void exchange1(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange1(out1.v,out2.v,in1.v,in2.v);
  }
  friend inline void exchange2(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange2(out1.v,out2.v,in1.v,in2.v);
  }
  friend inline void exchange3(Grid_simd &out1,Grid_simd &out2,Grid_simd in1,Grid_simd in2){
    Optimization::Exchange::Exchange3(out1.v,out2.v,in1.v,in2.v);
  }
  ////////////////////////////////////////////////////////////////////
  // General permute; assumes vector length is same across
  // all subtypes; may not be a good assumption, but could

@@ -32,8 +32,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
namespace Grid {

int LebesgueOrder::UseLebesgueOrder;
#ifdef KNL
std::vector<int> LebesgueOrder::Block({8,2,2,2});
#else
std::vector<int> LebesgueOrder::Block({2,2,2,2});
#endif
LebesgueOrder::IndexInteger LebesgueOrder::alignup(IndexInteger n){
  n--;          // 1000 0011 --> 1000 0010
  n |= n >> 1;  // 1000 0010 | 0100 0001 = 1100 0011
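
The hunk cuts the function off here; the bit trick is the standard round-up-to-power-of-two idiom, which completed for 32-bit values reads (sketch, assuming the remaining shifts follow the usual form):

    #include <cstdint>
    #include <cstdio>

    uint32_t alignup32(uint32_t n) {
      n--;                               // so exact powers of two stay put
      n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
      n |= n >> 8;  n |= n >> 16;        // smear the top set bit downwards
      return n + 1;                      // smallest power of two >= original n
    }

    int main() { printf("%u\n", alignup32(0x83)); } // 0x83 -> 0x100 (256)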
@@ -51,8 +54,31 @@ LebesgueOrder::LebesgueOrder(GridBase *_grid)
  if      ( Block[0]==0) ZGraph();
  else if ( Block[1]==0) NoBlocking();
  else CartesianBlocking();
}

  if (0) {
    std::cout << "Thread Interleaving"<<std::endl;
    ThreadInterleave();
  }
}
void LebesgueOrder::ThreadInterleave(void)
{
  std::vector<IndexInteger> reorder = _LebesgueReorder;
  std::vector<IndexInteger> throrder;
  int vol     = _LebesgueReorder.size();
  int threads = GridThread::GetThreads();
  int blockbits=3;
  int blocklen = 8;
  int msk   = 0x7;

  for(int t=0;t<threads;t++){
    for(int ss=0;ss<vol;ss++){
      if ( ( ss >> blockbits) % threads == t ) {
        throrder.push_back(reorder[ss]);
      }
    }
  }
  _LebesgueReorder = throrder;
}
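
A standalone model of the interleave above: sites are grouped in blocks of 2^blockbits and dealt round-robin to threads, so each thread's blocks end up contiguous in the reordered list.

    #include <cstdio>
    #include <vector>

    int main() {
      int blockbits = 3, threads = 2, vol = 32;
      std::vector<int> order;
      for (int t = 0; t < threads; t++)
        for (int ss = 0; ss < vol; ss++)
          if (((ss >> blockbits) % threads) == t) order.push_back(ss);
      for (int s : order) printf("%d ", s);  // 0-7,16-23 (thread 0), 8-15,24-31 (thread 1)
      printf("\n");
    }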
void LebesgueOrder::NoBlocking(void)
{
  std::cout<<GridLogDebug<<"Lexicographic : no cache blocking"<<std::endl;

@@ -70,6 +70,8 @@ namespace Grid {
                 std::vector<IndexInteger> & xi,
                 std::vector<IndexInteger> &dims);

    void ThreadInterleave(void);

  private:
    std::vector<IndexInteger> _LebesgueReorder;

@@ -176,6 +176,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  // Timing info; ugly; possibly temporary
  /////////////////////////////////////////
  double commtime;
  double mpi3synctime;
  double mpi3synctime_g;
  double shmmergetime;
  double gathertime;
  double gathermtime;
  double halogtime;
@@ -185,6 +188,10 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  double splicetime;
  double nosplicetime;
  double calls;
  std::vector<double> comm_bytes_thr;
  std::vector<double> comm_time_thr;
  std::vector<double> comm_enter_thr;
  std::vector<double> comm_leave_thr;

  ////////////////////////////////////////
  // Stencil query
@@ -248,35 +255,120 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  //////////////////////////////////////////
  // Comms packet queue for asynch thread
  //////////////////////////////////////////
  void CommunicateThreaded()
  {
#ifdef GRID_OMP
    // must be called in parallel region
    int mythread = omp_get_thread_num();
    int nthreads = CartesianCommunicator::nCommThreads;
#else
    int mythread = 0;
    int nthreads = 1;
#endif
    if (nthreads == -1) nthreads = 1;
    if (mythread < nthreads) {
      comm_enter_thr[mythread] = usecond();
      for (int i = mythread; i < Packets.size(); i += nthreads) {
        uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                      Packets[i].to_rank,
                                                      Packets[i].recv_buf,
                                                      Packets[i].from_rank,
                                                      Packets[i].bytes,i);
        comm_bytes_thr[mythread] += bytes;
      }
      comm_leave_thr[mythread]= usecond();
      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
    }
  }
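
The packet striding above in isolation: thread t handles packets t, t+nthreads, t+2*nthreads, ..., so the queue is partitioned without any locking. A minimal model:

    #include <cstdio>

    int main() {
      int npackets = 10, nthreads = 3;
      for (int t = 0; t < nthreads; t++) {
        printf("thread %d:", t);
        for (int i = t; i < npackets; i += nthreads) printf(" %d", i);
        printf("\n");   // thread 0: 0 3 6 9, thread 1: 1 4 7, thread 2: 2 5 8
      }
    }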

  void CollateThreads(void)
  {
    int nthreads = CartesianCommunicator::nCommThreads;
    double first=0.0;
    double last =0.0;

    for(int t=0;t<nthreads;t++) {

      double t0 = comm_enter_thr[t];
      double t1 = comm_leave_thr[t];
      comms_bytes+=comm_bytes_thr[t];

      comm_enter_thr[t] = 0.0;
      comm_leave_thr[t] = 0.0;
      comm_time_thr[t]  = 0.0;
      comm_bytes_thr[t] = 0;

      if ( first == 0.0 ) first = t0;                  // first is t0
      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0;  // min time seen

      if ( t1 > last ) last = t1;                      // max time seen

    }
    commtime+= last-first;
  }
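
The window CollateThreads credits to comms is earliest enter to latest leave across threads, not the sum of per-thread times, since the threads overlap. A toy illustration:

    #include <algorithm>
    #include <cstdio>

    int main() {
      double enter[3] = {10.0, 12.0, 11.0}, leave[3] = {20.0, 25.0, 22.0};
      double first = enter[0], last = leave[0];
      for (int t = 1; t < 3; t++) {
        first = std::min(first, enter[t]);
        last  = std::max(last,  leave[t]);
      }
      printf("window = %g us\n", last - first); // 15, though the summed times are 34
    }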
  void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
  {
    reqs.resize(Packets.size());
    commtime-=usecond();
    for(int i=0;i<Packets.size();i++){
      comms_bytes+=_grid->StencilSendToRecvFromBegin(reqs[i],
                                                     Packets[i].send_buf,
                                                     Packets[i].to_rank,
                                                     Packets[i].recv_buf,
                                                     Packets[i].from_rank,
                                                     Packets[i].bytes);
                                                     Packets[i].send_buf,
                                                     Packets[i].to_rank,
                                                     Packets[i].recv_buf,
                                                     Packets[i].from_rank,
                                                     Packets[i].bytes,i);
    }
  }

  void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
  {
    for(int i=0;i<Packets.size();i++){
      _grid->StencilSendToRecvFromComplete(reqs[i]);
      _grid->StencilSendToRecvFromComplete(reqs[i],i);
    }
    commtime+=usecond();
  }
  void Communicate(void)
  {
#ifdef GRID_OMP
#pragma omp parallel
    {
      // must be called in parallel region
      int mythread  = omp_get_thread_num();
      int maxthreads= omp_get_max_threads();
      int nthreads  = CartesianCommunicator::nCommThreads;
      assert(nthreads <= maxthreads);

      if (nthreads == -1) nthreads = 1;
#else
      int mythread = 0;
      int nthreads = 1;
#endif
      if (mythread < nthreads) {
        for (int i = mythread; i < Packets.size(); i += nthreads) {
          double start = usecond();
          comm_bytes_thr[mythread] += _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                                   Packets[i].to_rank,
                                                                   Packets[i].recv_buf,
                                                                   Packets[i].from_rank,
                                                                   Packets[i].bytes,i);
          comm_time_thr[mythread] += usecond() - start;
        }
      }
#ifdef GRID_OMP
    }
#endif
  }

  template<class compressor> void HaloExchange(const Lattice<vobj> &source,compressor &compress)
  {
    std::vector<std::vector<CommsRequest_t> > reqs;
    Prepare();
    HaloGather(source,compress);
    CommunicateBegin(reqs);
    CommunicateComplete(reqs);
    // Concurrent
    //CommunicateBegin(reqs);
    //CommunicateComplete(reqs);
    // Sequential, possibly threaded
    Communicate();
    CommsMergeSHM(compress);
    CommsMerge(compress);
  }
@@ -285,7 +377,7 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  {
    int dimension    = _directions[point];
    int displacement = _distances[point];

    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];

@@ -308,11 +400,13 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
    if ( sshift[0] == sshift[1] ) {
      if (splice_dim) {
        splicetime-=usecond();
        same_node = same_node && GatherSimd(source,dimension,shift,0x3,compress,face_idx);
        auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
        same_node = same_node && tmp;
        splicetime+=usecond();
      } else {
        nosplicetime-=usecond();
        same_node = same_node && Gather(source,dimension,shift,0x3,compress,face_idx);
        auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
        same_node = same_node && tmp;
        nosplicetime+=usecond();
      }
    } else {
@@ -320,13 +414,15 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
        splicetime-=usecond();
        // if checkerboard is unfavourable take two passes
        // both with block stride loop iteration
        same_node = same_node && GatherSimd(source,dimension,shift,0x1,compress,face_idx);
        same_node = same_node && GatherSimd(source,dimension,shift,0x2,compress,face_idx);
        auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
        auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
        same_node = same_node && tmp1 && tmp2;
        splicetime+=usecond();
      } else {
        nosplicetime-=usecond();
        same_node = same_node && Gather(source,dimension,shift,0x1,compress,face_idx);
        same_node = same_node && Gather(source,dimension,shift,0x2,compress,face_idx);
        auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
        auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
        same_node = same_node && tmp1 && tmp2;
        nosplicetime+=usecond();
      }
    }
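
Why these hunks hoist the Gather calls into temporaries: && short-circuits, so once same_node is false the right-hand call would be skipped entirely, and the gather (which must always run) would silently not happen. A minimal demonstration of the pitfall:

    #include <cstdio>

    bool gather(int id) { printf("gather %d ran\n", id); return false; }

    int main() {
      bool same_node = true;
      same_node = same_node && gather(1);  // runs; same_node becomes false
      same_node = same_node && gather(2);  // gather(2) never runs!
      bool tmp = gather(3);                // hoisted form: always runs
      same_node = same_node && tmp;
    }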
@@ -337,7 +433,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  template<class compressor>
  void HaloGather(const Lattice<vobj> &source,compressor &compress)
  {
    mpi3synctime_g-=usecond();
    _grid->StencilBarrier();// Synch shared memory on a single node
    mpi3synctime_g+=usecond();

    // conformable(source._grid,_grid);
    assert(source._grid==_grid);
@@ -397,8 +495,12 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
    CommsMerge(decompress,Mergers,Decompressions);
  }
  template<class decompressor> void CommsMergeSHM(decompressor decompress) {
    mpi3synctime-=usecond();
    _grid->StencilBarrier();// Synch shared memory on a single node
    mpi3synctime+=usecond();
    shmmergetime-=usecond();
    CommsMerge(decompress,MergersSHM,DecompressionsSHM);
    shmmergetime+=usecond();
  }

  template<class decompressor>
@@ -442,7 +544,12 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
                   int checkerboard,
                   const std::vector<int> &directions,
                   const std::vector<int> &distances)
    : _permute_type(npoints), _comm_buf_size(npoints)
    : _permute_type(npoints),
      _comm_buf_size(npoints),
      comm_bytes_thr(npoints),
      comm_enter_thr(npoints),
      comm_leave_thr(npoints),
      comm_time_thr(npoints)
  {
    face_table_computed=0;
    _npoints = npoints;
@@ -996,6 +1103,15 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
  void ZeroCounters(void) {
    gathertime = 0.;
    commtime   = 0.;
    mpi3synctime  =0.;
    mpi3synctime_g=0.;
    shmmergetime  =0.;
    for(int i=0;i<_npoints;i++){
      comm_time_thr[i]=0;
      comm_bytes_thr[i]=0;
      comm_enter_thr[i]=0;
      comm_leave_thr[i]=0;
    }
    halogtime = 0.;
    mergetime = 0.;
    decompresstime = 0.;
@@ -1011,6 +1127,18 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
#define PRINTIT(A) AVERAGE(A); std::cout << GridLogMessage << " Stencil " << #A << " "<< A/calls<<std::endl;
    RealD NP = _grid->_Nprocessors;
    RealD NN = _grid->NodeCount();
    double t = 0;
    // if comm_time_thr is set they were all done in parallel so take the max
    // but add up the bytes
    int threaded = 0 ;
    for (int i = 0; i < 8; ++i) {
      if ( comm_time_thr[i]>0.0 ) {
        threaded = 1;
        comms_bytes += comm_bytes_thr[i];
        if (t < comm_time_thr[i]) t = comm_time_thr[i];
      }
    }
    if (threaded) commtime += t;

    _grid->GlobalSum(commtime); commtime/=NP;
    if ( calls > 0. ) {
@@ -1026,6 +1154,9 @@ class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal
      std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000.       << " GB/s per rank"<<std::endl;
      std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000.*NP/NN << " GB/s per node"<<std::endl;
    }
    PRINTIT(mpi3synctime);
    PRINTIT(mpi3synctime_g);
    PRINTIT(shmmergetime);
    PRINTIT(splicetime);
    PRINTIT(nosplicetime);
  }

@@ -98,7 +98,9 @@ template<class rtype,class vtype,class mtype,int N>
strong_inline void mult(iVector<rtype,N> * __restrict__ ret,
                        const iVector<vtype,N> * __restrict__ rhs,
                        const iScalar<mtype> * __restrict__ lhs){
  mult(ret,lhs,rhs);
  for(int c1=0;c1<N;c1++){
    mult(&ret->_internal[c1],&rhs->_internal[c1],&lhs->_internal);
  }
}

@@ -175,7 +175,7 @@ class TensorIndexRecursion {
    }
  }
  template<class vtype,int N> inline static
  void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(TensorIndexRecursion<Level-1>::peekIndex(ret._internal[0],0)),N> &arg, int i,int j)
  void pokeIndex(iVector<vtype,N> &ret, const iVector<decltype(TensorIndexRecursion<Level-1>::peekIndex(ret._internal[0],0,0)),N> &arg, int i,int j)
  {
    for(int ii=0;ii<N;ii++){
      TensorIndexRecursion<Level-1>::pokeIndex(ret._internal[ii],arg._internal[ii],i,j);
@@ -191,7 +191,7 @@ class TensorIndexRecursion {
    }}
  }
  template<class vtype,int N> inline static
  void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(TensorIndexRecursion<Level-1>::peekIndex(ret._internal[0][0],0)),N> &arg, int i,int j)
  void pokeIndex(iMatrix<vtype,N> &ret, const iMatrix<decltype(TensorIndexRecursion<Level-1>::peekIndex(ret._internal[0][0],0,0)),N> &arg, int i,int j)
  {
    for(int ii=0;ii<N;ii++){
    for(int jj=0;jj<N;jj++){

@@ -219,9 +219,15 @@ void Grid_init(int *argc,char ***argv)
    int MB;
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--shm");
    GridCmdOptionInt(arg,MB);
    CartesianCommunicator::MAX_MPI_SHM_BYTES = MB*1024*1024;
    uint64_t MB64 = MB;
    CartesianCommunicator::MAX_MPI_SHM_BYTES = MB64*1024LL*1024LL;
  }
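
Why this hunk widens before multiplying: in 32-bit int arithmetic, MB*1024*1024 overflows once MB reaches 2048; promoting to uint64_t first keeps the full value. A standalone demonstration (unsigned is used here so the wrap is well defined):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t MB = 4096;                       // e.g. "--shm 4096"
      uint64_t narrow = MB * 1024u * 1024u;     // product wraps in 32 bits -> 0
      uint64_t MB64 = MB;
      uint64_t wide = MB64 * 1024LL * 1024LL;   // 4294967296, as intended
      printf("narrow=%llu wide=%llu\n",
             (unsigned long long)narrow, (unsigned long long)wide);
    }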

  if( GridCmdOptionExists(*argv,*argv+*argc,"--shm-hugepages") ){
    CartesianCommunicator::Hugepages = 1;
  }

  if( GridCmdOptionExists(*argv,*argv+*argc,"--debug-signals") ){
    Grid_debug_handler_init();
  }
@@ -304,6 +310,7 @@ void Grid_init(int *argc,char ***argv)
    std::cout<<GridLogMessage<<" --threads n : default number of OMP threads"<<std::endl;
    std::cout<<GridLogMessage<<" --grid n.n.n.n : default Grid size"<<std::endl;
    std::cout<<GridLogMessage<<" --shm M : allocate M megabytes of shared memory for comms"<<std::endl;
    std::cout<<GridLogMessage<<" --shm-hugepages : use explicit huge pages in mmap call "<<std::endl;
    std::cout<<GridLogMessage<<std::endl;
    std::cout<<GridLogMessage<<"Verbose and debug:"<<std::endl;
    std::cout<<GridLogMessage<<std::endl;
@@ -317,7 +324,7 @@ void Grid_init(int *argc,char ***argv)
    std::cout<<GridLogMessage<<std::endl;
    std::cout<<GridLogMessage<<" --comms-concurrent : Asynchronous MPI calls; several dirs at a time "<<std::endl;
    std::cout<<GridLogMessage<<" --comms-sequential : Synchronous MPI calls; one dir at a time "<<std::endl;
    std::cout<<GridLogMessage<<" --comms-overlap : Overlap comms with compute "<<std::endl;
    std::cout<<GridLogMessage<<" --comms-overlap : Overlap comms with compute "<<std::endl;
    std::cout<<GridLogMessage<<std::endl;
    std::cout<<GridLogMessage<<" --dslash-generic: Wilson kernel for generic Nc"<<std::endl;
    std::cout<<GridLogMessage<<" --dslash-unroll : Wilson kernel for Nc=3"<<std::endl;
@@ -356,10 +363,15 @@ void Grid_init(int *argc,char ***argv)
  if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-sequential") ){
    CartesianCommunicator::SetCommunicatorPolicy(CartesianCommunicator::CommunicatorPolicySequential);
  }

  if( GridCmdOptionExists(*argv,*argv+*argc,"--lebesgue") ){
    LebesgueOrder::UseLebesgueOrder=1;
  }

  CartesianCommunicator::nCommThreads = -1;
  if( GridCmdOptionExists(*argv,*argv+*argc,"--comms-threads") ){
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--comms-threads");
    GridCmdOptionInt(arg,CartesianCommunicator::nCommThreads);
  }
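
A representative invocation exercising the options added here (the binary name and sizes are hypothetical; only the flags appear in this diff):

    mpirun -np 16 ./Benchmark_dwf --grid 32.32.32.32 --shm 1024 --comms-threads 2 --comms-sequential --lebesgue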
  if( GridCmdOptionExists(*argv,*argv+*argc,"--cacheblocking") ){
    arg= GridCmdOptionPayload(*argv,*argv+*argc,"--cacheblocking");
    GridCmdOptionIntVector(arg,LebesgueOrder::Block);
@@ -374,10 +386,13 @@ void Grid_init(int *argc,char ***argv)
                 Grid_default_latt,
                 Grid_default_mpi);

  std::cout << GridLogDebug   << "Requesting "<< CartesianCommunicator::MAX_MPI_SHM_BYTES <<" byte stencil comms buffers "<<std::endl;
  std::cout << GridLogMessage << "Requesting "<< CartesianCommunicator::MAX_MPI_SHM_BYTES <<" byte stencil comms buffers "<<std::endl;
  if ( CartesianCommunicator::Hugepages) {
    std::cout << GridLogMessage << "Mapped stencil comms buffers as MAP_HUGETLB "<<std::endl;
  }

  if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
    std::cout<<GridLogMessage<<"Grid Decomposition\n";
    std::cout<<GridLogMessage<<"Grid Default Decomposition patterns\n";
    std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
    std::cout<<GridLogMessage<<"\tMPI tasks : "<<GridCmdVectorIntToString(GridDefaultMpi())<<std::endl;
    std::cout<<GridLogMessage<<"\tvRealF : "<<sizeof(vRealF)*8 <<"bits ; " <<GridCmdVectorIntToString(GridDefaultSimd(4,vRealF::Nsimd()))<<std::endl;
@@ -393,7 +408,7 @@ void Grid_init(int *argc,char ***argv)

void Grid_finalize(void)
{
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3)
#if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT)
  MPI_Finalize();
  Grid_unquiesce_nodes();
#endif