mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Compare commits


3 Commits

Author SHA1 Message Date
9bfd641b22 Reorganise a little to let the PV inverter be defined outside
the Reconstruct class.

This lets the multiple choices for PV inversion be composed without
changing the routine and without if/else case enumeration.

Implemented SchurDiagMooee PV inversion (red-black) and Unprec PV inversion.
Red-black cuts from 190 iterations to 90 iterations at 10^-12 on an 8^4 test system.

Will revisit the multiple Schur options and add a Fourier-based multishift PV inverse, similar
to the one Rudy Arthur did in BFM
2018-10-10 13:22:01 +01:00
be40aaf751 4d 5d reconstruction code & test 2018-10-09 18:37:20 +01:00
e069fd5ed8 SetMass should be implemented universally 2018-10-09 17:41:56 +01:00
91 changed files with 1851 additions and 5719 deletions

View File

@ -52,6 +52,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/algorithms/CoarsenedMatrix.h>
#include <Grid/algorithms/FFT.h>
// EigCg
// Pcg
// Hdcg

View File

@ -380,12 +380,6 @@ namespace Grid {
template<class Field> class OperatorFunction {
public:
virtual void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) = 0;
virtual void operator() (LinearOperatorBase<Field> &Linop, const std::vector<Field> &in,std::vector<Field> &out) {
assert(in.size()==out.size());
for(int k=0;k<in.size();k++){
(*this)(Linop,in[k],out[k]);
}
};
};
template<class Field> class LinearFunction {
@ -427,7 +421,7 @@ namespace Grid {
// Hermitian operator Linear function and operator function
////////////////////////////////////////////////////////////////////////////////////////////
template<class Field>
class HermOpOperatorFunction : public OperatorFunction<Field> {
void operator() (LinearOperatorBase<Field> &Linop, const Field &in, Field &out) {
Linop.HermOp(in,out);
};

View File

@ -55,14 +55,6 @@ namespace Grid {
template<class Field> class CheckerBoardedSparseMatrixBase : public SparseMatrixBase<Field> {
public:
virtual GridBase *RedBlackGrid(void)=0;
//////////////////////////////////////////////////////////////////////
// Query the even even properties to make algorithmic decisions
//////////////////////////////////////////////////////////////////////
virtual RealD Mass(void) { return 0.0; };
virtual int ConstEE(void) { return 0; }; // Disable assumptions unless overridden
virtual int isTrivialEE(void) { return 0; }; // by a derived class that knows better
// half checkerboard operations
virtual void Meooe (const Field &in, Field &out)=0;
virtual void Mooee (const Field &in, Field &out)=0;

View File

@ -33,7 +33,7 @@ directory
namespace Grid {
enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS, BlockCGVec, BlockCGrQVec };
enum BlockCGtype { BlockCG, BlockCGrQ, CGmultiRHS };
//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient. Dimension zero should be the block direction
@ -42,6 +42,7 @@ template <class Field>
class BlockConjugateGradient : public OperatorFunction<Field> {
public:
typedef typename Field::scalar_type scomplex;
int blockDim ;
@ -53,15 +54,21 @@ class BlockConjugateGradient : public OperatorFunction<Field> {
RealD Tolerance;
Integer MaxIterations;
Integer IterationsToComplete; //Number of iterations the CG took to finish. Filled in upon completion
Integer PrintInterval; //GridLogMessages or Iterative
BlockConjugateGradient(BlockCGtype cgtype,int _Orthog,RealD tol, Integer maxit, bool err_on_no_conv = true)
: Tolerance(tol), CGtype(cgtype), blockDim(_Orthog), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv),PrintInterval(100)
: Tolerance(tol), CGtype(cgtype), blockDim(_Orthog), MaxIterations(maxit), ErrorOnNoConverge(err_on_no_conv)
{};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Thin QR factorisation (google it)
////////////////////////////////////////////////////////////////////////////////////////////////////
void ThinQRfact (Eigen::MatrixXcd &m_rr,
Eigen::MatrixXcd &C,
Eigen::MatrixXcd &Cinv,
Field & Q,
const Field & R)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
////////////////////////////////////////////////////////////////////////////////////////////////////
//Dimensions
// R_{ferm x Nblock} = Q_{ferm x Nblock} x C_{Nblock x Nblock} -> ferm x Nblock
@ -78,20 +85,22 @@ class BlockConjugateGradient : public OperatorFunction<Field> {
// Cdag C = Rdag R ; passes.
// QdagQ = 1 ; passes
////////////////////////////////////////////////////////////////////////////////////////////////////
void ThinQRfact (Eigen::MatrixXcd &m_rr,
Eigen::MatrixXcd &C,
Eigen::MatrixXcd &Cinv,
Field & Q,
const Field & R)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
sliceInnerProductMatrix(m_rr,R,R,Orthog);
// Force manifest Hermitian to avoid rounding-related asymmetry
m_rr = 0.5*(m_rr+m_rr.adjoint());
Eigen::MatrixXcd L = m_rr.llt().matrixL();
#if 0
std::cout << " Calling Cholesky ldlt on m_rr " << m_rr <<std::endl;
Eigen::MatrixXcd L_ldlt = m_rr.ldlt().matrixL();
std::cout << " Called Cholesky ldlt on m_rr " << L_ldlt <<std::endl;
auto D_ldlt = m_rr.ldlt().vectorD();
std::cout << " Called Cholesky ldlt on m_rr " << D_ldlt <<std::endl;
#endif
// std::cout << " Calling Cholesky llt on m_rr " <<std::endl;
Eigen::MatrixXcd L = m_rr.llt().matrixL();
// std::cout << " Called Cholesky llt on m_rr " << L <<std::endl;
C = L.adjoint();
Cinv = C.inverse();
////////////////////////////////////////////////////////////////////////////////////////////////////
@ -103,25 +112,6 @@ void ThinQRfact (Eigen::MatrixXcd &m_rr,
////////////////////////////////////////////////////////////////////////////////////////////////////
sliceMulMatrix(Q,Cinv,R,Orthog);
}
// see comments above
void ThinQRfact (Eigen::MatrixXcd &m_rr,
Eigen::MatrixXcd &C,
Eigen::MatrixXcd &Cinv,
std::vector<Field> & Q,
const std::vector<Field> & R)
{
InnerProductMatrix(m_rr,R,R);
m_rr = 0.5*(m_rr+m_rr.adjoint());
Eigen::MatrixXcd L = m_rr.llt().matrixL();
C = L.adjoint();
Cinv = C.inverse();
MulMatrix(Q,Cinv,R);
}
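As a sanity check on the comments above (Cdag C = Rdag R; QdagQ = 1), here is a minimal standalone sketch of the same Cholesky-based thin QR. It assumes only Eigen and is illustrative toy code, not Grid code; sizes and names are made up.

#include <Eigen/Dense>
#include <iostream>

int main(void) {
  const int Nferm = 100, Nblock = 4;                // toy stand-ins for ferm x Nblock
  Eigen::MatrixXcd R = Eigen::MatrixXcd::Random(Nferm, Nblock);
  Eigen::MatrixXcd G = R.adjoint() * R;             // Gram matrix, Hermitian positive definite
  Eigen::MatrixXcd m_rr = 0.5 * (G + G.adjoint());  // force manifest Hermiticity, as in ThinQRfact
  Eigen::MatrixXcd L = m_rr.llt().matrixL();        // Cholesky: m_rr = L L^dag
  Eigen::MatrixXcd C = L.adjoint();                 // C is upper triangular
  Eigen::MatrixXcd Cinv = C.inverse();
  Eigen::MatrixXcd Q = R * Cinv;                    // thin Q factor: R = Q C
  std::cout << "|QdagQ - 1| = "
            << (Q.adjoint()*Q - Eigen::MatrixXcd::Identity(Nblock, Nblock)).norm() << std::endl;
  std::cout << "|QC - R|    = " << (Q*C - R).norm() << std::endl;
  return 0;
}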
////////////////////////////////////////////////////////////////////////////////////////////////////
// Call one of several implementations
////////////////////////////////////////////////////////////////////////////////////////////////////
@ -129,20 +119,14 @@ void operator()(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
if ( CGtype == BlockCGrQ ) {
BlockCGrQsolve(Linop,Src,Psi);
} else if (CGtype == BlockCG ) {
BlockCGsolve(Linop,Src,Psi);
} else if (CGtype == CGmultiRHS ) {
CGmultiRHSsolve(Linop,Src,Psi);
} else {
assert(0);
}
}
virtual void operator()(LinearOperatorBase<Field> &Linop, const std::vector<Field> &Src, std::vector<Field> &Psi)
{
if ( CGtype == BlockCGrQVec ) {
BlockCGrQsolveVec(Linop,Src,Psi);
} else {
assert(0);
}
}
////////////////////////////////////////////////////////////////////////////
// BlockCGrQ implementation:
@ -155,8 +139,7 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
Nblock = B._grid->_fdimensions[Orthog];
/* FAKE */
Nblock=8;
std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;
X.checkerboard = B.checkerboard;
@ -219,10 +202,15 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
std::cout << GridLogMessage<<"BlockCGrQ algorithm initialisation " <<std::endl;
//1. QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
Linop.HermOp(X, AD);
tmp = B - AD;
//std::cout << GridLogMessage << " initial tmp " << norm2(tmp)<< std::endl;
ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
//std::cout << GridLogMessage << " initial Q " << norm2(Q)<< std::endl;
//std::cout << GridLogMessage << " m_rr " << m_rr<<std::endl;
//std::cout << GridLogMessage << " m_C " << m_C<<std::endl;
//std::cout << GridLogMessage << " m_Cinv " << m_Cinv<<std::endl;
D=Q;
std::cout << GridLogMessage<<"BlockCGrQ computed initial residual and QR fact " <<std::endl;
@ -244,12 +232,14 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
MatrixTimer.Start();
Linop.HermOp(D, Z);
MatrixTimer.Stop();
//std::cout << GridLogMessage << " norm2 Z " <<norm2(Z)<<std::endl;
//4. M = [D^dag Z]^{-1}
sliceInnerTimer.Start();
sliceInnerProductMatrix(m_DZ,D,Z,Orthog);
sliceInnerTimer.Stop();
m_M = m_DZ.inverse();
//std::cout << GridLogMessage << " m_DZ " <<m_DZ<<std::endl;
//5. X = X + D MC
m_tmp = m_M * m_C;
@ -267,7 +257,6 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
//7. D = Q + D S^dag
m_tmp = m_S.adjoint();
sliceMaddTimer.Start();
sliceMaddMatrix(D,m_tmp,D,Q,Orthog);
sliceMaddTimer.Stop();
@ -328,6 +317,152 @@ void BlockCGrQsolve(LinearOperatorBase<Field> &Linop, const Field &B, Field &X)
IterationsToComplete = k;
}
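The numbered steps above (1, 3-8) can be exercised on a dense toy problem. The following sketch assumes only Eigen, with a random Hermitian positive-definite A standing in for the fermion operator; it is illustrative, not Grid code.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// Cholesky-based thin QR as in ThinQRfact above: R = Q C, Q^dag Q = 1.
static void thinQR(const Eigen::MatrixXcd &R, Eigen::MatrixXcd &Q, Eigen::MatrixXcd &C) {
  Eigen::MatrixXcd G = R.adjoint() * R;
  Eigen::MatrixXcd H = 0.5 * (G + G.adjoint());
  Eigen::MatrixXcd L = H.llt().matrixL();
  C = L.adjoint();
  Q = R * C.inverse();
}

int main(void) {
  const int N = 64, Nblock = 4;
  Eigen::MatrixXcd T = Eigen::MatrixXcd::Random(N, N);
  Eigen::MatrixXcd A = T.adjoint() * T + Eigen::MatrixXcd::Identity(N, N); // HPD toy operator
  Eigen::MatrixXcd B = Eigen::MatrixXcd::Random(N, Nblock);
  Eigen::MatrixXcd X = Eigen::MatrixXcd::Zero(N, Nblock);

  Eigen::MatrixXcd Q, C, S, D, Z, M;
  thinQR(B - A*X, Q, C);                            // 1. QC = R = B - AX
  D = Q;                                            //    D = Q
  for (int k = 1; k <= 100; k++) {
    Z = A * D;                                      // 3. Z = AD
    M = (D.adjoint() * Z).inverse();                // 4. M = [D^dag Z]^{-1}
    X = X + D * (M * C);                            // 5. X = X + D MC
    thinQR(Q - Z * M, Q, S);                        // 6. QS = Q - ZM
    D = Q + D * S.adjoint();                        // 7. D = Q + D S^dag
    C = S * C;                                      // 8. C = S C
    // convergence monitor: residual Gram matrix is m_rr = C^dag C
    double resid = std::sqrt((C.adjoint()*C).trace().real() / B.squaredNorm());
    if (resid < 1.0e-10) { std::cout << "converged in " << k << " iterations" << std::endl; break; }
  }
  std::cout << "true resid " << (B - A*X).norm() / B.norm() << std::endl;
  return 0;
}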
//////////////////////////////////////////////////////////////////////////
// Block conjugate gradient; original O'Leary. Dimension zero should be the block direction
//////////////////////////////////////////////////////////////////////////
void BlockCGsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &Psi)
{
int Orthog = blockDim; // First dimension is block dim; this is an assumption
Nblock = Src._grid->_fdimensions[Orthog];
std::cout<<GridLogMessage<<" Block Conjugate Gradient : Orthog "<<Orthog<<" Nblock "<<Nblock<<std::endl;
Psi.checkerboard = Src.checkerboard;
conformable(Psi, Src);
Field P(Src);
Field AP(Src);
Field R(Src);
Eigen::MatrixXcd m_pAp = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_pAp_inv= Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_rr = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_rr_inv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_alpha = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_beta = Eigen::MatrixXcd::Zero(Nblock,Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
sliceNorm(ssq,Src,Orthog);
RealD sssum=0;
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
sliceNorm(residuals,Src,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
sliceNorm(residuals,Psi,Orthog);
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
// Initial search dir is guess
Linop.HermOp(Psi, AP);
/************************************************************************
* Block conjugate gradient (Stephen Pickles, thesis 1995, p. 71; O'Leary 1980)
************************************************************************
* O'Leary : R = B - A X
* O'Leary : P = M R ; preconditioner M = 1
* O'Leary : alpha = PAP^{-1} RMR
* O'Leary : beta = RMR^{-1}_old RMR_new
* O'Leary : X=X+Palpha
* O'Leary : R_new=R_old-AP alpha
* O'Leary : P=MR_new+P beta
*/
R = Src - AP;
P = R;
sliceInnerProductMatrix(m_rr,R,R,Orthog);
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
RealD rrsum=0;
for(int b=0;b<Nblock;b++) rrsum+=real(m_rr(b,b));
std::cout << GridLogIterative << "\titeration "<<k<<" rr_sum "<<rrsum<<" ssq_sum "<< sssum
<<" / "<<std::sqrt(rrsum/sssum) <<std::endl;
MatrixTimer.Start();
Linop.HermOp(P, AP);
MatrixTimer.Stop();
// Alpha
sliceInnerTimer.Start();
sliceInnerProductMatrix(m_pAp,P,AP,Orthog);
sliceInnerTimer.Stop();
m_pAp_inv = m_pAp.inverse();
m_alpha = m_pAp_inv * m_rr ;
// Psi, R update
sliceMaddTimer.Start();
sliceMaddMatrix(Psi,m_alpha, P,Psi,Orthog); // add alpha * P to psi
sliceMaddMatrix(R ,m_alpha,AP, R,Orthog,-1.0);// subtract alpha * AP from resid
sliceMaddTimer.Stop();
// Beta
m_rr_inv = m_rr.inverse();
sliceInnerTimer.Start();
sliceInnerProductMatrix(m_rr,R,R,Orthog);
sliceInnerTimer.Stop();
m_beta = m_rr_inv *m_rr;
// Search update
sliceMaddTimer.Start();
sliceMaddMatrix(AP,m_beta,P,R,Orthog);
sliceMaddTimer.Stop();
P= AP;
/*********************
* convergence monitor
*********************
*/
RealD max_resid=0;
RealD rr;
for(int b=0;b<Nblock;b++){
rr = real(m_rr(b,b))/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"BlockCG converged in "<<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "
<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
Linop.HermOp(Psi, AP);
AP = AP-Src;
std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(norm2(AP)/norm2(Src)) <<std::endl;
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BlockConjugateGradient did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
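The O'Leary recurrences in the comment above translate directly to dense matrices. A toy sketch assuming only Eigen (preconditioner M = 1, illustrative sizes, not Grid code):

#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main(void) {
  const int N = 64, Nblock = 4;
  Eigen::MatrixXcd T = Eigen::MatrixXcd::Random(N, N);
  Eigen::MatrixXcd A = T.adjoint() * T + Eigen::MatrixXcd::Identity(N, N); // HPD toy operator
  Eigen::MatrixXcd B = Eigen::MatrixXcd::Random(N, Nblock);
  Eigen::MatrixXcd X = Eigen::MatrixXcd::Zero(N, Nblock);

  Eigen::MatrixXcd R  = B - A*X;                           // R = B - A X
  Eigen::MatrixXcd P  = R;                                 // P = M R, with M = 1
  Eigen::MatrixXcd rr = R.adjoint() * R;                   // RMR
  for (int k = 1; k <= 100; k++) {
    Eigen::MatrixXcd AP    = A * P;
    Eigen::MatrixXcd alpha = (P.adjoint() * AP).inverse() * rr; // alpha = PAP^{-1} RMR
    X = X + P * alpha;                                     // X = X + P alpha
    R = R - AP * alpha;                                    // R_new = R_old - AP alpha
    Eigen::MatrixXcd rr_new = R.adjoint() * R;
    Eigen::MatrixXcd beta   = rr.inverse() * rr_new;       // beta = RMR^{-1}_old RMR_new
    P  = R + P * beta;                                     // P = M R_new + P beta
    rr = rr_new;
    if (std::sqrt(rr.trace().real() / B.squaredNorm()) < 1.0e-10) {
      std::cout << "converged in " << k << " iterations" << std::endl;
      break;
    }
  }
  std::cout << "true resid " << (B - A*X).norm() / B.norm() << std::endl;
  return 0;
}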
//////////////////////////////////////////////////////////////////////////
// multiRHS conjugate gradient. Dimension zero should be the block direction
// Use this for spread out across nodes
//////////////////////////////////////////////////////////////////////////
@ -465,233 +600,6 @@ void CGmultiRHSsolve(LinearOperatorBase<Field> &Linop, const Field &Src, Field &
IterationsToComplete = k;
}
void InnerProductMatrix(Eigen::MatrixXcd &m , const std::vector<Field> &X, const std::vector<Field> &Y){
for(int b=0;b<Nblock;b++){
for(int bp=0;bp<Nblock;bp++) {
m(b,bp) = innerProduct(X[b],Y[bp]);
}}
}
void MaddMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X,const std::vector<Field> &Y,RealD scale=1.0){
// Should make this cache friendly with site outermost, parallel_for
// Deal with case AP aliases with either Y or X
std::vector<Field> tmp(Nblock,X[0]);
for(int b=0;b<Nblock;b++){
tmp[b] = Y[b];
for(int bp=0;bp<Nblock;bp++) {
tmp[b] = tmp[b] + (scale*m(bp,b))*X[bp];
}
}
for(int b=0;b<Nblock;b++){
AP[b] = tmp[b];
}
}
void MulMatrix(std::vector<Field> &AP, Eigen::MatrixXcd &m , const std::vector<Field> &X){
// Should make this cache friendly with site outermost, parallel_for
for(int b=0;b<Nblock;b++){
AP[b] = zero;
for(int bp=0;bp<Nblock;bp++) {
AP[b] += (m(bp,b))*X[bp];
}
}
}
double normv(const std::vector<Field> &P){
double nn = 0.0;
for(int b=0;b<Nblock;b++) {
nn+=norm2(P[b]);
}
return nn;
}
////////////////////////////////////////////////////////////////////////////
// BlockCGrQvec implementation:
//--------------------------
// X is guess/Solution
// B is RHS
// Solve A X_i = B_i ; i refers to Nblock index
////////////////////////////////////////////////////////////////////////////
void BlockCGrQsolveVec(LinearOperatorBase<Field> &Linop, const std::vector<Field> &B, std::vector<Field> &X)
{
Nblock = B.size();
assert(Nblock == X.size());
std::cout<<GridLogMessage<<" Block Conjugate Gradient Vec rQ : Nblock "<<Nblock<<std::endl;
for(int b=0;b<Nblock;b++){
X[b].checkerboard = B[b].checkerboard;
conformable(X[b], B[b]);
conformable(X[b], X[0]);
}
Field Fake(B[0]);
std::vector<Field> tmp(Nblock,Fake);
std::vector<Field> Q(Nblock,Fake);
std::vector<Field> D(Nblock,Fake);
std::vector<Field> Z(Nblock,Fake);
std::vector<Field> AD(Nblock,Fake);
Eigen::MatrixXcd m_DZ = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_M = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_rr = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_C = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Cinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_S = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_Sinv = Eigen::MatrixXcd::Zero(Nblock,Nblock);
Eigen::MatrixXcd m_tmp = Eigen::MatrixXcd::Identity(Nblock,Nblock);
Eigen::MatrixXcd m_tmp1 = Eigen::MatrixXcd::Identity(Nblock,Nblock);
// Initial residual computation & set up
std::vector<RealD> residuals(Nblock);
std::vector<RealD> ssq(Nblock);
RealD sssum=0;
for(int b=0;b<Nblock;b++){ ssq[b] = norm2(B[b]);}
for(int b=0;b<Nblock;b++) sssum+=ssq[b];
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(B[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
for(int b=0;b<Nblock;b++){ residuals[b] = norm2(X[b]);}
for(int b=0;b<Nblock;b++){ assert(std::isnan(residuals[b])==0); }
/************************************************************************
* Block conjugate gradient rQ (Sebastien Birk Thesis, after Dubrulle 2001)
************************************************************************
* Dimensions:
*
* X,B==(Nferm x Nblock)
* A==(Nferm x Nferm)
*
* Nferm = Nspin x Ncolour x Ncomplex x Nlattice_site
*
* QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
* for k:
* Z = AD
* M = [D^dag Z]^{-1}
* X = X + D MC
* QS = Q - ZM
* D = Q + D S^dag
* C = S C
*/
///////////////////////////////////////
// Initial block: initial search dir is guess
///////////////////////////////////////
std::cout << GridLogMessage<<"BlockCGrQvec algorithm initialisation " <<std::endl;
//1. QC = R = B-AX, D = Q ; QC => Thin QR factorisation (google it)
for(int b=0;b<Nblock;b++) {
Linop.HermOp(X[b], AD[b]);
tmp[b] = B[b] - AD[b];
}
ThinQRfact (m_rr, m_C, m_Cinv, Q, tmp);
for(int b=0;b<Nblock;b++) D[b]=Q[b];
std::cout << GridLogMessage<<"BlockCGrQ vec computed initial residual and QR fact " <<std::endl;
///////////////////////////////////////
// Timers
///////////////////////////////////////
GridStopWatch sliceInnerTimer;
GridStopWatch sliceMaddTimer;
GridStopWatch QRTimer;
GridStopWatch MatrixTimer;
GridStopWatch SolverTimer;
SolverTimer.Start();
int k;
for (k = 1; k <= MaxIterations; k++) {
//3. Z = AD
MatrixTimer.Start();
for(int b=0;b<Nblock;b++) Linop.HermOp(D[b], Z[b]);
MatrixTimer.Stop();
//4. M = [D^dag Z]^{-1}
sliceInnerTimer.Start();
InnerProductMatrix(m_DZ,D,Z);
sliceInnerTimer.Stop();
m_M = m_DZ.inverse();
//5. X = X + D MC
m_tmp = m_M * m_C;
sliceMaddTimer.Start();
MaddMatrix(X,m_tmp, D,X);
sliceMaddTimer.Stop();
//6. QS = Q - ZM
sliceMaddTimer.Start();
MaddMatrix(tmp,m_M,Z,Q,-1.0);
sliceMaddTimer.Stop();
QRTimer.Start();
ThinQRfact (m_rr, m_S, m_Sinv, Q, tmp);
QRTimer.Stop();
//7. D = Q + D S^dag
m_tmp = m_S.adjoint();
sliceMaddTimer.Start();
MaddMatrix(D,m_tmp,D,Q);
sliceMaddTimer.Stop();
//8. C = S C
m_C = m_S*m_C;
/*********************
* convergence monitor
*********************
*/
m_rr = m_C.adjoint() * m_C;
RealD max_resid=0;
RealD rrsum=0;
RealD rr;
for(int b=0;b<Nblock;b++) {
rrsum+=real(m_rr(b,b));
rr = real(m_rr(b,b))/ssq[b];
if ( rr > max_resid ) max_resid = rr;
}
std::cout << GridLogIterative << "\t Block Iteration "<<k<<" ave resid "<< sqrt(rrsum/sssum) << " max "<< sqrt(max_resid) <<std::endl;
if ( max_resid < Tolerance*Tolerance ) {
SolverTimer.Stop();
std::cout << GridLogMessage<<"BlockCGrQ converged in "<<k<<" iterations"<<std::endl;
for(int b=0;b<Nblock;b++){
std::cout << GridLogMessage<< "\t\tblock "<<b<<" computed resid "<< std::sqrt(real(m_rr(b,b))/ssq[b])<<std::endl;
}
std::cout << GridLogMessage<<"\tMax residual is "<<std::sqrt(max_resid)<<std::endl;
for(int b=0;b<Nblock;b++) Linop.HermOp(X[b], AD[b]);
for(int b=0;b<Nblock;b++) AD[b] = AD[b]-B[b];
std::cout << GridLogMessage <<"\t True residual is " << std::sqrt(normv(AD)/normv(B)) <<std::endl;
std::cout << GridLogMessage << "Time Breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInnerProd " << sliceInnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMaddMatrix " << sliceMaddTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tThinQRfact " << QRTimer.Elapsed() <<std::endl;
IterationsToComplete = k;
return;
}
}
std::cout << GridLogMessage << "BlockConjugateGradient(rQ) did NOT converge" << std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;
}
};
}

View File

@ -133,7 +133,7 @@ class ConjugateGradient : public OperatorFunction<Field> {
LinalgTimer.Stop();
std::cout << GridLogIterative << "ConjugateGradient: Iteration " << k
<< " residual^2 " << sqrt(cp/ssq) << " target " << Tolerance << std::endl;
<< " residual " << cp << " target " << rsq << std::endl;
// Stopping condition
if (cp <= rsq) {

View File

@ -30,6 +30,49 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
namespace Grid {
namespace QCD {
template<class Field>
class PauliVillarsSolverUnprec
{
public:
ConjugateGradient<Field> & CG;
PauliVillarsSolverUnprec( ConjugateGradient<Field> &_CG) : CG(_CG){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
MdagMLinearOperator<Matrix,Field> HermOp(_Matrix);
_Matrix.SetMass(1.0);
_Matrix.Mdag(src,A);
CG(HermOp,A,sol);
_Matrix.SetMass(m);
};
};
template<class Field>
class PauliVillarsSolverRBprec
{
public:
ConjugateGradient<Field> & CG;
PauliVillarsSolverRBprec( ConjugateGradient<Field> &_CG) : CG(_CG){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
_Matrix.SetMass(1.0);
SchurRedBlackDiagMooeeSolve<Field> SchurSolver(CG);
SchurSolver(_Matrix,src,sol);
_Matrix.SetMass(m);
};
};
template<class Field,class PVinverter> class Reconstruct5DfromPhysical {
private:
PVinverter & PauliVillarsSolver;
@ -42,12 +85,20 @@ template<class Field,class PVinverter> class Reconstruct5DfromPhysical {
// of the Mobius exact AMA corrections.
//
// TODO : understand absence of contact term in eqns in Hantao's thesis
// sol4 is contact term subtracted, but thesis & Brower's paper suggests not.
// sol4 is contact term subtracted.
//
// Step 1: Localise PV inverse in a routine. [DONE]
// Options
// a) Defect correction approach:
// 1) Compute defect from current soln (initially guess).
// This is ...... outerToInner check !!!!
// 2) Deflated Zmobius solve to get 4d soln
// Ensure deflation is working
// 3) Refine 5d Outer using the inner 4d delta soln
//
// Step 1: localise PV inverse in a routine. [DONE]
// Step 2: Schur based PV inverse [DONE]
// Step 3: Fourier accelerated PV inverse [DONE]
//
// Step 3: Fourier accelerated PV inverse
// Step 4:
/////////////////////////////////////////////////////
Reconstruct5DfromPhysical(PVinverter &_PauliVillarsSolver)
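A hedged usage sketch of the composition this enables: the PV inverter is chosen by template parameter, so swapping red-black for unpreconditioned PV inversion requires no if/else inside Reconstruct5DfromPhysical. LatticeFermion is the usual Grid typedef; the tolerance, iteration count, and variable names below are illustrative, and grid/operator setup and the actual call are elided.

// Illustrative only: assumes a 5d action (e.g. a domain wall operator)
// has been constructed elsewhere.
ConjugateGradient<LatticeFermion> CG(1.0e-12, 10000);

PauliVillarsSolverRBprec<LatticeFermion> PVrb(CG);      // red-black Schur PV inverse
Reconstruct5DfromPhysical<LatticeFermion, PauliVillarsSolverRBprec<LatticeFermion>> ReconRB(PVrb);

PauliVillarsSolverUnprec<LatticeFermion> PVunprec(CG);  // unpreconditioned PV inverse
Reconstruct5DfromPhysical<LatticeFermion, PauliVillarsSolverUnprec<LatticeFermion>> ReconUnprec(PVunprec);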

View File

@ -86,23 +86,229 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*/
namespace Grid {
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Use base class to share code
///////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form a Red Black solver calling a Herm solver
// Use of RB info prevents making SchurRedBlackSolve conform to standard interface
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackBase {
protected:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
// Now make the norm reflect extra factor of Mee
template<class Field> class SchurRedBlackStaggeredSolve {
private:
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
public:
SchurRedBlackBase(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
{
CBfactorise=0;
subtractGuess(initSubGuess);
};
void subtractGuess(const bool initSubGuess)
{
subGuess = initSubGuess;
}
bool isSubtractGuess(void)
{
return subGuess;
}
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &in, Field &out){
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
template<class Matrix, class Guesser>
void operator() (Matrix & _Matrix,const Field &in, Field &out, Guesser &guess){
// FIXME CGdiagonalMee not implemented virtual function
// FIXME use CBfactorise to control schur decomp
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
Field src_e(grid);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);
std::cout << GridLogMessage << " SchurRedBlackStaggeredSolve " <<std::endl;
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);
std::cout << GridLogMessage << " SchurRedBlackStaggeredSolve checkerboards picked" <<std::endl;
/////////////////////////////////////////////////////
// src_o = (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd);
tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd);
//src_o = tmp; assert(src_o.checkerboard ==Odd);
_Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source from dumb choice of matrix norm.
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver calling the Mpc solver" <<std::endl;
guess(src_o, sol_o);
Mtmp = sol_o;
_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver called the Mpc solver" <<std::endl;
// Fionn A2A boolean behavioural control
if (subGuess) sol_o = sol_o-Mtmp;
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.checkerboard ==Even);
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver reconstructed other CB" <<std::endl;
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
std::cout<<GridLogMessage << "SchurRedBlackStaggeredSolver inserted solution" <<std::endl;
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackStaggered solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
} else {
std::cout << GridLogMessage << "Guess subtracted after solve." << std::endl;
}
}
};
template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form a Red Black solver calling a Herm solver
// Use of RB info prevents making SchurRedBlackSolve conform to standard interface
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagMooeeSolve {
private:
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver,int cb=0, const bool initSubGuess = false) : _HermitianRBSolver(HermitianRBSolver)
{
CBfactorise=cb;
subtractGuess(initSubGuess);
};
void subtractGuess(const bool initSubGuess)
{
subGuess = initSubGuess;
}
bool isSubtractGuess(void)
{
return subGuess;
}
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &in, Field &out){
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
template<class Matrix, class Guesser>
void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
// FIXME CGdiagonalMee not implemented virtual function
// FIXME use CBfactorise to control schur decomp
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
Field src_e(grid);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd);
tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd);
// get the right MpcDag
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd);
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
guess(src_o,sol_o);
Mtmp = sol_o;
_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
// Fionn A2A boolean behavioural control
if (subGuess) sol_o = sol_o-Mtmp;
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.checkerboard ==Even);
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackDiagMooee solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
} else {
std::cout << GridLogMessage << "Guess subtracted after solve." << std::endl;
}
}
};
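For reference, the even-odd decomposition both classes above implement, written out (the standard Schur-complement derivation, sketched here; not part of the source):

\[
M = \begin{pmatrix} M_{ee} & M_{eo} \\ M_{oe} & M_{oo} \end{pmatrix},
\qquad
M \begin{pmatrix} \psi_e \\ \psi_o \end{pmatrix} = \begin{pmatrix} \eta_e \\ \eta_o \end{pmatrix}.
\]
Eliminating the even checkerboard gives the odd-site Schur system
\[
\hat{M}\,\psi_o = \eta_o - M_{oe} M_{ee}^{-1} \eta_e,
\qquad
\hat{M} = M_{oo} - M_{oe} M_{ee}^{-1} M_{eo},
\]
solved via the normal equations \(\hat{M}^\dagger \hat{M}\,\psi_o = \hat{M}^\dagger (\eta_o - M_{oe} M_{ee}^{-1} \eta_e)\) (hence the MpcDag applied to the prepared source), after which the even checkerboard is reconstructed as
\[
\psi_e = M_{ee}^{-1}\,(\eta_e - M_{eo}\,\psi_o).
\]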
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form a Red Black solver calling a Herm solver
// Use of RB info prevents making SchurRedBlackSolve conform to standard interface
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagTwoSolve {
private:
OperatorFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
public:
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
{
CBfactorise = 0;
subtractGuess(initSubGuess);
@ -116,86 +322,12 @@ namespace Grid {
return subGuess;
}
/////////////////////////////////////////////////////////////
// Shared code
/////////////////////////////////////////////////////////////
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &in, Field &out){
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out)
{
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
template<class Guesser>
void operator()(Matrix &_Matrix, const std::vector<Field> &in, std::vector<Field> &out,Guesser &guess)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
int nblock = in.size();
std::vector<Field> src_o(nblock,grid);
std::vector<Field> sol_o(nblock,grid);
std::vector<Field> guess_save;
Field resid(fgrid);
Field tmp(grid);
////////////////////////////////////////////////
// Prepare RedBlack source
////////////////////////////////////////////////
for(int b=0;b<nblock;b++){
RedBlackSource(_Matrix,in[b],tmp,src_o[b]);
}
////////////////////////////////////////////////
// Make the guesses
////////////////////////////////////////////////
if ( subGuess ) guess_save.resize(nblock,grid);
for(int b=0;b<nblock;b++){
guess(src_o[b],sol_o[b]);
if ( subGuess ) {
guess_save[b] = sol_o[b];
}
}
//////////////////////////////////////////////////////////////
// Call the block solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlackBase calling the solver for "<<nblock<<" RHS" <<std::endl;
RedBlackSolve(_Matrix,src_o,sol_o);
////////////////////////////////////////////////
// A2A boolean behavioural control & reconstruct other checkerboard
////////////////////////////////////////////////
for(int b=0;b<nblock;b++) {
if (subGuess) sol_o[b] = sol_o[b] - guess_save[b];
///////// Needs even source //////////////
pickCheckerboard(Even,tmp,in[b]);
RedBlackSolution(_Matrix,sol_o[b],tmp,out[b]);
/////////////////////////////////////////////////
// Check unprec residual if possible
/////////////////////////////////////////////////
if ( ! subGuess ) {
_Matrix.M(out[b],resid);
resid = resid-in[b];
RealD ns = norm2(in[b]);
RealD nr = norm2(resid);
std::cout<<GridLogMessage<< "SchurRedBlackBase solver true unprec resid["<<b<<"] "<<std::sqrt(nr/ns) << std::endl;
} else {
std::cout<<GridLogMessage<< "SchurRedBlackBase Guess subtracted after solve["<<b<<"] " << std::endl;
}
}
}
template<class Guesser>
template<class Matrix,class Guesser>
void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
// FIXME CGdiagonalMee not implemented virtual function
@ -203,105 +335,42 @@ namespace Grid {
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field resid(fgrid);
Field src_o(grid);
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
Field src_e(grid);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
////////////////////////////////////////////////
// RedBlack source
////////////////////////////////////////////////
RedBlackSource(_Matrix,in,src_e,src_o);
////////////////////////////////
// Construct the guess
////////////////////////////////
Field tmp(grid);
guess(src_o,sol_o);
Field guess_save(grid);
guess_save = sol_o;
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
RedBlackSolve(_Matrix,src_o,sol_o);
////////////////////////////////////////////////
// Fionn A2A boolean behavioural control
////////////////////////////////////////////////
if (subGuess) sol_o= sol_o-guess_save;
///////////////////////////////////////////////////
// RedBlack solution needs the even source
///////////////////////////////////////////////////
RedBlackSolution(_Matrix,sol_o,src_e,out);
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackBase solver true unprec resid "<< std::sqrt(nr/ns) << std::endl;
} else {
std::cout << GridLogMessage << "SchurRedBlackBase Guess subtracted after solve." << std::endl;
}
}
/////////////////////////////////////////////////////////////
// Override in derived. Not virtual as template methods
/////////////////////////////////////////////////////////////
virtual void RedBlackSource (Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o) =0;
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol) =0;
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o) =0;
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)=0;
};
template<class Field> class SchurRedBlackStaggeredSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackStaggeredSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess)
{
}
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);
/////////////////////////////////////////////////////
// src_o = (source_o - Moe MeeInv source_e)
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd);
tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd);
_Matrix.Mooee(tmp,src_o); // Extra factor of "m" in source from dumb choice of matrix norm.
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e_c,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
// get the right MpcDag
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd);
Field tmp(grid);
Field sol_e(grid);
Field src_e(grid);
src_e = src_e_c; // Const correctness
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
// _HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
guess(src_o,tmp);
Mtmp = tmp;
_HermitianRBSolver(_HermOpEO,src_o,tmp); assert(tmp.checkerboard==Odd);
// Fionn A2A boolean behavioural control
if (subGuess) tmp = tmp-Mtmp;
_Matrix.MooeeInv(tmp,sol_o); assert( sol_o.checkerboard ==Odd);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
@ -310,116 +379,78 @@ namespace Grid {
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_o); assert( sol_o.checkerboard ==Odd );
}
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurStaggeredOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout<<GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid "<< std::sqrt(nr/ns) <<" nr "<< nr <<" ns "<<ns << std::endl;
} else {
std::cout << GridLogMessage << "Guess subtracted after solve." << std::endl;
}
}
};
template<class Field> using SchurRedBlackStagSolve = SchurRedBlackStaggeredSolve<Field>;
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Site diagonal has Mooee on it.
// Take a matrix and form a Red Black solver calling a Herm solver
// Use of RB info prevents making SchurRedBlackSolve conform to standard interface
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagMooeeSolve : public SchurRedBlackBase<Field> {
template<class Field> class SchurRedBlackDiagTwoMixed {
private:
LinearFunction<Field> & _HermitianRBSolver;
int CBfactorise;
bool subGuess;
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
SchurRedBlackDiagMooeeSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field> (HermitianRBSolver,initSubGuess) {};
//////////////////////////////////////////////////////
// Override RedBlack specialisation
//////////////////////////////////////////////////////
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field Mtmp(grid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
/////////////////////////////////////////////////////
_Matrix.MooeeInv(src_e,tmp); assert( tmp.checkerboard ==Even);
_Matrix.Meooe (tmp,Mtmp); assert( Mtmp.checkerboard ==Odd);
tmp=src_o-Mtmp; assert( tmp.checkerboard ==Odd);
// get the right MpcDag
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd);
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field tmp(grid);
Field sol_e(grid);
Field src_e_i(grid);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o,tmp); assert( tmp.checkerboard ==Even);
src_e_i = src_e-tmp; assert( src_e_i.checkerboard ==Even);
_Matrix.MooeeInv(src_e_i,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_o); assert( sol_o.checkerboard ==Odd );
}
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurDiagMooeeOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Site diagonal is identity, right preconditioned by Mee^inv
( 1 - Meo Moo^inv Moe Mee^inv ) phi = ( 1 - Meo Moo^inv Moe Mee^inv ) Mee psi = eta
//=> psi = MeeInv phi
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class SchurRedBlackDiagTwoSolve : public SchurRedBlackBase<Field> {
public:
typedef CheckerBoardedSparseMatrixBase<Field> Matrix;
/////////////////////////////////////////////////////
// Wrap the usual normal equations Schur trick
/////////////////////////////////////////////////////
SchurRedBlackDiagTwoSolve(OperatorFunction<Field> &HermitianRBSolver, const bool initSubGuess = false)
: SchurRedBlackBase<Field>(HermitianRBSolver,initSubGuess) {};
virtual void RedBlackSource(Matrix & _Matrix,const Field &src, Field &src_e,Field &src_o)
SchurRedBlackDiagTwoMixed(LinearFunction<Field> &HermitianRBSolver, const bool initSubGuess = false) :
_HermitianRBSolver(HermitianRBSolver)
{
CBfactorise=0;
subtractGuess(initSubGuess);
};
void subtractGuess(const bool initSubGuess)
{
subGuess = initSubGuess;
}
bool isSubtractGuess(void)
{
return subGuess;
}
template<class Matrix>
void operator() (Matrix & _Matrix,const Field &in, Field &out){
ZeroGuesser<Field> guess;
(*this)(_Matrix,in,out,guess);
}
template<class Matrix, class Guesser>
void operator() (Matrix & _Matrix,const Field &in, Field &out,Guesser &guess){
// FIXME CGdiagonalMee not implemented virtual function
// FIXME use CBfactorise to control schur decomp
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
Field src_e(grid);
Field src_o(grid);
Field sol_e(grid);
Field sol_o(grid);
Field tmp(grid);
Field Mtmp(grid);
Field resid(fgrid);
pickCheckerboard(Even,src_e,src);
pickCheckerboard(Odd ,src_o,src);
pickCheckerboard(Even,src_e,in);
pickCheckerboard(Odd ,src_o,in);
pickCheckerboard(Even,sol_e,out);
pickCheckerboard(Odd ,sol_o,out);
/////////////////////////////////////////////////////
// src_o = Mdag * (source_o - Moe MeeInv source_e)
@ -430,44 +461,43 @@ namespace Grid {
// get the right MpcDag
_HermOpEO.MpcDag(tmp,src_o); assert(src_o.checkerboard ==Odd);
}
virtual void RedBlackSolution(Matrix & _Matrix,const Field &sol_o, const Field &src_e,Field &sol)
{
GridBase *grid = _Matrix.RedBlackGrid();
GridBase *fgrid= _Matrix.Grid();
Field sol_o_i(grid);
Field tmp(grid);
Field sol_e(grid);
////////////////////////////////////////////////
// MooeeInv due to precond
////////////////////////////////////////////////
_Matrix.MooeeInv(sol_o,tmp);
sol_o_i = tmp;
//////////////////////////////////////////////////////////////
// Call the red-black solver
//////////////////////////////////////////////////////////////
std::cout<<GridLogMessage << "SchurRedBlack solver calling the MpcDagMp solver" <<std::endl;
// _HermitianRBSolver(_HermOpEO,src_o,sol_o); assert(sol_o.checkerboard==Odd);
// _HermitianRBSolver(_HermOpEO,src_o,tmp); assert(tmp.checkerboard==Odd);
guess(src_o,tmp);
Mtmp = tmp;
_HermitianRBSolver(_HermOpEO,src_o,tmp); assert(tmp.checkerboard==Odd);
// Fionn A2A boolean behavioural control
if (subGuess) tmp = tmp-Mtmp;
_Matrix.MooeeInv(tmp,sol_o); assert( sol_o.checkerboard ==Odd);
///////////////////////////////////////////////////
// sol_e = M_ee^-1 * ( src_e - Meo sol_o )...
///////////////////////////////////////////////////
_Matrix.Meooe(sol_o_i,tmp); assert( tmp.checkerboard ==Even);
tmp = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(tmp,sol_e); assert( sol_e.checkerboard ==Even);
_Matrix.Meooe(sol_o,tmp); assert( tmp.checkerboard ==Even);
src_e = src_e-tmp; assert( src_e.checkerboard ==Even);
_Matrix.MooeeInv(src_e,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(sol,sol_o_i); assert( sol_o_i.checkerboard ==Odd );
};
setCheckerboard(out,sol_e); assert( sol_e.checkerboard ==Even);
setCheckerboard(out,sol_o); assert( sol_o.checkerboard ==Odd );
virtual void RedBlackSolve (Matrix & _Matrix,const Field &src_o, Field &sol_o)
{
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
};
virtual void RedBlackSolve (Matrix & _Matrix,const std::vector<Field> &src_o, std::vector<Field> &sol_o)
{
SchurDiagTwoOperator<Matrix,Field> _HermOpEO(_Matrix);
this->_HermitianRBSolver(_HermOpEO,src_o,sol_o);
}
// Verify the unprec residual
if ( ! subGuess ) {
_Matrix.M(out,resid);
resid = resid-in;
RealD ns = norm2(in);
RealD nr = norm2(resid);
std::cout << GridLogMessage << "SchurRedBlackDiagTwo solver true unprec resid " << std::sqrt(nr / ns) << " nr " << nr << " ns " << ns << std::endl;
} else {
std::cout << GridLogMessage << "Guess subtracted after solve." << std::endl;
}
}
};
}
#endif

View File

@ -392,10 +392,14 @@ namespace Grid {
void SeedUniqueString(const std::string &s){
std::vector<int> seeds;
std::stringstream sha;
seeds = GridChecksum::sha256_seeds(s);
for(int i=0;i<seeds.size();i++) {
sha << std::hex << seeds[i];
}
std::cout << GridLogMessage << "Intialising parallel RNG with unique string '"
<< s << "'" << std::endl;
std::cout << GridLogMessage << "Seed SHA256: " << GridChecksum::sha256_string(seeds) << std::endl;
std::cout << GridLogMessage << "Seed SHA256: " << sha.str() << std::endl;
SeedFixedIntegers(seeds);
}
void SeedFixedIntegers(const std::vector<int> &seeds){

View File

@ -146,11 +146,9 @@ public:
if ( log.timestamp ) {
log.StopWatch->Stop();
GridTime now = log.StopWatch->Elapsed();
if ( log.timing_mode==1 ) log.StopWatch->Reset();
log.StopWatch->Start();
stream << log.evidence()
<< now << log.background() << " : " ;
stream << log.evidence()<< std::setw(6)<<now << log.background() << " : " ;
}
stream << log.colour();
return stream;

View File

@ -233,8 +233,7 @@ class GridLimeReader : public BinaryIO {
// std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
BinarySimpleMunger<sobj,sobj> munge;
BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
/////////////////////////////////////////////
// Insist checksum is next record
/////////////////////////////////////////////

View File

@ -49,35 +49,21 @@ inline double usecond(void) {
typedef std::chrono::system_clock GridClock;
typedef std::chrono::time_point<GridClock> GridTimePoint;
typedef std::chrono::seconds GridSecs;
typedef std::chrono::milliseconds GridMillisecs;
typedef std::chrono::microseconds GridUsecs;
typedef std::chrono::microseconds GridTime;
typedef std::chrono::microseconds GridUsecs;
inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
inline std::ostream& operator<< (std::ostream & stream, const std::chrono::milliseconds & time)
{
stream << time.count()<<" s";
stream << time.count()<<" ms";
return stream;
}
inline std::ostream& operator<< (std::ostream & stream, const GridMillisecs & now)
inline std::ostream& operator<< (std::ostream & stream, const std::chrono::microseconds & time)
{
GridSecs second(1);
auto secs = now/second ;
auto subseconds = now%second ;
stream << secs<<"."<<std::setw(3)<<std::setfill('0')<<subseconds.count()<<" s";
stream << time.count()<<" usec";
return stream;
}
inline std::ostream& operator<< (std::ostream & stream, const GridUsecs & now)
{
GridSecs second(1);
auto seconds = now/second ;
auto subseconds = now%second ;
stream << seconds<<"."<<std::setw(6)<<std::setfill('0')<<subseconds.count()<<" s";
return stream;
}
class GridStopWatch {
private:
bool running;

View File

@ -90,20 +90,17 @@ namespace QCD {
// That probably makes for GridRedBlack4dCartesian grid.
// s,sp,c,spc,lc
template<typename vtype> using iSinglet = iScalar<iScalar<iScalar<vtype> > >;
template<typename vtype> using iSpinMatrix = iScalar<iMatrix<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourMatrix = iScalar<iScalar<iMatrix<vtype, Nc> > > ;
template<typename vtype> using iSpinColourMatrix = iScalar<iMatrix<iMatrix<vtype, Nc>, Ns> >;
template<typename vtype> using iLorentzColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nd > ;
template<typename vtype> using iDoubleStoredColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nds > ;
template<typename vtype> using iSpinVector = iScalar<iVector<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourVector = iScalar<iScalar<iVector<vtype, Nc> > >;
template<typename vtype> using iSpinColourVector = iScalar<iVector<iVector<vtype, Nc>, Ns> >;
template<typename vtype> using iHalfSpinVector = iScalar<iVector<iScalar<vtype>, Nhs> >;
template<typename vtype> using iHalfSpinColourVector = iScalar<iVector<iVector<vtype, Nc>, Nhs> >;
template<typename vtype> using iSpinColourSpinColourMatrix = iScalar<iMatrix<iMatrix<iMatrix<iMatrix<vtype, Nc>, Ns>, Nc>, Ns> >;
template<typename vtype> using iSinglet = iScalar<iScalar<iScalar<vtype> > >;
template<typename vtype> using iSpinMatrix = iScalar<iMatrix<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourMatrix = iScalar<iScalar<iMatrix<vtype, Nc> > > ;
template<typename vtype> using iSpinColourMatrix = iScalar<iMatrix<iMatrix<vtype, Nc>, Ns> >;
template<typename vtype> using iLorentzColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nd > ;
template<typename vtype> using iDoubleStoredColourMatrix = iVector<iScalar<iMatrix<vtype, Nc> >, Nds > ;
template<typename vtype> using iSpinVector = iScalar<iVector<iScalar<vtype>, Ns> >;
template<typename vtype> using iColourVector = iScalar<iScalar<iVector<vtype, Nc> > >;
template<typename vtype> using iSpinColourVector = iScalar<iVector<iVector<vtype, Nc>, Ns> >;
template<typename vtype> using iHalfSpinVector = iScalar<iVector<iScalar<vtype>, Nhs> >;
template<typename vtype> using iHalfSpinColourVector = iScalar<iVector<iVector<vtype, Nc>, Nhs> >;
template<typename vtype> using iGparitySpinColourVector = iVector<iVector<iVector<vtype, Nc>, Ns>, Ngp >;
template<typename vtype> using iGparityHalfSpinColourVector = iVector<iVector<iVector<vtype, Nc>, Nhs>, Ngp >;
@ -130,28 +127,10 @@ namespace QCD {
typedef iSpinColourMatrix<Complex > SpinColourMatrix;
typedef iSpinColourMatrix<ComplexF > SpinColourMatrixF;
typedef iSpinColourMatrix<ComplexD > SpinColourMatrixD;
typedef iSpinColourMatrix<vComplex > vSpinColourMatrix;
typedef iSpinColourMatrix<vComplexF> vSpinColourMatrixF;
typedef iSpinColourMatrix<vComplexD> vSpinColourMatrixD;
// SpinColourSpinColour matrix
typedef iSpinColourSpinColourMatrix<Complex > SpinColourSpinColourMatrix;
typedef iSpinColourSpinColourMatrix<ComplexF > SpinColourSpinColourMatrixF;
typedef iSpinColourSpinColourMatrix<ComplexD > SpinColourSpinColourMatrixD;
typedef iSpinColourSpinColourMatrix<vComplex > vSpinColourSpinColourMatrix;
typedef iSpinColourSpinColourMatrix<vComplexF> vSpinColourSpinColourMatrixF;
typedef iSpinColourSpinColourMatrix<vComplexD> vSpinColourSpinColourMatrixD;
// LorentzColour
typedef iLorentzColourMatrix<Complex > LorentzColourMatrix;
@ -250,9 +229,6 @@ namespace QCD {
typedef Lattice<vSpinColourMatrixF> LatticeSpinColourMatrixF;
typedef Lattice<vSpinColourMatrixD> LatticeSpinColourMatrixD;
typedef Lattice<vSpinColourSpinColourMatrix> LatticeSpinColourSpinColourMatrix;
typedef Lattice<vSpinColourSpinColourMatrixF> LatticeSpinColourSpinColourMatrixF;
typedef Lattice<vSpinColourSpinColourMatrixD> LatticeSpinColourSpinColourMatrixD;
typedef Lattice<vLorentzColourMatrix> LatticeLorentzColourMatrix;
typedef Lattice<vLorentzColourMatrixF> LatticeLorentzColourMatrixF;

View File

@ -485,13 +485,9 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,std::vector<Co
double bpc = b+c;
double bmc = b-c;
_b = b;
_c = c;
_gamma = gamma; // Save the parameters so we can change mass later.
_zolo_hi= zolo_hi;
for(int i=0; i < Ls; i++){
as[i] = 1.0;
omega[i] = _gamma[i]*_zolo_hi; //NB reciprocal relative to Chroma NEF code
omega[i] = gamma[i]*zolo_hi; //NB reciprocal relative to Chroma NEF code
assert(omega[i]!=Coeff_t(0.0));
bs[i] = 0.5*(bpc/omega[i] + bmc);
cs[i] = 0.5*(bpc/omega[i] - bmc);

View File

@ -97,10 +97,7 @@ namespace Grid {
// Support for MADWF tricks
///////////////////////////////////////////////////////////////
RealD Mass(void) { return mass; };
void SetMass(RealD _mass) {
mass=_mass;
SetCoefficientsInternal(_zolo_hi,_gamma,_b,_c); // Reset coeffs
} ;
void SetMass(RealD _mass) { mass=_mass; } ;
void P(const FermionField &psi, FermionField &chi);
void Pdag(const FermionField &psi, FermionField &chi);
@ -150,12 +147,6 @@ namespace Grid {
// protected:
RealD mass;
// Save arguments to SetCoefficientsInternal
std::vector<Coeff_t> _gamma;
RealD _zolo_hi;
RealD _b;
RealD _c;
// Cayley form Moebius (tanh and zolotarev)
std::vector<Coeff_t> omega;
std::vector<Coeff_t> bs; // S dependent coeffs

View File

@ -80,24 +80,12 @@ Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
///////////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/g5HermitianLinop.h>
///////////////////////////////////////////////////////////////////////////////
// Fourier accelerated Pauli Villars inverse support
///////////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/WilsonTMFermion5D.h>
////////////////////////////////////////////////////////////////////////////////
// Move this group to a DWF specific tools/algorithms subdir?
////////////////////////////////////////////////////////////////////////////////
#include <Grid/qcd/action/fermion/FourierAcceleratedPV.h>
#include <Grid/qcd/action/fermion/PauliVillarsInverters.h>
#include <Grid/qcd/action/fermion/Reconstruct5Dprop.h>
#include <Grid/qcd/action/fermion/MADWF.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
// More maintainable to maintain the following typedef list centrally, as more "impl" targets
// are added, (e.g. extension for gparity, half precision project in comms etc..)
////////////////////////////////////////////////////////////////////////////////////////////////////
// Cayley 5d
namespace Grid {
namespace QCD {

View File

@ -64,6 +64,12 @@ namespace Grid {
virtual RealD M (const FermionField &in, FermionField &out)=0;
virtual RealD Mdag (const FermionField &in, FermionField &out)=0;
// Query the even even properties to make algorithmic decisions
virtual int ConstEE(void) { return 1; }; // clover returns zero as EE depends on gauge field
virtual int isTrivialEE(void) { return 0; };
virtual RealD Mass(void) {return 0.0;};
virtual void SetMass(RealD _mass) { return; };
// half checkerboard operations
virtual void Meooe (const FermionField &in, FermionField &out)=0;
virtual void MeooeDag (const FermionField &in, FermionField &out)=0;

View File

@ -141,7 +141,6 @@ namespace QCD {
////////////////////////////////////////////////////////////////////////
#define INHERIT_FIMPL_TYPES(Impl)\
typedef Impl Impl_t; \
typedef typename Impl::FermionField FermionField; \
typedef typename Impl::PropagatorField PropagatorField; \
typedef typename Impl::DoubledGaugeField DoubledGaugeField; \

View File

@ -1,237 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/fermion/FourierAcceleratedPV.h
Copyright (C) 2015
Author: Christoph Lehner (lifted with permission by Peter Boyle, brought back to Grid)
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
namespace Grid {
namespace QCD {
template<typename M>
void get_real_const_bc(M& m, RealD& _b, RealD& _c) {
ComplexD b,c;
b=m.bs[0];
c=m.cs[0];
std::cout << GridLogMessage << "b=" << b << ", c=" << c << std::endl;
for (size_t i=1;i<m.bs.size();i++) {
assert(m.bs[i] == b);
assert(m.cs[i] == c);
}
assert(b.imag() == 0.0);
assert(c.imag() == 0.0);
_b = b.real();
_c = c.real();
}
template<typename Vi, typename M, typename G>
class FourierAcceleratedPV {
public:
ConjugateGradient<Vi> &cg;
M& dwfPV;
G& Umu;
GridCartesian* grid5D;
GridRedBlackCartesian* gridRB5D;
int group_in_s;
FourierAcceleratedPV(M& _dwfPV, G& _Umu, ConjugateGradient<Vi> &_cg, int _group_in_s = 2)
: dwfPV(_dwfPV), Umu(_Umu), cg(_cg), group_in_s(_group_in_s)
{
assert( dwfPV.FermionGrid()->_fdimensions[0] % (2*group_in_s) == 0);
grid5D = QCD::SpaceTimeGrid::makeFiveDimGrid(2*group_in_s, (GridCartesian*)Umu._grid);
gridRB5D = QCD::SpaceTimeGrid::makeFiveDimRedBlackGrid(2*group_in_s, (GridCartesian*)Umu._grid);
}
void rotatePV(const Vi& _src, Vi& dst, bool forward) const {
GridStopWatch gsw1, gsw2;
typedef typename Vi::scalar_type Coeff_t;
int Ls = dst._grid->_fdimensions[0];
Vi _tmp(dst._grid);
double phase = M_PI / (double)Ls;
Coeff_t bzero(0.0,0.0);
FFT theFFT((GridCartesian*)dst._grid);
if (!forward) {
gsw1.Start();
for (int s=0;s<Ls;s++) {
Coeff_t a(::cos(phase*s),-::sin(phase*s));
axpby_ssp(_tmp,a,_src,bzero,_src,s,s);
}
gsw1.Stop();
gsw2.Start();
theFFT.FFT_dim(dst,_tmp,0,FFT::forward);
gsw2.Stop();
} else {
gsw2.Start();
theFFT.FFT_dim(_tmp,_src,0,FFT::backward);
gsw2.Stop();
gsw1.Start();
for (int s=0;s<Ls;s++) {
Coeff_t a(::cos(phase*s),::sin(phase*s));
axpby_ssp(dst,a,_tmp,bzero,_tmp,s,s);
}
gsw1.Stop();
}
std::cout << GridLogMessage << "Timing rotatePV: " << gsw1.Elapsed() << ", " << gsw2.Elapsed() << std::endl;
}
void pvInv(const Vi& _src, Vi& _dst) const {
std::cout << GridLogMessage << "Fourier-Accelerated Outer Pauli Villars"<<std::endl;
typedef typename Vi::scalar_type Coeff_t;
int Ls = _dst._grid->_fdimensions[0];
GridStopWatch gswT;
gswT.Start();
RealD b,c;
get_real_const_bc(dwfPV,b,c);
RealD M5 = dwfPV.M5;
// U(true) Rightinv TMinv U(false) = Minv
Vi _src_diag(_dst._grid);
Vi _src_diag_slice(dwfPV.GaugeGrid());
Vi _dst_diag_slice(dwfPV.GaugeGrid());
Vi _src_diag_slices(grid5D);
Vi _dst_diag_slices(grid5D);
Vi _dst_diag(_dst._grid);
rotatePV(_src,_src_diag,false);
// now do TM solves
Gamma G5(Gamma::Algebra::Gamma5);
GridStopWatch gswA, gswB;
gswA.Start();
typedef typename M::Impl_t Impl;
//WilsonTMFermion<Impl> tm(x.Umu,*x.UGridF,*x.UrbGridF,0.0,0.0,solver_outer.parent.par.wparams_f);
std::vector<RealD> vmass(grid5D->_fdimensions[0],0.0);
std::vector<RealD> vmu(grid5D->_fdimensions[0],0.0);
WilsonTMFermion5D<Impl> tm(Umu,*grid5D,*gridRB5D,
*(GridCartesian*)dwfPV.GaugeGrid(),
*(GridRedBlackCartesian*)dwfPV.GaugeRedBlackGrid(),
vmass,vmu);
//SchurRedBlackDiagTwoSolve<Vi> sol(cg);
SchurRedBlackDiagMooeeSolve<Vi> sol(cg); // same performance as DiagTwo
gswA.Stop();
gswB.Start();
for (int sgroup=0;sgroup<Ls/2/group_in_s;sgroup++) {
for (int sidx=0;sidx<group_in_s;sidx++) {
int s = sgroup*group_in_s + sidx;
int sprime = Ls-s-1;
RealD phase = M_PI / (RealD)Ls * (2.0 * s + 1.0);
RealD cosp = ::cos(phase);
RealD sinp = ::sin(phase);
RealD denom = b*b + c*c + 2.0*b*c*cosp;
RealD mass = -(b*b*M5 + c*(1.0 - cosp + c*M5) + b*(-1.0 + cosp + 2.0*c*cosp*M5))/denom;
RealD mu = (b+c)*sinp/denom;
vmass[2*sidx + 0] = mass;
vmass[2*sidx + 1] = mass;
vmu[2*sidx + 0] = mu;
vmu[2*sidx + 1] = -mu;
}
tm.update(vmass,vmu);
for (int sidx=0;sidx<group_in_s;sidx++) {
int s = sgroup*group_in_s + sidx;
int sprime = Ls-s-1;
ExtractSlice(_src_diag_slice,_src_diag,s,0);
InsertSlice(_src_diag_slice,_src_diag_slices,2*sidx + 0,0);
ExtractSlice(_src_diag_slice,_src_diag,sprime,0);
InsertSlice(_src_diag_slice,_src_diag_slices,2*sidx + 1,0);
}
GridStopWatch gsw;
gsw.Start();
_dst_diag_slices = zero; // zero guess
sol(tm,_src_diag_slices,_dst_diag_slices);
gsw.Stop();
std::cout << GridLogMessage << "Solve[sgroup=" << sgroup << "] completed in " << gsw.Elapsed() << ", " << gswA.Elapsed() << std::endl;
for (int sidx=0;sidx<group_in_s;sidx++) {
int s = sgroup*group_in_s + sidx;
int sprime = Ls-s-1;
RealD phase = M_PI / (RealD)Ls * (2.0 * s + 1.0);
RealD cosp = ::cos(phase);
RealD sinp = ::sin(phase);
// now rotate with inverse of
Coeff_t pA = b + c*cosp;
Coeff_t pB = - Coeff_t(0.0,1.0)*c*sinp;
Coeff_t pABden = pA*pA - pB*pB;
// (pA + pB * G5) * (pA - pB*G5) = (pA^2 - pB^2)
ExtractSlice(_dst_diag_slice,_dst_diag_slices,2*sidx + 0,0);
_dst_diag_slice = (pA/pABden) * _dst_diag_slice - (pB/pABden) * (G5 * _dst_diag_slice);
InsertSlice(_dst_diag_slice,_dst_diag,s,0);
ExtractSlice(_dst_diag_slice,_dst_diag_slices,2*sidx + 1,0);
_dst_diag_slice = (pA/pABden) * _dst_diag_slice + (pB/pABden) * (G5 * _dst_diag_slice);
InsertSlice(_dst_diag_slice,_dst_diag,sprime,0);
}
}
gswB.Stop();
rotatePV(_dst_diag,_dst,true);
gswT.Stop();
std::cout << GridLogMessage << "PV completed in " << gswT.Elapsed() << " (Setup: " << gswA.Elapsed() << ", s-loop: " << gswB.Elapsed() << ")" << std::endl;
}
};
}}
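As a cross-check of the per-slice twisted-mass parameters used in pvInv above, here is a minimal standalone sketch that tabulates (mass, mu) for each s. The Moebius values b, c, M5 and the extent Ls below are illustrative assumptions, not values taken from this code.
// Standalone sketch: per-slice twisted-mass parameters of the Fourier
// diagonalised Pauli-Villars operator (same formulas as pvInv above).
#include <cmath>
#include <cstdio>
int main(void) {
  const double b = 1.5, c = 0.5, M5 = 1.8; // hypothetical Moebius parameters
  const int Ls = 12;                       // hypothetical fifth-dim extent
  for (int s = 0; s < Ls / 2; s++) {
    double phase = M_PI / (double)Ls * (2.0 * s + 1.0);
    double cosp  = std::cos(phase);
    double sinp  = std::sin(phase);
    double denom = b*b + c*c + 2.0*b*c*cosp;
    double mass  = -(b*b*M5 + c*(1.0 - cosp + c*M5)
                     + b*(-1.0 + cosp + 2.0*c*cosp*M5))/denom;
    double mu    = (b+c)*sinp/denom; // slice s gets +mu, slice Ls-s-1 gets -mu
    std::printf("s=%2d  mass=% .6f  mu=% .6f\n", s, mass, mu);
  }
  return 0;
}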

View File

@ -1,193 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/MADWF.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
namespace Grid {
namespace QCD {
template <class Fieldi, class Fieldo,IfNotSame<Fieldi,Fieldo> X=0>
inline void convert(const Fieldi &from,Fieldo &to)
{
precisionChange(to,from);
}
template <class Fieldi, class Fieldo,IfSame<Fieldi,Fieldo> X=0>
inline void convert(const Fieldi &from,Fieldo &to)
{
to=from;
}
template<class Matrixo,class Matrixi,class PVinverter,class SchurSolver, class Guesser>
class MADWF
{
private:
typedef typename Matrixo::FermionField FermionFieldo;
typedef typename Matrixi::FermionField FermionFieldi;
PVinverter & PauliVillarsSolvero;// For the outer field
SchurSolver & SchurSolveri; // For the inner approx field
Guesser & Guesseri; // To deflate the inner approx solves
Matrixo & Mato; // Action object for outer
Matrixi & Mati; // Action object for inner
RealD target_resid;
int maxiter;
public:
MADWF(Matrixo &_Mato,
Matrixi &_Mati,
PVinverter &_PauliVillarsSolvero,
SchurSolver &_SchurSolveri,
Guesser & _Guesseri,
RealD resid,
int _maxiter) :
Mato(_Mato),Mati(_Mati),
SchurSolveri(_SchurSolveri),
PauliVillarsSolvero(_PauliVillarsSolvero),Guesseri(_Guesseri)
{
target_resid=resid;
maxiter =_maxiter;
};
void operator() (const FermionFieldo &src4,FermionFieldo &sol5)
{
std::cout << GridLogMessage<< " ************************************************" << std::endl;
std::cout << GridLogMessage<< " MADWF-like algorithm " << std::endl;
std::cout << GridLogMessage<< " ************************************************" << std::endl;
FermionFieldi c0i(Mati.GaugeGrid()); // 4d
FermionFieldi y0i(Mati.GaugeGrid()); // 4d
FermionFieldo c0 (Mato.GaugeGrid()); // 4d
FermionFieldo y0 (Mato.GaugeGrid()); // 4d
FermionFieldo A(Mato.FermionGrid()); // Temporary outer
FermionFieldo B(Mato.FermionGrid()); // Temporary outer
FermionFieldo b(Mato.FermionGrid()); // 5d source
FermionFieldo c(Mato.FermionGrid()); // PVinv source; reused so store
FermionFieldo defect(Mato.FermionGrid()); // 5d source
FermionFieldi ci(Mati.FermionGrid());
FermionFieldi yi(Mati.FermionGrid());
FermionFieldi xi(Mati.FermionGrid());
FermionFieldi srci(Mati.FermionGrid());
FermionFieldi Ai(Mati.FermionGrid());
RealD m=Mati.Mass();
///////////////////////////////////////
//Import source, include Dminus factors
///////////////////////////////////////
Mato.ImportPhysicalFermionSource(src4,b);
std::cout << GridLogMessage << " src4 " <<norm2(src4)<<std::endl;
std::cout << GridLogMessage << " b " <<norm2(b)<<std::endl;
defect = b;
sol5=zero;
for (int i=0;i<maxiter;i++) {
///////////////////////////////////////
// Set up c0 from current defect
///////////////////////////////////////
PauliVillarsSolvero(Mato,defect,A);
Mato.Pdag(A,c);
ExtractSlice(c0, c, 0 , 0);
////////////////////////////////////////////////
// Solve the inner system with surface term c0
////////////////////////////////////////////////
ci = zero;
convert(c0,c0i); // Possible precision change
InsertSlice(c0i,ci,0, 0);
// Dwm P y = Dwm x = D(1) P (c0,0,0,0)^T
Mati.P(ci,Ai);
Mati.SetMass(1.0); Mati.M(Ai,srci); Mati.SetMass(m);
SchurSolveri(Mati,srci,xi,Guesseri);
Mati.Pdag(xi,yi);
ExtractSlice(y0i, yi, 0 , 0);
convert(y0i,y0); // Possible precision change
//////////////////////////////////////
// Propagate solution back to outer system
// Build Pdag PV^-1 Dm P [-sol4,c2,c3... cL]
//////////////////////////////////////
c0 = - y0;
InsertSlice(c0, c, 0 , 0);
/////////////////////////////
// Reconstruct the bulk solution Pdag PV^-1 Dm P
/////////////////////////////
Mato.P(c,B);
Mato.M(B,A);
PauliVillarsSolvero(Mato,A,B);
Mato.Pdag(B,A);
//////////////////////////////
// Reinsert surface prop
//////////////////////////////
InsertSlice(y0,A,0,0);
//////////////////////////////
// Convert from y back to x
//////////////////////////////
Mato.P(A,B);
// sol5' = sol5 + M^-1 defect
// = sol5 + M^-1 src - M^-1 M sol5 ...
sol5 = sol5 + B;
std::cout << GridLogMessage << "***************************************" <<std::endl;
std::cout << GridLogMessage << " Sol5 update "<<std::endl;
std::cout << GridLogMessage << "***************************************" <<std::endl;
std::cout << GridLogMessage << " Sol5 now "<<norm2(sol5)<<std::endl;
std::cout << GridLogMessage << " delta "<<norm2(B)<<std::endl;
// New defect = b - M sol5
Mato.M(sol5,A);
defect = b - A;
std::cout << GridLogMessage << " defect "<<norm2(defect)<<std::endl;
double resid = ::sqrt(norm2(defect) / norm2(b));
std::cout << GridLogMessage << "Residual " << i << ": " << resid << std::endl;
std::cout << GridLogMessage << "***************************************" <<std::endl;
if (resid < target_resid) {
return;
}
}
std::cout << GridLogMessage << "MADWF : Exceeded maxiter "<<std::endl;
assert(0);
}
};
}}
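The loop above is a classic defect-correction iteration: writing $D$ for the outer operator and $\widetilde{D}^{-1}$ for the approximate inverse assembled from the inner (cheap) solve plus the Pauli-Villars reconstruction, each pass performs

$$x_{k+1} = x_k + \widetilde{D}^{-1} r_k, \qquad r_{k+1} = b - D\,x_{k+1},$$

which contracts whenever $\|1 - \widetilde{D}^{-1} D\| < 1$, i.e. whenever the inner approximation is good enough; the residual printed each iteration is $\|r_k\|/\|b\|$.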

View File

@ -1,95 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/iterative/SchurRedBlack.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
namespace Grid {
namespace QCD {
template<class Field>
class PauliVillarsSolverUnprec
{
public:
ConjugateGradient<Field> & CG;
PauliVillarsSolverUnprec( ConjugateGradient<Field> &_CG) : CG(_CG){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
MdagMLinearOperator<Matrix,Field> HermOp(_Matrix);
_Matrix.SetMass(1.0);
_Matrix.Mdag(src,A);
CG(HermOp,A,sol);
_Matrix.SetMass(m);
};
};
template<class Field,class SchurSolverType>
class PauliVillarsSolverRBprec
{
public:
SchurSolverType & SchurSolver;
PauliVillarsSolverRBprec( SchurSolverType &_SchurSolver) : SchurSolver(_SchurSolver){};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
RealD m = _Matrix.Mass();
Field A (_Matrix.FermionGrid());
_Matrix.SetMass(1.0);
SchurSolver(_Matrix,src,sol);
_Matrix.SetMass(m);
};
};
template<class Field,class GaugeField>
class PauliVillarsSolverFourierAccel
{
public:
GaugeField & Umu;
ConjugateGradient<Field> & CG;
PauliVillarsSolverFourierAccel(GaugeField &_Umu,ConjugateGradient<Field> &_CG) : Umu(_Umu), CG(_CG)
{
};
template<class Matrix>
void operator() (Matrix &_Matrix,const Field &src,Field &sol)
{
FourierAcceleratedPV<Field, Matrix, typename Matrix::GaugeField > faPV(_Matrix,Umu,CG) ;
faPV.pvInv(src,sol);
};
};
}
}
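A minimal usage sketch (Ddwf, src5 and sol5 are assumed to exist and are hypothetical names): all of these policies expose the same operator()(Matrix&, src, sol), so the choice of PV inversion composes with calling code without case enumeration.
ConjugateGradient<LatticeFermion> CG(1.0e-12, 10000);
// Unpreconditioned PV inverse
PauliVillarsSolverUnprec<LatticeFermion> PVunprec(CG);
PVunprec(Ddwf, src5, sol5); // CG with the mass temporarily set to 1.0
// Red-black (Schur) preconditioned PV inverse
typedef SchurRedBlackDiagMooeeSolve<LatticeFermion> SchurSolverT;
SchurSolverT Schur(CG);
PauliVillarsSolverRBprec<LatticeFermion, SchurSolverT> PVrb(Schur);
PVrb(Ddwf, src5, sol5);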

View File

@ -1,155 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/action/fermion/WilsonTMFermion5D.h
Copyright (C) 2015
Author: paboyle <paboyle@ph.ed.ac.uk> ; NB Christoph did similar in GPT
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/qcd/action/fermion/FermionCore.h>
#include <Grid/qcd/action/fermion/WilsonFermion.h>
namespace Grid {
namespace QCD {
template<class Impl>
class WilsonTMFermion5D : public WilsonFermion5D<Impl>
{
public:
INHERIT_IMPL_TYPES(Impl);
public:
virtual void Instantiatable(void) {};
// Constructors
WilsonTMFermion5D(GaugeField &_Umu,
GridCartesian &Fgrid,
GridRedBlackCartesian &Frbgrid,
GridCartesian &Ugrid,
GridRedBlackCartesian &Urbgrid,
const std::vector<RealD> _mass,
const std::vector<RealD> _mu,
const ImplParams &p= ImplParams()
) :
WilsonFermion5D<Impl>(_Umu,
Fgrid,
Frbgrid,
Ugrid,
Urbgrid,
4.0,p)
{
update(_mass,_mu);
}
virtual void Meooe(const FermionField &in, FermionField &out) {
if (in.checkerboard == Odd) {
this->DhopEO(in, out, DaggerNo);
} else {
this->DhopOE(in, out, DaggerNo);
}
}
virtual void MeooeDag(const FermionField &in, FermionField &out) {
if (in.checkerboard == Odd) {
this->DhopEO(in, out, DaggerYes);
} else {
this->DhopOE(in, out, DaggerYes);
}
}
// allow override for twisted mass and clover
virtual void Mooee(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
//axpibg5x(out,in,a,b); // out = a*in + b*i*G5*in
for (int s=0;s<(int)this->mass.size();s++) {
ComplexD a = 4.0+this->mass[s];
ComplexD b(0.0,this->mu[s]);
axpbg5y_ssp(out,a,in,b,in,s,s);
}
}
virtual void MooeeDag(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
for (int s=0;s<(int)this->mass.size();s++) {
ComplexD a = 4.0+this->mass[s];
ComplexD b(0.0,-this->mu[s]);
axpbg5y_ssp(out,a,in,b,in,s,s);
}
}
virtual void MooeeInv(const FermionField &in, FermionField &out) {
for (int s=0;s<(int)this->mass.size();s++) {
RealD m = this->mass[s];
RealD tm = this->mu[s];
RealD mtil = 4.0+this->mass[s];
RealD sq = mtil*mtil+tm*tm;
ComplexD a = mtil/sq;
ComplexD b(0.0, -tm /sq);
axpbg5y_ssp(out,a,in,b,in,s,s);
}
}
virtual void MooeeInvDag(const FermionField &in, FermionField &out) {
for (int s=0;s<(int)this->mass.size();s++) {
RealD m = this->mass[s];
RealD tm = this->mu[s];
RealD mtil = 4.0+this->mass[s];
RealD sq = mtil*mtil+tm*tm;
ComplexD a = mtil/sq;
ComplexD b(0.0,tm /sq);
axpbg5y_ssp(out,a,in,b,in,s,s);
}
}
virtual RealD M(const FermionField &in, FermionField &out) {
out.checkerboard = in.checkerboard;
this->Dhop(in, out, DaggerNo);
FermionField tmp(out._grid);
for (int s=0;s<(int)this->mass.size();s++) {
ComplexD a = 4.0+this->mass[s];
ComplexD b(0.0,this->mu[s]);
axpbg5y_ssp(tmp,a,in,b,in,s,s);
}
return axpy_norm(out, 1.0, tmp, out);
}
// needed for fast PV
void update(const std::vector<RealD>& _mass, const std::vector<RealD>& _mu) {
assert(_mass.size() == _mu.size());
assert(_mass.size() == this->FermionGrid()->_fdimensions[0]);
this->mass = _mass;
this->mu = _mu;
}
private:
std::vector<RealD> mu;
std::vector<RealD> mass;
};
typedef WilsonTMFermion5D<WilsonImplF> WilsonTMFermion5DF;
typedef WilsonTMFermion5D<WilsonImplD> WilsonTMFermion5DD;
}}
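Mooee and MooeeInv above are consistent by a one-line identity: per slice the even-even term is $(4+m_s) + i\mu_s\gamma_5$, and since $\gamma_5^2 = 1$,

$$\bigl[(4+m) + i\mu\gamma_5\bigr]^{-1} \;=\; \frac{(4+m) - i\mu\gamma_5}{(4+m)^2 + \mu^2},$$

which is exactly the a = mtil/sq and b = -tm/sq used in MooeeInv, with the sign of the $\gamma_5$ term flipped in the dagger variants.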

View File

@ -27,13 +27,12 @@ public:
typedef iSpinColourMatrix<vector_type> SpinColourMatrix_v;
template <typename TensorType> // output: rank 5 tensor, e.g. Eigen::Tensor<ComplexD, 5>
static void MesonField(TensorType &mat,
static void MesonField(Eigen::Tensor<ComplexD,5> &mat,
const FermionField *lhs_wi,
const FermionField *rhs_vj,
std::vector<Gamma::Algebra> gammas,
const std::vector<ComplexField > &mom,
int orthogdim, double *t_kernel = nullptr, double *t_gsum = nullptr);
int orthogdim);
static void PionFieldWVmom(Eigen::Tensor<ComplexD,4> &mat,
const FermionField *wi,
@ -60,14 +59,6 @@ public:
const FermionField *vj,
int orthogdim);
template <typename TensorType> // output: rank 5 tensor, e.g. Eigen::Tensor<ComplexD, 5>
static void AslashField(TensorType &mat,
const FermionField *lhs_wi,
const FermionField *rhs_vj,
const std::vector<ComplexField> &emB0,
const std::vector<ComplexField> &emB1,
int orthogdim, double *t_kernel = nullptr, double *t_gsum = nullptr);
static void ContractWWVV(std::vector<PropagatorField> &WWVV,
const Eigen::Tensor<ComplexD,3> &WW_sd,
const FermionField *vs,
@ -101,14 +92,13 @@ public:
#endif
};
template <class FImpl>
template <typename TensorType>
void A2Autils<FImpl>::MesonField(TensorType &mat,
template<class FImpl>
void A2Autils<FImpl>::MesonField(Eigen::Tensor<ComplexD,5> &mat,
const FermionField *lhs_wi,
const FermionField *rhs_vj,
std::vector<Gamma::Algebra> gammas,
const std::vector<ComplexField > &mom,
int orthogdim, double *t_kernel, double *t_gsum)
int orthogdim)
{
typedef typename FImpl::SiteSpinor vobj;
@ -156,7 +146,6 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,
int stride=grid->_slice_stride[orthogdim];
// potentially wasting cores here if local time extent too small
if (t_kernel) *t_kernel = -usecond();
parallel_for(int r=0;r<rd;r++){
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
@ -223,7 +212,7 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,
}
}}}
}
if (t_kernel) *t_kernel += usecond();
assert(mat.dimension(0) == Nmom);
assert(mat.dimension(1) == Ngamma);
assert(mat.dimension(2) == Nt);
@ -267,9 +256,9 @@ void A2Autils<FImpl>::MesonField(TensorType &mat,
// Vector size is 7 x 16 x 32 x 16 x 16 x sizeof(complex) = 2MB - 60MB depending on volume
// Healthy size that should suffice
////////////////////////////////////////////////////////////////////
if (t_gsum) *t_gsum = -usecond();
grid->GlobalSumVector(&mat(0,0,0,0,0),Nmom*Ngamma*Nt*Lblock*Rblock);
if (t_gsum) *t_gsum += usecond();
}
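The optional t_kernel / t_gsum instrumentation in this hunk follows Grid's usual interval-timing idiom, a small pattern worth noting (do_work is a hypothetical stand-in):
double t = -usecond(); // usecond(): wall-clock time in microseconds
do_work();             // timed section
t += usecond();        // t now holds the elapsed interval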
@ -625,189 +614,6 @@ void A2Autils<FImpl>::PionFieldVV(Eigen::Tensor<ComplexD,3> &mat,
PionFieldXX(mat,vi,vj,orthogdim,nog5);
}
// "A-slash" field w_i(x)^dag * i * A_mu * gamma_mu * v_j(x)
//
// With:
//
// B_0 = A_0 + i A_1
// B_1 = A_2 + i A_3
//
// then in spin space
//
//                    (  0          0          -conj(B_1)  -B_0 )
//  i * A_mu g_mu  =  (  0          0          -conj(B_0)   B_1 )
//                    (  B_1        B_0         0           0   )
//                    (  conj(B_0) -conj(B_1)   0           0   )
template <class FImpl>
template <typename TensorType>
void A2Autils<FImpl>::AslashField(TensorType &mat,
const FermionField *lhs_wi,
const FermionField *rhs_vj,
const std::vector<ComplexField> &emB0,
const std::vector<ComplexField> &emB1,
int orthogdim, double *t_kernel, double *t_gsum)
{
typedef typename FermionField::vector_object vobj;
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef iSpinMatrix<vector_type> SpinMatrix_v;
typedef iSpinMatrix<scalar_type> SpinMatrix_s;
typedef iSinglet<vector_type> Singlet_v;
typedef iSinglet<scalar_type> Singlet_s;
int Lblock = mat.dimension(3);
int Rblock = mat.dimension(4);
GridBase *grid = lhs_wi[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
int Nem = emB0.size();
assert(emB1.size() == Nem);
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
// will locally sum vectors first
// sum across these down to scalars
// splitting the SIMD
int MFrvol = rd*Lblock*Rblock*Nem;
int MFlvol = ld*Lblock*Rblock*Nem;
Vector<vector_type> lvSum(MFrvol);
parallel_for (int r = 0; r < MFrvol; r++)
{
lvSum[r] = zero;
}
Vector<scalar_type> lsSum(MFlvol);
parallel_for (int r = 0; r < MFlvol; r++)
{
lsSum[r] = scalar_type(0.0);
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
// Nested parallelism would be ok
// Wasting cores here. Test case r
if (t_kernel) *t_kernel = -usecond();
parallel_for(int r=0;r<rd;r++)
{
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++)
for(int b=0;b<e2;b++)
{
int ss= so+n*stride+b;
for(int i=0;i<Lblock;i++)
{
auto left = conjugate(lhs_wi[i]._odata[ss]);
for(int j=0;j<Rblock;j++)
{
SpinMatrix_v vv;
auto right = rhs_vj[j]._odata[ss];
for(int s1=0;s1<Ns;s1++)
for(int s2=0;s2<Ns;s2++)
{
vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
+ left()(s2)(1) * right()(s1)(1)
+ left()(s2)(2) * right()(s1)(2);
}
// After getting the sitewise product do the mom phase loop
int base = Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*r;
for ( int m=0;m<Nem;m++)
{
int idx = m+base;
auto b0 = emB0[m]._odata[ss];
auto b1 = emB1[m]._odata[ss];
auto cb0 = conjugate(b0);
auto cb1 = conjugate(b1);
lvSum[idx] += - vv()(3,0)()*b0()()() - vv()(2,0)()*cb1()()()
+ vv()(3,1)()*b1()()() - vv()(2,1)()*cb0()()()
+ vv()(0,2)()*b1()()() + vv()(1,2)()*b0()()()
+ vv()(0,3)()*cb0()()() - vv()(1,3)()*cb1()()();
}
}
}
}
}
// Sum across simd lanes in the plane, breaking out orthog dir.
parallel_for(int rt=0;rt<rd;rt++)
{
std::vector<int> icoor(Nd);
std::vector<scalar_type> extracted(Nsimd);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nem;m++)
{
int ij_rdx = m+Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*rt;
extract<vector_type,scalar_type>(lvSum[ij_rdx],extracted);
for(int idx=0;idx<Nsimd;idx++)
{
grid->iCoorFromIindex(icoor,idx);
int ldx = rt+icoor[orthogdim]*rd;
int ij_ldx = m+Nem*i+Nem*Lblock*j+Nem*Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
}
}
}
if (t_kernel) *t_kernel += usecond();
// ld loop and local only??
int pd = grid->_processors[orthogdim];
int pc = grid->_processor_coor[orthogdim];
parallel_for_nest2(int lt=0;lt<ld;lt++)
{
for(int pt=0;pt<pd;pt++)
{
int t = lt + pt*ld;
if (pt == pc)
{
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nem;m++)
{
int ij_dx = m+Nem*i + Nem*Lblock * j + Nem*Lblock * Rblock * lt;
mat(m,0,t,i,j) = lsSum[ij_dx];
}
}
else
{
const scalar_type zz(0.0);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nem;m++)
{
mat(m,0,t,i,j) = zz;
}
}
}
}
if (t_gsum) *t_gsum = -usecond();
grid->GlobalSumVector(&mat(0,0,0,0,0),Nem*Nt*Lblock*Rblock);
if (t_gsum) *t_gsum += usecond();
}
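In formulas, and with B_0, B_1 as defined in the comment above AslashField, the kernel computes, per external-field index m and timeslice t,

$$\mathcal{A}^{(m)}_{ij}(t) \;=\; \sum_{\vec x}\, w_i^\dagger(\vec x,t)\; i A^{(m)}_\mu(\vec x,t)\,\gamma_\mu\; v_j(\vec x,t),$$

with the colour indices contracted site by site first, which is why only the rank-2 spin object vv needs to be accumulated against the B_0/B_1 fields.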
////////////////////////////////////////////
// Schematic thoughts about more generalised four quark insertion

View File

@ -38,21 +38,7 @@ public:
{
return ::crc32(0L,(unsigned char *)data,bytes);
}
template <typename T>
static inline std::string sha256_string(const std::vector<T> &hash)
{
std::stringstream sha;
std::string s;
for(unsigned int i = 0; i < hash.size(); i++)
{
sha << std::hex << static_cast<unsigned int>(hash[i]);
}
s = sha.str();
return s;
}
static inline std::vector<unsigned char> sha256(const void *data,size_t bytes)
static inline std::vector<unsigned char> sha256(void *data,size_t bytes)
{
std::vector<unsigned char> hash(SHA256_DIGEST_LENGTH);
SHA256_CTX sha256;

View File

@ -7,7 +7,6 @@ Source file: Hadrons/A2AMatrix.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -30,128 +29,38 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define A2A_Matrix_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/TimerArray.hpp>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
#ifndef HADRONS_A2AM_NAME
#define HADRONS_A2AM_NAME "a2aMatrix"
#endif
#define HADRONS_A2AM_PARALLEL_IO
BEGIN_HADRONS_NAMESPACE
// general A2A matrix set based on Eigen tensors and Grid-allocated memory
// Dimensions:
// 0 - ext - external field (momentum, EM field, ...)
// 1 - str - spin-color structure
// 2 - t - timeslice
// 3 - i - left A2A mode index
// 4 - j - right A2A mode index
template <typename T>
using A2AMatrixSet = Eigen::TensorMap<Eigen::Tensor<T, 5, Eigen::RowMajor>>;
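A minimal sketch of the intended use (all sizes below are illustrative): map a flat, Grid-allocated buffer as a rank-5 tensor without copying, indices ordered as documented above.
unsigned int next = 2, nstr = 16, nt = 64, ni = 8, nj = 8; // hypothetical
Vector<ComplexD> buf(next*nstr*nt*ni*nj);
A2AMatrixSet<ComplexD> m(buf.data(), next, nstr, nt, ni, nj);
m(0, 0, 0, 0, 0) = ComplexD(1., 0.); // (ext, str, t, i, j)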
/******************************************************************************
* Abstract class for A2A kernels *
******************************************************************************/
template <typename T, typename Field>
class A2AKernel
{
public:
A2AKernel(void) = default;
virtual ~A2AKernel(void) = default;
virtual void operator()(A2AMatrixSet<T> &m, const Field *left, const Field *right,
const unsigned int orthogDim, double &time) = 0;
virtual double flops(const unsigned int blockSizei, const unsigned int blockSizej) = 0;
virtual double bytes(const unsigned int blockSizei, const unsigned int blockSizej) = 0;
};
/******************************************************************************
* Class to handle A2A matrix block HDF5 I/O *
******************************************************************************/
template <typename T>
template <typename T, typename MetadataType>
class A2AMatrixIo
{
public:
// constructors
A2AMatrixIo(void) = default;
A2AMatrixIo(std::string filename, std::string dataname,
const unsigned int nt, const unsigned int ni,
const unsigned int nj);
// destructor
~A2AMatrixIo(void) = default;
// file allocation
template <typename MetadataType>
void initFile(const MetadataType &d, const unsigned int chunkSize);
// block I/O
void saveBlock(const T *data, const unsigned int i, const unsigned int j,
const unsigned int blockSizei, const unsigned int blockSizej);
void saveBlock(const A2AMatrixSet<T> &m, const unsigned int ext, const unsigned int str,
const unsigned int i, const unsigned int j);
private:
std::string filename_, dataname_;
unsigned int nt_, ni_, nj_;
};
/******************************************************************************
* Wrapper for A2A matrix block computation *
******************************************************************************/
template <typename T, typename Field, typename MetadataType, typename TIo = T>
class A2AMatrixBlockComputation
{
private:
struct IoHelper
{
A2AMatrixIo<TIo> io;
MetadataType md;
unsigned int e, s, i, j;
};
typedef std::function<std::string(const unsigned int, const unsigned int)> FilenameFn;
typedef std::function<MetadataType(const unsigned int, const unsigned int)> MetadataFn;
public:
// constructor
A2AMatrixBlockComputation(GridBase *grid,
const unsigned int orthogDim,
const unsigned int next,
const unsigned int nstr,
const unsigned int blockSize,
const unsigned int cacheBlockSize,
TimerArray *tArray = nullptr);
// execution
void execute(const std::vector<Field> &left,
const std::vector<Field> &right,
A2AKernel<T, Field> &kernel,
const FilenameFn &ionameFn,
const FilenameFn &filenameFn,
const MetadataFn &metadataFn);
private:
// I/O handler
void saveBlock(const A2AMatrixSet<TIo> &m, IoHelper &h);
private:
TimerArray *tArray_;
GridBase *grid_;
unsigned int orthogDim_, nt_, next_, nstr_, blockSize_, cacheBlockSize_;
Vector<T> mCache_;
Vector<TIo> mBuf_;
std::vector<IoHelper> nodeIo_;
};
/******************************************************************************
* A2AMatrixIo template implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename T>
A2AMatrixIo<T>::A2AMatrixIo(std::string filename, std::string dataname,
const unsigned int nt, const unsigned int ni,
const unsigned int nj)
template <typename T, typename MetadataType>
A2AMatrixIo<T, MetadataType>::A2AMatrixIo(std::string filename,
std::string dataname,
const unsigned int nt,
const unsigned int ni,
const unsigned int nj)
: filename_(filename), dataname_(dataname)
, nt_(nt), ni_(ni), nj_(nj)
{}
// file allocation /////////////////////////////////////////////////////////////
template <typename T>
template <typename MetadataType>
void A2AMatrixIo<T>::initFile(const MetadataType &d, const unsigned int chunkSize)
template <typename T, typename MetadataType>
void A2AMatrixIo<T, MetadataType>::initFile(const MetadataType &d, const unsigned int chunkSize)
{
#ifdef HAVE_HDF5
std::vector<hsize_t> dim = {static_cast<hsize_t>(nt_),
@ -176,19 +85,18 @@ void A2AMatrixIo<T>::initFile(const MetadataType &d, const unsigned int chunkSiz
push(reader, dataname_);
auto &group = reader.getGroup();
plist.setChunk(chunk.size(), chunk.data());
dataset = group.createDataSet(HADRONS_A2AM_NAME, Hdf5Type<T>::type(), dataspace, plist);
dataset = group.createDataSet("data", Hdf5Type<T>::type(), dataspace, plist);
#else
HADRONS_ERROR(Implementation, "all-to-all matrix I/O needs HDF5 library");
#endif
}
// block I/O ///////////////////////////////////////////////////////////////////
template <typename T>
void A2AMatrixIo<T>::saveBlock(const T *data,
const unsigned int i,
const unsigned int j,
const unsigned int blockSizei,
const unsigned int blockSizej)
template <typename T, typename MetadataType>
void A2AMatrixIo<T, MetadataType>::saveBlock(const T *data,
const unsigned int i,
const unsigned int j,
const unsigned int blockSizei,
const unsigned int blockSizej)
{
#ifdef HAVE_HDF5
Hdf5Reader reader(filename_);
@ -203,7 +111,7 @@ void A2AMatrixIo<T>::saveBlock(const T *data,
push(reader, dataname_);
auto &group = reader.getGroup();
dataset = group.openDataSet(HADRONS_A2AM_NAME);
dataset = group.openDataSet("data");
dataspace = dataset.getSpace();
dataspace.selectHyperslab(H5S_SELECT_SET, count.data(), offset.data(),
stride.data(), block.data());
@ -213,193 +121,6 @@ void A2AMatrixIo<T>::saveBlock(const T *data,
#endif
}
template <typename T>
void A2AMatrixIo<T>::saveBlock(const A2AMatrixSet<T> &m,
const unsigned int ext, const unsigned int str,
const unsigned int i, const unsigned int j)
{
unsigned int blockSizei = m.dimension(3);
unsigned int blockSizej = m.dimension(4);
unsigned int nstr = m.dimension(1);
size_t offset = (ext*nstr + str)*nt_*blockSizei*blockSizej;
saveBlock(m.data() + offset, i, j, blockSizei, blockSizej);
}
/******************************************************************************
* A2AMatrixBlockComputation template implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename T, typename Field, typename MetadataType, typename TIo>
A2AMatrixBlockComputation<T, Field, MetadataType, TIo>
::A2AMatrixBlockComputation(GridBase *grid,
const unsigned int orthogDim,
const unsigned int next,
const unsigned int nstr,
const unsigned int blockSize,
const unsigned int cacheBlockSize,
TimerArray *tArray)
: grid_(grid), nt_(grid->GlobalDimensions()[orthogDim]), orthogDim_(orthogDim)
, next_(next), nstr_(nstr), blockSize_(blockSize), cacheBlockSize_(cacheBlockSize)
, tArray_(tArray)
{
mCache_.resize(nt_*next_*nstr_*cacheBlockSize_*cacheBlockSize_);
mBuf_.resize(nt_*next_*nstr_*blockSize_*blockSize_);
}
#define START_TIMER(name) if (tArray_) tArray_->startTimer(name)
#define STOP_TIMER(name) if (tArray_) tArray_->stopTimer(name)
#define GET_TIMER(name) ((tArray_ != nullptr) ? tArray_->getDTimer(name) : 0.)
// execution ///////////////////////////////////////////////////////////////////
template <typename T, typename Field, typename MetadataType, typename TIo>
void A2AMatrixBlockComputation<T, Field, MetadataType, TIo>
::execute(const std::vector<Field> &left, const std::vector<Field> &right,
A2AKernel<T, Field> &kernel, const FilenameFn &ionameFn,
const FilenameFn &filenameFn, const MetadataFn &metadataFn)
{
//////////////////////////////////////////////////////////////////////////
// i,j is first loop over blockSize_ factors
// ii,jj is second loop over cacheBlockSize_ factors for high perf contractions
// iii,jjj are loops within cacheBlock
// Total index is sum of these i+ii+iii etc...
//////////////////////////////////////////////////////////////////////////
int N_i = left.size();
int N_j = right.size();
double flops, bytes, t_kernel;
double nodes = grid_->NodeCount();
int NBlock_i = N_i/blockSize_ + (((N_i % blockSize_) != 0) ? 1 : 0);
int NBlock_j = N_j/blockSize_ + (((N_j % blockSize_) != 0) ? 1 : 0);
for(int i=0;i<N_i;i+=blockSize_)
for(int j=0;j<N_j;j+=blockSize_)
{
// Get the W and V vectors for this block^2 set of terms
int N_ii = MIN(N_i-i,blockSize_);
int N_jj = MIN(N_j-j,blockSize_);
A2AMatrixSet<TIo> mBlock(mBuf_.data(), next_, nstr_, nt_, N_ii, N_jj);
LOG(Message) << "All-to-all matrix block "
<< j/blockSize_ + NBlock_j*i/blockSize_ + 1
<< "/" << NBlock_i*NBlock_j << " [" << i <<" .. "
<< i+N_ii-1 << ", " << j <<" .. " << j+N_jj-1 << "]"
<< std::endl;
// Series of cache blocked chunks of the contractions within this block
flops = 0.0;
bytes = 0.0;
t_kernel = 0.0;
for(int ii=0;ii<N_ii;ii+=cacheBlockSize_)
for(int jj=0;jj<N_jj;jj+=cacheBlockSize_)
{
double t;
int N_iii = MIN(N_ii-ii,cacheBlockSize_);
int N_jjj = MIN(N_jj-jj,cacheBlockSize_);
A2AMatrixSet<T> mCacheBlock(mCache_.data(), next_, nstr_, nt_, N_iii, N_jjj);
START_TIMER("kernel");
kernel(mCacheBlock, &left[i+ii], &right[j+jj], orthogDim_, t);
STOP_TIMER("kernel");
t_kernel += t;
flops += kernel.flops(N_iii, N_jjj);
bytes += kernel.bytes(N_iii, N_jjj);
START_TIMER("cache copy");
parallel_for_nest5(int e =0;e<next_;e++)
for(int s =0;s< nstr_;s++)
for(int t =0;t< nt_;t++)
for(int iii=0;iii< N_iii;iii++)
for(int jjj=0;jjj< N_jjj;jjj++)
{
mBlock(e,s,t,ii+iii,jj+jjj) = mCacheBlock(e,s,t,iii,jjj);
}
STOP_TIMER("cache copy");
}
// perf
LOG(Message) << "Kernel perf " << flops/t_kernel/1.0e3/nodes
<< " Gflop/s/node " << std::endl;
LOG(Message) << "Kernel perf " << bytes/t_kernel*1.0e6/1024/1024/1024/nodes
<< " GB/s/node " << std::endl;
// IO
double blockSize, ioTime;
unsigned int myRank = grid_->ThisRank(), nRank = grid_->RankCount();
LOG(Message) << "Writing block to disk" << std::endl;
ioTime = -GET_TIMER("IO: write block");
START_TIMER("IO: total");
makeFileDir(filenameFn(0, 0), grid_);
#ifdef HADRONS_A2AM_PARALLEL_IO
grid_->Barrier();
// make task list for current node
nodeIo_.clear();
for(int f = myRank; f < next_*nstr_; f += nRank)
{
IoHelper h;
h.i = i;
h.j = j;
h.e = f/nstr_;
h.s = f % nstr_;
h.io = A2AMatrixIo<TIo>(filenameFn(h.e, h.s),
ionameFn(h.e, h.s), nt_, N_i, N_j);
h.md = metadataFn(h.e, h.s);
nodeIo_.push_back(h);
}
// parallel IO
for (auto &h: nodeIo_)
{
saveBlock(mBlock, h);
}
grid_->Barrier();
#else
// serial IO, for testing purposes only
for(int e = 0; e < next_; e++)
for(int s = 0; s < nstr_; s++)
{
IoHelper h;
h.i = i;
h.j = j;
h.e = e;
h.s = s;
h.io = A2AMatrixIo<TIo>(filenameFn(h.e, h.s),
ionameFn(h.e, h.s), nt_, N_i, N_j);
h.md = metadataFn(h.e, h.s);
saveBlock(mBlock, h);
}
#endif
STOP_TIMER("IO: total");
blockSize = static_cast<double>(next_*nstr_*nt_*N_ii*N_jj*sizeof(TIo));
ioTime += GET_TIMER("IO: write block");
LOG(Message) << "HDF5 IO done " << sizeString(blockSize) << " in "
<< ioTime << " us ("
<< blockSize/ioTime*1.0e6/1024/1024
<< " MB/s)" << std::endl;
}
}
// I/O handler /////////////////////////////////////////////////////////////////
template <typename T, typename Field, typename MetadataType, typename TIo>
void A2AMatrixBlockComputation<T, Field, MetadataType, TIo>
::saveBlock(const A2AMatrixSet<TIo> &m, IoHelper &h)
{
if ((h.i == 0) and (h.j == 0))
{
START_TIMER("IO: file creation");
h.io.initFile(h.md, blockSize_);
STOP_TIMER("IO: file creation");
}
START_TIMER("IO: write block");
h.io.saveBlock(m, h.e, h.s, h.i, h.j);
STOP_TIMER("IO: write block");
}
#undef START_TIMER
#undef STOP_TIMER
#undef GET_TIMER
END_HADRONS_NAMESPACE
#endif // A2A_Matrix_hpp_
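The bookkeeping in execute is ceiling division plus a three-level index split: with outer block size $B$ (blockSize_) and cache block size $B_c$ (cacheBlockSize_),

$$N_{\rm block} = \left\lceil \frac{N}{B} \right\rceil, \qquad i_{\rm total} = i + ii + iii,$$

where $i$ advances in strides of $B$, $ii$ in strides of $B_c$ inside a block, and $iii$ runs over single modes inside a cache block; the contractions happen at the innermost level and HDF5 output once per outer $(i,j)$ block.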

View File

@ -36,7 +36,7 @@ See the full license in the file "LICENSE" in the top level distribution directo
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Class to generate V & W all-to-all vectors *
* Classes to generate V & W all-to-all vectors *
******************************************************************************/
template <typename FImpl>
class A2AVectorsSchurDiagTwo
@ -70,42 +70,6 @@ private:
SchurDiagTwoOperator<FMat, FermionField> op_;
};
/******************************************************************************
* Methods for V & W all-to-all vectors I/O *
******************************************************************************/
class A2AVectorsIo
{
public:
struct Record: Serializable
{
GRID_SERIALIZABLE_CLASS_MEMBERS(Record,
unsigned int, index);
Record(void): index(0) {}
};
public:
template <typename Field>
static void write(const std::string fileStem, std::vector<Field> &vec,
const bool multiFile, const int trajectory = -1);
template <typename Field>
static void read(std::vector<Field> &vec, const std::string fileStem,
const bool multiFile, const int trajectory = -1);
private:
static inline std::string vecFilename(const std::string stem, const int traj,
const bool multiFile)
{
std::string t = (traj < 0) ? "" : ("." + std::to_string(traj));
if (multiFile)
{
return stem + t;
}
else
{
return stem + t + ".bin";
}
}
};
/******************************************************************************
* A2AVectorsSchurDiagTwo template implementation *
******************************************************************************/
@ -253,90 +217,6 @@ void A2AVectorsSchurDiagTwo<FImpl>::makeHighModeW5D(FermionField &wout_4d,
}
}
/******************************************************************************
* all-to-all vectors I/O template implementation *
******************************************************************************/
template <typename Field>
void A2AVectorsIo::write(const std::string fileStem, std::vector<Field> &vec,
const bool multiFile, const int trajectory)
{
Record record;
GridBase *grid = vec[0]._grid;
ScidacWriter binWriter(grid->IsBoss());
std::string filename = vecFilename(fileStem, trajectory, multiFile);
if (multiFile)
{
std::string fullFilename;
for (unsigned int i = 0; i < vec.size(); ++i)
{
fullFilename = filename + "/elem" + std::to_string(i) + ".bin";
LOG(Message) << "Writing vector " << i << std::endl;
makeFileDir(fullFilename, grid);
binWriter.open(fullFilename);
record.index = i;
binWriter.writeScidacFieldRecord(vec[i], record);
binWriter.close();
}
}
else
{
makeFileDir(filename, grid);
binWriter.open(filename);
for (unsigned int i = 0; i < vec.size(); ++i)
{
LOG(Message) << "Writing vector " << i << std::endl;
record.index = i;
binWriter.writeScidacFieldRecord(vec[i], record);
}
binWriter.close();
}
}
template <typename Field>
void A2AVectorsIo::read(std::vector<Field> &vec, const std::string fileStem,
const bool multiFile, const int trajectory)
{
Record record;
ScidacReader binReader;
std::string filename = vecFilename(fileStem, trajectory, multiFile);
if (multiFile)
{
std::string fullFilename;
for (unsigned int i = 0; i < vec.size(); ++i)
{
fullFilename = filename + "/elem" + std::to_string(i) + ".bin";
LOG(Message) << "Reading vector " << i << std::endl;
binReader.open(fullFilename);
binReader.readScidacFieldRecord(vec[i], record);
binReader.close();
if (record.index != i)
{
HADRONS_ERROR(Io, "vector index mismatch");
}
}
}
else
{
binReader.open(filename);
for (unsigned int i = 0; i < vec.size(); ++i)
{
LOG(Message) << "Reading vector " << i << std::endl;
binReader.readScidacFieldRecord(vec[i], record);
if (record.index != i)
{
HADRONS_ERROR(Io, "vector index mismatch");
}
}
binReader.close();
}
}
END_HADRONS_NAMESPACE
#endif // A2A_Vectors_hpp_
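A brief usage sketch (the stem "vectors/w" and trajectory 500 are illustrative): per vecFilename above, multi-file mode writes one record per vector under a directory while single-file mode appends all records to one binary, and reads check the stored index.
std::vector<LatticeFermion> w; // filled elsewhere (hypothetical)
A2AVectorsIo::write("vectors/w", w, true, 500);  // vectors/w.500/elem0.bin, elem1.bin, ...
A2AVectorsIo::write("vectors/w", w, false, 500); // vectors/w.500.bin
A2AVectorsIo::read(w, "vectors/w", true, 500);   // errors on index mismatch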

View File

@ -7,7 +7,6 @@ Source file: Hadrons/DilutedNoise.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Vera Guelpers <Vera.Guelpers@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -77,22 +76,6 @@ private:
unsigned int nt_;
};
template <typename FImpl>
class FullVolumeSpinColorDiagonalNoise: public DilutedNoise<FImpl>
{
public:
typedef typename FImpl::FermionField FermionField;
public:
// constructor/destructor
FullVolumeSpinColorDiagonalNoise(GridCartesian *g, unsigned int n_src);
virtual ~FullVolumeSpinColorDiagonalNoise(void) = default;
// generate noise
virtual void generateNoise(GridParallelRNG &rng);
private:
unsigned int nSrc_;
};
/******************************************************************************
* DilutedNoise template implementation *
******************************************************************************/
@ -203,47 +186,6 @@ void TimeDilutedSpinColorDiagonalNoise<FImpl>::generateNoise(GridParallelRNG &rn
}
}
/******************************************************************************
* FullVolumeSpinColorDiagonalNoise template implementation *
******************************************************************************/
template <typename FImpl>
FullVolumeSpinColorDiagonalNoise<FImpl>::
FullVolumeSpinColorDiagonalNoise(GridCartesian *g, unsigned int nSrc)
: DilutedNoise<FImpl>(g, nSrc*Ns*FImpl::Dimension), nSrc_(nSrc)
{}
template <typename FImpl>
void FullVolumeSpinColorDiagonalNoise<FImpl>::generateNoise(GridParallelRNG &rng)
{
typedef decltype(peekColour((*this)[0], 0)) SpinField;
auto &noise = *this;
auto g = this->getGrid();
auto nd = g->GlobalDimensions().size();
auto nc = FImpl::Dimension;
Complex shift(1., 1.);
LatticeComplex eta(g);
SpinField etas(g);
unsigned int i = 0;
bernoulli(rng, eta);
eta = (2.*eta - shift)*(1./::sqrt(2.));
for (unsigned int n = 0; n < nSrc_; ++n)
{
for (unsigned int s = 0; s < Ns; ++s)
{
etas = zero;
pokeSpin(etas, eta, s);
for (unsigned int c = 0; c < nc; ++c)
{
noise[i] = zero;
pokeColour(noise[i], etas, c);
i++;
}
}
}
}
END_HADRONS_NAMESPACE
#endif // Hadrons_DilutedNoise_hpp_
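The Bernoulli draw plus affine map above is standard unit-modulus ($Z_2 \otimes Z_2$) noise: each real component of eta starts in $\{0,1\}$, so per site

$$\eta \;=\; \frac{2\eta_{0,1} - (1+i)}{\sqrt{2}} \;\in\; \left\{\frac{\pm 1 \pm i}{\sqrt{2}}\right\}, \qquad |\eta| = 1, \quad \langle\eta\rangle = 0, \quad \langle \eta(x)\,\eta^*(y)\rangle = \delta_{xy}.$$

The pokeSpin/pokeColour calls then place this scalar noise on exactly one spin-colour diagonal component per dilution index.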

View File

@ -34,12 +34,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <ftw.h>
#include <unistd.h>
#ifdef DV_DEBUG
#define DV_DEBUG_MSG(dv, stream) LOG(Debug) << "diskvector " << (dv) << ": " << stream << std::endl
#else
#define DV_DEBUG_MSG(dv, stream)
#endif
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
@ -62,7 +56,9 @@ public:
// write to disk and cache
T &operator=(const T &obj) const
{
DV_DEBUG_MSG(&master_, "writing to " << i_);
#ifdef DV_DEBUG
LOG(Debug) << "diskvector " << &master_ << ": writing to " << i_ << std::endl;
#endif
master_.cacheInsert(i_, obj);
master_.save(master_.filename(i_), obj);
@ -86,8 +82,6 @@ public:
virtual ~DiskVectorBase(void);
const T & operator[](const unsigned int i) const;
RwAccessHelper operator[](const unsigned int i);
double hitRatio(void) const;
void resetStat(void);
private:
virtual void load(T &obj, const std::string filename) const = 0;
virtual void save(const std::string filename, const T &obj) const = 0;
@ -99,7 +93,6 @@ private:
private:
std::string dirname_;
unsigned int size_, cacheSize_;
double access_{0.}, hit_{0.};
bool clean_;
// using pointers to allow modifications when class is const
// semantic: const means data unmodified, but cache modification allowed
@ -122,7 +115,6 @@ private:
read(reader, basename(filename), obj);
}
virtual void save(const std::string filename, const T &obj) const
{
Writer writer(filename);
@ -131,76 +123,13 @@ private:
}
};
/******************************************************************************
* Specialisation for Eigen matrices *
******************************************************************************/
template <typename T>
using EigenDiskVectorMat = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>;
template <typename T>
class EigenDiskVector: public DiskVectorBase<EigenDiskVectorMat<T>>
{
public:
using DiskVectorBase<EigenDiskVectorMat<T>>::DiskVectorBase;
typedef EigenDiskVectorMat<T> Matrix;
public:
T operator()(const unsigned int i, const Eigen::Index j,
const Eigen::Index k) const
{
return (*this)[i](j, k);
}
private:
virtual void load(EigenDiskVectorMat<T> &obj, const std::string filename) const
{
std::ifstream f(filename, std::ios::binary);
std::vector<unsigned char> hash(SHA256_DIGEST_LENGTH);
Eigen::Index nRow, nCol;
size_t matSize;
double t;
f.read(reinterpret_cast<char *>(hash.data()), hash.size()*sizeof(unsigned char));
f.read(reinterpret_cast<char *>(&nRow), sizeof(Eigen::Index));
f.read(reinterpret_cast<char *>(&nCol), sizeof(Eigen::Index));
obj.resize(nRow, nCol);
matSize = nRow*nCol*sizeof(T);
t = -usecond();
f.read(reinterpret_cast<char *>(obj.data()), matSize);
t += usecond();
DV_DEBUG_MSG(this, "Eigen read " << matSize/t*1.0e6/1024/1024 << " MB/s");
auto check = GridChecksum::sha256(obj.data(), matSize);
DV_DEBUG_MSG(this, "Eigen sha256 " << GridChecksum::sha256_string(check));
if (hash != check)
{
HADRONS_ERROR(Io, "checksum failed")
}
}
virtual void save(const std::string filename, const EigenDiskVectorMat<T> &obj) const
{
std::ofstream f(filename, std::ios::binary);
std::vector<unsigned char> hash(SHA256_DIGEST_LENGTH);
Eigen::Index nRow, nCol;
size_t matSize;
double t;
nRow = obj.rows();
nCol = obj.cols();
matSize = nRow*nCol*sizeof(T);
hash = GridChecksum::sha256(obj.data(), matSize);
DV_DEBUG_MSG(this, "Eigen sha256 " << GridChecksum::sha256_string(hash));
f.write(reinterpret_cast<char *>(hash.data()), hash.size()*sizeof(unsigned char));
f.write(reinterpret_cast<char *>(&nRow), sizeof(Eigen::Index));
f.write(reinterpret_cast<char *>(&nCol), sizeof(Eigen::Index));
t = -usecond();
f.write(reinterpret_cast<const char *>(obj.data()), matSize);
t += usecond();
DV_DEBUG_MSG(this, "Eigen write " << matSize/t*1.0e6/1024/1024 << " MB/s");
}
};
/******************************************************************************
* DiskVectorBase implementation *
******************************************************************************/
#ifdef DV_DEBUG
#define DV_DEBUG_MSG(stream) LOG(Debug) << "diskvector " << this << ": " << stream << std::endl
#endif
template <typename T>
DiskVectorBase<T>::DiskVectorBase(const std::string dirname,
const unsigned int size,
@ -231,29 +160,28 @@ DiskVectorBase<T>::~DiskVectorBase(void)
template <typename T>
const T & DiskVectorBase<T>::operator[](const unsigned int i) const
{
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
auto &cache = *cachePtr_;
auto &loads = *loadsPtr_;
DV_DEBUG_MSG(this, "accessing " << i << " (RO)");
DV_DEBUG_MSG("accessing " << i << " (RO)");
if (i >= size_)
{
HADRONS_ERROR(Size, "index out of range");
}
const_cast<double &>(access_)++;
if (cache.find(i) == cache.end())
{
// cache miss
DV_DEBUG_MSG(this, "cache miss");
DV_DEBUG_MSG("cache miss");
fetch(i);
}
else
{
DV_DEBUG_MSG(this, "cache hit");
DV_DEBUG_MSG("cache hit");
auto pos = std::find(loads.begin(), loads.end(), i);
const_cast<double &>(hit_)++;
loads.erase(pos);
loads.push_back(i);
}
@ -265,7 +193,7 @@ const T & DiskVectorBase<T>::operator[](const unsigned int i) const
{
msg += std::to_string(p) + " ";
}
DV_DEBUG_MSG(this, "in cache: " << msg);
DV_DEBUG_MSG("in cache: " << msg);
#endif
return cache.at(i);
@ -274,7 +202,7 @@ const T & DiskVectorBase<T>::operator[](const unsigned int i) const
template <typename T>
typename DiskVectorBase<T>::RwAccessHelper DiskVectorBase<T>::operator[](const unsigned int i)
{
DV_DEBUG_MSG(this, "accessing " << i << " (RW)");
DV_DEBUG_MSG("accessing " << i << " (RW)");
if (i >= size_)
{
@ -284,19 +212,6 @@ typename DiskVectorBase<T>::RwAccessHelper DiskVectorBase<T>::operator[](const u
return RwAccessHelper(*this, i);
}
template <typename T>
double DiskVectorBase<T>::hitRatio(void) const
{
return hit_/access_;
}
template <typename T>
void DiskVectorBase<T>::resetStat(void)
{
access_ = 0.;
hit_ = 0.;
}
template <typename T>
std::string DiskVectorBase<T>::filename(const unsigned int i) const
{
@ -311,7 +226,7 @@ void DiskVectorBase<T>::evict(void) const
if (cache.size() >= cacheSize_)
{
DV_DEBUG_MSG(this, "evicting " << loads.front());
DV_DEBUG_MSG("evicting " << loads.front());
cache.erase(loads.front());
loads.pop_front();
}
@ -324,7 +239,7 @@ void DiskVectorBase<T>::fetch(const unsigned int i) const
auto &loads = *loadsPtr_;
struct stat s;
DV_DEBUG_MSG(this, "loading " << i << " from disk");
DV_DEBUG_MSG("loading " << i << " from disk");
evict();
if(stat(filename(i).c_str(), &s) != 0)
@ -352,7 +267,7 @@ void DiskVectorBase<T>::cacheInsert(const unsigned int i, const T &obj) const
{
msg += std::to_string(p) + " ";
}
DV_DEBUG_MSG(this, "in cache: " << msg);
DV_DEBUG_MSG("in cache: " << msg);
#endif
}
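A minimal usage sketch, assuming the (dirname, size, cacheSize) constructor shown above; the directory and sizes are illustrative.
EigenDiskVector<ComplexD> dv("./dvScratch", 100, 4); // 100 slots, 4 cached in RAM
EigenDiskVectorMat<ComplexD> m = EigenDiskVectorMat<ComplexD>::Random(8, 8);
dv[3] = m;                // serialise to disk and insert into the cache
ComplexD x = dv(3, 0, 0); // element read through operator() above
double r = dv.hitRatio(); // fraction of accesses served from cache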

View File

@ -39,12 +39,12 @@ BEGIN_HADRONS_NAMESPACE
#define HADRONS_DEFAULT_LANCZOS_NBASIS 60
#endif
#define HADRONS_DUMP_EP_METADATA(record) \
#define HADRONS_DUMP_EP_METADATA \
LOG(Message) << "Eigenpack metadata:" << std::endl;\
LOG(Message) << "* operator" << std::endl;\
LOG(Message) << (record).operatorXml << std::endl;\
LOG(Message) << record.operatorXml << std::endl;\
LOG(Message) << "* solver" << std::endl;\
LOG(Message) << (record).solverXml << std::endl;
LOG(Message) << record.solverXml << std::endl;
struct PackRecord
{
@ -59,9 +59,66 @@ struct VecRecord: Serializable
VecRecord(void): index(0), eval(0.) {}
};
namespace EigenPackIo
template <typename F>
class EigenPack
{
inline void readHeader(PackRecord &record, ScidacReader &binReader)
public:
typedef F Field;
public:
std::vector<RealD> eval;
std::vector<F> evec;
PackRecord record;
public:
EigenPack(void) = default;
virtual ~EigenPack(void) = default;
EigenPack(const size_t size, GridBase *grid)
{
resize(size, grid);
}
void resize(const size_t size, GridBase *grid)
{
eval.resize(size);
evec.resize(size, grid);
}
virtual void read(const std::string fileStem, const bool multiFile, const int traj = -1)
{
if (multiFile)
{
for(int k = 0; k < evec.size(); ++k)
{
basicReadSingle(evec[k], eval[k], evecFilename(fileStem, k, traj), k);
if (k == 0)
{
HADRONS_DUMP_EP_METADATA;
}
}
}
else
{
basicRead(evec, eval, evecFilename(fileStem, -1, traj), evec.size());
HADRONS_DUMP_EP_METADATA;
}
}
virtual void write(const std::string fileStem, const bool multiFile, const int traj = -1)
{
if (multiFile)
{
for(int k = 0; k < evec.size(); ++k)
{
basicWriteSingle(evecFilename(fileStem, k, traj), evec[k], eval[k], k);
}
}
else
{
basicWrite(evecFilename(fileStem, -1, traj), evec, eval, evec.size());
}
}
static void readHeader(PackRecord &record, ScidacReader &binReader)
{
std::string recordXml;
@ -73,75 +130,13 @@ namespace EigenPackIo
xmlReader.readCurrentSubtree(record.solverXml);
}
template <typename T, typename TIo = T>
void readElement(T &evec, RealD &eval, const unsigned int index,
ScidacReader &binReader, TIo *ioBuf = nullptr)
template <typename T>
static void readElement(T &evec, VecRecord &vecRecord, ScidacReader &binReader)
{
VecRecord vecRecord;
LOG(Message) << "Reading eigenvector " << index << std::endl;
if (ioBuf == nullptr)
{
binReader.readScidacFieldRecord(evec, vecRecord);
}
else
{
binReader.readScidacFieldRecord(*ioBuf, vecRecord);
precisionChange(evec, *ioBuf);
}
if (vecRecord.index != index)
{
HADRONS_ERROR(Io, "Eigenvector " + std::to_string(index) + " has a"
+ " wrong index (expected " + std::to_string(vecRecord.index)
+ ")");
}
eval = vecRecord.eval;
binReader.readScidacFieldRecord(evec, vecRecord);
}
template <typename T, typename TIo = T>
static void readPack(std::vector<T> &evec, std::vector<RealD> &eval,
PackRecord &record, const std::string filename,
const unsigned int size, bool multiFile,
GridBase *gridIo = nullptr)
{
std::unique_ptr<TIo> ioBuf{nullptr};
ScidacReader binReader;
if (typeHash<T>() != typeHash<TIo>())
{
if (gridIo == nullptr)
{
HADRONS_ERROR(Definition,
"I/O type different from vector type but null I/O grid passed");
}
ioBuf.reset(new TIo(gridIo));
}
if (multiFile)
{
std::string fullFilename;
for(int k = 0; k < size; ++k)
{
fullFilename = filename + "/v" + std::to_string(k) + ".bin";
binReader.open(fullFilename);
readHeader(record, binReader);
readElement(evec[k], eval[k], k, binReader, ioBuf.get());
binReader.close();
}
}
else
{
binReader.open(filename);
readHeader(record, binReader);
for(int k = 0; k < size; ++k)
{
readElement(evec[k], eval[k], k, binReader, ioBuf.get());
}
binReader.close();
}
}
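readPack above allocates a lower-precision buffer only when the working type and I/O type differ, reads into it, then converts. A minimal sketch of the same pattern using plain binary streams (readVector and the on-disk layout are hypothetical stand-ins for the ScidacReader calls; TIo is assumed trivially copyable):

#include <fstream>
#include <memory>
#include <type_traits>
#include <vector>

template <typename T, typename TIo = T>
void readVector(std::vector<T> &out, std::ifstream &f)
{
    // allocate the conversion buffer only if the I/O type differs
    std::unique_ptr<std::vector<TIo>> ioBuf;
    if (!std::is_same<T, TIo>::value)
    {
        ioBuf.reset(new std::vector<TIo>(out.size()));
    }
    if (ioBuf)
    {
        f.read(reinterpret_cast<char *>(ioBuf->data()),
               static_cast<std::streamsize>(ioBuf->size()*sizeof(TIo)));
        for (std::size_t k = 0; k < out.size(); ++k)
        {
            out[k] = static_cast<T>((*ioBuf)[k]); // plays the role of precisionChange
        }
    }
    else
    {
        f.read(reinterpret_cast<char *>(out.data()),
               static_cast<std::streamsize>(out.size()*sizeof(T)));
    }
}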
inline void writeHeader(ScidacWriter &binWriter, PackRecord &record)
static void writeHeader(ScidacWriter &binWriter, PackRecord &record)
{
XmlWriter xmlWriter("", "eigenPackPar");
@ -150,217 +145,165 @@ namespace EigenPackIo
binWriter.writeLimeObject(1, 1, xmlWriter, "parameters", SCIDAC_FILE_XML);
}
template <typename T, typename TIo = T>
void writeElement(ScidacWriter &binWriter, T &evec, RealD &eval,
const unsigned int index, TIo *ioBuf,
T *testBuf = nullptr)
template <typename T>
static void writeElement(ScidacWriter &binWriter, T &evec, VecRecord &vecRecord)
{
VecRecord vecRecord;
LOG(Message) << "Writing eigenvector " << index << std::endl;
vecRecord.eval = eval;
vecRecord.index = index;
if ((ioBuf == nullptr) || (testBuf == nullptr))
{
binWriter.writeScidacFieldRecord(evec, vecRecord, DEFAULT_ASCII_PREC);
}
else
{
precisionChange(*ioBuf, evec);
precisionChange(*testBuf, *ioBuf);
*testBuf -= evec;
LOG(Message) << "Precision diff norm^2 " << norm2(*testBuf) << std::endl;
binWriter.writeScidacFieldRecord(*ioBuf, vecRecord, DEFAULT_ASCII_PREC);
}
}
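writeElement also verifies the down-conversion by converting the I/O buffer back up and logging the squared norm of the difference before writing. A standalone scalar sketch of that round-trip check (values illustrative):

#include <iostream>
#include <vector>

// Convert double -> float -> double and report the squared norm of the
// round-trip error, as writeElement does before writing the I/O buffer.
int main()
{
    std::vector<double> v = {1.0/3.0, 2.0/7.0, 1e-8};
    double norm2 = 0.;
    for (double x: v)
    {
        double back = static_cast<double>(static_cast<float>(x));
        norm2 += (back - x)*(back - x);
    }
    std::cout << "Precision diff norm^2 " << norm2 << std::endl;
}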
template <typename T, typename TIo = T>
static void writePack(const std::string filename, std::vector<T> &evec,
std::vector<RealD> &eval, PackRecord &record,
const unsigned int size, bool multiFile,
GridBase *gridIo = nullptr)
{
GridBase *grid = evec[0]._grid;
std::unique_ptr<TIo> ioBuf{nullptr};
std::unique_ptr<T> testBuf{nullptr};
ScidacWriter binWriter(grid->IsBoss());
if (typeHash<T>() != typeHash<TIo>())
{
if (gridIo == nullptr)
{
HADRONS_ERROR(Definition,
"I/O type different from vector type but null I/O grid passed");
}
ioBuf.reset(new TIo(gridIo));
testBuf.reset(new T(grid));
}
if (multiFile)
{
std::string fullFilename;
for(int k = 0; k < size; ++k)
{
fullFilename = filename + "/v" + std::to_string(k) + ".bin";
makeFileDir(fullFilename, grid);
binWriter.open(fullFilename);
writeHeader(binWriter, record);
writeElement(binWriter, evec[k], eval[k], k, ioBuf.get(), testBuf.get());
binWriter.close();
}
}
else
{
makeFileDir(filename, grid);
binWriter.open(filename);
writeHeader(binWriter, record);
for(int k = 0; k < size; ++k)
{
writeElement(binWriter, evec[k], eval[k], k, ioBuf.get(), testBuf.get());
}
binWriter.close();
}
}
}
template <typename F>
class BaseEigenPack
{
public:
typedef F Field;
public:
std::vector<RealD> eval;
std::vector<F> evec;
PackRecord record;
public:
BaseEigenPack(void) = default;
BaseEigenPack(const size_t size, GridBase *grid)
{
resize(size, grid);
}
virtual ~BaseEigenPack(void) = default;
void resize(const size_t size, GridBase *grid)
{
eval.resize(size);
evec.resize(size, grid);
}
};
template <typename F, typename FIo = F>
class EigenPack: public BaseEigenPack<F>
{
public:
typedef F Field;
typedef FIo FieldIo;
public:
EigenPack(void) = default;
virtual ~EigenPack(void) = default;
EigenPack(const size_t size, GridBase *grid, GridBase *gridIo = nullptr)
: BaseEigenPack<F>(size, grid)
{
if (typeHash<F>() != typeHash<FIo>())
{
if (gridIo == nullptr)
{
HADRONS_ERROR(Definition,
"I/O type different from vector type but null I/O grid passed");
}
}
gridIo_ = gridIo;
}
virtual void read(const std::string fileStem, const bool multiFile, const int traj = -1)
{
EigenPackIo::readPack<F, FIo>(this->evec, this->eval, this->record,
evecFilename(fileStem, traj, multiFile),
this->evec.size(), multiFile, gridIo_);
HADRONS_DUMP_EP_METADATA(this->record);
}
virtual void write(const std::string fileStem, const bool multiFile, const int traj = -1)
{
EigenPackIo::writePack<F, FIo>(evecFilename(fileStem, traj, multiFile),
this->evec, this->eval, this->record,
this->evec.size(), multiFile, gridIo_);
binWriter.writeScidacFieldRecord(evec, vecRecord, DEFAULT_ASCII_PREC);
}
protected:
std::string evecFilename(const std::string stem, const int traj, const bool multiFile)
std::string evecFilename(const std::string stem, const int vec, const int traj)
{
std::string t = (traj < 0) ? "" : ("." + std::to_string(traj));
if (multiFile)
{
return stem + t;
}
else
if (vec == -1)
{
return stem + t + ".bin";
}
else
{
return stem + t + "/v" + std::to_string(vec) + ".bin";
}
}
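For concreteness: with stem evec and trajectory 100, the single-file convention yields evec.100.bin, while the multi-file convention yields evec.100/v0.bin, evec.100/v1.bin, and so on; the new code resolves only the directory stem here and lets EigenPackIo append /v<k>.bin per vector.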
template <typename T>
void basicRead(std::vector<T> &evec, std::vector<RealD> &eval,
const std::string filename, const unsigned int size)
{
ScidacReader binReader;
binReader.open(filename);
readHeader(record, binReader);
for(int k = 0; k < size; ++k)
{
VecRecord vecRecord;
LOG(Message) << "Reading eigenvector " << k << std::endl;
readElement(evec[k], vecRecord, binReader);
if (vecRecord.index != k)
{
HADRONS_ERROR(Io, "Eigenvector " + std::to_string(k) + " has a"
+ " wrong index (expected " + std::to_string(vecRecord.index)
+ ") in file '" + filename + "'");
}
eval[k] = vecRecord.eval;
}
binReader.close();
}
template <typename T>
void basicReadSingle(T &evec, RealD &eval, const std::string filename,
const unsigned int index)
{
ScidacReader binReader;
VecRecord vecRecord;
binReader.open(filename);
readHeader(record, binReader);
LOG(Message) << "Reading eigenvector " << index << std::endl;
readElement(evec, vecRecord, binReader);
if (vecRecord.index != index)
{
HADRONS_ERROR(Io, "Eigenvector " + std::to_string(index) + " has a"
+ " wrong index (expected " + std::to_string(vecRecord.index)
+ ") in file '" + filename + "'");
}
eval = vecRecord.eval;
binReader.close();
}
template <typename T>
void basicWrite(const std::string filename, std::vector<T> &evec,
const std::vector<RealD> &eval, const unsigned int size)
{
ScidacWriter binWriter(evec[0]._grid->IsBoss());
makeFileDir(filename, evec[0]._grid);
binWriter.open(filename);
writeHeader(binWriter, record);
for(int k = 0; k < size; ++k)
{
VecRecord vecRecord;
vecRecord.index = k;
vecRecord.eval = eval[k];
LOG(Message) << "Writing eigenvector " << k << std::endl;
writeElement(binWriter, evec[k], vecRecord);
}
binWriter.close();
}
template <typename T>
void basicWriteSingle(const std::string filename, T &evec,
const RealD eval, const unsigned int index)
{
ScidacWriter binWriter(evec._grid->IsBoss());
VecRecord vecRecord;
makeFileDir(filename, evec._grid);
binWriter.open(filename);
writeHeader(binWriter, record);
vecRecord.index = index;
vecRecord.eval = eval;
LOG(Message) << "Writing eigenvector " << index << std::endl;
writeElement(binWriter, evec, vecRecord);
binWriter.close();
}
protected:
GridBase *gridIo_;
};
template <typename FineF, typename CoarseF,
typename FineFIo = FineF, typename CoarseFIo = CoarseF>
class CoarseEigenPack: public EigenPack<FineF, FineFIo>
template <typename FineF, typename CoarseF>
class CoarseEigenPack: public EigenPack<FineF>
{
public:
typedef CoarseF CoarseField;
std::vector<CoarseF> evecCoarse;
typedef CoarseF CoarseField;
public:
std::vector<RealD> evalCoarse;
std::vector<CoarseF> evecCoarse;
public:
CoarseEigenPack(void) = default;
virtual ~CoarseEigenPack(void) = default;
CoarseEigenPack(const size_t sizeFine, const size_t sizeCoarse,
GridBase *gridFine, GridBase *gridCoarse,
GridBase *gridFineIo = nullptr,
GridBase *gridCoarseIo = nullptr)
GridBase *gridFine, GridBase *gridCoarse)
{
if (typeHash<FineF>() != typeHash<FineFIo>())
{
if (gridFineIo == nullptr)
{
HADRONS_ERROR(Definition,
"Fine I/O type different from vector type but null fine I/O grid passed");
}
}
if (typeHash<CoarseF>() != typeHash<CoarseFIo>())
{
if (gridCoarseIo == nullptr)
{
HADRONS_ERROR(Definition,
"Coarse I/O type different from vector type but null coarse I/O grid passed");
}
}
this->gridIo_ = gridFineIo;
gridCoarseIo_ = gridCoarseIo;
resize(sizeFine, sizeCoarse, gridFine, gridCoarse);
}
void resize(const size_t sizeFine, const size_t sizeCoarse,
GridBase *gridFine, GridBase *gridCoarse)
{
EigenPack<FineF, FineFIo>::resize(sizeFine, gridFine);
EigenPack<FineF>::resize(sizeFine, gridFine);
evalCoarse.resize(sizeCoarse);
evecCoarse.resize(sizeCoarse, gridCoarse);
}
void readFine(const std::string fileStem, const bool multiFile, const int traj = -1)
{
EigenPack<FineF, FineFIo>::read(fileStem + "_fine", multiFile, traj);
if (multiFile)
{
for(int k = 0; k < this->evec.size(); ++k)
{
this->basicReadSingle(this->evec[k], this->eval[k], this->evecFilename(fileStem + "_fine", k, traj), k);
}
}
else
{
this->basicRead(this->evec, this->eval, this->evecFilename(fileStem + "_fine", -1, traj), this->evec.size());
}
}
void readCoarse(const std::string fileStem, const bool multiFile, const int traj = -1)
{
PackRecord dummy;
EigenPackIo::readPack<CoarseF, CoarseFIo>(evecCoarse, evalCoarse, dummy,
this->evecFilename(fileStem + "_coarse", traj, multiFile),
evecCoarse.size(), multiFile, gridCoarseIo_);
if (multiFile)
{
for(int k = 0; k < evecCoarse.size(); ++k)
{
this->basicReadSingle(evecCoarse[k], evalCoarse[k], this->evecFilename(fileStem + "_coarse", k, traj), k);
}
}
else
{
this->basicRead(evecCoarse, evalCoarse, this->evecFilename(fileStem + "_coarse", -1, traj), evecCoarse.size());
}
}
virtual void read(const std::string fileStem, const bool multiFile, const int traj = -1)
@ -371,14 +314,32 @@ public:
void writeFine(const std::string fileStem, const bool multiFile, const int traj = -1)
{
EigenPack<FineF, FineFIo>::write(fileStem + "_fine", multiFile, traj);
if (multiFile)
{
for(int k = 0; k < this->evec.size(); ++k)
{
this->basicWriteSingle(this->evecFilename(fileStem + "_fine", k, traj), this->evec[k], this->eval[k], k);
}
}
else
{
this->basicWrite(this->evecFilename(fileStem + "_fine", -1, traj), this->evec, this->eval, this->evec.size());
}
}
void writeCoarse(const std::string fileStem, const bool multiFile, const int traj = -1)
{
EigenPackIo::writePack<CoarseF, CoarseFIo>(this->evecFilename(fileStem + "_coarse", traj, multiFile),
evecCoarse, evalCoarse, this->record,
evecCoarse.size(), multiFile, gridCoarseIo_);
if (multiFile)
{
for(int k = 0; k < evecCoarse.size(); ++k)
{
this->basicWriteSingle(this->evecFilename(fileStem + "_coarse", k, traj), evecCoarse[k], evalCoarse[k], k);
}
}
else
{
this->basicWrite(this->evecFilename(fileStem + "_coarse", -1, traj), evecCoarse, evalCoarse, evecCoarse.size());
}
}
virtual void write(const std::string fileStem, const bool multiFile, const int traj = -1)
@ -386,25 +347,16 @@ public:
writeFine(fileStem, multiFile, traj);
writeCoarse(fileStem, multiFile, traj);
}
private:
GridBase *gridCoarseIo_;
};
template <typename FImpl>
using BaseFermionEigenPack = BaseEigenPack<typename FImpl::FermionField>;
using FermionEigenPack = EigenPack<typename FImpl::FermionField>;
template <typename FImpl, typename FImplIo = FImpl>
using FermionEigenPack = EigenPack<typename FImpl::FermionField, typename FImplIo::FermionField>;
template <typename FImpl, int nBasis, typename FImplIo = FImpl>
template <typename FImpl, int nBasis>
using CoarseFermionEigenPack = CoarseEigenPack<
typename FImpl::FermionField,
typename LocalCoherenceLanczos<typename FImpl::SiteSpinor,
typename FImpl::SiteComplex,
nBasis>::CoarseField,
typename FImplIo::FermionField,
typename LocalCoherenceLanczos<typename FImplIo::SiteSpinor,
typename FImplIo::SiteComplex,
nBasis>::CoarseField>;
#undef HADRONS_DUMP_EP_METADATA

View File

@ -11,7 +11,6 @@ libHadrons_a_SOURCES = \
Exceptions.cc \
Global.cc \
Module.cc \
TimerArray.cc \
VirtualMachine.cc
libHadrons_adir = $(includedir)/Hadrons
nobase_libHadrons_a_HEADERS = \
@ -32,5 +31,4 @@ nobase_libHadrons_a_HEADERS = \
Modules.hpp \
ModuleFactory.hpp \
Solver.hpp \
TimerArray.hpp \
VirtualMachine.hpp

View File

@ -66,6 +66,101 @@ void ModuleBase::operator()(void)
stopAllTimers();
}
// timers //////////////////////////////////////////////////////////////////////
void ModuleBase::startTimer(const std::string &name)
{
if (!name.empty())
{
timer_[name].Start();
}
}
GridTime ModuleBase::getTimer(const std::string &name)
{
GridTime t;
if (!name.empty())
{
try
{
bool running = timer_.at(name).isRunning();
if (running) stopTimer(name);
t = timer_.at(name).Elapsed();
if (running) startTimer(name);
}
catch (std::out_of_range &)
{
t = GridTime::zero();
}
}
else
{
t = GridTime::zero();
}
return t;
}
double ModuleBase::getDTimer(const std::string &name)
{
return static_cast<double>(getTimer(name).count());
}
void ModuleBase::startCurrentTimer(const std::string &name)
{
if (!name.empty())
{
stopCurrentTimer();
startTimer(name);
currentTimer_ = name;
}
}
void ModuleBase::stopTimer(const std::string &name)
{
if (timer_.at(name).isRunning())
{
timer_.at(name).Stop();
}
}
void ModuleBase::stopCurrentTimer(void)
{
if (!currentTimer_.empty())
{
stopTimer(currentTimer_);
currentTimer_ = "";
}
}
void ModuleBase::stopAllTimers(void)
{
for (auto &t: timer_)
{
stopTimer(t.first);
}
currentTimer_ = "";
}
void ModuleBase::resetTimers(void)
{
timer_.clear();
currentTimer_ = "";
}
std::map<std::string, GridTime> ModuleBase::getTimings(void)
{
std::map<std::string, GridTime> timing;
for (auto &t: timer_)
{
timing[t.first] = t.second.Elapsed();
}
return timing;
}
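The subtle point in getTimer above is that a running timer is stopped, read, and restarted, so the caller gets a consistent elapsed value without losing accumulated time. A toy standalone sketch of that stop/read/resume trick (Stopwatch is a hypothetical stand-in for GridStopWatch):

#include <chrono>
#include <iostream>
#include <map>
#include <string>

// Toy stopwatch: accumulates time across Start/Stop pairs.
struct Stopwatch
{
    using Clock = std::chrono::steady_clock;
    Clock::duration   acc{};
    Clock::time_point t0;
    bool              running = false;
    void Start(void) { t0 = Clock::now(); running = true; }
    void Stop(void)  { acc += Clock::now() - t0; running = false; }
};

int main()
{
    std::map<std::string, Stopwatch> timer;
    timer["demo"].Start();
    // ... timed work would go here ...
    auto &t       = timer.at("demo");
    bool  running = t.running;
    if (running) t.Stop();   // pause so the accumulated time is consistent
    auto elapsed = t.acc;    // read while stopped
    if (running) t.Start();  // resume without losing accumulated time
    std::cout << std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count()
              << " us" << std::endl;
}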
std::string ModuleBase::makeSeedString(void)
{
std::string seed;

View File

@ -30,7 +30,6 @@ See the full license in the file "LICENSE" in the top level distribution directo
#define Hadrons_Module_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/TimerArray.hpp>
#include <Hadrons/VirtualMachine.hpp>
BEGIN_HADRONS_NAMESPACE
@ -153,7 +152,7 @@ if (env().getGrid()->IsBoss() and !ioStem.empty())\
* Module class *
******************************************************************************/
// base class
class ModuleBase: public TimerArray
class ModuleBase
{
public:
// constructor
@ -181,6 +180,16 @@ public:
virtual void execute(void) = 0;
// execution
void operator()(void);
// timers
void startTimer(const std::string &name);
GridTime getTimer(const std::string &name);
double getDTimer(const std::string &name);
void startCurrentTimer(const std::string &name);
void stopTimer(const std::string &name);
void stopCurrentTimer(void);
void stopAllTimers(void);
void resetTimers(void);
std::map<std::string, GridTime> getTimings(void);
protected:
// environment shortcut
DEFINE_ENV_ALIAS;

View File

@ -1,6 +1,6 @@
#include <Hadrons/Modules/MContraction/Baryon.hpp>
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonField.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp>
#include <Hadrons/Modules/MContraction/Meson.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonian.hpp>
#include <Hadrons/Modules/MContraction/WeakHamiltonianNonEye.hpp>
@ -16,7 +16,6 @@
#include <Hadrons/Modules/MSource/Wall.hpp>
#include <Hadrons/Modules/MSource/Z2.hpp>
#include <Hadrons/Modules/MSource/SeqConserved.hpp>
#include <Hadrons/Modules/MSource/Momentum.hpp>
#include <Hadrons/Modules/MSink/Smear.hpp>
#include <Hadrons/Modules/MSink/Point.hpp>
#include <Hadrons/Modules/MSolver/MixedPrecisionRBPrecCG.hpp>
@ -28,11 +27,9 @@
#include <Hadrons/Modules/MGauge/StoutSmearing.hpp>
#include <Hadrons/Modules/MGauge/Unit.hpp>
#include <Hadrons/Modules/MGauge/Random.hpp>
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
#include <Hadrons/Modules/MGauge/FundtoHirep.hpp>
#include <Hadrons/Modules/MGauge/StochEm.hpp>
#include <Hadrons/Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
#include <Hadrons/Modules/MUtilities/PrecisionCast.hpp>
#include <Hadrons/Modules/MUtilities/RandomVectors.hpp>
#include <Hadrons/Modules/MUtilities/TestSeqGamma.hpp>
@ -43,9 +40,6 @@
#include <Hadrons/Modules/MScalar/ScalarVP.hpp>
#include <Hadrons/Modules/MScalar/Scalar.hpp>
#include <Hadrons/Modules/MScalar/ChargedProp.hpp>
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
#include <Hadrons/Modules/MNPR/Amputate.hpp>
#include <Hadrons/Modules/MNPR/FourQuark.hpp>
#include <Hadrons/Modules/MAction/DWF.hpp>
#include <Hadrons/Modules/MAction/MobiusDWF.hpp>
#include <Hadrons/Modules/MAction/Wilson.hpp>
@ -56,6 +50,7 @@
#include <Hadrons/Modules/MScalarSUN/TwoPointNPR.hpp>
#include <Hadrons/Modules/MScalarSUN/ShiftProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/Div.hpp>
#include <Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp>
#include <Hadrons/Modules/MScalarSUN/TrMag.hpp>
#include <Hadrons/Modules/MScalarSUN/EMT.hpp>
#include <Hadrons/Modules/MScalarSUN/TwoPoint.hpp>
@ -66,7 +61,6 @@
#include <Hadrons/Modules/MScalarSUN/TrKinetic.hpp>
#include <Hadrons/Modules/MIO/LoadEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadNersc.hpp>
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
#include <Hadrons/Modules/MIO/LoadCosmHol.hpp>
#include <Hadrons/Modules/MIO/LoadCoarseEigenPack.hpp>
#include <Hadrons/Modules/MIO/LoadBinary.hpp>

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TDWF<FIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TDWF<FIMPLF>;
#endif

View File

@ -73,9 +73,7 @@ protected:
};
MODULE_REGISTER_TMP(DWF, TDWF<FIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(DWFF, TDWF<FIMPLF>, MAction);
#endif
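The #ifdef GRID_DEFAULT_PRECISION_DOUBLE guard registers the single-precision module variant (here DWFF, instantiated on FIMPLF) only when Grid's default precision is double, so the single- and double-precision registrations never alias; the same stanza recurs for the Mobius, scaled, Wilson, Wilson-clover and zMobius actions below.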
/******************************************************************************
* DWF template implementation *

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TMobiusDWF<FIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TMobiusDWF<FIMPLF>;
#endif

View File

@ -72,9 +72,7 @@ public:
};
MODULE_REGISTER_TMP(MobiusDWF, TMobiusDWF<FIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(MobiusDWFF, TMobiusDWF<FIMPLF>, MAction);
#endif
/******************************************************************************
* TMobiusDWF implementation *

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TScaledDWF<FIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TScaledDWF<FIMPLF>;
#endif

View File

@ -71,9 +71,7 @@ public:
};
MODULE_REGISTER_TMP(ScaledDWF, TScaledDWF<FIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(ScaledDWFF, TScaledDWF<FIMPLF>, MAction);
#endif
/******************************************************************************
* TScaledDWF implementation *

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TWilson<FIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TWilson<FIMPLF>;
#endif

View File

@ -71,9 +71,7 @@ protected:
};
MODULE_REGISTER_TMP(Wilson, TWilson<FIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(WilsonF, TWilson<FIMPLF>, MAction);
#endif
/******************************************************************************
* TWilson template implementation *

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TWilsonClover<FIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TWilsonClover<FIMPLF>;
#endif

View File

@ -75,9 +75,7 @@ public:
};
MODULE_REGISTER_TMP(WilsonClover, TWilsonClover<FIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(WilsonCloverF, TWilsonClover<FIMPLF>, MAction);
#endif
/******************************************************************************
* TWilsonClover template implementation *

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MAction;
template class Grid::Hadrons::MAction::TZMobiusDWF<ZFIMPL>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MAction::TZMobiusDWF<ZFIMPLF>;
#endif

View File

@ -73,9 +73,7 @@ public:
};
MODULE_REGISTER_TMP(ZMobiusDWF, TZMobiusDWF<ZFIMPL>, MAction);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(ZMobiusDWFF, TZMobiusDWF<ZFIMPLF>, MAction);
#endif
/******************************************************************************
* TZMobiusDWF implementation *

View File

@ -1,250 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AAslashField.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MContraction_A2AAslashField_hpp_
#define Hadrons_MContraction_A2AAslashField_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/A2AMatrix.hpp>
#ifndef ASF_IO_TYPE
#define ASF_IO_TYPE ComplexF
#endif
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* A2AAslashField *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MContraction)
class A2AAslashFieldPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AAslashFieldPar,
int, cacheBlock,
int, block,
std::string, left,
std::string, right,
std::string, output,
std::vector<std::string>, emField);
};
class A2AAslashFieldMetadata: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AAslashFieldMetadata,
std::string, emFieldName);
};
template <typename T, typename FImpl>
class AslashFieldKernel: public A2AKernel<T, typename FImpl::FermionField>
{
public:
typedef typename FImpl::FermionField FermionField;
public:
AslashFieldKernel(const std::vector<LatticeComplex> &emB0,
const std::vector<LatticeComplex> &emB1,
GridBase *grid)
: emB0_(emB0), emB1_(emB1), grid_(grid)
{
vol_ = 1.;
for (auto &d: grid_->GlobalDimensions())
{
vol_ *= d;
}
}
virtual ~AslashFieldKernel(void) = default;
virtual void operator()(A2AMatrixSet<T> &m, const FermionField *left,
const FermionField *right,
const unsigned int orthogDim, double &t)
{
A2Autils<FImpl>::AslashField(m, left, right, emB0_, emB1_, orthogDim, &t);
}
virtual double flops(const unsigned int blockSizei, const unsigned int blockSizej)
{
return 0.;
}
virtual double bytes(const unsigned int blockSizei, const unsigned int blockSizej)
{
return 0.;
}
private:
const std::vector<LatticeComplex> &emB0_, &emB1_;
GridBase *grid_;
double vol_;
};
template <typename FImpl, typename PhotonImpl>
class TA2AAslashField: public Module<A2AAslashFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
typedef typename PhotonImpl::GaugeField EmField;
typedef A2AMatrixBlockComputation<Complex,
FermionField,
A2AAslashFieldMetadata,
ASF_IO_TYPE> Computation;
typedef AslashFieldKernel<Complex, FImpl> Kernel;
public:
// constructor
TA2AAslashField(const std::string name);
// destructor
virtual ~TA2AAslashField(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(A2AAslashField, ARG(TA2AAslashField<FIMPL, PhotonR>), MContraction);
/******************************************************************************
* TA2AAslashField implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl, typename PhotonImpl>
TA2AAslashField<FImpl, PhotonImpl>::TA2AAslashField(const std::string name)
: Module<A2AAslashFieldPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl, typename PhotonImpl>
std::vector<std::string> TA2AAslashField<FImpl, PhotonImpl>::getInput(void)
{
std::vector<std::string> in = par().emField;
in.push_back(par().left);
in.push_back(par().right);
return in;
}
template <typename FImpl, typename PhotonImpl>
std::vector<std::string> TA2AAslashField<FImpl, PhotonImpl>::getOutput(void)
{
std::vector<std::string> out = {};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl, typename PhotonImpl>
void TA2AAslashField<FImpl, PhotonImpl>::setup(void)
{
envTmp(Computation, "computation", 1, envGetGrid(FermionField),
env().getNd() - 1, par().emField.size(), 1, par().block,
par().cacheBlock, this);
envTmp(std::vector<ComplexField>, "B0", 1,
par().emField.size(), envGetGrid(ComplexField));
envTmp(std::vector<ComplexField>, "B1", 1,
par().emField.size(), envGetGrid(ComplexField));
envTmpLat(ComplexField, "Amu");
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl, typename PhotonImpl>
void TA2AAslashField<FImpl, PhotonImpl>::execute(void)
{
auto &left = envGet(std::vector<FermionField>, par().left);
auto &right = envGet(std::vector<FermionField>, par().right);
int nt = env().getDim().back();
int N_i = left.size();
int N_j = right.size();
int nem = par().emField.size();
int block = par().block;
int cacheBlock = par().cacheBlock;
LOG(Message) << "Computing all-to-all A-slash fields" << std::endl;
LOG(Message) << "Left: '" << par().left << "' Right: '" << par().right << "'" << std::endl;
LOG(Message) << "EM fields:" << std::endl;
for (auto &name: par().emField)
{
LOG(Message) << " " << name << std::endl;
}
LOG(Message) << "A-slash field size: " << nt << "*" << N_i << "*" << N_j
<< " (filesize " << sizeString(nt*N_i*N_j*sizeof(ASF_IO_TYPE))
<< "/EM field)" << std::endl;
// preparing "B" complexified fields
startTimer("Complexify EM fields");
envGetTmp(std::vector<ComplexField>, B0);
envGetTmp(std::vector<ComplexField>, B1);
for (unsigned int i = 0; i < par().emField.size(); ++i)
{
auto &A = envGet(EmField, par().emField[i]);
envGetTmp(ComplexField, Amu);
B0[i] = peekLorentz(A, 0);
B0[i] += timesI(peekLorentz(A, 1));
B1[i] = peekLorentz(A, 2);
B1[i] += timesI(peekLorentz(A, 3));
}
stopTimer("Complexify EM fields");
// I/O name & metadata lambdas
auto ionameFn = [this](const unsigned int em, const unsigned int dummy)
{
return par().emField[em];
};
auto filenameFn = [this, &ionameFn](const unsigned int em, const unsigned int dummy)
{
return par().output + "." + std::to_string(vm().getTrajectory())
+ "/" + ionameFn(em, dummy) + ".h5";
};
auto metadataFn = [this](const unsigned int em, const unsigned int dummy)
{
A2AAslashFieldMetadata md;
md.emFieldName = par().emField[em];
return md;
};
// executing computation
Kernel kernel(B0, B1, envGetGrid(FermionField));
envGetTmp(Computation, computation);
computation.execute(left, right, kernel, ionameFn, filenameFn, metadataFn);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MContraction_A2AAslashField_hpp_
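The B0/B1 construction in execute() above packs two real Lorentz components of the photon field into each complex scalar field. A scalar-level sketch with std::complex (container types hypothetical, standing in for lattice fields and the peekLorentz/timesI calls):

#include <array>
#include <complex>
#include <vector>

// Per site: B0 = A_0 + i*A_1 and B1 = A_2 + i*A_3, mirroring the
// complexification loop in execute().
void complexify(const std::array<std::vector<double>, 4> &A,
                std::vector<std::complex<double>> &B0,
                std::vector<std::complex<double>> &B1)
{
    for (std::size_t s = 0; s < B0.size(); ++s)
    {
        B0[s] = {A[0][s], A[1][s]};
        B1[s] = {A[2][s], A[3][s]};
    }
}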

View File

@ -33,3 +33,4 @@ using namespace Hadrons;
using namespace MContraction;
template class Grid::Hadrons::MContraction::TA2AMesonField<FIMPL>;
template class Grid::Hadrons::MContraction::TA2AMesonField<ZFIMPL>;

View File

@ -33,8 +33,12 @@ See the full license in the file "LICENSE" in the top level distribution directo
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/A2AVectors.hpp>
#include <Hadrons/A2AMatrix.hpp>
#include <Hadrons/Modules/MSolver/A2AVectors.hpp>
#include <Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp>
#define MF_PARALLEL_IO
#ifndef MF_IO_TYPE
#define MF_IO_TYPE ComplexF
#endif
@ -52,8 +56,8 @@ public:
GRID_SERIALIZABLE_CLASS_MEMBERS(A2AMesonFieldPar,
int, cacheBlock,
int, block,
std::string, left,
std::string, right,
std::string, v,
std::string, w,
std::string, output,
std::string, gammas,
std::vector<std::string>, mom);
@ -67,59 +71,22 @@ public:
Gamma::Algebra, gamma);
};
template <typename T, typename FImpl>
class MesonFieldKernel: public A2AKernel<T, typename FImpl::FermionField>
{
public:
typedef typename FImpl::FermionField FermionField;
public:
MesonFieldKernel(const std::vector<Gamma::Algebra> &gamma,
const std::vector<LatticeComplex> &mom,
GridBase *grid)
: gamma_(gamma), mom_(mom), grid_(grid)
{
vol_ = 1.;
for (auto &d: grid_->GlobalDimensions())
{
vol_ *= d;
}
}
virtual ~MesonFieldKernel(void) = default;
virtual void operator()(A2AMatrixSet<T> &m, const FermionField *left,
const FermionField *right,
const unsigned int orthogDim, double &t)
{
A2Autils<FImpl>::MesonField(m, left, right, gamma_, mom_, orthogDim, &t);
}
virtual double flops(const unsigned int blockSizei, const unsigned int blockSizej)
{
return vol_*(2*8.0+6.0+8.0*mom_.size())*blockSizei*blockSizej*gamma_.size();
}
virtual double bytes(const unsigned int blockSizei, const unsigned int blockSizej)
{
return vol_*(12.0*sizeof(T))*blockSizei*blockSizej
+ vol_*(2.0*sizeof(T)*mom_.size())*blockSizei*blockSizej*gamma_.size();
}
private:
const std::vector<Gamma::Algebra> &gamma_;
const std::vector<LatticeComplex> &mom_;
GridBase *grid_;
double vol_;
};
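As a sanity check of the flops formula above (lattice and block sizes assumed for illustration): with a single momentum the per-site cost is 2*8 + 6 + 8 = 30 floating-point operations, so a 16x16 cache block with 16 gamma insertions on a 24^3x64 lattice (about 8.8e5 sites) comes to roughly 8.8e5 x 30 x 256 x 16 ~ 1.1e11 flops, i.e. of order 0.1 Tflop per block.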
template <typename FImpl>
class TA2AMesonField : public Module<A2AMesonFieldPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
typedef A2AMatrixBlockComputation<Complex,
FermionField,
A2AMesonFieldMetadata,
MF_IO_TYPE> Computation;
typedef MesonFieldKernel<Complex, FImpl> Kernel;
SOLVER_TYPE_ALIASES(FImpl,);
typedef Eigen::TensorMap<Eigen::Tensor<Complex, 5, Eigen::RowMajor>> MesonField;
typedef Eigen::TensorMap<Eigen::Tensor<MF_IO_TYPE, 5, Eigen::RowMajor>> MesonFieldIo;
typedef A2AMatrixIo<MF_IO_TYPE, A2AMesonFieldMetadata> MatrixIo;
struct IoHelper
{
MatrixIo io;
A2AMesonFieldMetadata metadata;
size_t offset;
unsigned int i, j, blockSizei, blockSizej;
};
public:
// constructor
TA2AMesonField(const std::string name);
@ -133,13 +100,20 @@ public:
// execution
virtual void execute(void);
private:
bool hasPhase_{false};
std::string momphName_;
std::vector<Gamma::Algebra> gamma_;
std::vector<std::vector<Real>> mom_;
// IO
std::string ioname(const unsigned int m, const unsigned int g) const;
std::string filename(const unsigned int m, const unsigned int g) const;
void saveBlock(const MF_IO_TYPE *data, IoHelper &h);
private:
bool hasPhase_{false};
std::string momphName_;
std::vector<Gamma::Algebra> gamma_;
std::vector<std::vector<Real>> mom_;
std::vector<IoHelper> nodeIo_;
};
MODULE_REGISTER(A2AMesonField, ARG(TA2AMesonField<FIMPL>), MContraction);
MODULE_REGISTER(ZA2AMesonField, ARG(TA2AMesonField<ZFIMPL>), MContraction);
/******************************************************************************
* TA2AMesonField implementation *
@ -156,7 +130,7 @@ TA2AMesonField<FImpl>::TA2AMesonField(const std::string name)
template <typename FImpl>
std::vector<std::string> TA2AMesonField<FImpl>::getInput(void)
{
std::vector<std::string> in = {par().left, par().right};
std::vector<std::string> in = {par().v, par().w};
return in;
}
@ -212,31 +186,34 @@ void TA2AMesonField<FImpl>::setup(void)
}
mom_.push_back(p);
}
envCache(std::vector<ComplexField>, momphName_, 1,
par().mom.size(), envGetGrid(ComplexField));
envTmpLat(ComplexField, "coor");
envTmp(Computation, "computation", 1, envGetGrid(FermionField),
env().getNd() - 1, mom_.size(), gamma_.size(), par().block,
par().cacheBlock, this);
// preallocate memory for meson field block
auto tgp = env().getDim().back()*gamma_.size()*mom_.size();
envTmp(Vector<MF_IO_TYPE>, "mfBuf", 1, tgp*par().block*par().block);
envTmp(Vector<Complex>, "mfCache", 1, tgp*par().cacheBlock*par().cacheBlock);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TA2AMesonField<FImpl>::execute(void)
{
auto &left = envGet(std::vector<FermionField>, par().left);
auto &right = envGet(std::vector<FermionField>, par().right);
auto &v = envGet(std::vector<FermionField>, par().v);
auto &w = envGet(std::vector<FermionField>, par().w);
int nt = env().getDim().back();
int N_i = left.size();
int N_j = right.size();
int N_i = w.size();
int N_j = v.size();
int ngamma = gamma_.size();
int nmom = mom_.size();
int block = par().block;
int cacheBlock = par().cacheBlock;
LOG(Message) << "Computing all-to-all meson fields" << std::endl;
LOG(Message) << "Left: '" << par().left << "' Right: '" << par().right << "'" << std::endl;
LOG(Message) << "W: '" << par().w << "' V: '" << par().v << "'" << std::endl;
LOG(Message) << "Momenta:" << std::endl;
for (auto &p: mom_)
{
@ -251,6 +228,9 @@ void TA2AMesonField<FImpl>::execute(void)
<< " (filesize " << sizeString(nt*N_i*N_j*sizeof(MF_IO_TYPE))
<< "/momentum/bilinear)" << std::endl;
///////////////////////////////////////////////
// Momentum setup
///////////////////////////////////////////////
auto &ph = envGet(std::vector<ComplexField>, momphName_);
if (!hasPhase_)
@ -273,43 +253,189 @@ void TA2AMesonField<FImpl>::execute(void)
hasPhase_ = true;
stopTimer("Momentum phases");
}
//////////////////////////////////////////////////////////////////////////
// i,j is first loop over SchurBlock factors reusing 5D matrices
// ii,jj is second loop over cacheBlock factors for high perf contraction

// iii,jjj are loops within cacheBlock
// Total index is sum of these i+ii+iii etc...
//////////////////////////////////////////////////////////////////////////
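A toy illustration of the three-level index decomposition described above, where the total index is i + ii + iii (sizes hypothetical):

#include <algorithm>
#include <cstdio>

int main()
{
    const int N = 10, block = 4, cacheBlock = 2; // assumed toy sizes
    for (int i = 0; i < N; i += block)                               // outer (SchurBlock) level
    for (int ii = 0; ii < std::min(block, N - i); ii += cacheBlock)  // cacheBlock level
    for (int iii = 0; iii < std::min(cacheBlock, N - i - ii); ++iii) // innermost
    {
        std::printf("total index %2d = %d + %d + %d\n", i + ii + iii, i, ii, iii);
    }
}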
double flops;
double bytes;
double vol = env().getVolume();
double t_kernel = 0.0;
double nodes = env().getGrid()->NodeCount();
double tot_kernel;
auto ionameFn = [this](const unsigned int m, const unsigned int g)
envGetTmp(Vector<MF_IO_TYPE>, mfBuf);
envGetTmp(Vector<Complex>, mfCache);
double t0 = usecond();
int NBlock_i = N_i/block + (((N_i % block) != 0) ? 1 : 0);
int NBlock_j = N_j/block + (((N_j % block) != 0) ? 1 : 0);
for(int i=0;i<N_i;i+=block)
for(int j=0;j<N_j;j+=block)
{
std::stringstream ss;
// Get the W and V vectors for this block^2 set of terms
int N_ii = MIN(N_i-i,block);
int N_jj = MIN(N_j-j,block);
ss << gamma_[g] << "_";
for (unsigned int mu = 0; mu < mom_[m].size(); ++mu)
LOG(Message) << "Meson field block "
<< j/block + NBlock_j*i/block + 1
<< "/" << NBlock_i*NBlock_j << " [" << i <<" .. "
<< i+N_ii-1 << ", " << j <<" .. " << j+N_jj-1 << "]"
<< std::endl;
MesonFieldIo mfBlock(mfBuf.data(),nmom,ngamma,nt,N_ii,N_jj);
// Series of cache blocked chunks of the contractions within this block
flops = 0.0;
bytes = 0.0;
for(int ii=0;ii<N_ii;ii+=cacheBlock)
for(int jj=0;jj<N_jj;jj+=cacheBlock)
{
ss << mom_[m][mu] << ((mu == mom_[m].size() - 1) ? "" : "_");
int N_iii = MIN(N_ii-ii,cacheBlock);
int N_jjj = MIN(N_jj-jj,cacheBlock);
MesonField mfCacheBlock(mfCache.data(),nmom,ngamma,nt,N_iii,N_jjj);
startTimer("contraction: total");
makeMesonFieldBlock(mfCacheBlock, &w[i+ii], &v[j+jj], gamma_, ph,
env().getNd() - 1, this);
stopTimer("contraction: total");
// flops for general N_c & N_s
flops += vol * ( 2 * 8.0 + 6.0 + 8.0*nmom) * N_iii*N_jjj*ngamma;
bytes += vol * (12.0 * sizeof(Complex) ) * N_iii*N_jjj
+ vol * ( 2.0 * sizeof(Complex) *nmom ) * N_iii*N_jjj* ngamma;
startTimer("cache copy");
parallel_for_nest5(int m =0;m< nmom;m++)
for(int g =0;g< ngamma;g++)
for(int t =0;t< nt;t++)
for(int iii=0;iii< N_iii;iii++)
for(int jjj=0;jjj< N_jjj;jjj++)
{
mfBlock(m,g,t,ii+iii,jj+jjj) = mfCacheBlock(m,g,t,iii,jjj);
}
stopTimer("cache copy");
}
return ss.str();
};
// perf
tot_kernel = getDTimer("contraction: colour trace & mom.")
+ getDTimer("contraction: local space sum");
t_kernel = tot_kernel - t_kernel;
LOG(Message) << "Kernel perf " << flops/t_kernel/1.0e3/nodes
<< " Gflop/s/node " << std::endl;
LOG(Message) << "Kernel perf " << bytes/t_kernel*1.0e6/1024/1024/1024/nodes
<< " GB/s/node " << std::endl;
t_kernel = tot_kernel;
auto filenameFn = [this, &ionameFn](const unsigned int m, const unsigned int g)
{
return par().output + "." + std::to_string(vm().getTrajectory())
+ "/" + ionameFn(m, g) + ".h5";
};
auto metadataFn = [this](const unsigned int m, const unsigned int g)
{
A2AMesonFieldMetadata md;
for (auto pmu: mom_[m])
// IO
if (!par().output.empty())
{
md.momentum.push_back(pmu);
}
md.gamma = gamma_[g];
double blockSize, ioTime;
unsigned int myRank = env().getGrid()->ThisRank(),
nRank = env().getGrid()->RankCount();
return md;
};
LOG(Message) << "Writing block to disk" << std::endl;
ioTime = -getDTimer("IO: write block");
startTimer("IO: total");
makeFileDir(filename(0, 0), env().getGrid());
#ifdef MF_PARALLEL_IO
env().getGrid()->Barrier();
nodeIo_.clear();
for(int f = myRank; f < nmom*ngamma; f += nRank)
{
const unsigned int m = f/ngamma, g = f % ngamma;
IoHelper h;
Kernel kernel(gamma_, ph, envGetGrid(FermionField));
h.io = MatrixIo(filename(m, g), ioname(m, g), nt, N_i, N_j);
for (auto pmu: mom_[m])
{
h.metadata.momentum.push_back(pmu);
}
h.metadata.gamma = gamma_[g];
h.i = i;
h.j = j;
h.blockSizei = mfBlock.dimension(3);
h.blockSizej = mfBlock.dimension(4);
h.offset = (m*ngamma + g)*nt*h.blockSizei*h.blockSizej;
nodeIo_.push_back(h);
}
// parallel IO
for (auto &h: nodeIo_)
{
saveBlock(mfBlock.data(), h);
}
env().getGrid()->Barrier();
#else
// serial IO
for(int m = 0; m < nmom; m++)
for(int g = 0; g < ngamma; g++)
{
IoHelper h;
envGetTmp(Computation, computation);
computation.execute(left, right, kernel, ionameFn, filenameFn, metadataFn);
h.io = MatrixIo(filename(m, g), ioname(m, g), nt, N_i, N_j);
for (auto pmu: mom_[m])
{
h.metadata.momentum.push_back(pmu);
}
h.metadata.gamma = gamma_[g];
h.i = i;
h.j = j;
h.blockSizei = mfBlock.dimension(3);
h.blockSizej = mfBlock.dimension(4);
h.offset = (m*ngamma + g)*nt*h.blockSizei*h.blockSizej;
saveBlock(mfBlock.data(), h);
}
#endif
stopTimer("IO: total");
blockSize = static_cast<double>(nmom*ngamma*nt*N_ii*N_jj*sizeof(MF_IO_TYPE));
ioTime += getDTimer("IO: write block");
LOG(Message) << "HDF5 IO done " << sizeString(blockSize) << " in "
<< ioTime << " us ("
<< blockSize/ioTime*1.0e6/1024/1024
<< " MB/s)" << std::endl;
}
}
}
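In the MF_PARALLEL_IO branch above, rank r owns files r, r + nRank, r + 2*nRank, ..., so the nmom x ngamma outputs are partitioned disjointly across ranks. A standalone sketch of the ownership pattern (sizes assumed; each loop over myRank stands in for one MPI rank):

#include <cstdio>

int main()
{
    const int nRank = 4, nmom = 3, ngamma = 16; // assumed sizes
    for (int myRank = 0; myRank < nRank; ++myRank)
    {
        for (int f = myRank; f < nmom*ngamma; f += nRank)
        {
            const int m = f/ngamma, g = f % ngamma; // file index -> (momentum, gamma)
            std::printf("rank %d writes (mom=%d, gamma=%d)\n", myRank, m, g);
        }
    }
}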
// IO
template <typename FImpl>
std::string TA2AMesonField<FImpl>::ioname(unsigned int m, unsigned int g) const
{
std::stringstream ss;
ss << gamma_[g] << "_";
for (unsigned int mu = 0; mu < mom_[m].size(); ++mu)
{
ss << mom_[m][mu] << ((mu == mom_[m].size() - 1) ? "" : "_");
}
return ss.str();
}
template <typename FImpl>
std::string TA2AMesonField<FImpl>::filename(unsigned int m, unsigned int g) const
{
return par().output + "." + std::to_string(vm().getTrajectory())
+ "/" + ioname(m, g) + ".h5";
}
template <typename FImpl>
void TA2AMesonField<FImpl>::saveBlock(const MF_IO_TYPE *data, IoHelper &h)
{
if ((h.i == 0) and (h.j == 0))
{
startTimer("IO: file creation");
h.io.initFile(h.metadata, par().block);
stopTimer("IO: file creation");
}
startTimer("IO: write block");
h.io.saveBlock(data + h.offset, h.i, h.j, h.blockSizei, h.blockSizej);
stopTimer("IO: write block");
}
END_MODULE_NAMESPACE

View File

@ -0,0 +1,224 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AMesonFieldKernels.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MContraction_A2AMesonFieldKernels_hpp_
#define Hadrons_MContraction_A2AMesonFieldKernels_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Grid/Eigen/unsupported/CXX11/Tensor>
BEGIN_HADRONS_NAMESPACE
BEGIN_MODULE_NAMESPACE(MContraction)
////////////////////////////////////////////////////////////////////////////////
// Cache blocked arithmetic routine
// Could move to Grid ???
////////////////////////////////////////////////////////////////////////////////
template <typename Field, typename MesonField>
void makeMesonFieldBlock(MesonField &mat,
const Field *lhs_wi,
const Field *rhs_vj,
std::vector<Gamma::Algebra> gamma,
const std::vector<LatticeComplex> &mom,
int orthogdim,
ModuleBase *caller = nullptr)
{
typedef typename Field::vector_object vobj;
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
typedef iSpinMatrix<vector_type> SpinMatrix_v;
typedef iSpinMatrix<scalar_type> SpinMatrix_s;
int Lblock = mat.dimension(3);
int Rblock = mat.dimension(4);
GridBase *grid = lhs_wi[0]._grid;
const int Nd = grid->_ndimension;
const int Nsimd = grid->Nsimd();
int Nt = grid->GlobalDimensions()[orthogdim];
int Ngamma = gamma.size();
int Nmom = mom.size();
int fd=grid->_fdimensions[orthogdim];
int ld=grid->_ldimensions[orthogdim];
int rd=grid->_rdimensions[orthogdim];
// will locally sum vectors first
// sum across these down to scalars
// splitting the SIMD
int MFrvol = rd*Lblock*Rblock*Nmom;
int MFlvol = ld*Lblock*Rblock*Nmom;
Vector<SpinMatrix_v > lvSum(MFrvol);
parallel_for (int r = 0; r < MFrvol; r++)
{
lvSum[r] = zero;
}
Vector<SpinMatrix_s > lsSum(MFlvol);
parallel_for (int r = 0; r < MFlvol; r++)
{
lsSum[r]=scalar_type(0.0);
}
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
if (caller) caller->startTimer("contraction: colour trace & mom.");
// Nested parallelism would be ok
// Wasting cores here. Test case r
parallel_for(int r=0;r<rd;r++)
{
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++)
for(int b=0;b<e2;b++)
{
int ss= so+n*stride+b;
for(int i=0;i<Lblock;i++)
{
auto left = conjugate(lhs_wi[i]._odata[ss]);
for(int j=0;j<Rblock;j++)
{
SpinMatrix_v vv;
auto right = rhs_vj[j]._odata[ss];
for(int s1=0;s1<Ns;s1++)
for(int s2=0;s2<Ns;s2++)
{
vv()(s1,s2)() = left()(s2)(0) * right()(s1)(0)
+ left()(s2)(1) * right()(s1)(1)
+ left()(s2)(2) * right()(s1)(2);
}
// After getting the sitewise product do the mom phase loop
int base = Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*r;
for ( int m=0;m<Nmom;m++)
{
int idx = m+base;
auto phase = mom[m]._odata[ss];
mac(&lvSum[idx],&vv,&phase);
}
}
}
}
}
if (caller) caller->stopTimer("contraction: colour trace & mom.");
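Stripped of SIMD vectorisation and lattice indexing, the innermost kernel above forms, per site, the colour-contracted spin outer product between the conjugated left (w) vector and the right (v) vector. A scalar sketch with std::complex (array types hypothetical):

#include <array>
#include <complex>

using Cplx       = std::complex<double>;
using SpinVec    = std::array<std::array<Cplx, 3>, 4>; // [spin][colour]
using SpinMatrix = std::array<std::array<Cplx, 4>, 4>; // [s1][s2]

// vv(s1,s2) = sum_c conj(w(s2,c)) * v(s1,c), matching the loop above in
// which `left` is the conjugated w vector at the site.
SpinMatrix siteProduct(const SpinVec &w, const SpinVec &v)
{
    SpinMatrix vv{};
    for (int s1 = 0; s1 < 4; ++s1)
    for (int s2 = 0; s2 < 4; ++s2)
    for (int c  = 0; c  < 3; ++c)
    {
        vv[s1][s2] += std::conj(w[s2][c])*v[s1][c];
    }
    return vv;
}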
// Sum across simd lanes in the plane, breaking out orthog dir.
if (caller) caller->startTimer("contraction: local space sum");
parallel_for(int rt=0;rt<rd;rt++)
{
std::vector<int> icoor(Nd);
std::vector<SpinMatrix_s> extracted(Nsimd);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nmom;m++)
{
int ij_rdx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*rt;
extract(lvSum[ij_rdx],extracted);
for(int idx=0;idx<Nsimd;idx++)
{
grid->iCoorFromIindex(icoor,idx);
int ldx = rt+icoor[orthogdim]*rd;
int ij_ldx = m+Nmom*i+Nmom*Lblock*j+Nmom*Lblock*Rblock*ldx;
lsSum[ij_ldx]=lsSum[ij_ldx]+extracted[idx];
}
}
}
if (caller) caller->stopTimer("contraction: local space sum");
// ld loop and local only??
if (caller) caller->startTimer("contraction: spin trace");
int pd = grid->_processors[orthogdim];
int pc = grid->_processor_coor[orthogdim];
parallel_for_nest2(int lt=0;lt<ld;lt++)
{
for(int pt=0;pt<pd;pt++)
{
int t = lt + pt*ld;
if (pt == pc)
{
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int m=0;m<Nmom;m++)
{
int ij_dx = m+Nmom*i + Nmom*Lblock * j + Nmom*Lblock * Rblock * lt;
for(int mu=0;mu<Ngamma;mu++)
{
// this is a bit slow
mat(m,mu,t,i,j) = trace(lsSum[ij_dx]*Gamma(gamma[mu]));
}
}
}
else
{
const scalar_type zz(0.0);
for(int i=0;i<Lblock;i++)
for(int j=0;j<Rblock;j++)
for(int mu=0;mu<Ngamma;mu++)
for(int m=0;m<Nmom;m++)
{
mat(m,mu,t,i,j) =zz;
}
}
}
}
if (caller) caller->stopTimer("contraction: spin trace");
////////////////////////////////////////////////////////////////////
// This global sum is taking as much as 50% of time on 16 nodes
// Vector size is 7 x 16 x 32 x 16 x 16 x sizeof(complex) = 2MB - 60MB depending on volume
// Healthy size that should suffice
////////////////////////////////////////////////////////////////////
if (caller) caller->startTimer("contraction: global sum");
grid->GlobalSumVector(&mat(0,0,0,0,0),Nmom*Ngamma*Nt*Lblock*Rblock);
if (caller) caller->stopTimer("contraction: global sum");
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif //Hadrons_MContraction_A2AMesonField_hpp_
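Checking the arithmetic in the global-sum comment above: 7 x 16 x 32 x 16 x 16 = 917,504 complex numbers, about 14 MB at 16 bytes per double-precision complex, which sits comfortably inside the quoted 2MB-60MB range.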

View File

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/GaugeFix.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MGauge/GaugeFix.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MGauge;
template class Grid::Hadrons::MGauge::TGaugeFix<GIMPL>;

View File

@ -1,135 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MGauge/GaugeFix.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MGaugeFix_hpp_
#define Hadrons_MGaugeFix_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/qcd/utils/GaugeFix.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Fix gauge *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MGauge)
class GaugeFixPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(GaugeFixPar,
std::string, gauge,
Real, alpha,
int, maxiter,
Real, Omega_tol,
Real, Phi_tol,
bool, Fourier);
};
template <typename GImpl>
class TGaugeFix: public Module<GaugeFixPar>
{
public:
GAUGE_TYPE_ALIASES(GImpl,);
public:
// constructor
TGaugeFix(const std::string name);
// destructor
virtual ~TGaugeFix(void) {};
// dependencies/products
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(GaugeFix, TGaugeFix<GIMPL>, MGauge);
/******************************************************************************
* TGaugeFix implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename GImpl>
TGaugeFix<GImpl>::TGaugeFix(const std::string name)
: Module<GaugeFixPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename GImpl>
std::vector<std::string> TGaugeFix<GImpl>::getInput(void)
{
std::vector<std::string> in = {par().gauge};
return in;
}
template <typename GImpl>
std::vector<std::string> TGaugeFix<GImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename GImpl>
void TGaugeFix<GImpl>::setup(void)
{
envCreateLat(GaugeField, getName());
}
// execution ///////////////////////////////////////////////////////////////////
template <typename GImpl>
void TGaugeFix<GImpl>::execute(void)
//Loads the gauge and fixes it
{
std::cout << "executing" << std::endl;
LOG(Message) << "Fixing the Gauge" << std::endl;
LOG(Message) << par().gauge << std::endl;
auto &U = envGet(GaugeField, par().gauge);
auto &Umu = envGet(GaugeField, getName());
LOG(Message) << "Gauge Field fetched" << std::endl;
//do we allow maxiter etc to be user set?
Real alpha = par().alpha;
int maxiter = par().maxiter;
Real Omega_tol = par().Omega_tol;
Real Phi_tol = par().Phi_tol;
bool Fourier = par().Fourier;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(U,alpha,maxiter,Omega_tol,Phi_tol,Fourier);
Umu = U;
LOG(Message) << "Gauge Fixed" << std::endl;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MGaugeFix_hpp_

View File

@ -1,34 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MIO/LoadA2AVectors.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MIO/LoadA2AVectors.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MIO;
template class Grid::Hadrons::MIO::TLoadA2AVectors<FIMPL>;

View File

@ -1,120 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MIO/LoadA2AVectors.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MIO_LoadA2AVectors_hpp_
#define Hadrons_MIO_LoadA2AVectors_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/A2AVectors.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Module to load all-to-all vectors *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MIO)
class LoadA2AVectorsPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(LoadA2AVectorsPar,
std::string, filestem,
bool, multiFile,
unsigned int, size);
};
template <typename FImpl>
class TLoadA2AVectors: public Module<LoadA2AVectorsPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
TLoadA2AVectors(const std::string name);
// destructor
virtual ~TLoadA2AVectors(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(LoadA2AVectors, TLoadA2AVectors<FIMPL>, MIO);
/******************************************************************************
* TLoadA2AVectors implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TLoadA2AVectors<FImpl>::TLoadA2AVectors(const std::string name)
: Module<LoadA2AVectorsPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TLoadA2AVectors<FImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImpl>
std::vector<std::string> TLoadA2AVectors<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TLoadA2AVectors<FImpl>::setup(void)
{
envCreate(std::vector<FermionField>, getName(), 1, par().size,
envGetGrid(FermionField));
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TLoadA2AVectors<FImpl>::execute(void)
{
auto &vec = envGet(std::vector<FermionField>, getName());
A2AVectorsIo::read(vec, par().filestem, par().multiFile, vm().getTrajectory());
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MIO_LoadA2AVectors_hpp_

View File

@ -32,6 +32,4 @@ using namespace Hadrons;
using namespace MIO;
template class Grid::Hadrons::MIO::TLoadEigenPack<FermionEigenPack<FIMPL>>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MIO::TLoadEigenPack<FermionEigenPack<FIMPL, FIMPLF>>;
#endif

View File

@ -54,9 +54,7 @@ template <typename Pack>
class TLoadEigenPack: public Module<LoadEigenPackPar>
{
public:
typedef typename Pack::Field Field;
typedef typename Pack::FieldIo FieldIo;
typedef BaseEigenPack<Field> BasePack;
typedef EigenPack<typename Pack::Field> BasePack;
public:
// constructor
TLoadEigenPack(const std::string name);
@ -72,9 +70,6 @@ public:
};
MODULE_REGISTER_TMP(LoadFermionEigenPack, TLoadEigenPack<FermionEigenPack<FIMPL>>, MIO);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(LoadFermionEigenPackIo32, ARG(TLoadEigenPack<FermionEigenPack<FIMPL, FIMPLF>>), MIO);
#endif
/******************************************************************************
* TLoadEigenPack implementation *
@ -106,14 +101,9 @@ std::vector<std::string> TLoadEigenPack<Pack>::getOutput(void)
template <typename Pack>
void TLoadEigenPack<Pack>::setup(void)
{
GridBase *gridIo = nullptr;
if (typeHash<Field>() != typeHash<FieldIo>())
{
gridIo = envGetRbGrid(FieldIo, par().Ls);
}
env().createGrid(par().Ls);
envCreateDerived(BasePack, Pack, getName(), par().Ls, par().size,
envGetRbGrid(Field, par().Ls), gridIo);
env().getRbGrid(par().Ls));
}
// execution ///////////////////////////////////////////////////////////////////

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/Amputate.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MNPR/Amputate.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MNPR;
template class Grid::Hadrons::MNPR::TAmputate<FIMPL,FIMPL>;

@ -1,200 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/Amputate.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Julia Kettle J.R.Kettle-2@sms.ed.ac.uk
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_Amputate_hpp_
#define Hadrons_Amputate_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/Eigen/LU>
//#include <Grid/qcd/utils/PropagatorUtils.h>
//#include <Grid/serialisation/Serialisation.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* TAmputate *
Performs bilinear contractions of the type tr[g5*adj(Sout)*g5*G*Sin]
Suitable for non-exceptional momenta
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MNPR)
class AmputatePar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(AmputatePar,
std::string, Sin, //need to make this a propagator type?
std::string, Sout, //same
std::string, vertex,
std::string, pin,
std::string, pout,
std::string, output,
std::string, input);
};
template <typename FImpl1, typename FImpl2>
class TAmputate: public Module<AmputatePar>
{
public:
FERM_TYPE_ALIASES(FImpl1, 1);
FERM_TYPE_ALIASES(FImpl2, 2);
class Result: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
std::vector<Complex>, Vamp,
);
};
public:
// constructor
TAmputate(const std::string name);
// destructor
virtual ~TAmputate(void) {};
// dependencies/products
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
virtual SpinColourMatrix invertspincolmat(SpinColourMatrix &scmat);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(Amputate, ARG(TAmputate<FIMPL, FIMPL>), MNPR);
/******************************************************************************
* TAmputate implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
TAmputate<FImpl1, FImpl2>::TAmputate(const std::string name)
: Module<AmputatePar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TAmputate<FImpl1, FImpl2>::getInput(void)
{
std::vector<std::string> input = {par().Sin, par().Sout, par().vertex};
return input;
}
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TAmputate<FImpl1, FImpl2>::getOutput(void)
{
std::vector<std::string> output = {getName()};
return output;
}
// Invert spin colour matrix using Eigen
template <typename Fimpl1, typename Fimpl2>
SpinColourMatrix TAmputate<Fimpl1, Fimpl2>::invertspincolmat(SpinColourMatrix &scmat)
{
Eigen::MatrixXcf scmat_2d(Ns*Nc,Ns*Nc);
for(int ic=0; ic<Nc; ic++){
for(int jc=0; jc<Nc; jc++){
for(int is=0; is<Ns; is++){
for(int js=0; js<Ns; js++){
scmat_2d(Ns*ic+is,Ns*jc+js) = scmat()(is,js)(ic,jc);
}}
}}
Eigen::MatrixXcf scmat_2d_inv = scmat_2d.inverse();
SpinColourMatrix scmat_inv;
for(int ic=0; ic<Nc; ic++){
for(int jc=0; jc<Nc; jc++){
for(int is=0; is<Ns; is++){
for(int js=0; js<Ns; js++){
scmat_inv()(is,js)(ic,jc) = scmat_2d_inv(Ns*ic+is,Ns*jc+js);
}}
}}
return scmat_inv;
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
void TAmputate<FImpl1, FImpl2>::execute(void)
{
LOG(Message) << "Computing bilinear amputations '" << getName() << "' using"
<< " momentum '" << par().Sin << "' and '" << par().Sout << "'"
<< std::endl;
BinaryWriter writer(par().output);
PropagatorField1 &Sin = *env().template getObject<PropagatorField1>(par().Sin); //Do these have the phases taken into account?? Don't think so. FIX
PropagatorField2 &Sout = *env().template getObject<PropagatorField2>(par().Sout);
std::vector<int> pin = strToVec<int>(par().pin), pout = strToVec<int>(par().pout);
std::vector<Real> latt_size(pin.begin(), pin.end());
LatticeComplex pdotxin(env().getGrid()), pdotxout(env().getGrid()), coor(env().getGrid());
LOG(Message) << "Propagators set up " << std::endl;
std::vector<SpinColourMatrix> vertex; // Let's read from file here
Gamma g5(Gamma::Algebra::Gamma5);
Result result;
LOG(Message) << "reading file - " << par().input << std::endl;
BinaryReader reader(par().input);
Complex Ci(0.0,1.0);
std::string svertex;
read(reader,"vertex", vertex);
LOG(Message) << "vertex read" << std::endl;
pdotxin=zero;
pdotxout=zero;
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
pdotxin = pdotxin +(TwoPiL * pin[mu]) * coor;
pdotxout= pdotxout +(TwoPiL * pout[mu]) * coor;
}
Sin = Sin*exp(-Ci*pdotxin); //phase corrections
Sout = Sout*exp(-Ci*pdotxout);
SpinColourMatrix Sin_mom = sum(Sin);
SpinColourMatrix Sout_mom = sum(Sout);
LOG(Message) << "summed over lattice" << std::endl;
LOG(Message) << "Lattice -> spincolourmatrix conversion" << std::endl;
SpinColourMatrix Sin_inv = invertspincolmat(Sin_mom);
SpinColourMatrix Sout_inv = invertspincolmat(Sout_mom);
LOG(Message) << "Inversions done" << std::endl;
result.Vamp.resize(Gamma::nGamma/2);
for( int mu=0; mu < Gamma::nGamma/2; mu++){
Gamma::Algebra gam = mu;
result.Vamp[mu] = 1/12.0*trace(adj(Gamma(mu*2+1))*g5*Sout_inv*g5*vertex[mu]*Sin_inv);
LOG(Message) << "Vamp[" << mu << "] - " << result.Vamp[mu] << std::endl;
}
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_Amputate_hpp_
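The inversion above hinges on a fixed flattening convention: a spin-colour matrix is packed into a (Ns*Nc) x (Ns*Nc) complex matrix with combined index Ns*ic+is, spin running fastest. (Note the module uses single-precision MatrixXcf even though Grid's default Complex is double precision.) A standalone sketch of the convention, using Eigen only; the test matrix is illustrative.

#include <Eigen/LU>
#include <iostream>

int main(void)
{
    const int Ns = 4, Nc = 3;                    // spin and colour dimensions
    Eigen::MatrixXcd scmat(Ns*Nc, Ns*Nc);        // packed spin-colour matrix
    scmat.setIdentity();
    scmat *= 2.;                                 // trivially invertible test input
    Eigen::MatrixXcd scmat_inv = scmat.inverse();
    // element (is,ic),(js,jc) lives at row Ns*ic+is, column Ns*jc+js
    int is = 1, ic = 2, js = 1, jc = 2;
    std::cout << scmat_inv(Ns*ic+is, Ns*jc+js) << std::endl; // expect (0.5,0)
    return 0;
}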

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/Bilinear.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MNPR/Bilinear.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MNPR;
template class Grid::Hadrons::MNPR::TBilinear<FIMPL,FIMPL>;

@ -1,225 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/Bilinear.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Julia Kettle J.R.Kettle-2@sms.ed.ac.uk
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_Bilinear_hpp_
#define Hadrons_Bilinear_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
//#include <Grid/qcd/utils/PropagatorUtils.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* TBilinear *
Performs bilinear contractions of the type tr[g5*adj(Sout)*g5*G*Sin]
Suitable for non-exceptional momenta in Rome-Southampton NPR
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MNPR)
class BilinearPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(BilinearPar,
std::string, Sin,
std::string, Sout,
std::string, pin,
std::string, pout,
std::string, output);
};
template <typename FImpl1, typename FImpl2>
class TBilinear: public Module<BilinearPar>
{
public:
FERM_TYPE_ALIASES(FImpl1, 1);
FERM_TYPE_ALIASES(FImpl2, 2);
class Result: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
std::vector<SpinColourMatrix>, bilinear);
};
public:
// constructor
TBilinear(const std::string name);
// destructor
virtual ~TBilinear(void) {};
// dependencies/products
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
//LatticeSpinColourMatrix PhaseProps(LatticeSpinColourMatrix S, std::vector<Real> p);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(Bilinear, ARG(TBilinear<FIMPL, FIMPL>), MNPR);
/******************************************************************************
* TBilinear implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
TBilinear<FImpl1, FImpl2>::TBilinear(const std::string name)
: Module<BilinearPar>(name)
{}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
void TBilinear<FImpl1, FImpl2>::setup(void)
{
//env().template registerLattice<LatticeSpinColourMatrix>(getName());
//env().template registerObject<SpinColourMatrix>(getName());
}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TBilinear<FImpl1, FImpl2>::getInput(void)
{
std::vector<std::string> input = {par().Sin, par().Sout};
return input;
}
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TBilinear<FImpl1, FImpl2>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
/*
/////Phase propagators//////////////////////////
template <typename FImpl1, typename FImpl2>
LatticeSpinColourMatrix TBilinear<FImpl1, FImpl2>::PhaseProps(LatticeSpinColourMatrix S, std::vector<Real> p)
{
GridBase *grid = S._grid;
LatticeComplex pdotx(grid), coor(grid);
std::vector<int> latt_size = grid->_fdimensions;
Complex Ci(0.0,1.0);
pdotx=zero;
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
pdotx = pdotx +(TwoPiL * p[mu]) * coor;
}
S = S*exp(-Ci*pdotx);
return S;
}
*/
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
void TBilinear<FImpl1, FImpl2>::execute(void)
{
/**************************************************************************
Compute the bilinear vertex needed for the NPR.
V(G) = sum_x [ g5 * adj(S'(x,p2)) * g5 * G * S'(x,p1) ]_{si,sj,ci,cj}
G is one of the 16 gamma vertices [I,gmu,g5,g5gmu,sig(mu,nu)]
* G
/ \
p1/ \p2
/ \
/ \
Returns a spin-colour matrix, with indices si,sj, ci,cj
Conventions:
p1 - incoming momenta
p2 - outgoing momenta
q = (p1-p2)
**************************************************************************/
LOG(Message) << "Computing bilinear contractions '" << getName() << "' using"
<< " momentum '" << par().Sin << "' and '" << par().Sout << "'"
<< std::endl;
BinaryWriter writer(par().output);
// Propagators
LatticeSpinColourMatrix &Sin = *env().template getObject<LatticeSpinColourMatrix>(par().Sin);
LatticeSpinColourMatrix &Sout = *env().template getObject<LatticeSpinColourMatrix>(par().Sout);
LatticeComplex pdotxin(env().getGrid()), pdotxout(env().getGrid()), coor(env().getGrid());
// momentum on legs
std::vector<Real> pin = strToVec<Real>(par().pin), pout = strToVec<Real>(par().pout);
std::vector<Real> latt_size(pin.begin(), pin.end());
//bilinears
LatticeSpinColourMatrix bilinear_x(env().getGrid());
SpinColourMatrix bilinear;
Gamma g5(Gamma::Algebra::Gamma5);
Result result;
Complex Ci(0.0,1.0);
//
pdotxin=zero;
pdotxout=zero;
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
pdotxin = pdotxin +(TwoPiL * pin[mu]) * coor;
pdotxout= pdotxout +(TwoPiL * pout[mu]) * coor;
}
Sin = Sin*exp(-Ci*pdotxin); //phase corrections
Sout = Sout*exp(-Ci*pdotxout);
////Set up gamma vector//////////////////////////
std::vector<Gamma> gammavector;
for( int i=0; i<Gamma::nGamma; i++){
Gamma::Algebra gam = i;
gammavector.push_back(Gamma(gam));
}
result.bilinear.resize(Gamma::nGamma);
/////////////////////////////////////////////////
//LatticeSpinMatrix temp = g5*Sout;
////////Form Vertex//////////////////////////////
for (int i=0; i < Gamma::nGamma; i++){
bilinear_x = g5*adj(Sout)*g5*gammavector[i]*Sin;
result.bilinear[i] = sum(bilinear_x); //sum over lattice sites
}
//////////////////////////////////////////////////
write(writer, par().output, result.bilinear);
LOG(Message) << "Complete. Writing results to " << par().output << std:: endl;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_Bilinear_hpp_
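The only lattice-wide manipulation before the volume sum is the plane-wave phasing of the propagators. A standalone sketch of just that step, assuming the standard (2018-era) Grid API; the 8^4 volume and unit momentum are illustrative.

#include <Grid/Grid.h>

using namespace Grid;
using namespace Grid::QCD;

int main(int argc, char *argv[])
{
    Grid_init(&argc, &argv);

    std::vector<int> latt({8, 8, 8, 8});
    GridCartesian grid(latt, GridDefaultSimd(Nd, vComplex::Nsimd()),
                       GridDefaultMpi());
    LatticeComplex pdotx(&grid), coor(&grid), phase(&grid);
    std::vector<Real> p({1., 0., 0., 0.});   // momentum in lattice units
    Complex Ci(0., 1.);

    // pdotx = sum_mu (2*pi/L_mu) * p_mu * x_mu
    pdotx = zero;
    for (unsigned int mu = 0; mu < 4; ++mu)
    {
        Real TwoPiL = 2.*M_PI/latt[mu];
        LatticeCoordinate(coor, mu);
        pdotx = pdotx + (TwoPiL*p[mu])*coor;
    }
    phase = exp(-Ci*pdotx);                  // multiply any field by this

    Grid_finalize();
    return 0;
}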

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/FourQuark.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MNPR/FourQuark.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MNPR;
template class Grid::Hadrons::MNPR::TFourQuark<FIMPL,FIMPL>;

@ -1,274 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNPR/FourQuark.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Julia Kettle J.R.Kettle-2@sms.ed.ac.uk
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_FourQuark_hpp_
#define Hadrons_FourQuark_hpp_
#include <typeinfo>
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Grid/serialisation/Serialisation.h>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* TFourQuark *
Performs fourquark contractions of the type tr[g5*adj(Sout)*g5*G*Sin]
Suitable for non-exceptional momenta
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MNPR)
class FourQuarkPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(FourQuarkPar,
std::string, Sin, //need to make this a propagator type?
std::string, Sout, //same
std::string, pin,
std::string, pout,
bool, fullbasis,
std::string, output);
};
template <typename FImpl1, typename FImpl2>
class TFourQuark: public Module<FourQuarkPar>
{
public:
FERM_TYPE_ALIASES(FImpl1, 1);
FERM_TYPE_ALIASES(FImpl2, 2);
class Result: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(Result,
std::vector<SpinColourSpinColourMatrix>, fourquark);
};
public:
// constructor
TFourQuark(const std::string name);
// destructor
virtual ~TFourQuark(void) {};
// dependencies/products
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void tensorprod(LatticeSpinColourSpinColourMatrix &lret, LatticeSpinColourMatrix a, LatticeSpinColourMatrix b);
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(FourQuark, ARG(TFourQuark<FIMPL, FIMPL>), MNPR);
/******************************************************************************
* TFourQuark implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
TFourQuark<FImpl1, FImpl2>::TFourQuark(const std::string name)
: Module<FourQuarkPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TFourQuark<FImpl1, FImpl2>::getInput(void)
{
std::vector<std::string> input = {par().Sin, par().Sout};
return input;
}
template <typename FImpl1, typename FImpl2>
std::vector<std::string> TFourQuark<FImpl1, FImpl2>::getOutput(void)
{
std::vector<std::string> output = {getName()};
return output;
}
template <typename FImpl1, typename FImpl2>
void TFourQuark<FImpl1, FImpl2>::tensorprod(LatticeSpinColourSpinColourMatrix &lret, LatticeSpinColourMatrix a, LatticeSpinColourMatrix b)
{
#if 0
parallel_for(auto site=lret.begin();site<lret.end();site++) {
for (int si = 0; si < 4; ++si){
for(int sj = 0; sj < 4; ++sj){
for (int ci = 0; ci < 3; ++ci){
for (int cj = 0; cj < 3; ++cj){
for (int sk = 0; sk < 4; ++sk){
for(int sl = 0; sl < 4; ++sl){
for (int ck = 0; ck < 3; ++ck){
for (int cl = 0; cl < 3; ++cl){
lret[site]()(si,sj)(ci,cj)(sk,sl)(ck,cl)=a[site]()(si,sj)(ci,cj)*b[site]()(sk,sl)(ck,cl);
}}
}}
}}
}}
}
#else
// FIXME ; is there a general need for this construct ? In which case we should encapsulate the
// below loops in a helper function.
//LOG(Message) << "sp co mat a is - " << a << std::endl;
//LOG(Message) << "sp co mat b is - " << b << std::endl;
parallel_for(auto site=lret.begin();site<lret.end();site++) {
vTComplex left;
for(int si=0; si < Ns; ++si){
for(int sj=0; sj < Ns; ++sj){
for (int ci=0; ci < Nc; ++ci){
for (int cj=0; cj < Nc; ++cj){
//LOG(Message) << "si, sj, ci, cj - " << si << ", " << sj << ", "<< ci << ", "<< cj << std::endl;
left()()() = a[site]()(si,sj)(ci,cj);
//LOG(Message) << left << std::endl;
lret[site]()(si,sj)(ci,cj)=left()*b[site]();
}}
}}
}
#endif
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
void TFourQuark<FImpl1, FImpl2>::setup(void)
{
envCreateLat(LatticeSpinColourMatrix, getName());
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl1, typename FImpl2>
void TFourQuark<FImpl1, FImpl2>::execute(void)
{
/*********************************************************************************
TFourQuark : Creates the four quark vertex required for the NPR of four-quark ops
V_{Gamma_1,Gamma_2} = sum_x [ ( g5 * adj(S'(x,p2)) * g5 * G1 * S'(x,p1) )_ci,cj;si,sj x ( g5 * adj(S'(x,p2)) * g5 * G2 * S'(x,p1) )_ck,cl;sk,sl ]
Create a bilinear vertex for G1 and G2; the spin and colour indices are kept free. There are 16 potential Gs.
We then find the outer product of V1 and V2, keeping the spin and colour indices uncontracted
Then this is summed over the lattice coordinate
Result is a SpinColourSpinColourMatrix - with 4 colour and 4 spin indices.
We have up to 256 of these including the offdiag (G1 != G2).
\ /
\p1 p1/
\ /
\ /
G1 * * G2
/ \
/ \
/p2 p2\
/ \
*********************************************************************************/
LOG(Message) << "Computing fourquark contractions '" << getName() << "' using"
<< " momentum '" << par().Sin << "' and '" << par().Sout << "'"
<< std::endl;
BinaryWriter writer(par().output);
PropagatorField1 &Sin = *env().template getObject<PropagatorField1>(par().Sin);
PropagatorField2 &Sout = *env().template getObject<PropagatorField2>(par().Sout);
std::vector<Real> pin = strToVec<Real>(par().pin), pout = strToVec<Real>(par().pout);
bool fullbasis = par().fullbasis;
Gamma g5(Gamma::Algebra::Gamma5);
Result result;
std::vector<Real> latt_size(pin.begin(), pin.end());
LatticeComplex pdotxin(env().getGrid()), pdotxout(env().getGrid()), coor(env().getGrid());
LatticeSpinColourMatrix bilinear_mu(env().getGrid()), bilinear_nu(env().getGrid());
LatticeSpinColourSpinColourMatrix lret(env().getGrid());
Complex Ci(0.0,1.0);
//Phase propagators
//Sin = Grid::QCD::PropUtils::PhaseProps(Sin,pin);
//Sout = Grid::QCD::PropUtils::PhaseProps(Sout,pout);
//find p.x for in and out so phase can be accounted for in propagators
pdotxin=zero;
pdotxout=zero;
for (unsigned int mu = 0; mu < 4; ++mu)
{
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
pdotxin = pdotxin +(TwoPiL * pin[mu]) * coor;
pdotxout= pdotxout +(TwoPiL * pout[mu]) * coor;
}
Sin = Sin*exp(-Ci*pdotxin); //phase corrections
Sout = Sout*exp(-Ci*pdotxout);
//Set up Gammas
std::vector<Gamma> gammavector;
for( int i=1; i<Gamma::nGamma; i+=2){
Gamma::Algebra gam = i;
gammavector.push_back(Gamma(gam));
}
lret = zero;
if (fullbasis == true){ // all combinations of mu and nu
result.fourquark.resize(Gamma::nGamma/2*Gamma::nGamma/2);
for( int mu=0; mu<Gamma::nGamma/2; mu++){
bilinear_mu = g5*adj(Sout)*g5*gammavector[mu]*Sin;
for ( int nu=0; nu<Gamma::nGamma/2; nu++){
LatticeSpinColourMatrix bilinear_nu(env().getGrid());
bilinear_nu = g5*adj(Sout)*g5*gammavector[nu]*Sin;
LOG(Message) << "bilinear_nu for nu = " << nu << " is - " << bilinear_nu << std::endl;
result.fourquark[mu*Gamma::nGamma/2 + nu] = zero;
tensorprod(lret,bilinear_mu,bilinear_nu);
result.fourquark[mu*Gamma::nGamma/2 + nu] = sum(lret);
}
}
} else {
result.fourquark.resize(Gamma::nGamma/2);
for ( int mu=0; mu<1; mu++){
//for( int mu=0; mu<Gamma::nGamma/2; mu++ ){
bilinear_mu = g5*adj(Sout)*g5*gammavector[mu]*Sin;
//LOG(Message) << "bilinear_mu for mu = " << mu << " is - " << bilinear_mu << std::endl;
result.fourquark[mu] = zero;
tensorprod(lret,bilinear_mu,bilinear_mu); //tensor outer product
result.fourquark[mu] = sum(lret);
}
}
write(writer, "fourquark", result.fourquark);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_FourQuark_hpp_
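Note that tensorprod above contracts nothing: it forms an outer product in which all eight spin/colour indices stay free. The same layout on plain arrays, as a self-contained sketch (the fill values are illustrative):

#include <complex>
#include <iostream>
#include <vector>

int main(void)
{
    const int Ns = 4, Nc = 3;
    using Cplx = std::complex<double>;
    const std::size_t bilinSize = Ns*Ns*Nc*Nc;    // one (si,sj)(ci,cj) bilinear
    std::vector<Cplx> a(bilinSize, Cplx(1., 0.)); // stand-in for bilinear_mu
    std::vector<Cplx> b(bilinSize, Cplx(0., 2.)); // stand-in for bilinear_nu
    std::vector<Cplx> lret(bilinSize*bilinSize);  // (si,sj)(ci,cj)(sk,sl)(ck,cl)
    for (std::size_t i = 0; i < bilinSize; ++i)
    for (std::size_t k = 0; k < bilinSize; ++k)
    {
        lret[i*bilinSize + k] = a[i]*b[k];        // outer product: nothing summed
    }
    std::cout << lret[0] << std::endl;            // (0,2) = a[0]*b[0]
    return 0;
}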

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Vera Guelpers <Vera.Guelpers@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MNoise;
template class Grid::Hadrons::MNoise::TFullVolumeSpinColorDiagonal<FIMPL>;
template class Grid::Hadrons::MNoise::TFullVolumeSpinColorDiagonal<ZFIMPL>;

@ -1,121 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MNoise/FullVolumeSpinColorDiagonal.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Vera Guelpers <Vera.Guelpers@ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MNoise_FullVolumeSpinColorDiagonal_hpp_
#define Hadrons_MNoise_FullVolumeSpinColorDiagonal_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/DilutedNoise.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* Generate full volume spin-color diagonal noise *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MNoise)
class FullVolumeSpinColorDiagonalPar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(FullVolumeSpinColorDiagonalPar,
unsigned int, nsrc);
};
template <typename FImpl>
class TFullVolumeSpinColorDiagonal: public Module<FullVolumeSpinColorDiagonalPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
TFullVolumeSpinColorDiagonal(const std::string name);
// destructor
virtual ~TFullVolumeSpinColorDiagonal(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(FullVolumeSpinColorDiagonal, TFullVolumeSpinColorDiagonal<FIMPL>, MNoise);
MODULE_REGISTER_TMP(ZFullVolumeSpinColorDiagonal, TFullVolumeSpinColorDiagonal<ZFIMPL>, MNoise);
/******************************************************************************
* TFullVolumeSpinColorDiagonal implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TFullVolumeSpinColorDiagonal<FImpl>::TFullVolumeSpinColorDiagonal(const std::string name)
: Module<FullVolumeSpinColorDiagonalPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TFullVolumeSpinColorDiagonal<FImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImpl>
std::vector<std::string> TFullVolumeSpinColorDiagonal<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TFullVolumeSpinColorDiagonal<FImpl>::setup(void)
{
envCreateDerived(DilutedNoise<FImpl>,
FullVolumeSpinColorDiagonalNoise<FImpl>,
getName(), 1, envGetGrid(FermionField), par().nsrc);
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl>
void TFullVolumeSpinColorDiagonal<FImpl>::execute(void)
{
auto &noise = envGet(DilutedNoise<FImpl>, getName());
LOG(Message) << "Generating full volume, spin-color diagonal noise" << std::endl;
noise.generateNoise(rng4d());
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MNoise_FullVolumeSpinColorDiagonal_hpp_
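For scale, assuming Grid's usual Ns = 4 and Nc = 3: spin-colour diagonal dilution expands each of the nsrc hits into Ns x Nc = 12 fermion fields, so nsrc = 2 should give a noise container of 2 x 12 = 24 fields; "full volume" means there is no further dilution in time or space.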

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MContraction/A2AAslashField.cc
Source file: Hadrons/Modules/MScalarSUN/TimeMomProbe.cc
Copyright (C) 2015-2018
@ -25,10 +25,14 @@ with this program; if not, write to the Free Software Foundation, Inc.,
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MContraction/A2AAslashField.hpp>
#include <Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MContraction;
using namespace MScalarSUN;
template class Grid::Hadrons::MContraction::TA2AAslashField<FIMPL, PhotonR>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<2>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<3>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<4>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<5>>;
template class Grid::Hadrons::MScalarSUN::TTimeMomProbe<ScalarNxNAdjImplR<6>>;

@ -0,0 +1,268 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MScalarSUN/TimeMomProbe.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_MScalarSUN_TimeMomProbe_hpp_
#define Hadrons_MScalarSUN_TimeMomProbe_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
#include <Hadrons/Modules/MScalarSUN/Utils.hpp>
BEGIN_HADRONS_NAMESPACE
/******************************************************************************
* n-point functions O(t,p)*tr(phi(t_1,p_1)*...*phi(t_n,p_n)) *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MScalarSUN)
class TimeMomProbePar: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(TimeMomProbePar,
std::string, field,
std::vector<std::string>, op,
std::vector<std::vector<std::string>>, timeMom,
std::string, output);
};
class TimeMomProbeResult: Serializable
{
public:
GRID_SERIALIZABLE_CLASS_MEMBERS(TimeMomProbeResult,
std::string, op,
std::vector<std::vector<int>>, timeMom,
std::vector<Complex>, data);
};
template <typename SImpl>
class TTimeMomProbe: public Module<TimeMomProbePar>
{
public:
typedef typename SImpl::Field Field;
typedef typename SImpl::SiteField::scalar_object Site;
typedef typename SImpl::ComplexField ComplexField;
typedef std::vector<Complex> SlicedOp;
public:
// constructor
TTimeMomProbe(const std::string name);
// destructor
virtual ~TTimeMomProbe(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
private:
void vectorModulo(std::vector<int> &v);
};
MODULE_REGISTER_TMP(TimeMomProbeSU2, TTimeMomProbe<ScalarNxNAdjImplR<2>>, MScalarSUN);
MODULE_REGISTER_TMP(TimeMomProbeSU3, TTimeMomProbe<ScalarNxNAdjImplR<3>>, MScalarSUN);
MODULE_REGISTER_TMP(TimeMomProbeSU4, TTimeMomProbe<ScalarNxNAdjImplR<4>>, MScalarSUN);
MODULE_REGISTER_TMP(TimeMomProbeSU5, TTimeMomProbe<ScalarNxNAdjImplR<5>>, MScalarSUN);
MODULE_REGISTER_TMP(TimeMomProbeSU6, TTimeMomProbe<ScalarNxNAdjImplR<6>>, MScalarSUN);
/******************************************************************************
* TTimeMomProbe implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename SImpl>
TTimeMomProbe<SImpl>::TTimeMomProbe(const std::string name)
: Module<TimeMomProbePar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename SImpl>
std::vector<std::string> TTimeMomProbe<SImpl>::getInput(void)
{
std::vector<std::string> in = par().op;
in.push_back(par().field);
return in;
}
template <typename SImpl>
std::vector<std::string> TTimeMomProbe<SImpl>::getOutput(void)
{
std::vector<std::string> out;
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename SImpl>
void TTimeMomProbe<SImpl>::setup(void)
{
envTmpLat(ComplexField, "ftBuf");
envTmpLat(Field, "ftMatBuf");
}
// execution ///////////////////////////////////////////////////////////////////
// NB: time is direction 0
template <typename SImpl>
void TTimeMomProbe<SImpl>::vectorModulo(std::vector<int> &v)
{
for (unsigned int mu = 0; mu < env().getNd(); ++mu)
{
auto d = env().getDim(mu);
v[mu] = ((v[mu] % d) + d) % d;
}
}
template <typename SImpl>
void TTimeMomProbe<SImpl>::execute(void)
{
const unsigned int nd = env().getNd();
const unsigned int nt = env().getDim(0);
double partVol = 1.;
std::set<std::vector<int>> timeMomSet;
std::vector<std::vector<std::vector<int>>> timeMom;
std::vector<std::vector<int>> transferMom;
FFT fft(envGetGrid(Field));
std::vector<int> dMask(nd, 1);
std::vector<TimeMomProbeResult> result;
std::map<std::string, std::vector<SlicedOp>> slicedOp;
std::vector<SlicedOp> slicedProbe;
auto &phi = envGet(Field, par().field);
envGetTmp(ComplexField, ftBuf);
envGetTmp(Field, ftMatBuf);
dMask[0] = 0;
for (unsigned int mu = 1; mu < nd; ++mu)
{
partVol *= env().getDim(mu);
}
timeMom.resize(par().timeMom.size());
for (unsigned int p = 0; p < timeMom.size(); ++p)
{
for (auto &tms: par().timeMom[p])
{
std::vector<int> tm = strToVec<int>(tms);
timeMom[p].push_back(tm);
timeMomSet.insert(tm);
}
transferMom.push_back(std::vector<int>(nd - 1, 0));
for (auto &tm: timeMom[p])
{
for (unsigned int j = 1; j < nd; ++j)
{
transferMom[p][j - 1] -= tm[j];
}
}
LOG(Message) << "Probe " << p << " (" << timeMom[p].size() << " points) : " << std::endl;
LOG(Message) << " phi(t_i, p_i) for (t_i, p_i) in " << timeMom[p] << std::endl;
LOG(Message) << " operator with momentum " << transferMom[p] << std::endl;
}
LOG(Message) << "FFT: field '" << par().field << "'" << std::endl;
fft.FFT_dim_mask(ftMatBuf, phi, dMask, FFT::forward);
slicedProbe.resize(timeMom.size());
for (unsigned int p = 0; p < timeMom.size(); ++p)
{
std::vector<int> qt;
LOG(Message) << "Making probe " << p << std::endl;
slicedProbe[p].resize(nt);
for (unsigned int t = 0; t < nt; ++t)
{
Site acc;
for (unsigned int i = 0; i < timeMom[p].size(); ++i)
{
Site buf;
qt = timeMom[p][i];
qt[0] += t;
vectorModulo(qt);
peekSite(buf, ftMatBuf, qt);
if (i == 0)
{
acc = buf;
}
else
{
acc *= buf;
}
}
slicedProbe[p][t] = TensorRemove(trace(acc));
}
//std::cout << slicedProbe[p]<< std::endl;
}
for (auto &o: par().op)
{
auto &op = envGet(ComplexField, o);
slicedOp[o].resize(transferMom.size());
LOG(Message) << "FFT: operator '" << o << "'" << std::endl;
fft.FFT_dim_mask(ftBuf, op, dMask, FFT::forward);
//std::cout << ftBuf << std::endl;
for (unsigned int p = 0; p < transferMom.size(); ++p)
{
std::vector<int> qt(nd, 0);
for (unsigned int j = 1; j < nd; ++j)
{
qt[j] = transferMom[p][j - 1];
}
slicedOp[o][p].resize(nt);
for (unsigned int t = 0; t < nt; ++t)
{
TComplex buf;
qt[0] = t;
vectorModulo(qt);
peekSite(buf, ftBuf, qt);
slicedOp[o][p][t] = TensorRemove(buf);
}
//std::cout << ftBuf << std::endl;
//std::cout << slicedOp[o][p] << std::endl;
}
}
LOG(Message) << "Making correlators" << std::endl;
for (auto &o: par().op)
for (unsigned int p = 0; p < timeMom.size(); ++p)
{
TimeMomProbeResult r;
LOG(Message) << " <" << o << " probe_" << p << ">" << std::endl;
r.op = o;
r.timeMom = timeMom[p];
r.data = makeTwoPoint(slicedOp[o][p], slicedProbe[p], 1./partVol);
result.push_back(r);
}
saveResult(par().output, "timemomprobe", result);
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_MScalarSUN_TimeMomProbe_hpp_
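The ((v % d) + d) % d idiom in vectorModulo is a true Euclidean modulo: the probe shifts qt[0] += t and the stored momenta may be negative, and C++'s % alone would leave negative remainders. A standalone sketch (the extent 8 is illustrative):

#include <iostream>

int main(void)
{
    int d = 8;                                   // lattice extent in one direction
    for (int v : {-9, -1, 0, 7, 8, 17})
    {
        std::cout << v << " -> " << ((v % d) + d) % d << std::endl;
    }
    // prints: -9 -> 7, -1 -> 7, 0 -> 0, 7 -> 7, 8 -> 0, 17 -> 1
    return 0;
}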

@ -124,8 +124,7 @@ void TTrMag<SImpl>::execute(void)
std::vector<TrMagResult> result;
auto &phi = envGet(Field, par().field);
auto m2 = sum(phi);
auto mn = m2;
auto m2 = sum(phi), mn = m2;
m2 = -m2*m2;
mn = 1.;

@ -103,7 +103,7 @@ std::vector<Complex> makeTwoPoint(const std::vector<SinkSite> &sink,
{
for (unsigned int t = 0; t < nt; ++t)
{
res[dt] += trace(sink[(t+dt)%nt]*adj(source[t]));
res[dt] += trace(sink[(t+dt)%nt]*source[t]);
}
res[dt] *= factor/static_cast<double>(nt);
}

@ -32,5 +32,5 @@ using namespace Grid;
using namespace Hadrons;
using namespace MSolver;
template class Grid::Hadrons::MSolver::TA2AVectors<FIMPL, BaseFermionEigenPack<FIMPL>>;
template class Grid::Hadrons::MSolver::TA2AVectors<ZFIMPL, BaseFermionEigenPack<ZFIMPL>>;
template class Grid::Hadrons::MSolver::TA2AVectors<FIMPL, FermionEigenPack<FIMPL>>;
template class Grid::Hadrons::MSolver::TA2AVectors<ZFIMPL, FermionEigenPack<ZFIMPL>>;

@ -51,9 +51,7 @@ public:
std::string, noise,
std::string, action,
std::string, eigenPack,
std::string, solver,
std::string, output,
bool, multiFile);
std::string, solver);
};
template <typename FImpl, typename Pack>
@ -81,9 +79,9 @@ private:
};
MODULE_REGISTER_TMP(A2AVectors,
ARG(TA2AVectors<FIMPL, BaseFermionEigenPack<FIMPL>>), MSolver);
ARG(TA2AVectors<FIMPL, FermionEigenPack<FIMPL>>), MSolver);
MODULE_REGISTER_TMP(ZA2AVectors,
ARG(TA2AVectors<ZFIMPL, BaseFermionEigenPack<ZFIMPL>>), MSolver);
ARG(TA2AVectors<ZFIMPL, FermionEigenPack<ZFIMPL>>), MSolver);
/******************************************************************************
* TA2AVectors implementation *
@ -238,17 +236,6 @@ void TA2AVectors<FImpl, Pack>::execute(void)
}
stopTimer("W high mode");
}
// I/O if necessary
if (!par().output.empty())
{
startTimer("V I/O");
A2AVectorsIo::write(par().output + "_v", v, par().multiFile, vm().getTrajectory());
stopTimer("V I/O");
startTimer("W I/O");
A2AVectorsIo::write(par().output + "_w", w, par().multiFile, vm().getTrajectory());
stopTimer("W I/O");
}
}
END_MODULE_NAMESPACE

@ -39,7 +39,7 @@ std::shared_ptr<LinearFunction<typename FImpl::FermionField>>
makeGuesser(const std::string epackName)
{
typedef typename FImpl::FermionField FermionField;
typedef BaseFermionEigenPack<FImpl> EPack;
typedef FermionEigenPack<FImpl> EPack;
typedef CoarseFermionEigenPack<FImpl, nBasis> CoarseEPack;
typedef DeflatedGuesser<FermionField> FineGuesser;
typedef LocalCoherenceDeflatedGuesser<

@ -33,7 +33,4 @@ using namespace MSolver;
template class Grid::Hadrons::MSolver::TLocalCoherenceLanczos<FIMPL,HADRONS_DEFAULT_LANCZOS_NBASIS>;
template class Grid::Hadrons::MSolver::TLocalCoherenceLanczos<ZFIMPL,HADRONS_DEFAULT_LANCZOS_NBASIS>;
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
template class Grid::Hadrons::MSolver::TLocalCoherenceLanczos<FIMPL,HADRONS_DEFAULT_LANCZOS_NBASIS, FIMPLF>;
template class Grid::Hadrons::MSolver::TLocalCoherenceLanczos<ZFIMPL,HADRONS_DEFAULT_LANCZOS_NBASIS, ZFIMPLF>;
#endif

@ -55,17 +55,17 @@ public:
bool, multiFile);
};
template <typename FImpl, int nBasis, typename FImplIo = FImpl>
template <typename FImpl, int nBasis>
class TLocalCoherenceLanczos: public Module<LocalCoherenceLanczosPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
typedef LocalCoherenceLanczos<typename FImpl::SiteSpinor,
typename FImpl::SiteComplex,
nBasis> LCL;
typedef BaseFermionEigenPack<FImpl> BasePack;
typedef CoarseFermionEigenPack<FImpl, nBasis, FImplIo> CoarsePack;
typedef HADRONS_DEFAULT_SCHUR_OP<FMat, FermionField> SchurFMat;
nBasis> LCL;
typedef FermionEigenPack<FImpl> BasePack;
typedef CoarseFermionEigenPack<FImpl, nBasis> CoarsePack;
typedef HADRONS_DEFAULT_SCHUR_OP<FMat, FermionField> SchurFMat;
public:
// constructor
TLocalCoherenceLanczos(const std::string name);
@ -82,31 +82,27 @@ public:
MODULE_REGISTER_TMP(LocalCoherenceLanczos, ARG(TLocalCoherenceLanczos<FIMPL, HADRONS_DEFAULT_LANCZOS_NBASIS>), MSolver);
MODULE_REGISTER_TMP(ZLocalCoherenceLanczos, ARG(TLocalCoherenceLanczos<ZFIMPL, HADRONS_DEFAULT_LANCZOS_NBASIS>), MSolver);
#ifdef GRID_DEFAULT_PRECISION_DOUBLE
MODULE_REGISTER_TMP(LocalCoherenceLanczosIo32, ARG(TLocalCoherenceLanczos<FIMPL, HADRONS_DEFAULT_LANCZOS_NBASIS, FIMPLF>), MSolver);
MODULE_REGISTER_TMP(ZLocalCoherenceLanczosIo32, ARG(TLocalCoherenceLanczos<ZFIMPL, HADRONS_DEFAULT_LANCZOS_NBASIS, ZFIMPLF>), MSolver);
#endif
/******************************************************************************
* TLocalCoherenceLanczos implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl, int nBasis, typename FImplIo>
TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::TLocalCoherenceLanczos(const std::string name)
template <typename FImpl, int nBasis>
TLocalCoherenceLanczos<FImpl, nBasis>::TLocalCoherenceLanczos(const std::string name)
: Module<LocalCoherenceLanczosPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl, int nBasis, typename FImplIo>
std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::getInput(void)
template <typename FImpl, int nBasis>
std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis>::getInput(void)
{
std::vector<std::string> in = {par().action};
return in;
}
template <typename FImpl, int nBasis, typename FImplIo>
std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::getOutput(void)
template <typename FImpl, int nBasis>
std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis>::getOutput(void)
{
std::vector<std::string> out = {getName()};
@ -114,8 +110,8 @@ std::vector<std::string> TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::getOutp
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl, int nBasis, typename FImplIo>
void TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::setup(void)
template <typename FImpl, int nBasis>
void TLocalCoherenceLanczos<FImpl, nBasis>::setup(void)
{
LOG(Message) << "Setting up local coherence Lanczos eigensolver for"
<< " action '" << par().action << "' (" << nBasis
@ -142,8 +138,8 @@ void TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::setup(void)
}
// execution ///////////////////////////////////////////////////////////////////
template <typename FImpl, int nBasis, typename FImplIo>
void TLocalCoherenceLanczos<FImpl, nBasis, FImplIo>::execute(void)
template <typename FImpl, int nBasis>
void TLocalCoherenceLanczos<FImpl, nBasis>::execute(void)
{
auto &finePar = par().fineParams;
auto &coarsePar = par().coarseParams;

@ -1,36 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSource/Momentum.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/Modules/MSource/Momentum.hpp>
using namespace Grid;
using namespace Hadrons;
using namespace MSource;
template class Grid::Hadrons::MSource::TMomentum<FIMPL>;

@ -1,149 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Modules/MSource/Momentum.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_Momentum_hpp_
#define Hadrons_Momentum_hpp_
#include <Hadrons/Global.hpp>
#include <Hadrons/Module.hpp>
#include <Hadrons/ModuleFactory.hpp>
BEGIN_HADRONS_NAMESPACE
/*
Plane wave source
-----------------
src_x = exp(i * sum_mu (2*pi/L_mu) * p_mu * x_mu)
*/
/******************************************************************************
* Plane Wave source *
******************************************************************************/
BEGIN_MODULE_NAMESPACE(MSource)
class MomentumPar: Serializable
{
public:
//What is meant by serializable in this context
GRID_SERIALIZABLE_CLASS_MEMBERS(MomentumPar,
std::string, mom);
};
template <typename FImpl>
class TMomentum: public Module<MomentumPar>
{
public:
FERM_TYPE_ALIASES(FImpl,);
public:
// constructor
TMomentum(const std::string name);
// destructor
virtual ~TMomentum(void) {};
// dependency relation
virtual std::vector<std::string> getInput(void);
virtual std::vector<std::string> getOutput(void);
// setup
virtual void setup(void);
// execution
virtual void execute(void);
};
MODULE_REGISTER_TMP(Momentum, TMomentum<FIMPL>, MSource);
//MODULE_REGISTER_NS(Momentum, TMomentum, MSource);
/******************************************************************************
* TMomentum template implementation *
******************************************************************************/
// constructor /////////////////////////////////////////////////////////////////
template <typename FImpl>
TMomentum<FImpl>::TMomentum(const std::string name)
: Module<MomentumPar>(name)
{}
// dependencies/products ///////////////////////////////////////////////////////
template <typename FImpl>
std::vector<std::string> TMomentum<FImpl>::getInput(void)
{
std::vector<std::string> in;
return in;
}
template <typename FImpl>
std::vector<std::string> TMomentum<FImpl>::getOutput(void)
{
std::vector<std::string> out = {getName()};
return out;
}
// setup ///////////////////////////////////////////////////////////////////////
template <typename FImpl>
void TMomentum<FImpl>::setup(void)
{
envCreateLat(PropagatorField, getName());
}
//execution//////////////////////////////////////////////////////////////////
template <typename FImpl>
void TMomentum<FImpl>::execute(void)
{
LOG(Message) << "Generating planewave momentum source with momentum " << par().mom << std::endl;
//what does this env do?
PropagatorField &src = envGet(PropagatorField, getName());
Lattice<iScalar<vInteger>> t(env().getGrid());
LatticeComplex C(env().getGrid()), coor(env().getGrid());
std::vector<Real> p;
std::vector<Real> latt_size(GridDefaultLatt().begin(), GridDefaultLatt().end());
Complex i(0.0,1.0);
LOG(Message) << " " << std::endl;
//get the momentum from parameters
p = strToVec<Real>(par().mom);
C = zero;
LOG(Message) << "momentum converted from string - " << std::to_string(p[0]) <<std::to_string(p[1]) <<std::to_string(p[2]) << std::to_string(p[3]) << std::endl;
for(int mu=0;mu<4;mu++){
Real TwoPiL = M_PI * 2.0/ latt_size[mu];
LatticeCoordinate(coor,mu);
C = C +(TwoPiL * p[mu]) * coor;
}
C = exp(C*i);
LOG(Message) << "exponential of pdotx taken " << std::endl;
src = src + C;
LOG(Message) << "source created" << std::endl;
}
END_MODULE_NAMESPACE
END_HADRONS_NAMESPACE
#endif // Hadrons_Momentum_hpp_
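As a worked example of the formula above: with latt_size[0] = 8 and mom = "1 0 0 0", a site with x_0 = 3 carries the phase exp(i * 2*pi * 1 * 3/8) = exp(i * 3*pi/4); the source is the corresponding unit-modulus phase at every site.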

@ -126,11 +126,6 @@ void TPoint<FImpl>::execute(void)
auto &src = envGet(PropagatorField, getName());
SitePropagator id;
if (position.size() != env().getNd())
{
HADRONS_ERROR(Size, "position has " + std::to_string(position.size())
+ " components (must have " + std::to_string(env().getNd()) + ")");
}
id = 1.;
src = zero;
pokeSite(id, src, position);

@ -1,126 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/TimerArray.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/TimerArray.hpp>
using namespace Grid;
using namespace QCD;
using namespace Hadrons;
void TimerArray::startTimer(const std::string &name)
{
if (!name.empty())
{
timer_[name].Start();
}
}
GridTime TimerArray::getTimer(const std::string &name)
{
GridTime t;
if (!name.empty())
{
try
{
bool running = timer_.at(name).isRunning();
if (running) stopTimer(name);
t = timer_.at(name).Elapsed();
if (running) startTimer(name);
}
catch (std::out_of_range &)
{
t = GridTime::zero();
}
}
else
{
t = GridTime::zero();
}
return t;
}
double TimerArray::getDTimer(const std::string &name)
{
return static_cast<double>(getTimer(name).count());
}
void TimerArray::startCurrentTimer(const std::string &name)
{
if (!name.empty())
{
stopCurrentTimer();
startTimer(name);
currentTimer_ = name;
}
}
void TimerArray::stopTimer(const std::string &name)
{
if (timer_.at(name).isRunning())
{
timer_.at(name).Stop();
}
}
void TimerArray::stopCurrentTimer(void)
{
if (!currentTimer_.empty())
{
stopTimer(currentTimer_);
currentTimer_ = "";
}
}
void TimerArray::stopAllTimers(void)
{
for (auto &t: timer_)
{
stopTimer(t.first);
}
currentTimer_ = "";
}
void TimerArray::resetTimers(void)
{
timer_.clear();
currentTimer_ = "";
}
std::map<std::string, GridTime> TimerArray::getTimings(void)
{
std::map<std::string, GridTime> timing;
for (auto &t: timer_)
{
timing[t.first] = t.second.Elapsed();
}
return timing;
}

@ -1,56 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/TimerArray.hpp
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#ifndef Hadrons_TimerArray_hpp_
#define Hadrons_TimerArray_hpp_
#include <Hadrons/Global.hpp>
BEGIN_HADRONS_NAMESPACE
class TimerArray
{
public:
TimerArray(void) = default;
virtual ~TimerArray(void) = default;
void startTimer(const std::string &name);
GridTime getTimer(const std::string &name);
double getDTimer(const std::string &name);
void startCurrentTimer(const std::string &name);
void stopTimer(const std::string &name);
void stopCurrentTimer(void);
void stopAllTimers(void);
void resetTimers(void);
std::map<std::string, GridTime> getTimings(void);
private:
std::string currentTimer_;
std::map<std::string, GridStopWatch> timer_;
};
END_HADRONS_NAMESPACE
#endif // Hadrons_TimerArray_hpp_
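For reference, a minimal sketch of how the removed TimerArray interface is driven; timer names are arbitrary and the work between start and stop is a placeholder:
TimerArray timers;
timers.startTimer("total");
timers.startTimer("setup");
// ... timed work ...
timers.stopTimer("setup");
timers.stopTimer("total");
for (auto &t: timers.getTimings())
{
    std::cout << t.first << ": " << t.second.count() << std::endl;  // GridTime ticks
}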

View File

@ -1,30 +1,3 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Utilities/EigenPackCast.cc
Copyright (C) 2015-2018
Author: Antonin Portelli <antonin.portelli@me.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Hadrons/EigenPack.hpp>
#include <Hadrons/Environment.hpp>
@ -35,7 +8,7 @@ using namespace Hadrons;
template <typename FOut, typename FIn>
void convert(const std::string outFilename, const std::string inFilename,
const unsigned int Ls, const bool rb, const unsigned int size,
const bool multiFile, const bool testRead)
const bool multiFile)
{
assert(outFilename != inFilename);
@ -86,12 +59,8 @@ void convert(const std::string outFilename, const std::string inFilename,
}
}
FOut bufOut(gOut);
FIn bufIn(gIn), testIn(gIn);
ScidacWriter binWriter(gOut->IsBoss());
ScidacReader binReader;
PackRecord record;
RealD eval;
FOut bufOut(gOut);
FIn bufIn(gIn), testIn(gIn);
LOG(Message) << "==== EIGENPACK CONVERSION" << std::endl;
LOG(Message) << "Lattice : " << gIn->GlobalDimensions() << std::endl;
@ -102,66 +71,60 @@ void convert(const std::string outFilename, const std::string inFilename,
LOG(Message) << "Out type : " << typeName<FOut>() << std::endl;
LOG(Message) << "#vectors : " << size << std::endl;
LOG(Message) << "Multifile : " << (multiFile ? "yes" : "no") << std::endl;
LOG(Message) << "Test read : " << (testRead ? "yes" : "no") << std::endl;
if (multiFile)
{
for(unsigned int k = 0; k < size; ++k)
{
ScidacWriter binWriter(gOut->IsBoss());
ScidacReader binReader;
PackRecord record;
VecRecord vecRecord;
std::string outV = outFilename + "/v" + std::to_string(k) + ".bin";
std::string inV = inFilename + "/v" + std::to_string(k) + ".bin";
LOG(Message) << "==== Converting vector " << k << std::endl;
LOG(Message) << "In : " << inV << std::endl;
LOG(Message) << "Out: " << outV << std::endl;
// conversion
LOG(Message) << "-- Doing conversion" << std::endl;
makeFileDir(outV, gOut);
binWriter.open(outV);
binReader.open(inV);
EigenPackIo::readHeader(record, binReader);
EigenPackIo::writeHeader(binWriter, record);
EigenPackIo::readElement<FIn>(bufIn, eval, k, binReader);
EigenPackIo::writeElement<FIn, FOut>(binWriter, bufIn, eval, k, &bufOut, &testIn);
EPIn::readHeader(record, binReader);
EPOut::writeHeader(binWriter, record);
EPIn::readElement(bufIn, vecRecord, binReader);
precisionChange(bufOut, bufIn);
precisionChange(testIn, bufOut);
testIn -= bufIn;
LOG(Message) << "Diff norm^2: " << norm2(testIn) << std::endl;
EPOut::writeElement(binWriter, bufOut, vecRecord);
binWriter.close();
binReader.close();
// read test
if (testRead)
{
LOG(Message) << "-- Test read" << std::endl;
binReader.open(outV);
EigenPackIo::readElement<FOut>(bufOut, eval, k, binReader);
binReader.close();
}
}
}
else
{
// conversion
LOG(Message) << "-- Doing conversion" << std::endl;
ScidacWriter binWriter(gOut->IsBoss());
ScidacReader binReader;
PackRecord record;
makeFileDir(outFilename, gOut);
binWriter.open(outFilename);
binReader.open(inFilename);
EigenPackIo::readHeader(record, binReader);
EigenPackIo::writeHeader(binWriter, record);
EPIn::readHeader(record, binReader);
EPOut::writeHeader(binWriter, record);
for(unsigned int k = 0; k < size; ++k)
{
EigenPackIo::readElement<FIn>(bufIn, eval, k, binReader);
EigenPackIo::writeElement<FIn, FOut>(binWriter, bufIn, eval, k, &bufOut, &testIn);
VecRecord vecRecord;
LOG(Message) << "==== Converting vector " << k << std::endl;
EPIn::readElement(bufIn, vecRecord, binReader);
precisionChange(bufOut, bufIn);
precisionChange(testIn, bufOut);
testIn -= bufIn;
LOG(Message) << "Diff norm^2: " << norm2(testIn) << std::endl;
EPOut::writeElement(binWriter, bufOut, vecRecord);
}
binWriter.close();
binReader.close();
// read test
if (testRead)
{
LOG(Message) << "-- Test read" << std::endl;
binReader.open(outFilename);
EigenPackIo::readHeader(record, binReader);
for(unsigned int k = 0; k < size; ++k)
{
EigenPackIo::readElement<FOut>(bufOut, eval, k, binReader);
}
binReader.close();
}
}
}
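Both branches of convert() now funnel each vector through the same EigenPackIo calls, with the precision round-trip check folded into writeElement. For a single vector the sequence reduces to this sketch (file paths hypothetical):
ScidacReader binReader;
ScidacWriter binWriter(gOut->IsBoss());
PackRecord   record;
RealD        eval;
binReader.open("in/v0.bin");
binWriter.open("out/v0.bin");
EigenPackIo::readHeader(record, binReader);
EigenPackIo::writeHeader(binWriter, record);
EigenPackIo::readElement<FIn>(bufIn, eval, 0, binReader);
// converts through bufOut and checks the FIn round trip via testIn
EigenPackIo::writeElement<FIn, FOut>(binWriter, bufIn, eval, 0, &bufOut, &testIn);
binWriter.close();
binReader.close();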
@ -179,11 +142,11 @@ int main(int argc, char *argv[])
// parse command line
std::string outFilename, inFilename;
unsigned int size, Ls;
bool rb, multiFile, testRead;
bool rb, multiFile;
if (argc < 8)
if (argc < 7)
{
std::cerr << "usage: " << argv[0] << " <out eigenpack> <in eigenpack> <Ls> <red-black {0|1}> <#vector> <multifile {0|1}> <test read {0|1}> [Grid options]";
std::cerr << "usage: " << argv[0] << " <out eigenpack> <in eigenpack> <Ls> <red-black (0|1)> <#vector> <multifile (0|1)> [Grid options]";
std::cerr << std::endl;
std::exit(EXIT_FAILURE);
}
@ -193,7 +156,6 @@ int main(int argc, char *argv[])
rb = (std::string(argv[4]) != "0");
size = std::stoi(std::string(argv[5]));
multiFile = (std::string(argv[6]) != "0");
testRead = (std::string(argv[7]) != "0");
// initialization
Grid_init(&argc, &argv);
@ -202,7 +164,7 @@ int main(int argc, char *argv[])
// execution
try
{
convert<FOUT, FIN>(outFilename, inFilename, Ls, rb, size, multiFile, testRead);
convert<FOUT, FIN>(outFilename, inFilename, Ls, rb, size, multiFile);
}
catch (const std::exception& e)
{

View File

@ -2,7 +2,7 @@
Grid physics library, www.github.com/paboyle/Grid
Source file: Hadrons/Utilities/HadronsXmlRun.cc
Source file: Hadrons/HadronsXmlRun.cc
Copyright (C) 2015-2018

View File

@ -1,8 +1,10 @@
AM_LDFLAGS += -L../../Hadrons
bin_PROGRAMS = HadronsXmlRun HadronsFermionEP64To32
HadronsXmlRun_SOURCES = HadronsXmlRun.cc
HadronsXmlRun_LDADD = ../libHadrons.a ../../Grid/libGrid.a
HadronsXmlRun_LDADD = -lHadrons -lGrid
HadronsFermionEP64To32_SOURCES = EigenPackCast.cc
HadronsFermionEP64To32_CXXFLAGS = $(AM_CXXFLAGS) -DFIN=WilsonImplD::FermionField -DFOUT=WilsonImplF::FermionField
HadronsFermionEP64To32_LDADD = ../libHadrons.a ../../Grid/libGrid.a
HadronsFermionEP64To32_LDADD = -lHadrons -lGrid

View File

@ -4,14 +4,12 @@ modules_cc =\
Modules/MContraction/Meson.cc \
Modules/MContraction/WeakNeutral4ptDisc.cc \
Modules/MContraction/WeakHamiltonianNonEye.cc \
Modules/MContraction/A2AAslashField.cc \
Modules/MContraction/WardIdentity.cc \
Modules/MContraction/A2AMesonField.cc \
Modules/MContraction/DiscLoop.cc \
Modules/MContraction/Gamma3pt.cc \
Modules/MFermion/FreeProp.cc \
Modules/MFermion/GaugeProp.cc \
Modules/MSource/Momentum.cc \
Modules/MSource/Point.cc \
Modules/MSource/Wall.cc \
Modules/MSource/SeqConserved.cc \
@ -29,9 +27,7 @@ modules_cc =\
Modules/MGauge/StochEm.cc \
Modules/MGauge/Random.cc \
Modules/MGauge/FundtoHirep.cc \
Modules/MGauge/GaugeFix.cc \
Modules/MNoise/TimeDilutedSpinColorDiagonal.cc \
Modules/MNoise/FullVolumeSpinColorDiagonal.cc \
Modules/MUtilities/RandomVectors.cc \
Modules/MUtilities/TestSeqGamma.cc \
Modules/MUtilities/PrecisionCast.cc \
@ -41,9 +37,6 @@ modules_cc =\
Modules/MScalar/VPCounterTerms.cc \
Modules/MScalar/ChargedProp.cc \
Modules/MScalar/ScalarVP.cc \
Modules/MNPR/Amputate.cc \
Modules/MNPR/Bilinear.cc \
Modules/MNPR/FourQuark.cc \
Modules/MAction/Wilson.cc \
Modules/MAction/MobiusDWF.cc \
Modules/MAction/ZMobiusDWF.cc \
@ -52,6 +45,7 @@ modules_cc =\
Modules/MAction/ScaledDWF.cc \
Modules/MScalarSUN/TrPhi.cc \
Modules/MScalarSUN/Grad.cc \
Modules/MScalarSUN/TimeMomProbe.cc \
Modules/MScalarSUN/TrMag.cc \
Modules/MScalarSUN/TrKinetic.cc \
Modules/MScalarSUN/EMT.cc \
@ -65,13 +59,12 @@ modules_cc =\
Modules/MIO/LoadBinary.cc \
Modules/MIO/LoadNersc.cc \
Modules/MIO/LoadCoarseEigenPack.cc \
Modules/MIO/LoadCosmHol.cc \
Modules/MIO/LoadA2AVectors.cc
Modules/MIO/LoadCosmHol.cc
modules_hpp =\
Modules/MContraction/Baryon.hpp \
Modules/MContraction/A2AAslashField.hpp \
Modules/MContraction/A2AMesonField.hpp \
Modules/MContraction/A2AMesonFieldKernels.hpp \
Modules/MContraction/Meson.hpp \
Modules/MContraction/WeakHamiltonian.hpp \
Modules/MContraction/WeakHamiltonianNonEye.hpp \
@ -87,7 +80,6 @@ modules_hpp =\
Modules/MSource/Wall.hpp \
Modules/MSource/Z2.hpp \
Modules/MSource/SeqConserved.hpp \
Modules/MSource/Momentum.hpp \
Modules/MSink/Smear.hpp \
Modules/MSink/Point.hpp \
Modules/MSolver/MixedPrecisionRBPrecCG.hpp \
@ -99,11 +91,9 @@ modules_hpp =\
Modules/MGauge/StoutSmearing.hpp \
Modules/MGauge/Unit.hpp \
Modules/MGauge/Random.hpp \
Modules/MGauge/GaugeFix.hpp \
Modules/MGauge/FundtoHirep.hpp \
Modules/MGauge/StochEm.hpp \
Modules/MNoise/TimeDilutedSpinColorDiagonal.hpp \
Modules/MNoise/FullVolumeSpinColorDiagonal.hpp \
Modules/MUtilities/PrecisionCast.hpp \
Modules/MUtilities/RandomVectors.hpp \
Modules/MUtilities/TestSeqGamma.hpp \
@ -114,9 +104,6 @@ modules_hpp =\
Modules/MScalar/ScalarVP.hpp \
Modules/MScalar/Scalar.hpp \
Modules/MScalar/ChargedProp.hpp \
Modules/MNPR/Bilinear.hpp \
Modules/MNPR/Amputate.hpp \
Modules/MNPR/FourQuark.hpp \
Modules/MAction/DWF.hpp \
Modules/MAction/MobiusDWF.hpp \
Modules/MAction/Wilson.hpp \
@ -127,6 +114,7 @@ modules_hpp =\
Modules/MScalarSUN/TwoPointNPR.hpp \
Modules/MScalarSUN/ShiftProbe.hpp \
Modules/MScalarSUN/Div.hpp \
Modules/MScalarSUN/TimeMomProbe.hpp \
Modules/MScalarSUN/TrMag.hpp \
Modules/MScalarSUN/EMT.hpp \
Modules/MScalarSUN/TwoPoint.hpp \
@ -137,7 +125,6 @@ modules_hpp =\
Modules/MScalarSUN/TrKinetic.hpp \
Modules/MIO/LoadEigenPack.hpp \
Modules/MIO/LoadNersc.hpp \
Modules/MIO/LoadA2AVectors.hpp \
Modules/MIO/LoadCosmHol.hpp \
Modules/MIO/LoadCoarseEigenPack.hpp \
Modules/MIO/LoadBinary.hpp

View File

@ -1,48 +1,108 @@
#include <Grid/Grid.h>
#ifdef HAVE_LIME
#include "Benchmark_IO.hpp"
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
#define MSG cout << GridLogMessage
#define SEP \
"============================================================================="
#ifndef BENCH_IO_LMAX
#define BENCH_IO_LMAX 40
#endif
using namespace Grid;
using namespace QCD;
typedef function<void(const string, LatticeFermion &)> WriterFn;
typedef function<void(LatticeFermion &, const string)> ReaderFn;
std::string filestem(const int l)
string filestem(const int l)
{
return "iobench_l" + std::to_string(l);
return "iobench_l" + to_string(l);
}
void limeWrite(const string filestem, LatticeFermion &vec)
{
emptyUserRecord record;
ScidacWriter binWriter(vec._grid->IsBoss());
binWriter.open(filestem + ".bin");
binWriter.writeScidacFieldRecord(vec, record);
binWriter.close();
}
void limeRead(LatticeFermion &vec, const string filestem)
{
emptyUserRecord record;
ScidacReader binReader;
binReader.open(filestem + ".bin");
binReader.readScidacFieldRecord(vec, record);
binReader.close();
}
void writeBenchmark(const int l, const WriterFn &write)
{
auto mpi = GridDefaultMpi();
auto simd = GridDefaultSimd(Nd, vComplex::Nsimd());
vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
unique_ptr<GridCartesian> gPt(SpaceTimeGrid::makeFourDimGrid(latt, simd, mpi));
GridCartesian *g = gPt.get();
GridParallelRNG rng(g);
LatticeFermion vec(g);
emptyUserRecord record;
ScidacWriter binWriter(g->IsBoss());
cout << "-- Local volume " << l << "^4" << endl;
random(rng, vec);
write(filestem(l), vec);
}
void readBenchmark(const int l, const ReaderFn &read)
{
auto mpi = GridDefaultMpi();
auto simd = GridDefaultSimd(Nd, vComplex::Nsimd());
vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
unique_ptr<GridCartesian> gPt(SpaceTimeGrid::makeFourDimGrid(latt, simd, mpi));
GridCartesian *g = gPt.get();
LatticeFermion vec(g);
emptyUserRecord record;
ScidacReader binReader;
cout << "-- Local volume " << l << "^4" << endl;
read(vec, filestem(l));
}
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
auto simd = GridDefaultSimd(Nd,vComplex::Nsimd());
auto mpi = GridDefaultMpi();
int64_t threads = GridThread::GetThreads();
MSG << "Grid is setup to use " << threads << " threads" << std::endl;
MSG << SEP << std::endl;
MSG << "Benchmark Lime write" << std::endl;
MSG << SEP << std::endl;
MSG << "Grid is setup to use " << threads << " threads" << endl;
MSG << SEP << endl;
MSG << "Benchmark Lime write" << endl;
MSG << SEP << endl;
for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
{
auto mpi = GridDefaultMpi();
std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
std::cout << "-- Local volume " << l << "^4" << std::endl;
writeBenchmark<LatticeFermion>(latt, filestem(l), limeWrite<LatticeFermion>);
writeBenchmark(l, limeWrite);
}
MSG << "Benchmark Lime read" << std::endl;
MSG << SEP << std::endl;
MSG << "Benchmark Lime read" << endl;
MSG << SEP << endl;
for (int l = 4; l <= BENCH_IO_LMAX; l += 2)
{
auto mpi = GridDefaultMpi();
std::vector<int> latt = {l*mpi[0], l*mpi[1], l*mpi[2], l*mpi[3]};
std::cout << "-- Local volume " << l << "^4" << std::endl;
readBenchmark<LatticeFermion>(latt, filestem(l), limeRead<LatticeFermion>);
readBenchmark(l, limeRead);
}
Grid_finalize();
return EXIT_SUCCESS;
}
#else
int main (int argc, char ** argv)
{
return EXIT_SUCCESS;
}
#endif
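The WriterFn/ReaderFn std::function typedefs decouple the benchmark loops from the I/O backend, so any callable with the matching signature plugs in; e.g. a lambda adapter (sketch):
WriterFn w = [](const string stem, LatticeFermion &vec)
{
    limeWrite(stem, vec);  // delegate to the Lime backend defined above
};
writeBenchmark(8, w);      // single write at local volume 8^4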

View File

@ -1,107 +0,0 @@
#ifndef Benchmark_IO_hpp_
#define Benchmark_IO_hpp_
#include <Grid/Grid.h>
#define MSG std::cout << GridLogMessage
#define SEP \
"============================================================================="
namespace Grid {
template <typename Field>
using WriterFn = std::function<void(const std::string, Field &)> ;
template <typename Field>
using ReaderFn = std::function<void(Field &, const std::string)>;
template <typename Field>
void limeWrite(const std::string filestem, Field &vec)
{
emptyUserRecord record;
QCD::ScidacWriter binWriter(vec._grid->IsBoss());
binWriter.open(filestem + ".bin");
binWriter.writeScidacFieldRecord(vec, record);
binWriter.close();
}
template <typename Field>
void limeRead(Field &vec, const std::string filestem)
{
emptyUserRecord record;
QCD::ScidacReader binReader;
binReader.open(filestem + ".bin");
binReader.readScidacFieldRecord(vec, record);
binReader.close();
}
inline void makeGrid(std::shared_ptr<GridBase> &gPt,
const std::shared_ptr<GridCartesian> &gBasePt,
const unsigned int Ls = 1, const bool rb = false)
{
if (rb)
{
if (Ls > 1)
{
gPt.reset(QCD::SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls, gBasePt.get()));
}
else
{
gPt.reset(QCD::SpaceTimeGrid::makeFourDimRedBlackGrid(gBasePt.get()));
}
}
else
{
if (Ls > 1)
{
gPt.reset(QCD::SpaceTimeGrid::makeFiveDimGrid(Ls, gBasePt.get()));
}
else
{
gPt = gBasePt;
}
}
}
template <typename Field>
void writeBenchmark(const std::vector<int> &latt, const std::string filename,
const WriterFn<Field> &write,
const unsigned int Ls = 1, const bool rb = false)
{
auto mpi = GridDefaultMpi();
auto simd = GridDefaultSimd(latt.size(), Field::vector_type::Nsimd());
std::shared_ptr<GridCartesian> gBasePt(QCD::SpaceTimeGrid::makeFourDimGrid(latt, simd, mpi));
std::shared_ptr<GridBase> gPt;
makeGrid(gPt, gBasePt, Ls, rb);
GridBase *g = gPt.get();
GridParallelRNG rng(g);
Field vec(g);
random(rng, vec);
write(filename, vec);
}
template <typename Field>
void readBenchmark(const std::vector<int> &latt, const std::string filename,
const ReaderFn<Field> &read,
const unsigned int Ls = 1, const bool rb = false)
{
auto mpi = GridDefaultMpi();
auto simd = GridDefaultSimd(latt.size(), Field::vector_type::Nsimd());
std::shared_ptr<GridCartesian> gBasePt(QCD::SpaceTimeGrid::makeFourDimGrid(latt, simd, mpi));
std::shared_ptr<GridBase> gPt;
makeGrid(gPt, gBasePt, Ls, rb);
GridBase *g = gPt.get();
Field vec(g);
read(vec, filename);
}
}
#endif // Benchmark_IO_hpp_
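With the templated helpers the same drivers also cover 5d and red-black layouts; e.g. a hypothetical Ls=12 red-black run (local volume and file stem chosen arbitrarily):
std::vector<int> latt = {8, 8, 8, 8};
writeBenchmark<LatticeFermion>(latt, "iobench_rb", limeWrite<LatticeFermion>, 12, true);
readBenchmark<LatticeFermion>(latt, "iobench_rb", limeRead<LatticeFermion>, 12, true);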

View File

@ -1,79 +0,0 @@
#include "Benchmark_IO.hpp"
#define MSG std::cout << GridLogMessage
#define SEP \
"============================================================================="
using namespace Grid;
using namespace QCD;
int main (int argc, char ** argv)
{
std::vector<std::string> dir;
unsigned int Ls;
bool rb;
if (argc < 4)
{
std::cerr << "usage: " << argv[0] << " <Ls> <RB {0|1}> <dir1> [<dir2> ... <dirn>] [Grid options]";
std::cerr << std::endl;
std::exit(EXIT_FAILURE);
}
Ls = std::stoi(argv[1]);
rb = (std::string(argv[2]) == "1");
for (unsigned int i = 3; i < argc; ++i)
{
std::string a = argv[i];
if (a[0] != '-')
{
dir.push_back(std::string(argv[i]));
}
else
{
break;
}
}
Grid_init(&argc,&argv);
int64_t threads = GridThread::GetThreads();
MSG << "Grid is setup to use " << threads << " threads" << std::endl;
MSG << SEP << std::endl;
MSG << "Benchmark double precision Lime write" << std::endl;
MSG << SEP << std::endl;
for (auto &d: dir)
{
MSG << "-- Directory " << d << std::endl;
writeBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermion>, Ls, rb);
}
MSG << SEP << std::endl;
MSG << "Benchmark double precision Lime read" << std::endl;
MSG << SEP << std::endl;
for (auto &d: dir)
{
MSG << "-- Directory " << d << std::endl;
readBenchmark<LatticeFermion>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermion>, Ls, rb);
}
MSG << SEP << std::endl;
MSG << "Benchmark single precision Lime write" << std::endl;
MSG << SEP << std::endl;
for (auto &d: dir)
{
MSG << "-- Directory " << d << std::endl;
writeBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeWrite<LatticeFermionF>, Ls, rb);
}
MSG << SEP << std::endl;
MSG << "Benchmark single precision Lime read" << std::endl;
MSG << SEP << std::endl;
for (auto &d: dir)
{
MSG << "-- Directory " << d << std::endl;
readBenchmark<LatticeFermionF>(GridDefaultLatt(), d + "/ioBench", limeRead<LatticeFermionF>, Ls, rb);
}
Grid_finalize();
return EXIT_SUCCESS;
}

View File

@ -1,4 +1,5 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_cayley_cg.cc
@ -26,7 +27,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/qcd/action/fermion/Reconstruct5Dprop.h>
#include <Grid/algorithms/iterative/Reconstruct5Dprop.h>
using namespace std;
using namespace Grid;
@ -46,7 +47,6 @@ struct scal {
template<class What>
void TestCGinversions(What & Ddwf,
LatticeGaugeField &Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
@ -78,23 +78,12 @@ void TestCGprec(What & Ddwf,
template<class What>
void TestReconstruct5D(What & Ddwf,
LatticeGaugeField &Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
GridParallelRNG *RNG4,
GridParallelRNG *RNG5);
template<class What,class WhatF>
void TestReconstruct5DFA(What & Ddwf,
WhatF & DdwfF,
LatticeGaugeField &Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
GridParallelRNG *RNG4,
GridParallelRNG *RNG5);
int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);
@ -103,104 +92,63 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;
const int Ls=8;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
GridCartesian * UGridF = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplexF::Nsimd()),
GridDefaultMpi());
GridRedBlackCartesian * UrbGridF = SpaceTimeGrid::makeFourDimRedBlackGrid(UGridF);
GridCartesian * FGridF = SpaceTimeGrid::makeFiveDimGrid(Ls,UGridF);
GridRedBlackCartesian * FrbGridF = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGridF);
std::vector<int> seeds4({1,2,3,4});
std::vector<int> seeds5({5,6,7,8});
GridParallelRNG RNG5(FGrid); RNG5.SeedFixedIntegers(seeds5);
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
LatticeGaugeFieldF UmuF(UGridF);
SU3::HotConfiguration(RNG4,Umu);
precisionChange(UmuF,Umu);
std::vector<LatticeColourMatrix> U(4,UGrid);
RealD mass=0.1;
RealD M5 =1.8;
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"DomainWallFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
DomainWallFermionF DdwfF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5);
TestCGinversions<DomainWallFermionR>(Ddwf,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<DomainWallFermionR,DomainWallFermionF>(Ddwf,DdwfF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<DomainWallFermionR>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
RealD b=1.5;// Scale factor b+c=2, b-c=1
RealD c=0.5;
std::vector<ComplexD> gamma(Ls,ComplexD(1.0,0.0));
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusFermionR Dmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c);
MobiusFermionF DmobF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,b,c);
TestCGinversions<MobiusFermionR>(Dmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<MobiusFermionR,MobiusFermionF>(Dmob,DmobF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<MobiusFermionR>(Dmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ZMobiusFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ZMobiusFermionR ZDmob(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,gamma,b,c);
TestCGinversions<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ZMobiusFermionR>(ZDmob,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<ZMobiusFermionR>(ZDmob,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
MobiusZolotarevFermionR Dzolo(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,b,c,0.1,2.0);
TestCGinversions<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<MobiusZolotarevFermionR>(Dzolo,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<MobiusZolotarevFermionR>(Dzolo,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ScaledShamirFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ScaledShamirFermionR Dsham(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,2.0);
ScaledShamirFermionF DshamF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,2.0);
TestCGinversions<ScaledShamirFermionR>(Dsham,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<ScaledShamirFermionR,ScaledShamirFermionF>(Dsham,DshamF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<ScaledShamirFermionR>(Dsham,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"ShamirZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
ShamirZolotarevFermionR Dshamz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<ShamirZolotarevFermionR>(Dshamz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<ShamirZolotarevFermionR>(Dshamz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyTanhFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyTanhFermionR Dov (Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
OverlapWilsonCayleyTanhFermionF DovF(UmuF,*FGridF,*FrbGridF,*UGridF,*UrbGridF,mass,M5,1.0);
TestCGinversions<OverlapWilsonCayleyTanhFermionR>(Dov,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5DFA<OverlapWilsonCayleyTanhFermionR,OverlapWilsonCayleyTanhFermionF>(Dov,DovF,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
OverlapWilsonCayleyTanhFermionR Dov(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,1.0);
TestCGinversions<OverlapWilsonCayleyTanhFermionR>(Dov,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"OverlapWilsonCayleyZolotarevFermion test"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
OverlapWilsonCayleyZolotarevFermionR Dovz(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5,0.1,2.0);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestReconstruct5D<OverlapWilsonCayleyZolotarevFermionR>(Dovz,Umu,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
TestCGinversions<OverlapWilsonCayleyZolotarevFermionR>(Dovz,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,&RNG4,&RNG5);
Grid_finalize();
}
template<class What>
void TestCGinversions(What & Ddwf,
LatticeGaugeField &Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
@ -213,8 +161,10 @@ void TestCGinversions(What & Ddwf,
TestCGprec<What>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,RNG4,RNG5);
std::cout<<GridLogMessage << "Testing red black Schur inverter"<<std::endl;
TestCGschur<What>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,RNG4,RNG5);
}
std::cout<<GridLogMessage << "Testing 5D PV reconstruction"<<std::endl;
TestReconstruct5D<What>(Ddwf,FGrid,FrbGrid,UGrid,UrbGrid,mass,M5,RNG4,RNG5);
}
template<class What>
void TestCGunprec(What & Ddwf,
@ -253,7 +203,6 @@ void TestCGprec(What & Ddwf,
template<class What>
void TestReconstruct5D(What & Ddwf,
LatticeGaugeField & Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
@ -267,13 +216,9 @@ void TestReconstruct5D(What & Ddwf,
LatticeFermion src_NE(FGrid);
LatticeFermion result(FGrid);
LatticeFermion result_rec(FGrid);
LatticeFermion result_madwf(FGrid);
MdagMLinearOperator<What,LatticeFermion> HermOp(Ddwf);
double Resid = 1.0e-12;
double Residi = 1.0e-6;
ConjugateGradient<LatticeFermion> CG(Resid,10000);
ConjugateGradient<LatticeFermion> CGi(Residi,10000);
ConjugateGradient<LatticeFermion> CG(1.0e-12,10000);
Ddwf.ImportPhysicalFermionSource(src4,src);
Ddwf.Mdag(src,src_NE);
@ -287,16 +232,9 @@ void TestReconstruct5D(What & Ddwf,
std::cout <<GridLogMessage<< " Reconstructing " <<std::endl;
////////////////////////////
// RBprec PV inverse
////////////////////////////
typedef LatticeFermion Field;
typedef SchurRedBlackDiagTwoSolve<Field> SchurSolverType;
typedef SchurRedBlackDiagTwoSolve<Field> SchurSolverTypei;
typedef PauliVillarsSolverRBprec<Field,SchurSolverType> PVinverter;
SchurSolverType SchurSolver(CG);
PVinverter PVinverse(SchurSolver);
// typedef PauliVillarsSolverUnprec<LatticeFermion> PVinverter;
typedef PauliVillarsSolverRBprec<LatticeFermion> PVinverter;
PVinverter PVinverse(CG);
Reconstruct5DfromPhysical<LatticeFermion,PVinverter> reconstructor(PVinverse);
reconstructor(Ddwf,res4,src4,result_rec);
@ -307,88 +245,6 @@ void TestReconstruct5D(What & Ddwf,
result_rec = result_rec - result;
std::cout <<GridLogMessage << "Difference "<<norm2(result_rec)<<std::endl;
//////////////////////////////
// Now try MADWF
//////////////////////////////
SchurSolverTypei SchurSolveri(CGi);
ZeroGuesser<LatticeFermion> Guess;
MADWF<What,What,PVinverter,SchurSolverTypei,ZeroGuesser<LatticeFermion> >
madwf(Ddwf,Ddwf,PVinverse,SchurSolveri,Guess,Resid,10);
madwf(src4,result_madwf);
result_madwf = result_madwf - result;
std::cout <<GridLogMessage << "Difference "<<norm2(result_madwf)<<std::endl;
}
template<class What,class WhatF>
void TestReconstruct5DFA(What & Ddwf,
WhatF & DdwfF,
LatticeGaugeField & Umu,
GridCartesian * FGrid, GridRedBlackCartesian * FrbGrid,
GridCartesian * UGrid, GridRedBlackCartesian * UrbGrid,
RealD mass, RealD M5,
GridParallelRNG *RNG4,
GridParallelRNG *RNG5)
{
LatticeFermion src4 (UGrid); random(*RNG4,src4);
LatticeFermion res4 (UGrid); res4 = zero;
LatticeFermion src (FGrid);
LatticeFermion src_NE(FGrid);
LatticeFermion result(FGrid);
LatticeFermion result_rec(FGrid);
LatticeFermion result_madwf(FGrid);
MdagMLinearOperator<What,LatticeFermion> HermOp(Ddwf);
double Resid = 1.0e-12;
double Residi = 1.0e-5;
ConjugateGradient<LatticeFermion> CG(Resid,10000);
ConjugateGradient<LatticeFermionF> CGi(Residi,10000);
Ddwf.ImportPhysicalFermionSource(src4,src);
Ddwf.Mdag(src,src_NE);
CG(HermOp,src_NE,result);
Ddwf.ExportPhysicalFermionSolution(result, res4);
Ddwf.M(result,src_NE);
src_NE = src_NE - src;
std::cout <<GridLogMessage<< " True residual is " << norm2(src_NE)<<std::endl;
std::cout <<GridLogMessage<< " Reconstructing " <<std::endl;
////////////////////////////
// Fourier accel PV inverse
////////////////////////////
typedef LatticeFermion Field;
typedef LatticeFermionF FieldF;
typedef SchurRedBlackDiagTwoSolve<FieldF> SchurSolverTypei;
typedef PauliVillarsSolverFourierAccel<LatticeFermion,LatticeGaugeField> PVinverter;
PVinverter PVinverse(Umu,CG);
Reconstruct5DfromPhysical<LatticeFermion,PVinverter> reconstructor(PVinverse);
reconstructor(Ddwf,res4,src4,result_rec);
std::cout <<GridLogMessage << "Result "<<norm2(result)<<std::endl;
std::cout <<GridLogMessage << "Result_rec "<<norm2(result_rec)<<std::endl;
result_rec = result_rec - result;
std::cout <<GridLogMessage << "Difference "<<norm2(result_rec)<<std::endl;
//////////////////////////////
// Now try MADWF
//////////////////////////////
SchurSolverTypei SchurSolver(CGi);
ZeroGuesser<LatticeFermionF> Guess;
MADWF<What,WhatF,PVinverter,SchurSolverTypei,ZeroGuesser<LatticeFermionF> >
madwf(Ddwf,DdwfF,PVinverse,SchurSolver,Guess,Resid,10);
madwf(src4,result_madwf);
result_madwf = result_madwf - result;
std::cout <<GridLogMessage << "Difference "<<norm2(result_madwf)<<std::endl;
}
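As the commit message explains, the PV inverter is now a policy type composed into Reconstruct5DfromPhysical, so the Pauli-Villars inversion strategy is chosen at the call site without touching the reconstruction routine. Schematically, using only the types exercised above:
// red-black (Schur) PV inverse
typedef SchurRedBlackDiagTwoSolve<LatticeFermion>                 SchurSolverType;
typedef PauliVillarsSolverRBprec<LatticeFermion, SchurSolverType> PVrb;
SchurSolverType SchurSolver(CG);
PVrb            PVinverse(SchurSolver);
// alternatives with the same reconstructor interface:
//   PauliVillarsSolverUnprec<LatticeFermion>                          PVu(CG);
//   PauliVillarsSolverFourierAccel<LatticeFermion, LatticeGaugeField> PVfa(Umu, CG);
Reconstruct5DfromPhysical<LatticeFermion, PVrb> reconstructor(PVinverse);
reconstructor(Ddwf, res4, src4, result_rec);  // 4d solution -> full 5d propagator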

View File

@ -91,22 +91,6 @@ int main(int argc, char *argv[])
v13r = v[13];
LOG(Message) << "v[13] correct? "
<< ((v13r == v13w) ? "yes" : "no" ) << std::endl;
LOG(Message) << "hit ratio " << v.hitRatio() << std::endl;
EigenDiskVector<ComplexD> w("eigendiskvector_test", 1000, 4);
EigenDiskVector<ComplexD>::Matrix m,n;
w[2] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
m = w[2];
w[3] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
w[4] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
w[5] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
w[6] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
w[7] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);
n = w[2];
LOG(Message) << "w[2] correct? "
<< ((m == n) ? "yes" : "no" ) << std::endl;
LOG(Message) << "hit ratio " << w.hitRatio() << std::endl;
Grid_finalize();
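EigenDiskVector stores Eigen matrices on disk behind a bounded in-memory cache; the removed lines exercise exactly that round trip. Condensed sketch, constructor arguments as in the test:
EigenDiskVector<ComplexD>         w("eigendiskvector_test", 1000, 4);
EigenDiskVector<ComplexD>::Matrix m;
w[2] = EigenDiskVectorMat<ComplexD>::Random(2000, 2000);  // written through to disk
m    = w[2];                                              // read back via the cache
LOG(Message) << "hit ratio " << w.hitRatio() << std::endl;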

View File

@ -38,7 +38,6 @@ int main (int argc, char ** argv)
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
double stp=1.0e-5;
const int Ls=4;
Grid_init(&argc,&argv);
@ -198,7 +197,7 @@ int main (int argc, char ** argv)
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((stp),10000);
ConjugateGradient<FermionField> CG((1.0e-2),10000);
s_res = zero;
CG(HermOp,s_src,s_res);
@ -228,11 +227,5 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage<<" resid["<<n<<"] "<< norm2(tmp)/norm2(src[n])<<std::endl;
}
for(int s=0;s<nrhs;s++) result[s]=zero;
int blockDim = 0;//not used for BlockCGVec
BlockConjugateGradient<FermionField> BCGV (BlockCGVec,blockDim,stp,10000);
BCGV.PrintInterval=10;
BCGV(HermOpCk,src,result);
Grid_finalize();
}

View File

@ -1,220 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename MobiusFermionR::FermionField FermionField;
typedef typename MobiusFermionR::ComplexField ComplexField;
typename MobiusFermionR::ImplParams params;
const int Ls=12;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<int> mpi_split (mpi_layout.size(),1);
std::vector<int> split_coor (mpi_layout.size(),1);
std::vector<int> split_dim (mpi_layout.size(),1);
std::vector<ComplexD> boundary_phases(Nd,1.);
boundary_phases[Nd-1]=-1.;
params.boundary_phases = boundary_phases;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
/////////////////////////////////////////////
// Split into 1^4 mpi communicators
/////////////////////////////////////////////
for(int i=0;i<argc;i++){
if(std::string(argv[i]) == "--split"){
for(int k=0;k<mpi_layout.size();k++){
std::stringstream ss;
ss << argv[i+1+k];
ss >> mpi_split[k];
}
break;
}
}
double stp = 1.e-8;
int nrhs = 1;
int me;
for(int i=0;i<mpi_layout.size();i++){
// split_dim[i] = (mpi_layout[i]/mpi_split[i]);
nrhs *= (mpi_layout[i]/mpi_split[i]);
// split_coor[i] = FGrid._processor_coor[i]/mpi_split[i];
}
std::cout << GridLogMessage << "Creating split grids " <<std::endl;
GridCartesian * SGrid = new GridCartesian(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
mpi_split,
*UGrid,me);
std::cout << GridLogMessage <<"Creating split ferm grids " <<std::endl;
GridCartesian * SFGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,SGrid);
std::cout << GridLogMessage <<"Creating split rb grids " <<std::endl;
GridRedBlackCartesian * SrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(SGrid);
std::cout << GridLogMessage <<"Creating split ferm rb grids " <<std::endl;
GridRedBlackCartesian * SFrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,SGrid);
std::cout << GridLogMessage << "Made the grids"<<std::endl;
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
std::cout << GridLogMessage << "Made the Fermion Fields"<<std::endl;
for(int s=0;s<nrhs;s++) result[s]=zero;
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src[s])<<std::endl;
}
std::cout << GridLogMessage << "Initialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
if(0) {
FieldMetaData header;
std::string file("./lat.in");
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " "<<file<<" successfully read" <<std::endl;
} else {
GridParallelRNG pRNG(UGrid );
std::cout << GridLogMessage << "Initialising 4D RNG "<<std::endl;
pRNG.SeedFixedIntegers(seeds);
std::cout << GridLogMessage << "Initialised 4D RNG "<<std::endl;
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Initialised the HOT Gauge Field"<<std::endl;
std::cout << " Site zero "<< Umu._odata[0] <<std::endl;
}
/////////////////
// MPI only sends
/////////////////
LatticeGaugeField s_Umu(SGrid);
FermionField s_src(SFGrid);
FermionField s_tmp(SFGrid);
FermionField s_res(SFGrid);
std::cout << GridLogMessage << "Made the split grid fields"<<std::endl;
///////////////////////////////////////////////////////////////
// split the source out using MPI instead of I/O
///////////////////////////////////////////////////////////////
Grid_split (Umu,s_Umu);
Grid_split (src,s_src);
std::cout << GridLogMessage << " split rank " <<me << " s_src "<<norm2(s_src)<<std::endl;
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << " Building the solvers"<<std::endl;
// RealD mass=0.00107;
RealD mass=0.1;
RealD M5=1.8;
RealD mobius_factor=32./12.;
RealD mobius_b=0.5*(mobius_factor+1.);
RealD mobius_c=0.5*(mobius_factor-1.);
MobiusFermionR Dchk(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5,mobius_b,mobius_c,params);
MobiusFermionR Ddwf(s_Umu,*SFGrid,*SFrbGrid,*SGrid,*SrbGrid,mass,M5,mobius_b,mobius_c,params);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<MobiusFermionR,FermionField> HermOp(Ddwf);
MdagMLinearOperator<MobiusFermionR,FermionField> HermOpCk(Dchk);
ConjugateGradient<FermionField> CG((stp),100000);
s_res = zero;
CG(HermOp,s_src,s_res);
std::cout << GridLogMessage << " split residual norm "<<norm2(s_res)<<std::endl;
/////////////////////////////////////////////////////////////
// Report how long they all took
/////////////////////////////////////////////////////////////
std::vector<uint32_t> iterations(nrhs,0);
iterations[me] = CG.IterationsToComplete;
for(int n=0;n<nrhs;n++){
UGrid->GlobalSum(iterations[n]);
std::cout << GridLogMessage<<" Rank "<<n<<" "<< iterations[n]<<" CG iterations"<<std::endl;
}
/////////////////////////////////////////////////////////////
// Gather and residual check on the results
/////////////////////////////////////////////////////////////
std::cout << GridLogMessage<< "Unsplitting the result"<<std::endl;
Grid_unsplit(result,s_res);
std::cout << GridLogMessage<< "Checking the residuals"<<std::endl;
for(int n=0;n<nrhs;n++){
std::cout << GridLogMessage<< " res["<<n<<"] norm "<<norm2(result[n])<<std::endl;
HermOpCk.HermOp(result[n],tmp); tmp = tmp - src[n];
std::cout << GridLogMessage<<" resid["<<n<<"] "<< std::sqrt(norm2(tmp)/norm2(src[n]))<<std::endl;
}
for(int s=0;s<nrhs;s++){
result[s]=zero;
}
/////////////////////////////////////////////////////////////
// Try block CG
/////////////////////////////////////////////////////////////
int blockDim = 0;//not used for BlockCGVec
BlockConjugateGradient<FermionField> BCGV (BlockCGrQVec,blockDim,stp,100000);
{
BCGV(HermOpCk,src,result);
}
Grid_finalize();
}
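The split-grid workflow of this removed test condenses to four collective steps (sketch, names as above; HermOp is built on the split-grid operator):
Grid_split(Umu, s_Umu);       // replicate the gauge field onto every sub-communicator
Grid_split(src, s_src);       // scatter one right-hand side to each sub-job
CG(HermOp, s_src, s_res);     // each sub-communicator runs an independent solve
Grid_unsplit(result, s_res);  // gather the solutions back onto the full grid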

View File

@ -1,144 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
const int Ls=16;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<ComplexD> boundary_phases(Nd,1.);
boundary_phases[Nd-1]=-1.;
params.boundary_phases = boundary_phases;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
double stp = 1.e-8;
int nrhs = 2;
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
std::cout << GridLogMessage << "Made the Fermion Fields"<<std::endl;
for(int s=0;s<nrhs;s++) result[s]=zero;
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src[s])<<std::endl;
}
std::cout << GridLogMessage << "Initialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
int conf = 0;
if(conf==0) {
FieldMetaData header;
std::string file("./lat.in");
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " Config "<<file<<" successfully read" <<std::endl;
} else if (conf==1){
GridParallelRNG pRNG(UGrid );
pRNG.SeedFixedIntegers(seeds);
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Initialised the HOT Gauge Field"<<std::endl;
} else {
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Initialised the COLD Gauge Field"<<std::endl;
}
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << " Building the solvers"<<std::endl;
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5,params);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
ConjugateGradient<FermionField> CG((stp),100000);
for(int rhs=0;rhs<1;rhs++){
result[rhs] = zero;
CG(HermOp,src[rhs],result[rhs]);
}
for(int rhs=0;rhs<1;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
/////////////////////////////////////////////////////////////
// Try block CG
/////////////////////////////////////////////////////////////
int blockDim = 0;//not used for BlockCGVec
for(int s=0;s<nrhs;s++){
result[s]=zero;
}
BlockConjugateGradient<FermionField> BCGV (BlockCGrQVec,blockDim,stp,100000);
{
BCGV(HermOp,src,result);
}
for(int rhs=0;rhs<nrhs;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
Grid_finalize();
}
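The vectorised block solver takes std::vector<Field> sources and solutions directly; a minimal call sequence (sketch, grids and HermOp as above):
int nrhs = 2;
std::vector<FermionField> srcs(nrhs, FGrid), sols(nrhs, FGrid);
for (int s = 0; s < nrhs; s++) sols[s] = zero;
BlockConjugateGradient<FermionField> BCGV(BlockCGrQVec, 0, 1.0e-8, 100000);
BCGV(HermOp, srcs, sols);  // solves all nrhs systems in one block iteration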

View File

@ -1,148 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
const int Ls=16;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<ComplexD> boundary_phases(Nd,1.);
boundary_phases[Nd-1]=-1.;
params.boundary_phases = boundary_phases;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
double stp = 1.e-8;
int nrhs = 2;
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src4(nrhs,UGrid);
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
std::cout << GridLogMessage << "Made the Fermion Fields"<<std::endl;
for(int s=0;s<nrhs;s++) result[s]=zero;
GridParallelRNG pRNG4(UGrid); pRNG4.SeedFixedIntegers(seeds);
for(int s=0;s<nrhs;s++) {
random(pRNG4,src4[s]);
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src4[s])<<std::endl;
}
std::cout << GridLogMessage << "Initialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
int conf = 0;
if(conf==0) {
FieldMetaData header;
std::string file("./lat.in");
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " Config "<<file<<" successfully read" <<std::endl;
} else if (conf==1){
GridParallelRNG pRNG(UGrid );
pRNG.SeedFixedIntegers(seeds);
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Initialised the HOT Gauge Field"<<std::endl;
} else {
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Initialised the COLD Gauge Field"<<std::endl;
}
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << " Building the solvers"<<std::endl;
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5,params);
for(int s=0;s<nrhs;s++) {
Ddwf.ImportPhysicalFermionSource(src4[s],src[s]);
}
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
ConjugateGradient<FermionField> CG((stp),100000);
for(int rhs=0;rhs<1;rhs++){
result[rhs] = zero;
// CG(HermOp,src[rhs],result[rhs]);
}
for(int rhs=0;rhs<1;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
/////////////////////////////////////////////////////////////
// Try block CG
/////////////////////////////////////////////////////////////
int blockDim = 0;//not used for BlockCGVec
for(int s=0;s<nrhs;s++){
result[s]=zero;
}
BlockConjugateGradient<FermionField> BCGV (BlockCGrQVec,blockDim,stp,100000);
{
BCGV(HermOp,src,result);
}
for(int rhs=0;rhs<nrhs;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
Grid_finalize();
}

View File

@ -1,147 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./tests/Test_dwf_mrhs_cg.cc
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/algorithms/iterative/BlockConjugateGradient.h>
using namespace std;
using namespace Grid;
using namespace Grid::QCD;
int main (int argc, char ** argv)
{
typedef typename DomainWallFermionR::FermionField FermionField;
typedef typename DomainWallFermionR::ComplexField ComplexField;
typename DomainWallFermionR::ImplParams params;
const int Ls=16;
Grid_init(&argc,&argv);
std::vector<int> latt_size = GridDefaultLatt();
std::vector<int> simd_layout = GridDefaultSimd(Nd,vComplex::Nsimd());
std::vector<int> mpi_layout = GridDefaultMpi();
std::vector<ComplexD> boundary_phases(Nd,1.);
boundary_phases[Nd-1]=-1.;
params.boundary_phases = boundary_phases;
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
GridDefaultSimd(Nd,vComplex::Nsimd()),
GridDefaultMpi());
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
GridRedBlackCartesian * rbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
double stp = 1.e-8;
int nrhs = 4;
///////////////////////////////////////////////
// Set up the problem as a 4d spreadout job
///////////////////////////////////////////////
std::vector<int> seeds({1,2,3,4});
std::vector<FermionField> src(nrhs,FGrid);
std::vector<FermionField> src_chk(nrhs,FGrid);
std::vector<FermionField> result(nrhs,FGrid);
FermionField tmp(FGrid);
std::cout << GridLogMessage << "Made the Fermion Fields"<<std::endl;
for(int s=0;s<nrhs;s++) result[s]=zero;
GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
for(int s=0;s<nrhs;s++) {
random(pRNG5,src[s]);
std::cout << GridLogMessage << " src ["<<s<<"] "<<norm2(src[s])<<std::endl;
}
std::cout << GridLogMessage << "Initialised the Fermion Fields"<<std::endl;
LatticeGaugeField Umu(UGrid);
int conf = 2;
if(conf==0) {
FieldMetaData header;
std::string file("./lat.in");
NerscIO::readConfiguration(Umu,header,file);
std::cout << GridLogMessage << " Config "<<file<<" successfully read" <<std::endl;
} else if (conf==1){
GridParallelRNG pRNG(UGrid );
pRNG.SeedFixedIntegers(seeds);
SU3::HotConfiguration(pRNG,Umu);
std::cout << GridLogMessage << "Initialised the HOT Gauge Field"<<std::endl;
} else {
SU3::ColdConfiguration(Umu);
std::cout << GridLogMessage << "Initialised the COLD Gauge Field"<<std::endl;
}
///////////////////////////////////////////////////////////////
// Set up N-solvers as trivially parallel
///////////////////////////////////////////////////////////////
std::cout << GridLogMessage << " Building the solvers"<<std::endl;
RealD mass=0.01;
RealD M5=1.8;
DomainWallFermionR Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*rbGrid,mass,M5,params);
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
std::cout << GridLogMessage << " Calling DWF CG "<<std::endl;
std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
MdagMLinearOperator<DomainWallFermionR,FermionField> HermOp(Ddwf);
ConjugateGradient<FermionField> CG((stp),100000);
for(int rhs=0;rhs<1;rhs++){
result[rhs] = zero;
CG(HermOp,src[rhs],result[rhs]);
}
for(int rhs=0;rhs<1;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
/////////////////////////////////////////////////////////////
// Try block CG
/////////////////////////////////////////////////////////////
int blockDim = 0;//not used for BlockCGVec
for(int s=0;s<nrhs;s++){
result[s]=zero;
}
{
BlockConjugateGradient<FermionField> BCGV (BlockCGrQVec,blockDim,stp,100000);
SchurRedBlackDiagTwoSolve<FermionField> SchurSolver(BCGV);
SchurSolver(Ddwf,src,result);
}
for(int rhs=0;rhs<nrhs;rhs++){
std::cout << " Result["<<rhs<<"] norm = "<<norm2(result[rhs])<<std::endl;
}
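// A hedged check sketch (not in the original test): SchurRedBlackDiagTwoSolve
// reconstructs full solutions of Ddwf x = b, so the true residual of each
// right-hand side can be inspected directly. Uses tmp declared above.
for(int s=0;s<nrhs;s++){
Ddwf.M(result[s],tmp);           // tmp = M x_s
tmp = tmp - src[s];              // r_s = M x_s - b_s
std::cout << GridLogMessage << " rhs "<<s<<" true residual "
<< std::sqrt(norm2(tmp)/norm2(src[s])) << std::endl;
}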
Grid_finalize();
}


@@ -67,22 +67,7 @@ int main (int argc, char ** argv)
 GridParallelRNG pRNG(UGrid ); pRNG.SeedFixedIntegers(seeds);
 GridParallelRNG pRNG5(FGrid); pRNG5.SeedFixedIntegers(seeds);
-FermionField src(FGrid);
-FermionField tt(FGrid);
-#if 1
-random(pRNG5,src);
-#else
-src=zero;
-ComplexField coor(FGrid);
-LatticeCoordinate(coor,0);
-for(int ss=0;ss<FGrid->oSites();ss++){
-  src._odata[ss]()()(0)=coor._odata[ss]()()();
-}
-LatticeCoordinate(coor,1);
-for(int ss=0;ss<FGrid->oSites();ss++){
-  src._odata[ss]()()(0)+=coor._odata[ss]()()();
-}
-#endif
+FermionField src(FGrid); random(pRNG5,src);
 FermionField src_o(FrbGrid); pickCheckerboard(Odd,src_o,src);
 FermionField result_o(FrbGrid); result_o=zero;
 RealD nrm = norm2(src);
@@ -104,8 +89,7 @@ int main (int argc, char ** argv)
 ConjugateGradient<FermionField> CG(1.0e-8,10000);
 int blockDim = 0;
 BlockConjugateGradient<FermionField> BCGrQ(BlockCGrQ,blockDim,1.0e-8,10000);
-BlockConjugateGradient<FermionField> BCG  (BlockCGrQ,blockDim,1.0e-8,10000);
-BlockConjugateGradient<FermionField> BCGv (BlockCGrQVec,blockDim,1.0e-8,10000);
+BlockConjugateGradient<FermionField> BCG  (BlockCG,blockDim,1.0e-8,10000);
 BlockConjugateGradient<FermionField> mCG  (CGmultiRHS,blockDim,1.0e-8,10000);
 std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
@@ -174,7 +158,7 @@ int main (int argc, char ** argv)
 std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
-std::cout << GridLogMessage << " Calling Block CGrQ for "<<Ls <<" right hand sides" <<std::endl;
+std::cout << GridLogMessage << " Calling Block CG for "<<Ls <<" right hand sides" <<std::endl;
 std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 Ds.ZeroCounters();
 result_o=zero;
@@ -192,49 +176,6 @@ int main (int argc, char ** argv)
 Ds.Report();
 std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
 std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
-std::cout << GridLogMessage << " Calling Block CG for "<<Ls <<" right hand sides" <<std::endl;
-std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
-Ds.ZeroCounters();
-result_o=zero;
-{
-  double t1=usecond();
-  BCG(HermOp,src_o,result_o);
-  double t2=usecond();
-  double ncall=BCGrQ.IterationsToComplete*Ls;
-  double flops = deodoe_flops * ncall;
-  std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
-  std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
-  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
-  HermOp.Report();
-}
-Ds.Report();
-std::cout << GridLogMessage << "************************************************************************ "<<std::endl;
-std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
-std::cout << GridLogMessage << " Calling BCGvec "<<std::endl;
-std::cout << GridLogMessage << "****************************************************************** "<<std::endl;
-std::vector<FermionField> src_v   (Ls,UrbGrid);
-std::vector<FermionField> result_v(Ls,UrbGrid);
-for(int s=0;s<Ls;s++) result_v[s] = zero;
-for(int s=0;s<Ls;s++) {
-  FermionField src4(UGrid);
-  ExtractSlice(src4,src,s,0);
-  pickCheckerboard(Odd,src_v[s],src4);
-}
-{
-  double t1=usecond();
-  BCGv(HermOp4d,src_v,result_v);
-  double t2=usecond();
-  double ncall=BCGv.IterationsToComplete*Ls;
-  double flops = deodoe_flops * ncall;
-  std::cout<<GridLogMessage << "usec = "<< (t2-t1)<<std::endl;
-  std::cout<<GridLogMessage << "flops = "<< flops<<std::endl;
-  std::cout<<GridLogMessage << "mflop/s = "<< flops/(t2-t1)<<std::endl;
-  // HermOp4d.Report();
-}
 Grid_finalize();
 }