mirror of https://github.com/paboyle/Grid.git synced 2024-11-09 23:45:36 +00:00

Warning-free compile on Tursa.

Hopefully got all required virtual dtors
Peter Boyle 2021-10-21 19:56:52 +01:00
parent 99e7a5d18a
commit ba7e371b90
11 changed files with 191 additions and 158 deletions
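The change repeated across the solver and guesser headers below adds a using-declaration, using LinearFunction<Field>::operator();, to every functor derived from LinearFunction. The motivation, sketched here with hypothetical stand-in classes (Grid's actual LinearFunction is assumed to declare more than one virtual operator() overload, plus the virtual destructor the commit message mentions): a derived class that overrides one overload hides all the others, which strict compiles flag via -Woverloaded-virtual.

// Minimal sketch; LinearFunction here is a stand-in, not Grid's declaration.
#include <vector>
#include <cstddef>

template<class Field>
class LinearFunction {
public:
  virtual ~LinearFunction() = default;  // virtual dtor: safe delete through a base pointer
  virtual void operator()(const Field &in, Field &out) = 0;
  // Second overload: hidden in any derived class that overrides only the
  // first one, unless the derived class re-exposes it.
  virtual void operator()(const std::vector<Field> &in, std::vector<Field> &out) {
    for (std::size_t i = 0; i < in.size(); i++) (*this)(in[i], out[i]);
  }
};

template<class Field>
class ZeroGuesser : public LinearFunction<Field> {
public:
  using LinearFunction<Field>::operator();  // the line this commit adds
  virtual void operator()(const Field &src, Field &guess) { guess = 0; }
};

int main() {
  ZeroGuesser<double> zg;
  double src = 1.0, guess = 5.0;
  zg(src, guess);                           // derived overload
  std::vector<double> s{1.0, 2.0}, g{0.0, 0.0};
  zg(s, g);                                 // base overload, visible only via the using-declaration
  return 0;
}

Without the using-declaration the call zg(s, g) does not compile, and -Woverloaded-virtual warns about the hidden virtual overload even when it is never called.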

View File

@@ -36,6 +36,7 @@ NAMESPACE_BEGIN(Grid);
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
using LinearFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;

View File

@@ -33,16 +33,19 @@ namespace Grid {
template<class Field>
class ZeroGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = Zero(); };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = src; };
};
@@ -57,6 +60,7 @@ private:
const unsigned int N;
public:
using LinearFunction<Field>::operator();
DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval)
: DeflatedGuesser(_evec, _eval, _evec.size())
@@ -87,6 +91,7 @@ private:
const std::vector<RealD> &eval_coarse;
public:
using LinearFunction<FineField>::operator();
LocalCoherenceDeflatedGuesser(const std::vector<FineField> &_subspace,
const std::vector<CoarseField> &_evec_coarse,
const std::vector<RealD> &_eval_coarse)

View File

@@ -159,7 +159,6 @@ void MemoryManager::Init(void)
char * str;
int Nc;
int NcS;
str= getenv("GRID_ALLOC_NCACHE_LARGE");
if ( str ) {

View File

@@ -77,23 +77,23 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define REGISTER
#ifdef GRID_SIMT
#define LOAD_CHIMU(ptype) \
#define LOAD_CHIMU(Ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm,lane); \
Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm,lane); \
Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm,lane); \
Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm,lane); \
Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm,lane); \
Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm,lane); \
Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm,lane); \
Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm,lane); \
Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm,lane); \
Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm,lane); \
Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm,lane); \
Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm,lane); }
Chimu_00=coalescedReadPermute<Ptype>(ref()(0)(0),perm,lane); \
Chimu_01=coalescedReadPermute<Ptype>(ref()(0)(1),perm,lane); \
Chimu_02=coalescedReadPermute<Ptype>(ref()(0)(2),perm,lane); \
Chimu_10=coalescedReadPermute<Ptype>(ref()(1)(0),perm,lane); \
Chimu_11=coalescedReadPermute<Ptype>(ref()(1)(1),perm,lane); \
Chimu_12=coalescedReadPermute<Ptype>(ref()(1)(2),perm,lane); \
Chimu_20=coalescedReadPermute<Ptype>(ref()(2)(0),perm,lane); \
Chimu_21=coalescedReadPermute<Ptype>(ref()(2)(1),perm,lane); \
Chimu_22=coalescedReadPermute<Ptype>(ref()(2)(2),perm,lane); \
Chimu_30=coalescedReadPermute<Ptype>(ref()(3)(0),perm,lane); \
Chimu_31=coalescedReadPermute<Ptype>(ref()(3)(1),perm,lane); \
Chimu_32=coalescedReadPermute<Ptype>(ref()(3)(2),perm,lane); }
#define PERMUTE_DIR(dir) ;
#else
#define LOAD_CHIMU(ptype) \
#define LOAD_CHIMU(Ptype) \
{const SiteSpinor & ref (in[offset]); \
Chimu_00=ref()(0)(0);\
Chimu_01=ref()(0)(1);\
@@ -110,10 +110,10 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#define PERMUTE_DIR(dir) \
permute##dir(Chi_00,Chi_00); \
permute##dir(Chi_01,Chi_01);\
permute##dir(Chi_02,Chi_02);\
permute##dir(Chi_01,Chi_01); \
permute##dir(Chi_02,Chi_02); \
permute##dir(Chi_10,Chi_10); \
permute##dir(Chi_11,Chi_11);\
permute##dir(Chi_11,Chi_11); \
permute##dir(Chi_12,Chi_12);
#endif
@@ -371,10 +371,11 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
result_32-= UChi_12;
#define HAND_STENCIL_LEGB(PROJ,PERM,DIR,RECON) \
{int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
@@ -386,14 +387,14 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON;
RECON; }
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
{ SE=&st_p[DIR+8*ss]; \
auto ptype=st_perm[DIR]; \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
@@ -405,24 +406,25 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
} \
acceleratorSynchronise(); \
MULT_2SPIN(DIR); \
RECON;
RECON; }
#define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON) \
SE=&st_p[DIR+8*ss]; \
ptype=st_perm[DIR]; \
{ SE=&st_p[DIR+8*ss]; \
auto ptype=st_perm[DIR]; \
/*SE=st.GetEntry(ptype,DIR,ss);*/ \
offset = SE->_offset; \
perm = SE->_permute; \
auto offset = SE->_offset; \
auto perm = SE->_permute; \
LOAD_CHIMU(PERM); \
PROJ; \
MULT_2SPIN(DIR); \
RECON;
RECON; }
#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
{ int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
local = SE->_is_local; \
perm = SE->_permute; \
auto offset = SE->_offset; \
auto local = SE->_is_local; \
auto perm = SE->_permute; \
if ( local ) { \
LOAD_CHIMU(PERM); \
PROJ; \
@@ -437,18 +439,19 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
MULT_2SPIN(DIR); \
RECON; \
} \
acceleratorSynchronise();
acceleratorSynchronise(); }
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
{ int ptype; \
SE=st.GetEntry(ptype,DIR,ss); \
offset = SE->_offset; \
auto offset = SE->_offset; \
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
LOAD_CHI; \
MULT_2SPIN(DIR); \
RECON; \
nmu++; \
} \
acceleratorSynchronise();
acceleratorSynchronise(); }
#define HAND_RESULT(ss) \
{ \
@@ -563,7 +566,6 @@ WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p,
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
@@ -593,9 +595,7 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
@@ -623,8 +623,6 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
int offset,local,perm, ptype;
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
@@ -640,8 +638,8 @@ template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@@ -652,7 +650,6 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_DECLARATIONS(Simt);
int offset,local,perm, ptype;
StencilEntry *SE;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
@@ -670,8 +667,8 @@ template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
@@ -682,7 +679,6 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
int offset,local,perm, ptype;
ZERO_RESULT;
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
@@ -699,8 +695,8 @@ template<class Impl> accelerator_inline void
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
@@ -711,7 +707,7 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
HAND_DECLARATIONS(Simt);
int offset, ptype;
// int offset, ptype;
StencilEntry *SE;
int nmu=0;
ZERO_RESULT;
@@ -730,8 +726,8 @@ template<class Impl> accelerator_inline
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
{
auto st_p = st._entries_p;
auto st_perm = st._permute_type;
// auto st_p = st._entries_p;
// auto st_perm = st._permute_type;
typedef typename Simd::scalar_type S;
typedef typename Simd::vector_type V;
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
@@ -742,7 +738,7 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldVi
HAND_DECLARATIONS(Simt);
StencilEntry *SE;
int offset, ptype;
// int offset, ptype;
int nmu=0;
ZERO_RESULT;
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
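A pattern to note in the HAND_STENCIL_LEG* rework above: each macro body is now a braced scope declaring its own auto-typed locals (offset, local, perm, ptype), so the function-scope declarations that some kernel variants never read, previously flagged as set-but-unused, can be deleted or commented out. A minimal sketch of the idiom, with hypothetical names rather than Grid's kernels:

// Hypothetical stencil-entry type; consume() stands in for the real work.
struct Entry { int _offset; int _permute; };
static Entry entry[8];
static int consume(int offset, int perm) { return offset + perm; }

// Old style: wrote into caller-declared variables, flagged as set-but-unused
// when a kernel variant never read them afterwards, roughly
//   #define STENCIL_LEG(DIR) offset = entry[DIR]._offset; perm = entry[DIR]._permute; ...

// New style: a braced scope with auto-typed locals, self-contained per leg.
#define STENCIL_LEG(DIR) {              \
  auto offset = entry[DIR]._offset;     \
  auto perm   = entry[DIR]._permute;    \
  acc += consume(offset, perm); }

int main() {
  int acc = 0;
  STENCIL_LEG(0);
  STENCIL_LEG(1);   // each expansion has its own scope, so no redefinition or shadowing
  return acc;
}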

View File

@@ -322,8 +322,8 @@ public:
int simd_layout = _grid->_simd_layout[dimension];
int comm_dim = _grid->_processors[dimension] >1 ;
int recv_from_rank;
int xmit_to_rank;
// int recv_from_rank;
// int xmit_to_rank;
if ( ! comm_dim ) return 1;
if ( displacement == 0 ) return 1;

View File

@@ -137,7 +137,7 @@ int main (int argc, char ** argv)
Eigen::MatrixXd mean(nVol, 4), stdDev(nVol, 4), rob(nVol, 4);
Eigen::VectorXd avMean(4), avStdDev(4), avRob(4);
double n = BENCH_IO_NPASS;
// double n = BENCH_IO_NPASS;
stats(mean, stdDev, perf);
stats(avMean, avStdDev, avPerf);
@@ -164,7 +164,7 @@ int main (int argc, char ** argv)
mean(volInd(l), gWrite), stdDev(volInd(l), gWrite));
}
MSG << std::endl;
MSG << "Robustness of individual results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
MSG << "Robustness of individual results, in %. (rob = 100% - std dev / mean)" << std::endl;
MSG << std::endl;
grid_printf("%4s %12s %12s %12s %12s\n",
"L", "std read", "std write", "Grid read", "Grid write");
@@ -185,7 +185,7 @@ int main (int argc, char ** argv)
avMean(sRead), avStdDev(sRead), avMean(sWrite), avStdDev(sWrite),
avMean(gRead), avStdDev(gRead), avMean(gWrite), avStdDev(gWrite));
MSG << std::endl;
MSG << "Robustness of volume-averaged results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
MSG << "Robustness of volume-averaged results, in %. (rob = 100% - std dev / mean)" << std::endl;
MSG << std::endl;
grid_printf("%12s %12s %12s %12s\n",
"std read", "std write", "Grid read", "Grid write");

View File

@@ -142,7 +142,7 @@ public:
// bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
}
int ncomm;
// int ncomm;
double dbytes;
for(int dir=0;dir<8;dir++) {
@@ -290,7 +290,7 @@ public:
LatticeSU4 z(&Grid); z=Zero();
LatticeSU4 x(&Grid); x=Zero();
LatticeSU4 y(&Grid); y=Zero();
double a=2.0;
// double a=2.0;
uint64_t Nloop=NLOOP;

View File

@@ -72,7 +72,7 @@ int main (int argc, char ** argv)
std::cout << GridLogMessage << "Number of iterations to average: "<< Nloop << std::endl;
std::vector<double> t_time(Nloop);
time_statistics timestat;
// time_statistics timestat;
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
std::cout<<GridLogMessage << "= Benchmarking sequential halo exchange from host memory "<<std::endl;

View File

@@ -184,8 +184,10 @@ int main (int argc, char ** argv)
double bytes=1.0*vol*Nvec*sizeof(Real);
double flops=vol*Nvec*2;// mul,add
std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"<<bytes<<" \t\t"<<bytes/time<<"\t\t"<<flops/time<< "\t\t"<<(stop-start)/1000./1000.<< "\t\t " <<std::endl;
std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"
<<bytes<<" \t\t"<<bytes/time<<"\t\t"<<flops/time<< "\t\t"
<<(stop-start)/1000./1000.<< "\t\t " <<std::endl;
assert(nn==nn);
}
Grid_finalize();

View File

@@ -9,6 +9,7 @@ using namespace std;
using namespace Grid;
typedef SpinColourMatrix Propagator;
typedef SpinColourVector Fermion;
typedef PeriodicGimplR GimplR;
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
{
@@ -55,6 +56,16 @@ void MakePhase(Coordinate mom,LatticeComplex &phase)
}
phase = exp(phase*ci);
}
void LinkSmear(int nstep, RealD rho,LatticeGaugeField &Uin,LatticeGaugeField &Usmr)
{
Smear_Stout<GimplR> Stout(rho);
LatticeGaugeField Utmp(Uin.Grid());
Utmp = Uin;
for(int i=0;i<nstep;i++){
Stout.smear(Usmr,Utmp);
Utmp = Usmr;
}
}
void PointSource(Coordinate &coor,LatticePropagator &source)
{
// Coordinate coor({0,0,0,0});
@@ -97,23 +108,23 @@ void GaugeFix(LatticeGaugeField &U,LatticeGaugeField &Ufix)
{
Real alpha=0.05;
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(U);
Real plaq=WilsonLoops<GimplR>::avgPlaquette(U);
std::cout << " Initial plaquette "<<plaq << std::endl;
LatticeColourMatrix xform(U.Grid());
Ufix = U;
int orthog=Nd-1;
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Ufix,xform,alpha,10000,1.0e-12, 1.0e-12,true,orthog);
FourierAcceleratedGaugeFixer<GimplR>::SteepestDescentGaugeFix(Ufix,xform,alpha,100000,1.0e-14, 1.0e-14,true,orthog);
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Ufix);
plaq=WilsonLoops<GimplR>::avgPlaquette(Ufix);
std::cout << " Final plaquette "<<plaq << std::endl;
}
template<class Field>
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
{
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
typedef CovariantLaplacianCshift <GimplR,Field> Laplacian_t;
Laplacian_t Laplacian(U);
Integer Iterations = 40;
@@ -167,19 +178,21 @@ void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
GridBase *UGrid = D.GaugeGrid();
GridBase *FGrid = D.FermionGrid();
LatticeFermion src4 (UGrid);
LatticeFermion src4 (UGrid); src4 = Zero();
LatticeFermion src5 (FGrid);
LatticeFermion result5(FGrid);
LatticeFermion result4(UGrid);
ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
ConjugateGradient<LatticeFermion> CG(1.0e-12,100000);
SchurRedBlackDiagTwoSolve<LatticeFermion> schur(CG);
ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
std::cout<<GridLogMessage<< " source4 "<<norm2(source)<<std::endl;
for(int s=0;s<Nd;s++){
for(int c=0;c<Nc;c++){
PropToFerm<Action>(src4,source,s,c);
std::cout<<GridLogMessage<< s<<c<<" src4 "<<norm2(src4)<<std::endl;
D.ImportPhysicalFermionSource(src4,src5);
std::cout<<GridLogMessage<< s<<c<<" src5 "<<norm2(src5)<<std::endl;
result5=Zero();
schur(D,src5,result5,ZG);
@@ -287,15 +300,10 @@ int main (int argc, char ** argv)
GridDefaultMpi());
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
//////////////////////////////////////////////////////////////////////
// You can manage seeds however you like.
// Recommend SeedUniqueString.
//////////////////////////////////////////////////////////////////////
std::vector<int> seeds4({1,2,3,4});
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
LatticeGaugeField Umu(UGrid);
LatticeGaugeField Ufixed(UGrid);
LatticeGaugeField Utmp(UGrid);
LatticeGaugeField Usmr(UGrid);
std::string config;
if( argc > 1 && argv[1][0] != '-' )
{
@@ -308,13 +316,20 @@ int main (int argc, char ** argv)
{
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
SU<Nc>::ColdConfiguration(Umu);
// SU<Nc>::HotConfiguration(RNG4,Umu);
config="HotConfig";
config="ColdConfig";
}
GaugeFix(Umu,Ufixed);
Umu=Ufixed;
// GaugeFix(Umu,Utmp);
// Umu=Utmp;
int nsmr=3;
RealD rho=0.1;
RealD plaq_gf =WilsonLoops<GimplR>::avgPlaquette(Umu);
LinkSmear(nsmr,rho,Umu,Usmr);
RealD plaq_smr=WilsonLoops<GimplR>::avgPlaquette(Usmr);
std::cout << GridLogMessage << " GF Plaquette " <<plaq_gf<<std::endl;
std::cout << GridLogMessage << " SM Plaquette " <<plaq_smr<<std::endl;
std::vector<int> smeared_link({ 0,0,1} );
std::vector<RealD> masses({ 0.004,0.02477,0.447} ); // u/d, s, c ??
std::vector<RealD> M5s ({ 1.8,1.8,1.0} );
std::vector<RealD> bs ({ 1.0,1.0,1.5} ); // DDM
@@ -330,6 +345,9 @@ int main (int argc, char ** argv)
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
std::cout<<GridLogMessage <<"======================"<<std::endl;
std::vector<Complex> boundary = {1,1,1,-1};
typedef MobiusFermionR FermionAction;
FermionAction::ImplParams Params(boundary);
for(int m=0;m<masses.size();m++) {
@@ -339,31 +357,41 @@ int main (int argc, char ** argv)
RealD c = cs[m];
int Ls = Ls_s[m];
if ( smeared_link[m] ) Utmp = Usmr;
else Utmp = Umu;
FGrids.push_back(SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid));
FrbGrids.push_back(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid));
FermActs.push_back(new MobiusFermionR(Umu,*FGrids[m],*FrbGrids[m],*UGrid,*UrbGrid,mass,M5,b,c));
FermActs.push_back(new MobiusFermionR(Utmp,*FGrids[m],*FrbGrids[m],*UGrid,*UrbGrid,mass,M5,b,c,Params));
}
LatticePropagator point_source(UGrid);
LatticePropagator z2wall_source(UGrid);
LatticePropagator gfwall_source(UGrid);
Coordinate Origin({0,0,0,0});
PointSource (Origin,point_source);
Z2WallSource (RNG4,0,z2wall_source);
GFWallSource (0,gfwall_source);
int tslice = 0;
//////////////////////////////////////////////////////////////////////
// RNG seeded for Z2 wall
//////////////////////////////////////////////////////////////////////
// You can manage seeds however you like.
// Recommend SeedUniqueString.
//////////////////////////////////////////////////////////////////////
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString("Study2-Source_Z2_p_0_0_0_t_0-880");
Z2WallSource (RNG4,tslice,z2wall_source);
GFWallSource (tslice,gfwall_source);
std::vector<LatticePropagator> PointProps(nmass,UGrid);
std::vector<LatticePropagator> GaussProps(nmass,UGrid);
std::vector<LatticePropagator> Z2Props (nmass,UGrid);
std::vector<LatticePropagator> GFProps (nmass,UGrid);
for(int m=0;m<nmass;m++) {
std::cout << GridLogMessage << " Mass " <<m << " z2wall source "<<norm2(z2wall_source)<<std::endl;
Solve(*FermActs[m],z2wall_source ,Z2Props[m]);
std::cout << GridLogMessage << " Mass " <<m << " gfwall source "<<norm2(gfwall_source)<<std::endl;
Solve(*FermActs[m],gfwall_source ,GFProps[m]);
std::cout << GridLogMessage << " Mass " <<m << " z2wall source "<<norm2(z2wall_source)<< " " << norm2(gfwall_source)<<std::endl;
}
LatticeComplex phase(UGrid);
@@ -383,14 +411,15 @@ int main (int argc, char ** argv)
std::stringstream wssg,wssz;
/// Point sinks
ssg<<config<< "_m" << m1 << "_m"<< m2 << "p_gf_meson.xml";
ssz<<config<< "_m" << m1 << "_m"<< m2 << "p_z2_meson.xml";
ssg<<config<< "_m" << m1 << "_m"<< m2 << "_p_gf_meson.xml";
ssz<<config<< "_m" << m1 << "_m"<< m2 << "_p_z2_meson.xml";
MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
MesonTrace(ssg.str(),GFProps[m1],GFProps[m2],phase);
/// Wall sinks
wssg<<config<< "_m" << m1 << "_m"<< m2 << "w_gf_meson.xml";
wssz<<config<< "_m" << m1 << "_m"<< m2 << "w_z2_meson.xml";
wssg<<config<< "_m" << m1 << "_m"<< m2 << "_w_gf_meson.xml";
wssz<<config<< "_m" << m1 << "_m"<< m2 << "_w_z2_meson.xml";
WallSinkMesonTrace(wssg.str(),wsnk_gfProps[m1],wsnk_gfProps[m2]);
WallSinkMesonTrace(wssz.str(),wsnk_z2Props[m1],wsnk_z2Props[m2]);
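For reference, the new LinkSmear helper above applies Grid's Smear_Stout nstep times, smearing Utmp into Usmr and copying back each iteration. A usage sketch under stated assumptions: LinkSmear is reproduced from the diff with PeriodicGimplR substituted for the test's GimplR typedef, and nstep=3, rho=0.1 match the values hard-coded in main():

#include <Grid/Grid.h>
using namespace Grid;

// Reproduced from the diff above, with PeriodicGimplR in place of GimplR.
void LinkSmear(int nstep, RealD rho, LatticeGaugeField &Uin, LatticeGaugeField &Usmr)
{
  Smear_Stout<PeriodicGimplR> Stout(rho);
  LatticeGaugeField Utmp(Uin.Grid());
  Utmp = Uin;
  for (int i = 0; i < nstep; i++) {
    Stout.smear(Usmr, Utmp);   // one stout step: Utmp -> Usmr
    Utmp = Usmr;
  }
}

int main(int argc, char **argv) {
  Grid_init(&argc, &argv);
  GridCartesian *UGrid = SpaceTimeGrid::makeFourDimGrid(
      GridDefaultLatt(), GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
  LatticeGaugeField U(UGrid), Usmr(UGrid);
  SU<Nc>::ColdConfiguration(U);  // unit links: the plaquette stays at 1 under smearing
  LinkSmear(3, 0.1, U, Usmr);    // 3 stout steps at rho = 0.1
  std::cout << GridLogMessage << " smeared plaquette "
            << WilsonLoops<PeriodicGimplR>::avgPlaquette(Usmr) << std::endl;
  Grid_finalize();
}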

View File

@@ -1,5 +1,6 @@
module load cuda/11.4.1 openmpi/4.1.1 ucx/1.10.1
module load cuda/11.4.1 openmpi/4.1.1-cuda11.4.1 ucx/1.12.0-cuda11.4.1
#module load cuda/11.4.1 openmpi/4.1.1 ucx/1.10.1
export PREFIX=/home/tc002/tc002/shared/env/prefix/
export LD_LIBRARY_PATH=$PREFIX/lib/:$LD_LIBRARY_PATH
unset SBATCH_EXPORT