Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-23 18:22:02 +01:00)

Compare commits: gauge-grou ... bb5c16b97f (96 commits)
Commit SHA1s (96):
bb5c16b97f 0d80eeb545 b0f4eee78b 5340e50427 0f1c5b08a1 70988e43d2 aab3bcb46f da06d15f73
e8b1251b8c fad5a74a4b e83f6a6ae9 0c1618197f f49d5c2d22 a3b022d469 48772f0976 c322420580
6283d11d50 86f4e17928 6616d5d090 215df671be 1b6b12589f 3082ab8252 add86cd7f4 0b6fd20c54
e83423fee6 b4f8e87982 135808dcfa 7f7d06d963 2bf3b4d576 f34d34bd17 e32d5141b4 6d5277f2d7
14d82777e0 2a4e739513 8079dc2a14 6ceb556684 76cde73705 cc094366a9 41a575ff9b 12ef413065
829a328451 402523c62e d7bef70b5c 2ad1811642 a65a497bae b27b12828e fe9edf8526 44204c7e06
33b3789598 195ab2888d 85f750d753 a4ce6e42c7 5398b7e7e3 fd13a3f2be c144b32368 ba7e371b90
99e7a5d18a f824d99059 749b8022a4 7e0057d2c4 cfe9e870d3 e9c4f06cbf 1f9688417a 16c2a99965
cda915a345 7c16189e16 ecbfccea43 a8eda8f6da 9b1a0653cf 7cb1ff7395 ab6ea29913 b5c81a02b6
d899ee80fc 4016e705fc 2f4e85e5d6 8ed0b57b09 7e130076d6 6efdad6f21 a822c48565 014fb76e88
30e5311b43 11ee8a1061 770680669d 0cdfc5cf22 428b8ba907 54c6b1376d f3f11b586f 8083e3f7e8
364793154b 3e2ae1e9af d38ae2fd18 030e7754e4 3b7fce1e76 4d15417f93 ab3c855f65 92e2c517d8
@@ -358,7 +358,7 @@ public:
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
auto& geom_v = geom;
int npoint = geom.npoint;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;

@@ -380,7 +380,7 @@ public:
int ptype;
StencilEntry *SE;
for(int point=0;point<geom_v.npoint;point++){
for(int point=0;point<npoint;point++){
SE=Stencil_v.GetEntry(ptype,point,ss);

@@ -424,7 +424,7 @@ public:
autoView( in_v , in, AcceleratorRead);
autoView( out_v , out, AcceleratorWrite);
autoView( Stencil_v , Stencil, AcceleratorRead);
auto& geom_v = geom;
int npoint = geom.npoint;
typedef LatticeView<Cobj> Aview;
Vector<Aview> AcceleratorViewContainer;

@@ -454,7 +454,7 @@ public:
int ptype;
StencilEntry *SE;
for(int p=0;p<geom_v.npoint;p++){
for(int p=0;p<npoint;p++){
int point = points_p[p];
SE=Stencil_v.GetEntry(ptype,point,ss);

@@ -52,6 +52,7 @@ public:
virtual void AdjOp (const Field &in, Field &out) = 0; // Abstract base
virtual void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2)=0;
virtual void HermOp(const Field &in, Field &out)=0;
virtual ~LinearOperatorBase(){};
};

@@ -507,7 +508,7 @@ class SchurStaggeredOperator : public SchurOperatorBase<Field> {
virtual void MpcDag (const Field &in, Field &out){
Mpc(in,out);
}
virtual void MpcDagMpc(const Field &in, Field &out,RealD &ni,RealD &no) {
virtual void MpcDagMpc(const Field &in, Field &out) {
assert(0);// Never need with staggered
}
};

@@ -585,6 +586,7 @@ class HermOpOperatorFunction : public OperatorFunction<Field> {
template<typename Field>
class PlainHermOp : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
LinearOperatorBase<Field> &_Linop;
PlainHermOp(LinearOperatorBase<Field>& linop) : _Linop(linop)

@@ -598,6 +600,7 @@ public:
template<typename Field>
class FunctionHermOp : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
OperatorFunction<Field> & _poly;
LinearOperatorBase<Field> &_Linop;

@@ -30,13 +30,19 @@ Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
NAMESPACE_BEGIN(Grid);
template<class Field> class Preconditioner : public LinearFunction<Field> {
template<class Field> using Preconditioner = LinearFunction<Field> ;
/*
template<class Field> class Preconditioner : public LinearFunction<Field> {
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field & psi)=0;
};
*/
template<class Field> class TrivialPrecon : public Preconditioner<Field> {
public:
void operator()(const Field &src, Field & psi){
using Preconditioner<Field>::operator();
virtual void operator()(const Field &src, Field & psi){
psi = src;
}
TrivialPrecon(void){};
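The hunk above replaces the abstract Preconditioner<Field> class with a plain alias for LinearFunction<Field>, so TrivialPrecon now pulls in LinearFunction's operator() and overrides it. The following standalone C++ sketch illustrates the same pattern under that assumption; LinearFunctionLike, PreconditionerLike and TrivialPreconLike are illustrative stand-ins, not Grid's actual classes.

#include <iostream>

// Stand-in for Grid's LinearFunction<Field>: a virtual callable mapping src -> psi.
template<class Field> struct LinearFunctionLike {
  virtual void operator()(const Field &src, Field &psi) = 0;
  virtual ~LinearFunctionLike() = default;
};

// With the class reduced to an alias, a "preconditioner" is just another LinearFunction.
template<class Field> using PreconditionerLike = LinearFunctionLike<Field>;

// Trivial preconditioner: copy the source straight into the solution guess.
template<class Field> struct TrivialPreconLike : PreconditionerLike<Field> {
  void operator()(const Field &src, Field &psi) override { psi = src; }
};

int main() {
  TrivialPreconLike<double> P;
  double src = 3.0, psi = 0.0;
  P(src, psi);                    // psi <- src
  std::cout << psi << std::endl;  // prints 3
  return 0;
}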
@@ -48,6 +48,7 @@ public:
virtual void Mdiag (const Field &in, Field &out)=0;
virtual void Mdir (const Field &in, Field &out,int dir, int disp)=0;
virtual void MdirAll (const Field &in, std::vector<Field> &out)=0;
virtual ~SparseMatrixBase() {};
};
/////////////////////////////////////////////////////////////////////////////////////////////

@@ -72,7 +73,7 @@ public:
virtual void MeooeDag (const Field &in, Field &out)=0;
virtual void MooeeDag (const Field &in, Field &out)=0;
virtual void MooeeInvDag (const Field &in, Field &out)=0;
virtual ~CheckerBoardedSparseMatrixBase() {};
};
NAMESPACE_END(Grid);

@@ -36,7 +36,8 @@ NAMESPACE_BEGIN(Grid);
template<class FieldD, class FieldF, typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0, typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionBiCGSTAB : public LinearFunction<FieldD>
{
public:
public:
using LinearFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; // Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;

@@ -35,7 +35,8 @@ NAMESPACE_BEGIN(Grid);
typename std::enable_if< getPrecision<FieldD>::value == 2, int>::type = 0,
typename std::enable_if< getPrecision<FieldF>::value == 1, int>::type = 0>
class MixedPrecisionConjugateGradient : public LinearFunction<FieldD> {
public:
public:
using LinearFunction<FieldD>::operator();
RealD Tolerance;
RealD InnerTolerance; //Initial tolerance for inner CG. Defaults to Tolerance but can be changed
Integer MaxInnerIterations;

@@ -33,16 +33,19 @@ namespace Grid {
template<class Field>
class ZeroGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = Zero(); };
};
template<class Field>
class DoNothingGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { };
};
template<class Field>
class SourceGuesser: public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
virtual void operator()(const Field &src, Field &guess) { guess = src; };
};

@@ -57,6 +60,7 @@ private:
const unsigned int N;
public:
using LinearFunction<Field>::operator();
DeflatedGuesser(const std::vector<Field> & _evec,const std::vector<RealD> & _eval)
: DeflatedGuesser(_evec, _eval, _evec.size())

@@ -87,6 +91,7 @@ private:
const std::vector<RealD> &eval_coarse;
public:
using LinearFunction<FineField>::operator();
LocalCoherenceDeflatedGuesser(const std::vector<FineField> &_subspace,
const std::vector<CoarseField> &_evec_coarse,
const std::vector<RealD> &_eval_coarse)

@@ -67,6 +67,7 @@ public:
template<class Fobj,class CComplex,int nbasis>
class ProjectedHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
using LinearFunction<Lattice<iVector<CComplex,nbasis > > >::operator();
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field

@@ -97,6 +98,7 @@ public:
template<class Fobj,class CComplex,int nbasis>
class ProjectedFunctionHermOp : public LinearFunction<Lattice<iVector<CComplex,nbasis > > > {
public:
using LinearFunction<Lattice<iVector<CComplex,nbasis > > >::operator();
typedef iVector<CComplex,nbasis > CoarseSiteVector;
typedef Lattice<CoarseSiteVector> CoarseField;
typedef Lattice<CComplex> CoarseScalar; // used for inner products on fine field

@@ -43,7 +43,7 @@ NAMESPACE_BEGIN(Grid);
template<class Field>
class PrecGeneralisedConjugateResidual : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
int verbose;

@@ -43,7 +43,7 @@ NAMESPACE_BEGIN(Grid);
template<class Field>
class PrecGeneralisedConjugateResidualNonHermitian : public LinearFunction<Field> {
public:
using LinearFunction<Field>::operator();
RealD Tolerance;
Integer MaxIterations;
int verbose;

@@ -119,7 +119,8 @@ public:
RealD GCRnStep(const Field &src, Field &psi,RealD rsq){
RealD cp;
ComplexD a, b, zAz;
ComplexD a, b;
// ComplexD zAz;
RealD zAAz;
ComplexD rq;

@@ -146,7 +147,7 @@ public:
//////////////////////////////////
MatTimer.Start();
Linop.Op(psi,Az);
zAz = innerProduct(Az,psi);
// zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
MatTimer.Stop();

@@ -170,7 +171,7 @@ public:
LinalgTimer.Start();
zAz = innerProduct(Az,psi);
// zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
//p[0],q[0],qq[0]

@@ -212,7 +213,7 @@ public:
MatTimer.Start();
Linop.Op(z,Az);
MatTimer.Stop();
zAz = innerProduct(Az,psi);
// zAz = innerProduct(Az,psi);
zAAz= norm2(Az);
LinalgTimer.Start();

@@ -9,14 +9,30 @@ NAMESPACE_BEGIN(Grid);
#define AccSmall (3)
#define Shared (4)
#define SharedSmall (5)
#undef GRID_MM_VERBOSE
uint64_t total_shared;
uint64_t total_device;
uint64_t total_host;;
void MemoryManager::PrintBytes(void)
{
std::cout << " MemoryManager : "<<total_shared<<" shared bytes "<<std::endl;
std::cout << " MemoryManager : "<<total_device<<" accelerator bytes "<<std::endl;
std::cout << " MemoryManager : "<<total_host <<" cpu bytes "<<std::endl;
std::cout << " MemoryManager : ------------------------------------ "<<std::endl;
std::cout << " MemoryManager : PrintBytes "<<std::endl;
std::cout << " MemoryManager : ------------------------------------ "<<std::endl;
std::cout << " MemoryManager : "<<(total_shared>>20)<<" shared Mbytes "<<std::endl;
std::cout << " MemoryManager : "<<(total_device>>20)<<" accelerator Mbytes "<<std::endl;
std::cout << " MemoryManager : "<<(total_host>>20) <<" cpu Mbytes "<<std::endl;
uint64_t cacheBytes;
cacheBytes = CacheBytes[Cpu];
std::cout << " MemoryManager : "<<(cacheBytes>>20) <<" cpu cache Mbytes "<<std::endl;
cacheBytes = CacheBytes[Acc];
std::cout << " MemoryManager : "<<(cacheBytes>>20) <<" acc cache Mbytes "<<std::endl;
cacheBytes = CacheBytes[Shared];
std::cout << " MemoryManager : "<<(cacheBytes>>20) <<" shared cache Mbytes "<<std::endl;
#ifdef GRID_CUDA
cuda_mem();
#endif
}
//////////////////////////////////////////////////////////////////////

@@ -24,86 +40,114 @@ void MemoryManager::PrintBytes(void)
//////////////////////////////////////////////////////////////////////
MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
int MemoryManager::Victim[MemoryManager::NallocType];
int MemoryManager::Ncache[MemoryManager::NallocType] = { 8, 32, 8, 32, 8, 32 };
int MemoryManager::Ncache[MemoryManager::NallocType] = { 2, 8, 2, 8, 2, 8 };
uint64_t MemoryManager::CacheBytes[MemoryManager::NallocType];
//////////////////////////////////////////////////////////////////////
// Actual allocation and deallocation utils
//////////////////////////////////////////////////////////////////////
void *MemoryManager::AcceleratorAllocate(size_t bytes)
{
total_device+=bytes;
void *ptr = (void *) Lookup(bytes,Acc);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocDevice(bytes);
total_device+=bytes;
}
#ifdef GRID_MM_VERBOSE
std::cout <<"AcceleratorAllocate "<<std::endl;
PrintBytes();
#endif
return ptr;
}
void MemoryManager::AcceleratorFree (void *ptr,size_t bytes)
{
total_device-=bytes;
void *__freeme = Insert(ptr,bytes,Acc);
if ( __freeme ) {
acceleratorFreeDevice(__freeme);
total_device-=bytes;
// PrintBytes();
}
#ifdef GRID_MM_VERBOSE
std::cout <<"AcceleratorFree "<<std::endl;
PrintBytes();
#endif
}
void *MemoryManager::SharedAllocate(size_t bytes)
{
total_shared+=bytes;
void *ptr = (void *) Lookup(bytes,Shared);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_shared+=bytes;
// std::cout <<"AcceleratorAllocate: allocated Shared pointer "<<std::hex<<ptr<<std::dec<<std::endl;
// PrintBytes();
}
#ifdef GRID_MM_VERBOSE
std::cout <<"SharedAllocate "<<std::endl;
PrintBytes();
#endif
return ptr;
}
void MemoryManager::SharedFree (void *ptr,size_t bytes)
{
total_shared-=bytes;
void *__freeme = Insert(ptr,bytes,Shared);
if ( __freeme ) {
acceleratorFreeShared(__freeme);
total_shared-=bytes;
// PrintBytes();
}
#ifdef GRID_MM_VERBOSE
std::cout <<"SharedFree "<<std::endl;
PrintBytes();
#endif
}
#ifdef GRID_UVM
void *MemoryManager::CpuAllocate(size_t bytes)
{
total_host+=bytes;
void *ptr = (void *) Lookup(bytes,Cpu);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocShared(bytes);
total_host+=bytes;
}
#ifdef GRID_MM_VERBOSE
std::cout <<"CpuAllocate "<<std::endl;
PrintBytes();
#endif
return ptr;
}
void MemoryManager::CpuFree (void *_ptr,size_t bytes)
{
total_host-=bytes;
NotifyDeletion(_ptr);
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) {
acceleratorFreeShared(__freeme);
total_host-=bytes;
}
#ifdef GRID_MM_VERBOSE
std::cout <<"CpuFree "<<std::endl;
PrintBytes();
#endif
}
#else
void *MemoryManager::CpuAllocate(size_t bytes)
{
total_host+=bytes;
void *ptr = (void *) Lookup(bytes,Cpu);
if ( ptr == (void *) NULL ) {
ptr = (void *) acceleratorAllocCpu(bytes);
total_host+=bytes;
}
#ifdef GRID_MM_VERBOSE
std::cout <<"CpuAllocate "<<std::endl;
PrintBytes();
#endif
return ptr;
}
void MemoryManager::CpuFree (void *_ptr,size_t bytes)
{
total_host-=bytes;
NotifyDeletion(_ptr);
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) {
acceleratorFreeCpu(__freeme);
total_host-=bytes;
}
#ifdef GRID_MM_VERBOSE
std::cout <<"CpuFree "<<std::endl;
PrintBytes();
#endif
}
#endif

@@ -115,7 +159,6 @@ void MemoryManager::Init(void)
char * str;
int Nc;
int NcS;
str= getenv("GRID_ALLOC_NCACHE_LARGE");
if ( str ) {

@@ -181,13 +224,13 @@ void *MemoryManager::Insert(void *ptr,size_t bytes,int type)
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
int cache = type + small;
return Insert(ptr,bytes,Entries[cache],Ncache[cache],Victim[cache]);
return Insert(ptr,bytes,Entries[cache],Ncache[cache],Victim[cache],CacheBytes[cache]);
#else
return ptr;
#endif
}
void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim)
void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim, uint64_t &cacheBytes)
{
assert(ncache>0);
#ifdef GRID_OMP

@@ -211,6 +254,7 @@ void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries
if ( entries[v].valid ) {
ret = entries[v].address;
cacheBytes -= entries[v].bytes;
entries[v].valid = 0;
entries[v].address = NULL;
entries[v].bytes = 0;

@@ -219,6 +263,7 @@ void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries
entries[v].address=ptr;
entries[v].bytes =bytes;
entries[v].valid =1;
cacheBytes += bytes;
return ret;
}

@@ -228,13 +273,13 @@ void *MemoryManager::Lookup(size_t bytes,int type)
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
int cache = type+small;
return Lookup(bytes,Entries[cache],Ncache[cache]);
return Lookup(bytes,Entries[cache],Ncache[cache],CacheBytes[cache]);
#else
return NULL;
#endif
}
void *MemoryManager::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache)
void *MemoryManager::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache,uint64_t & cacheBytes)
{
assert(ncache>0);
#ifdef GRID_OMP

@@ -243,6 +288,7 @@ void *MemoryManager::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncach
for(int e=0;e<ncache;e++){
if ( entries[e].valid && ( entries[e].bytes == bytes ) ) {
entries[e].valid = 0;
cacheBytes -= entries[e].bytes;
return entries[e].address;
}
}
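The MemoryManager hunks above shrink the default Ncache sizes, add a per-pool CacheBytes counter threaded through Insert/Lookup, and move the total_device/total_shared/total_host updates so they only change when memory actually reaches (or leaves) the underlying allocator rather than on every cache hit. The following is a minimal standalone sketch of that Insert/Lookup byte-accounting idea; the names and structure are illustrative, not Grid's actual implementation.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>

// Minimal size-matched free-list with byte accounting: the cacheBytes counter
// tracks how many bytes are currently parked in the cache.
struct CacheEntry { void *address = nullptr; size_t bytes = 0; bool valid = false; };

void *cacheInsert(void *ptr, size_t bytes, CacheEntry *entries, int ncache,
                  int &victim, uint64_t &cacheBytes) {
  int v = victim; victim = (victim + 1) % ncache;   // round-robin eviction slot
  void *evicted = nullptr;
  if (entries[v].valid) {                           // evict: those bytes leave the cache
    evicted = entries[v].address;
    cacheBytes -= entries[v].bytes;
  }
  entries[v] = { ptr, bytes, true };                // insert: these bytes enter the cache
  cacheBytes += bytes;
  return evicted;                                   // caller frees this, if non-null
}

void *cacheLookup(size_t bytes, CacheEntry *entries, int ncache, uint64_t &cacheBytes) {
  for (int e = 0; e < ncache; e++) {
    if (entries[e].valid && entries[e].bytes == bytes) {
      entries[e].valid = false;
      cacheBytes -= entries[e].bytes;               // hit: bytes leave the cache
      return entries[e].address;
    }
  }
  return nullptr;                                   // miss: caller allocates fresh
}

int main() {
  CacheEntry entries[4]; int victim = 0; uint64_t cacheBytes = 0;
  void *p = std::malloc(1024);
  void *evicted = cacheInsert(p, 1024, entries, 4, victim, cacheBytes);
  if (evicted) std::free(evicted);
  std::cout << "cached bytes: " << cacheBytes << std::endl;  // 1024
  void *q = cacheLookup(1024, entries, 4, cacheBytes);       // returns p, counter back to 0
  std::cout << "cached bytes: " << cacheBytes << std::endl;
  std::free(q);
  return 0;
}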
@@ -82,14 +82,15 @@ private:
static AllocationCacheEntry Entries[NallocType][NallocCacheMax];
static int Victim[NallocType];
static int Ncache[NallocType];
static uint64_t CacheBytes[NallocType];
/////////////////////////////////////////////////
// Free pool
/////////////////////////////////////////////////
static void *Insert(void *ptr,size_t bytes,int type) ;
static void *Lookup(size_t bytes,int type) ;
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache) ;
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim,uint64_t &cbytes) ;
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache,uint64_t &cbytes) ;
static void PrintBytes(void);
public:

@@ -169,6 +170,7 @@ private:
public:
static void Print(void);
static void PrintState( void* CpuPtr);
static int isOpen (void* CpuPtr);
static void ViewClose(void* CpuPtr,ViewMode mode);
static void *ViewOpen (void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);

@@ -3,7 +3,7 @@
#warning "Using explicit device memory copies"
NAMESPACE_BEGIN(Grid);
//define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
#define dprintf(...)

@@ -429,6 +429,7 @@ void MemoryManager::NotifyDeletion(void *_ptr)
}
void MemoryManager::Print(void)
{
PrintBytes();
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
std::cout << GridLogDebug << "Memory Manager " << std::endl;
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;

@@ -473,6 +474,32 @@ int MemoryManager::isOpen (void* _CpuPtr)
}
}
void MemoryManager::PrintState(void* _CpuPtr)
{
uint64_t CpuPtr = (uint64_t)_CpuPtr;
if ( EntryPresent(CpuPtr) ){
auto AccCacheIterator = EntryLookup(CpuPtr);
auto & AccCache = AccCacheIterator->second;
std::string str;
if ( AccCache.state==Empty ) str = std::string("Empty");
if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
if ( AccCache.state==Consistent)str = std::string("Consistent");
if ( AccCache.state==EvictNext) str = std::string("EvictNext");
std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
<< "\t" << AccCache.cpuLock
<< "\t" << AccCache.accLock
<< "\t" << AccCache.LRU_valid<<std::endl;
} else {
std::cout << GridLogMessage << "No Entry in AccCache table." << std::endl;
}
}
NAMESPACE_END(Grid);
#endif

@@ -16,6 +16,10 @@ uint64_t MemoryManager::DeviceToHostXfer;
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
int MemoryManager::isOpen (void* CpuPtr) { return 0;}
void MemoryManager::PrintState(void* CpuPtr)
{
std::cout << GridLogMessage << "Host<->Device memory movement not currently managed by Grid." << std::endl;
};
void MemoryManager::Print(void){};
void MemoryManager::NotifyDeletion(void *ptr){};

@@ -53,10 +53,11 @@ public:
// Communicator should know nothing of the physics grid, only processor grid.
////////////////////////////////////////////
int _Nprocessors; // How many in all
Coordinate _processors; // Which dimensions get relayed out over processors lanes.
int _processor; // linear processor rank
Coordinate _processor_coor; // linear processor coordinate
unsigned long _ndimension;
Coordinate _shm_processors; // Which dimensions get relayed out over processors lanes.
Coordinate _processors; // Which dimensions get relayed out over processors lanes.
Coordinate _processor_coor; // linear processor coordinate
static Grid_MPI_Comm communicator_world;
Grid_MPI_Comm communicator;
std::vector<Grid_MPI_Comm> communicator_halo;

@@ -97,8 +98,9 @@ public:
int BossRank(void) ;
int ThisRank(void) ;
const Coordinate & ThisProcessorCoor(void) ;
const Coordinate & ShmGrid(void) { return _shm_processors; } ;
const Coordinate & ProcessorGrid(void) ;
int ProcessorCount(void) ;
int ProcessorCount(void) ;
////////////////////////////////////////////////////////////////////////////////
// very VERY rarely (Log, serial RNG) we need world without a grid

@@ -142,16 +144,16 @@ public:
int bytes);
double StencilSendToRecvFrom(void *xmit,
int xmit_to_rank,
int xmit_to_rank,int do_xmit,
void *recv,
int recv_from_rank,
int recv_from_rank,int do_recv,
int bytes,int dir);
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
int xmit_to_rank,int do_xmit,
void *recv,
int recv_from_rank,
int recv_from_rank,int do_recv,
int bytes,int dir);

@@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
// Remap using the shared memory optimising routine
// The remap creates a comm which must be freed
////////////////////////////////////////////////////
GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm,_shm_processors);
InitFromMPICommunicator(processors,optimal_comm);
SetCommunicator(optimal_comm);
///////////////////////////////////////////////////

@@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
Coordinate parent_processor_coor(_ndimension,0);
Coordinate parent_processors (_ndimension,1);
Coordinate shm_processors (_ndimension,1);
// Can make 5d grid from 4d etc...
int pad = _ndimension-parent_ndimension;
for(int d=0;d<parent_ndimension;d++){
parent_processor_coor[pad+d]=parent._processor_coor[d];
parent_processors [pad+d]=parent._processors[d];
shm_processors [pad+d]=parent._shm_processors[d];
}
//////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
ccoor[d] = parent_processor_coor[d] % processors[d];
scoor[d] = parent_processor_coor[d] / processors[d];
ssize[d] = parent_processors[d] / processors[d];
if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
}
// rank within subcomm ; srank is rank of subcomm within blocks of subcomms

@@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
}
// Basic Halo comms primitive
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int dest,
int dest, int dox,
void *recv,
int from,
int from, int dor,
int bytes,int dir)
{
std::vector<CommsRequest_t> list;
double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
StencilSendToRecvFromComplete(list,dir);
return offbytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
int dest,int dox,
void *recv,
int from,
int from,int dor,
int bytes,int dir)
{
int ncomm =communicator_halo.size();

@@ -370,28 +372,32 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
double off_node_bytes=0.0;
int tag;
if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+from*32;
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=bytes;
if ( dox ) {
if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+from*32;
ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
assert(ierr==0);
list.push_back(rrq);
off_node_bytes+=bytes;
}
}
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
list.push_back(xrq);
off_node_bytes+=bytes;
} else {
if (dor) {
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
assert(ierr==0);
list.push_back(xrq);
off_node_bytes+=bytes;
} else {
// TODO : make a OMP loop on CPU, call threaded bcopy
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
acceleratorCopySynchronise(); // MPI prob slower
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
// std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
}
}
if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
this->StencilSendToRecvFromComplete(list,dir);
}

@@ -400,6 +406,9 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
}
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
{
// std::cout << "Copy Synchronised\n"<<std::endl;
acceleratorCopySynchronise();
int nreq=list.size();
if (nreq==0) return;
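The communicator hunks above add do_xmit/do_recv style flags (dox, dor) to StencilSendToRecvFrom and StencilSendToRecvFromBegin, so a halo exchange can skip posting one leg entirely. A schematic standalone sketch of that gating pattern follows; Request and exchange are hypothetical stand-ins, not Grid's or MPI's API.

#include <cstddef>
#include <vector>

// Schematic halo exchange: each communication leg is posted only when its flag
// is set; the byte count reflects only the legs that were actually issued.
struct Request { int id; };

double exchange(bool post_recv, bool post_send, size_t bytes,
                std::vector<Request> &list) {
  double off_node_bytes = 0.0;
  if (post_recv) {            // analogue of the gated receive block
    list.push_back({0});
    off_node_bytes += bytes;
  }
  if (post_send) {            // analogue of the gated send / shm-copy block
    list.push_back({1});
    off_node_bytes += bytes;
  }
  return off_node_bytes;      // caller completes the requests in 'list' later
}

int main() {
  std::vector<Request> list;
  double b = exchange(true, false, 4096, list);   // receive-only exchange
  return (b == 4096.0 && list.size() == 1) ? 0 : 1;
}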
@@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
: CartesianCommunicator(processors)
{
_shm_processors = Coordinate(processors.size(),1);
srank=0;
SetCommunicator(communicator_world);
}
CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
{
_shm_processors = Coordinate(processors.size(),1);
_processors = processors;
_ndimension = processors.size(); assert(_ndimension>=1);
_processor_coor.resize(_ndimension);

@@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest
}
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
int xmit_to_rank,
int xmit_to_rank,int dox,
void *recv,
int recv_from_rank,
int recv_from_rank,int dor,
int bytes, int dir)
{
return 2.0*bytes;
}
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int xmit_to_rank,
int xmit_to_rank,int dox,
void *recv,
int recv_from_rank,
int recv_from_rank,int dor,
int bytes, int dir)
{
return 2.0*bytes;

@@ -93,9 +93,10 @@ public:
// Create an optimal reordered communicator that makes MPI_Cart_create get it right
//////////////////////////////////////////////////////////////////////////////////////
static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
static void OptimalCommunicator (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorHypercube (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
// Turns MPI_COMM_WORLD into right layout for Cartesian
static void OptimalCommunicator (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
static void OptimalCommunicatorHypercube (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
///////////////////////////////////////////////////
// Provide shared memory facilities off comm world

@@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
}
return log2size;
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
//////////////////////////////////////////////////////////////////////////////
// Look and see if it looks like an HPE 8600 based on hostname conventions

@@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
gethostname(name,namelen);
int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
else OptimalCommunicatorSharedMemory(processors,optimal_comm);
if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
else OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
}
static inline int divides(int a,int b)
{

@@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmD
dim=(dim+1) %ndimension;
}
}
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
////////////////////////////////////////////////////////////////
// Assert power of two shm_size.

@@ -294,7 +294,8 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
Coordinate HyperCoor(ndimension);
GetShmDims(WorldDims,ShmDims);
SHM = ShmDims;
////////////////////////////////////////////////////////////////
// Establish torus of processes and nodes with sub-blockings
////////////////////////////////////////////////////////////////

@@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
assert(ierr==0);
}
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
////////////////////////////////////////////////////////////////
// Identify subblock of ranks on node spreading across dims

@@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &proce
Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);
GetShmDims(WorldDims,ShmDims);
SHM=ShmDims;
////////////////////////////////////////////////////////////////
// Establish torus of processes and nodes with sub-blockings
////////////////////////////////////////////////////////////////

@@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
_ShmSetup=1;
}
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
{
optimal_comm = WorldComm;
SHM = Coordinate(processors.size(),1);
}
////////////////////////////////////////////////////////////////////////////////////////////

@@ -46,3 +46,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_unary.h>
#include <Grid/lattice/Lattice_transfer.h>
#include <Grid/lattice/Lattice_basis.h>
#include <Grid/lattice/Lattice_crc.h>

@@ -88,6 +88,13 @@ public:
LatticeView<vobj> accessor(*( (LatticeAccelerator<vobj> *) this),mode);
accessor.ViewClose();
}
// Helper function to print the state of this object in the AccCache
void PrintCacheState(void)
{
MemoryManager::PrintState(this->_odata);
}
/////////////////////////////////////////////////////////////////////////////////
// Return a view object that may be dereferenced in site loops.
// The view is trivially copy constructible and may be copied to an accelerator device

Grid/lattice/Lattice_crc.h (new file, 55 lines)
@@ -0,0 +1,55 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/lattice/Lattice_crc.h

Copyright (C) 2021

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once

NAMESPACE_BEGIN(Grid);

template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
{
auto ff = localNorm2(f);
if ( mu==-1 ) mu = f.Grid()->Nd()-1;
typedef typename vobj::tensor_reduced normtype;
typedef typename normtype::scalar_object scalar;
std::vector<scalar> sff;
sliceSum(ff,sff,mu);
for(int t=0;t<sff.size();t++){
std::cout << s<<" "<<t<<" "<<sff[t]<<std::endl;
}
}

template<class vobj> uint32_t crc(Lattice<vobj> & buf)
{
autoView( buf_v , buf, CpuRead);
return ::crc32(0L,(unsigned char *)&buf_v[0],(size_t)sizeof(vobj)*buf.oSites());
}

#define CRC(U) std::cout << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;

NAMESPACE_END(Grid);
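The new Lattice_crc.h above fingerprints the raw lattice data by handing the CPU view's bytes to zlib's crc32. A standalone sketch of the same call on an ordinary buffer follows (compile with -lz); the buffer contents here are made up for illustration.

#include <zlib.h>
#include <cstdio>
#include <vector>

int main() {
  // Analogue of crc(Lattice&): checksum the contiguous bytes of a buffer.
  std::vector<double> buf(64, 1.5);
  uLong fp = ::crc32(0L,
                     reinterpret_cast<const Bytef*>(buf.data()),
                     static_cast<uInt>(buf.size() * sizeof(double)));
  std::printf("FingerPrint %08lx\n", (unsigned long)fp);
  return 0;
}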
@@ -42,7 +42,6 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator
std::cout << GridLogDebug << "\twarpSize = " << warpSize << std::endl;
std::cout << GridLogDebug << "\tsharedMemPerBlock = " << sharedMemPerBlock << std::endl;
std::cout << GridLogDebug << "\tmaxThreadsPerBlock = " << maxThreadsPerBlock << std::endl;
std::cout << GridLogDebug << "\tmaxThreadsPerBlock = " << warpSize << std::endl;
std::cout << GridLogDebug << "\tmultiProcessorCount = " << multiProcessorCount << std::endl;
if (warpSize != WARP_SIZE) {

@@ -52,6 +51,10 @@ void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator
// let the number of threads in a block be a multiple of 2, starting from warpSize
threads = warpSize;
if ( threads*sizeofsobj > sharedMemPerBlock ) {
std::cout << GridLogError << "The object is too large for the shared memory." << std::endl;
exit(EXIT_FAILURE);
}
while( 2*threads*sizeofsobj < sharedMemPerBlock && 2*threads <= maxThreadsPerBlock ) threads *= 2;
// keep all the streaming multiprocessors busy
blocks = nextPow2(multiProcessorCount);

@@ -85,6 +85,76 @@ template<class vobj> inline void setCheckerboard(Lattice<vobj> &full,const Latti
});
}
template<class vobj> inline void acceleratorPickCheckerboard(int cb,Lattice<vobj> &half,const Lattice<vobj> &full, int checker_dim_half=0)
{
half.Checkerboard() = cb;
autoView(half_v, half, AcceleratorWrite);
autoView(full_v, full, AcceleratorRead);
Coordinate rdim_full = full.Grid()->_rdimensions;
Coordinate rdim_half = half.Grid()->_rdimensions;
unsigned long ndim_half = half.Grid()->_ndimension;
Coordinate checker_dim_mask_half = half.Grid()->_checker_dim_mask;
Coordinate ostride_half = half.Grid()->_ostride;
accelerator_for(ss, full.Grid()->oSites(),full.Grid()->Nsimd(),{
Coordinate coor;
int cbos;
int linear=0;
Lexicographic::CoorFromIndex(coor,ss,rdim_full);
assert(coor.size()==ndim_half);
for(int d=0;d<ndim_half;d++){
if(checker_dim_mask_half[d]) linear += coor[d];
}
cbos = (linear&0x1);
if (cbos==cb) {
int ssh=0;
for(int d=0;d<ndim_half;d++) {
if (d == checker_dim_half) ssh += ostride_half[d] * ((coor[d] / 2) % rdim_half[d]);
else ssh += ostride_half[d] * (coor[d] % rdim_half[d]);
}
coalescedWrite(half_v[ssh],full_v(ss));
}
});
}
template<class vobj> inline void acceleratorSetCheckerboard(Lattice<vobj> &full,const Lattice<vobj> &half, int checker_dim_half=0)
{
int cb = half.Checkerboard();
autoView(half_v , half, AcceleratorRead);
autoView(full_v , full, AcceleratorWrite);
Coordinate rdim_full = full.Grid()->_rdimensions;
Coordinate rdim_half = half.Grid()->_rdimensions;
unsigned long ndim_half = half.Grid()->_ndimension;
Coordinate checker_dim_mask_half = half.Grid()->_checker_dim_mask;
Coordinate ostride_half = half.Grid()->_ostride;
accelerator_for(ss,full.Grid()->oSites(),full.Grid()->Nsimd(),{
Coordinate coor;
int cbos;
int linear=0;
Lexicographic::CoorFromIndex(coor,ss,rdim_full);
assert(coor.size()==ndim_half);
for(int d=0;d<ndim_half;d++){
if(checker_dim_mask_half[d]) linear += coor[d];
}
cbos = (linear&0x1);
if (cbos==cb) {
int ssh=0;
for(int d=0;d<ndim_half;d++){
if (d == checker_dim_half) ssh += ostride_half[d] * ((coor[d] / 2) % rdim_half[d]);
else ssh += ostride_half[d] * (coor[d] % rdim_half[d]);
}
coalescedWrite(full_v[ss],half_v(ssh));
}
});
}
////////////////////////////////////////////////////////////////////////////////////////////
// Flexible Type Conversion for internal promotion to double as well as graceful
// treatment of scalar-compatible types

@@ -69,6 +69,7 @@ GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");
void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogError.Active(0);

@@ -79,6 +80,7 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
GridLogPerformance.Active(0);
GridLogIntegrator.Active(1);
GridLogColours.Active(0);
GridLogHMC.Active(1);
for (int i = 0; i < logstreams.size(); i++) {
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);

@@ -87,7 +89,8 @@ void GridLogConfigure(std::vector<std::string> &logstreams) {
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
}
}

@@ -182,6 +182,7 @@ extern GridLogger GridLogDebug ;
extern GridLogger GridLogPerformance;
extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern GridLogger GridLogHMC;
extern Colours GridLogColours;
std::string demangle(const char* name) ;

@@ -576,6 +576,8 @@ class ScidacReader : public GridLimeReader {
std::string rec_name(ILDG_BINARY_DATA);
while ( limeReaderNextRecord(LimeR) == LIME_SUCCESS ) {
if ( !strncmp(limeReaderType(LimeR), rec_name.c_str(),strlen(rec_name.c_str()) ) ) {
// in principle should do the line below, but that breaks backard compatibility with old data
// skipPastObjectRecord(std::string(GRID_FIELD_NORM));
skipPastObjectRecord(std::string(SCIDAC_CHECKSUM));
return;
}

@@ -40,6 +40,29 @@ class Action
public:
bool is_smeared = false;
RealD deriv_norm_sum;
RealD deriv_max_sum;
int deriv_num;
RealD deriv_us;
RealD S_us;
RealD refresh_us;
void reset_timer(void) {
deriv_us = S_us = refresh_us = 0.0;
deriv_num=0;
deriv_norm_sum = deriv_max_sum=0.0;
}
void deriv_log(RealD nrm, RealD max) { deriv_max_sum+=max; deriv_norm_sum+=nrm; deriv_num++;}
RealD deriv_max_average(void) { return deriv_max_sum/deriv_num; };
RealD deriv_norm_average(void) { return deriv_norm_sum/deriv_num; };
RealD deriv_timer(void) { return deriv_us; };
RealD S_timer(void) { return deriv_us; };
RealD refresh_timer(void) { return deriv_us; };
void deriv_timer_start(void) { deriv_us-=usecond(); }
void deriv_timer_stop(void) { deriv_us+=usecond(); }
void refresh_timer_start(void) { refresh_us-=usecond(); }
void refresh_timer_stop(void) { refresh_us+=usecond(); }
void S_timer_start(void) { S_us-=usecond(); }
void S_timer_stop(void) { S_us+=usecond(); }
// Heatbath?
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
virtual RealD S(const GaugeField& U) = 0; // evaluate the action
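The Action hunk above adds per-action timers whose start helpers subtract the current time in microseconds and whose stop helpers add it back, so repeated intervals accumulate in a single variable. A standalone sketch of that idiom follows, using std::chrono in place of Grid's usecond(); AccumTimer is an illustrative stand-in, not Grid's class.

#include <chrono>
#include <cstdio>

// Current time in microseconds as a double.
static double usec_now() {
  using namespace std::chrono;
  return duration<double, std::micro>(steady_clock::now().time_since_epoch()).count();
}

// Grid-style accumulating timer: start() subtracts the clock, stop() adds it,
// so each (stop - start) interval is summed into 'us'.
struct AccumTimer {
  double us = 0.0;
  void start() { us -= usec_now(); }
  void stop()  { us += usec_now(); }
};

int main() {
  AccumTimer deriv;
  for (int i = 0; i < 3; i++) {
    deriv.start();
    volatile double x = 0; for (int k = 0; k < 100000; k++) x += k;  // stand-in work
    deriv.stop();
  }
  std::printf("deriv time: %.1f us over 3 calls\n", deriv.us);
  return 0;
}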
@@ -37,6 +37,10 @@ NAMESPACE_CHECK(ActionSet);
#include <Grid/qcd/action/ActionParams.h>
NAMESPACE_CHECK(ActionParams);
#include <Grid/qcd/action/filters/MomentumFilter.h>
#include <Grid/qcd/action/filters/DirichletFilter.h>
#include <Grid/qcd/action/filters/DDHMCFilter.h>
////////////////////////////////////////////
// Gauge Actions
////////////////////////////////////////////

Grid/qcd/action/fermion/CompactWilsonCloverFermion.h (new file, 240 lines)
@@ -0,0 +1,240 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/qcd/action/fermion/CompactWilsonCloverFermion.h

Copyright (C) 2020 - 2022

Author: Daniel Richtmann <daniel.richtmann@gmail.com>
Author: Nils Meyer <nils.meyer@ur.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#pragma once

#include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
#include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>

NAMESPACE_BEGIN(Grid);

// see Grid/qcd/action/fermion/WilsonCloverFermion.h for description
//
// Modifications done here:
//
// Original: clover term = 12x12 matrix per site
//
// But: Only two diagonal 6x6 hermitian blocks are non-zero (also true for original, verified by running)
// Sufficient to store/transfer only the real parts of the diagonal and one triangular part
// 2 * (6 + 15 * 2) = 72 real or 36 complex words to be stored/transfered
//
// Here: Above but diagonal as complex numbers, i.e., need to store/transfer
// 2 * (6 * 2 + 15 * 2) = 84 real or 42 complex words
//
// Words per site and improvement compared to original (combined with the input and output spinors):
//
// - Original: 2*12 + 12*12 = 168 words -> 1.00 x less
// - Minimal:  2*12 + 36    =  60 words -> 2.80 x less
// - Here:     2*12 + 42    =  66 words -> 2.55 x less
//
// These improvements directly translate to wall-clock time
//
// Data layout:
//
// - diagonal and triangle part as separate lattice fields,
//   this was faster than as 1 combined field on all tested machines
// - diagonal: as expected
// - triangle: store upper right triangle in row major order
// - graphical:
//   0  1  2  3  4
//      5  6  7  8
//         9 10 11   = upper right triangle indices
//           12 13
//              14
//   0
//      1
//         2
//            3      = diagonal indices
//               4
//                  5
//   0
//   1  5
//   2  6  9         = lower left triangle indices
//   3  7 10 12
//   4  8 11 13 14
//
// Impact on total memory consumption:
// - Original: (2 * 1 + 8 * 1/2) 12x12 matrices = 6 12x12 matrices = 864 complex words per site
// - Here:     (2 * 1 + 4 * 1/2) diagonal parts = 4 diagonal parts =  24 complex words per site
//           + (2 * 1 + 4 * 1/2) triangle parts = 4 triangle parts =  60 complex words per site
//                                                                 =  84 complex words per site

template<class Impl>
class CompactWilsonCloverFermion : public WilsonFermion<Impl>,
public WilsonCloverHelpers<Impl>,
public CompactWilsonCloverHelpers<Impl> {
/////////////////////////////////////////////
// Sizes
/////////////////////////////////////////////
public:
INHERIT_COMPACT_CLOVER_SIZES(Impl);
/////////////////////////////////////////////
// Type definitions
/////////////////////////////////////////////
public:
INHERIT_IMPL_TYPES(Impl);
INHERIT_CLOVER_TYPES(Impl);
INHERIT_COMPACT_CLOVER_TYPES(Impl);
typedef WilsonFermion<Impl> WilsonBase;
typedef WilsonCloverHelpers<Impl> Helpers;
typedef CompactWilsonCloverHelpers<Impl> CompactHelpers;
/////////////////////////////////////////////
// Constructors
/////////////////////////////////////////////
public:
CompactWilsonCloverFermion(GaugeField& _Umu,
GridCartesian& Fgrid,
GridRedBlackCartesian& Hgrid,
const RealD _mass,
const RealD _csw_r = 0.0,
const RealD _csw_t = 0.0,
const RealD _cF = 1.0,
const WilsonAnisotropyCoefficients& clover_anisotropy = WilsonAnisotropyCoefficients(),
const ImplParams& impl_p = ImplParams());
/////////////////////////////////////////////
// Member functions (implementing interface)
/////////////////////////////////////////////
public:
virtual void Instantiatable() {};
int ConstEE() override { return 0; };
int isTrivialEE() override { return 0; };
void Dhop(const FermionField& in, FermionField& out, int dag) override;
void DhopOE(const FermionField& in, FermionField& out, int dag) override;
void DhopEO(const FermionField& in, FermionField& out, int dag) override;
void DhopDir(const FermionField& in, FermionField& out, int dir, int disp) override;
void DhopDirAll(const FermionField& in, std::vector<FermionField>& out) /* override */;
void M(const FermionField& in, FermionField& out) override;
void Mdag(const FermionField& in, FermionField& out) override;
void Meooe(const FermionField& in, FermionField& out) override;
void MeooeDag(const FermionField& in, FermionField& out) override;
void Mooee(const FermionField& in, FermionField& out) override;
void MooeeDag(const FermionField& in, FermionField& out) override;
void MooeeInv(const FermionField& in, FermionField& out) override;
void MooeeInvDag(const FermionField& in, FermionField& out) override;
void Mdir(const FermionField& in, FermionField& out, int dir, int disp) override;
void MdirAll(const FermionField& in, std::vector<FermionField>& out) override;
void MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) override;
void MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) override;
void MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) override;
/////////////////////////////////////////////
// Member functions (internals)
/////////////////////////////////////////////
void MooeeInternal(const FermionField& in,
FermionField& out,
const CloverDiagonalField& diagonal,
const CloverTriangleField& triangle);
/////////////////////////////////////////////
// Helpers
/////////////////////////////////////////////
void ImportGauge(const GaugeField& _Umu) override;
/////////////////////////////////////////////
// Helpers
/////////////////////////////////////////////
private:
template<class Field>
const MaskField* getCorrectMaskField(const Field &in) const {
if(in.Grid()->_isCheckerBoarded) {
if(in.Checkerboard() == Odd) {
return &this->BoundaryMaskOdd;
} else {
return &this->BoundaryMaskEven;
}
} else {
return &this->BoundaryMask;
}
}
template<class Field>
void ApplyBoundaryMask(Field& f) {
const MaskField* m = getCorrectMaskField(f); assert(m != nullptr);
assert(m != nullptr);
CompactHelpers::ApplyBoundaryMask(f, *m);
}
/////////////////////////////////////////////
// Member Data
/////////////////////////////////////////////
public:
RealD csw_r;
RealD csw_t;
RealD cF;
bool open_boundaries;
CloverDiagonalField Diagonal, DiagonalEven, DiagonalOdd;
CloverDiagonalField DiagonalInv, DiagonalInvEven, DiagonalInvOdd;
CloverTriangleField Triangle, TriangleEven, TriangleOdd;
CloverTriangleField TriangleInv, TriangleInvEven, TriangleInvOdd;
FermionField Tmp;
MaskField BoundaryMask, BoundaryMaskEven, BoundaryMaskOdd;
};

NAMESPACE_END(Grid);
@ -53,6 +53,7 @@ NAMESPACE_CHECK(Wilson);
|
||||
#include <Grid/qcd/action/fermion/WilsonTMFermion.h> // 4d wilson like
|
||||
NAMESPACE_CHECK(WilsonTM);
|
||||
#include <Grid/qcd/action/fermion/WilsonCloverFermion.h> // 4d wilson clover fermions
|
||||
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h> // 4d compact wilson clover fermions
|
||||
NAMESPACE_CHECK(WilsonClover);
|
||||
#include <Grid/qcd/action/fermion/WilsonFermion5D.h> // 5d base used by all 5d overlap types
|
||||
NAMESPACE_CHECK(Wilson5D);
|
||||
@ -153,6 +154,23 @@ typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> WilsonCloverTwoInd
|
||||
typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> WilsonCloverTwoIndexAntiSymmetricFermionF;
|
||||
typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> WilsonCloverTwoIndexAntiSymmetricFermionD;
|
||||
|
||||
// Compact Clover fermions
|
||||
typedef CompactWilsonCloverFermion<WilsonImplR> CompactWilsonCloverFermionR;
|
||||
typedef CompactWilsonCloverFermion<WilsonImplF> CompactWilsonCloverFermionF;
|
||||
typedef CompactWilsonCloverFermion<WilsonImplD> CompactWilsonCloverFermionD;
|
||||
|
||||
typedef CompactWilsonCloverFermion<WilsonAdjImplR> CompactWilsonCloverAdjFermionR;
|
||||
typedef CompactWilsonCloverFermion<WilsonAdjImplF> CompactWilsonCloverAdjFermionF;
|
||||
typedef CompactWilsonCloverFermion<WilsonAdjImplD> CompactWilsonCloverAdjFermionD;
|
||||
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplR> CompactWilsonCloverTwoIndexSymmetricFermionR;
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplF> CompactWilsonCloverTwoIndexSymmetricFermionF;
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplD> CompactWilsonCloverTwoIndexSymmetricFermionD;
|
||||
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> CompactWilsonCloverTwoIndexAntiSymmetricFermionR;
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> CompactWilsonCloverTwoIndexAntiSymmetricFermionF;
|
||||
typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> CompactWilsonCloverTwoIndexAntiSymmetricFermionD;
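// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): minimal construction of the new
// compact clover operator from the typedefs above. Grid/gauge-field names are
// illustrative assumptions; the argument order (mass, csw_r, csw_t, cF) follows
// the CompactWilsonCloverFermion constructor declaration.
//
//   GridCartesian*         UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
//                                      GridDefaultSimd(Nd, vComplexD::Nsimd()), GridDefaultMpi());
//   GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
//   LatticeGaugeFieldD     Umu(UGrid);             // assumed loaded or generated elsewhere
//   RealD mass = -0.1, csw_r = 1.0, csw_t = 1.0, cF = 1.0;
//   CompactWilsonCloverFermionD Dcompact(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, cF);
// ---------------------------------------------------------------------------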
|
||||
|
||||
// Domain Wall fermions
|
||||
typedef DomainWallFermion<WilsonImplR> DomainWallFermionR;
|
||||
typedef DomainWallFermion<WilsonImplF> DomainWallFermionF;
|
||||
|
@ -49,6 +49,8 @@ public:
|
||||
|
||||
virtual FermionField &tmp(void) = 0;
|
||||
|
||||
virtual void DirichletBlock(Coordinate & _Block) { assert(0); };
|
||||
|
||||
GridBase * Grid(void) { return FermionGrid(); }; // this is all the linalg routines need to know
|
||||
GridBase * RedBlackGrid(void) { return FermionRedBlackGrid(); };
|
||||
|
||||
|
@ -4,10 +4,11 @@
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/WilsonCloverFermion.h
|
||||
|
||||
Copyright (C) 2017
|
||||
Copyright (C) 2017 - 2022
|
||||
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
Author: David Preti <>
|
||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -29,7 +30,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <Grid/Grid.h>
|
||||
#include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
|
||||
#include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
@ -50,18 +52,15 @@ NAMESPACE_BEGIN(Grid);
|
||||
//////////////////////////////////////////////////////////////////
|
||||
|
||||
template <class Impl>
|
||||
class WilsonCloverFermion : public WilsonFermion<Impl>
|
||||
class WilsonCloverFermion : public WilsonFermion<Impl>,
|
||||
public WilsonCloverHelpers<Impl>
|
||||
{
|
||||
public:
|
||||
// Types definitions
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
template <typename vtype>
|
||||
using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;
|
||||
typedef iImplClover<Simd> SiteCloverType;
|
||||
typedef Lattice<SiteCloverType> CloverFieldType;
|
||||
INHERIT_CLOVER_TYPES(Impl);
|
||||
|
||||
public:
|
||||
typedef WilsonFermion<Impl> WilsonBase;
|
||||
typedef WilsonFermion<Impl> WilsonBase;
|
||||
typedef WilsonCloverHelpers<Impl> Helpers;
|
||||
|
||||
virtual int ConstEE(void) { return 0; };
|
||||
virtual void Instantiatable(void){};
|
||||
@ -72,42 +71,7 @@ public:
|
||||
const RealD _csw_r = 0.0,
|
||||
const RealD _csw_t = 0.0,
|
||||
const WilsonAnisotropyCoefficients &clover_anisotropy = WilsonAnisotropyCoefficients(),
|
||||
const ImplParams &impl_p = ImplParams()) : WilsonFermion<Impl>(_Umu,
|
||||
Fgrid,
|
||||
Hgrid,
|
||||
_mass, impl_p, clover_anisotropy),
|
||||
CloverTerm(&Fgrid),
|
||||
CloverTermInv(&Fgrid),
|
||||
CloverTermEven(&Hgrid),
|
||||
CloverTermOdd(&Hgrid),
|
||||
CloverTermInvEven(&Hgrid),
|
||||
CloverTermInvOdd(&Hgrid),
|
||||
CloverTermDagEven(&Hgrid),
|
||||
CloverTermDagOdd(&Hgrid),
|
||||
CloverTermInvDagEven(&Hgrid),
|
||||
CloverTermInvDagOdd(&Hgrid)
|
||||
{
|
||||
assert(Nd == 4); // require 4 dimensions
|
||||
|
||||
if (clover_anisotropy.isAnisotropic)
|
||||
{
|
||||
csw_r = _csw_r * 0.5 / clover_anisotropy.xi_0;
|
||||
diag_mass = _mass + 1.0 + (Nd - 1) * (clover_anisotropy.nu / clover_anisotropy.xi_0);
|
||||
}
|
||||
else
|
||||
{
|
||||
csw_r = _csw_r * 0.5;
|
||||
diag_mass = 4.0 + _mass;
|
||||
}
|
||||
csw_t = _csw_t * 0.5;
|
||||
|
||||
if (csw_r == 0)
|
||||
std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_r = 0" << std::endl;
|
||||
if (csw_t == 0)
|
||||
std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_t = 0" << std::endl;
|
||||
|
||||
ImportGauge(_Umu);
|
||||
}
|
||||
const ImplParams &impl_p = ImplParams());
|
||||
|
||||
virtual void M(const FermionField &in, FermionField &out);
|
||||
virtual void Mdag(const FermionField &in, FermionField &out);
|
||||
@ -124,250 +88,21 @@ public:
|
||||
void ImportGauge(const GaugeField &_Umu);
|
||||
|
||||
// Derivative parts unpreconditioned pseudofermions
|
||||
void MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
|
||||
{
|
||||
conformable(X.Grid(), Y.Grid());
|
||||
conformable(X.Grid(), force.Grid());
|
||||
GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
|
||||
GaugeField clover_force(force.Grid());
|
||||
PropagatorField Lambda(force.Grid());
|
||||
void MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag);
|
||||
|
||||
// Guido: Here we are hitting some performance issues:
|
||||
// need to extract the components of the DoubledGaugeField
|
||||
// for each call
|
||||
// Possible solution
|
||||
// Create a vector object to store them? (cons: wasting space)
|
||||
std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());
|
||||
|
||||
Impl::extractLinkField(U, this->Umu);
|
||||
|
||||
force = Zero();
|
||||
// Derivative of the Wilson hopping term
|
||||
this->DhopDeriv(force, X, Y, dag);
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Clover term derivative
|
||||
///////////////////////////////////////////////////////////
|
||||
Impl::outerProductImpl(Lambda, X, Y);
|
||||
//std::cout << "Lambda:" << Lambda << std::endl;
|
||||
|
||||
Gamma::Algebra sigma[] = {
|
||||
Gamma::Algebra::SigmaXY,
|
||||
Gamma::Algebra::SigmaXZ,
|
||||
Gamma::Algebra::SigmaXT,
|
||||
Gamma::Algebra::MinusSigmaXY,
|
||||
Gamma::Algebra::SigmaYZ,
|
||||
Gamma::Algebra::SigmaYT,
|
||||
Gamma::Algebra::MinusSigmaXZ,
|
||||
Gamma::Algebra::MinusSigmaYZ,
|
||||
Gamma::Algebra::SigmaZT,
|
||||
Gamma::Algebra::MinusSigmaXT,
|
||||
Gamma::Algebra::MinusSigmaYT,
|
||||
Gamma::Algebra::MinusSigmaZT};
|
||||
|
||||
/*
|
||||
sigma_{\mu \nu}=
|
||||
| 0 sigma[0] sigma[1] sigma[2] |
|
||||
| sigma[3] 0 sigma[4] sigma[5] |
|
||||
| sigma[6] sigma[7] 0 sigma[8] |
|
||||
| sigma[9] sigma[10] sigma[11] 0 |
|
||||
*/
|
||||
|
||||
int count = 0;
|
||||
clover_force = Zero();
|
||||
for (int mu = 0; mu < 4; mu++)
|
||||
{
|
||||
force_mu = Zero();
|
||||
for (int nu = 0; nu < 4; nu++)
|
||||
{
|
||||
if (mu == nu)
|
||||
continue;
|
||||
|
||||
RealD factor;
|
||||
if (nu == 4 || mu == 4)
|
||||
{
|
||||
factor = 2.0 * csw_t;
|
||||
}
|
||||
else
|
||||
{
|
||||
factor = 2.0 * csw_r;
|
||||
}
|
||||
PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
|
||||
Impl::TraceSpinImpl(lambda, Slambda); // traceSpin ok
|
||||
force_mu -= factor*Cmunu(U, lambda, mu, nu); // checked
|
||||
count++;
|
||||
}
|
||||
|
||||
pokeLorentz(clover_force, U[mu] * force_mu, mu);
|
||||
}
|
||||
//clover_force *= csw;
|
||||
force += clover_force;
|
||||
}
|
||||
|
||||
// Computing C_{\mu \nu}(x) as in Eq.(B.39) in Zbigniew Sroczynski's PhD thesis
|
||||
GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu)
|
||||
{
|
||||
conformable(lambda.Grid(), U[0].Grid());
|
||||
GaugeLinkField out(lambda.Grid()), tmp(lambda.Grid());
|
||||
// insertion in upper staple
|
||||
// please check redundancy of shift operations
|
||||
|
||||
// C1+
|
||||
tmp = lambda * U[nu];
|
||||
out = Impl::ShiftStaple(Impl::CovShiftForward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
|
||||
|
||||
// C2+
|
||||
tmp = U[mu] * Impl::ShiftStaple(adj(lambda), mu);
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(tmp, mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
|
||||
|
||||
// C3+
|
||||
tmp = U[nu] * Impl::ShiftStaple(adj(lambda), nu);
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(tmp, nu))), mu);
|
||||
|
||||
// C4+
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu) * lambda;
|
||||
|
||||
// insertion in lower staple
|
||||
// C1-
|
||||
out -= Impl::ShiftStaple(lambda, mu) * Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
|
||||
|
||||
// C2-
|
||||
tmp = adj(lambda) * U[nu];
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
|
||||
|
||||
// C3-
|
||||
tmp = lambda * U[nu];
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, tmp)), mu);
|
||||
|
||||
// C4-
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu) * lambda;
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
protected:
|
||||
public:
|
||||
// here fixing the 4 dimensions, make it more general?
|
||||
|
||||
RealD csw_r; // Clover coefficient - spatial
|
||||
RealD csw_t; // Clover coefficient - temporal
|
||||
RealD diag_mass; // Mass term
|
||||
CloverFieldType CloverTerm, CloverTermInv; // Clover term
|
||||
CloverFieldType CloverTermEven, CloverTermOdd; // Clover term EO
|
||||
CloverFieldType CloverTermInvEven, CloverTermInvOdd; // Clover term Inv EO
|
||||
CloverFieldType CloverTermDagEven, CloverTermDagOdd; // Clover term Dag EO
|
||||
CloverFieldType CloverTermInvDagEven, CloverTermInvDagOdd; // Clover term Inv Dag EO
|
||||
|
||||
public:
|
||||
// eventually these can be compressed into 6x6 blocks instead of the 12x12
|
||||
// using the DeGrand-Rossi basis for the gamma matrices
|
||||
CloverFieldType fillCloverYZ(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
T = Zero();
|
||||
autoView(T_v,T,AcceleratorWrite);
|
||||
autoView(F_v,F,AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 1) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(1, 0) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(2, 3) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(3, 2) = timesMinusI(F_v[i]()());
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
CloverFieldType fillCloverXZ(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView(T_v, T,AcceleratorWrite);
|
||||
autoView(F_v, F,AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 1) = -F_v[i]()();
|
||||
T_v[i]()(1, 0) = F_v[i]()();
|
||||
T_v[i]()(2, 3) = -F_v[i]()();
|
||||
T_v[i]()(3, 2) = F_v[i]()();
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
CloverFieldType fillCloverXY(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView(T_v,T,AcceleratorWrite);
|
||||
autoView(F_v,F,AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 0) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(1, 1) = timesI(F_v[i]()());
|
||||
T_v[i]()(2, 2) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(3, 3) = timesI(F_v[i]()());
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
CloverFieldType fillCloverXT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v , T, AcceleratorWrite);
|
||||
autoView( F_v , F, AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 1) = timesI(F_v[i]()());
|
||||
T_v[i]()(1, 0) = timesI(F_v[i]()());
|
||||
T_v[i]()(2, 3) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(3, 2) = timesMinusI(F_v[i]()());
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
CloverFieldType fillCloverYT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v ,T,AcceleratorWrite);
|
||||
autoView( F_v ,F,AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 1) = -(F_v[i]()());
|
||||
T_v[i]()(1, 0) = (F_v[i]()());
|
||||
T_v[i]()(2, 3) = (F_v[i]()());
|
||||
T_v[i]()(3, 2) = -(F_v[i]()());
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
CloverFieldType fillCloverZT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverFieldType T(F.Grid());
|
||||
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v , T,AcceleratorWrite);
|
||||
autoView( F_v , F,AcceleratorRead);
|
||||
accelerator_for(i, CloverTerm.Grid()->oSites(),1,
|
||||
{
|
||||
T_v[i]()(0, 0) = timesI(F_v[i]()());
|
||||
T_v[i]()(1, 1) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(2, 2) = timesMinusI(F_v[i]()());
|
||||
T_v[i]()(3, 3) = timesI(F_v[i]()());
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
CloverField CloverTerm, CloverTermInv; // Clover term
|
||||
CloverField CloverTermEven, CloverTermOdd; // Clover term EO
|
||||
CloverField CloverTermInvEven, CloverTermInvOdd; // Clover term Inv EO
|
||||
CloverField CloverTermDagEven, CloverTermDagOdd; // Clover term Dag EO
|
||||
CloverField CloverTermInvDagEven, CloverTermInvDagOdd; // Clover term Inv Dag EO
|
||||
};
|
||||
|
||||
NAMESPACE_END(Grid);
761
Grid/qcd/action/fermion/WilsonCloverHelpers.h
Normal file
@ -0,0 +1,761 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/WilsonCloverHelpers.h
|
||||
|
||||
Copyright (C) 2021 - 2022
|
||||
|
||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
|
||||
#pragma once
|
||||
|
||||
// Helper routines that implement common clover functionality
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
template<class Impl> class WilsonCloverHelpers {
|
||||
public:
|
||||
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
INHERIT_CLOVER_TYPES(Impl);
|
||||
|
||||
// Computing C_{\mu \nu}(x) as in Eq.(B.39) in Zbigniew Sroczynski's PhD thesis
|
||||
static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu)
|
||||
{
|
||||
conformable(lambda.Grid(), U[0].Grid());
|
||||
GaugeLinkField out(lambda.Grid()), tmp(lambda.Grid());
|
||||
// insertion in upper staple
|
||||
// please check redundancy of shift operations
|
||||
|
||||
// C1+
|
||||
tmp = lambda * U[nu];
|
||||
out = Impl::ShiftStaple(Impl::CovShiftForward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
|
||||
|
||||
// C2+
|
||||
tmp = U[mu] * Impl::ShiftStaple(adj(lambda), mu);
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(tmp, mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu);
|
||||
|
||||
// C3+
|
||||
tmp = U[nu] * Impl::ShiftStaple(adj(lambda), nu);
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(tmp, nu))), mu);
|
||||
|
||||
// C4+
|
||||
out += Impl::ShiftStaple(Impl::CovShiftForward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, Impl::CovShiftIdentityBackward(U[nu], nu))), mu) * lambda;
|
||||
|
||||
// insertion in lower staple
|
||||
// C1-
|
||||
out -= Impl::ShiftStaple(lambda, mu) * Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
|
||||
|
||||
// C2-
|
||||
tmp = adj(lambda) * U[nu];
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(tmp, nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu);
|
||||
|
||||
// C3-
|
||||
tmp = lambda * U[nu];
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, tmp)), mu);
|
||||
|
||||
// C4-
|
||||
out -= Impl::ShiftStaple(Impl::CovShiftBackward(U[nu], nu, Impl::CovShiftBackward(U[mu], mu, U[nu])), mu) * lambda;
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
static CloverField fillCloverYZ(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
T = Zero();
|
||||
autoView(T_v,T,AcceleratorWrite);
|
||||
autoView(F_v,F,AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 1), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(1, 0), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(2, 3), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(3, 2), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
static CloverField fillCloverXZ(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView(T_v, T,AcceleratorWrite);
|
||||
autoView(F_v, F,AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 1), coalescedRead(-F_v[i]()()));
|
||||
coalescedWrite(T_v[i]()(1, 0), coalescedRead(F_v[i]()()));
|
||||
coalescedWrite(T_v[i]()(2, 3), coalescedRead(-F_v[i]()()));
|
||||
coalescedWrite(T_v[i]()(3, 2), coalescedRead(F_v[i]()()));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
static CloverField fillCloverXY(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView(T_v,T,AcceleratorWrite);
|
||||
autoView(F_v,F,AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 0), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(1, 1), coalescedRead(timesI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(2, 2), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(3, 3), coalescedRead(timesI(F_v[i]()())));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
static CloverField fillCloverXT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v , T, AcceleratorWrite);
|
||||
autoView( F_v , F, AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 1), coalescedRead(timesI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(1, 0), coalescedRead(timesI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(2, 3), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(3, 2), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
static CloverField fillCloverYT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v ,T,AcceleratorWrite);
|
||||
autoView( F_v ,F,AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 1), coalescedRead(-(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(1, 0), coalescedRead((F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(2, 3), coalescedRead((F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(3, 2), coalescedRead(-(F_v[i]()())));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
static CloverField fillCloverZT(const GaugeLinkField &F)
|
||||
{
|
||||
CloverField T(F.Grid());
|
||||
|
||||
T = Zero();
|
||||
|
||||
autoView( T_v , T,AcceleratorWrite);
|
||||
autoView( F_v , F,AcceleratorRead);
|
||||
accelerator_for(i, T.Grid()->oSites(),CloverField::vector_type::Nsimd(),
|
||||
{
|
||||
coalescedWrite(T_v[i]()(0, 0), coalescedRead(timesI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(1, 1), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(2, 2), coalescedRead(timesMinusI(F_v[i]()())));
|
||||
coalescedWrite(T_v[i]()(3, 3), coalescedRead(timesI(F_v[i]()())));
|
||||
});
|
||||
|
||||
return T;
|
||||
}
|
||||
|
||||
template<class _Spinor>
|
||||
static accelerator_inline void multClover(_Spinor& phi, const SiteClover& C, const _Spinor& chi) {
|
||||
auto CC = coalescedRead(C);
|
||||
mult(&phi, &CC, &chi);
|
||||
}
|
||||
|
||||
template<class _SpinorField>
|
||||
inline void multCloverField(_SpinorField& out, const CloverField& C, const _SpinorField& phi) {
|
||||
const int Nsimd = SiteSpinor::Nsimd();
|
||||
autoView(out_v, out, AcceleratorWrite);
|
||||
autoView(phi_v, phi, AcceleratorRead);
|
||||
autoView(C_v, C, AcceleratorRead);
|
||||
typedef decltype(coalescedRead(out_v[0])) calcSpinor;
|
||||
accelerator_for(sss,out.Grid()->oSites(),Nsimd,{
|
||||
calcSpinor tmp;
|
||||
multClover(tmp,C_v[sss],phi_v(sss));
|
||||
coalescedWrite(out_v[sss],tmp);
|
||||
});
|
||||
}
|
||||
};
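// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): how the fillClover* helpers above are
// typically combined into the full clover term.  The field strengths Bx..Ez are
// assumed to have been computed elsewhere (e.g. via a WilsonLoops field-strength
// routine), and csw_r, csw_t, diag_mass are the coefficients carried by the
// clover classes; the exact plane/sign conventions are not re-derived here.
//
//   CloverField CloverTerm(grid);
//   CloverTerm  = Helpers::fillCloverYZ(Bx) * csw_r;
//   CloverTerm += Helpers::fillCloverXZ(By) * csw_r;
//   CloverTerm += Helpers::fillCloverXY(Bz) * csw_r;
//   CloverTerm += Helpers::fillCloverXT(Ex) * csw_t;
//   CloverTerm += Helpers::fillCloverYT(Ey) * csw_t;
//   CloverTerm += Helpers::fillCloverZT(Ez) * csw_t;
//   CloverTerm += diag_mass;
// ---------------------------------------------------------------------------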
|
||||
|
||||
|
||||
template<class Impl> class CompactWilsonCloverHelpers {
|
||||
public:
|
||||
|
||||
INHERIT_COMPACT_CLOVER_SIZES(Impl);
|
||||
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
INHERIT_CLOVER_TYPES(Impl);
|
||||
INHERIT_COMPACT_CLOVER_TYPES(Impl);
|
||||
|
||||
#if 0
|
||||
static accelerator_inline typename SiteCloverTriangle::vector_type triangle_elem(const SiteCloverTriangle& triangle, int block, int i, int j) {
|
||||
assert(i != j);
|
||||
if(i < j) {
|
||||
return triangle()(block)(triangle_index(i, j));
|
||||
} else { // i > j
|
||||
return conjugate(triangle()(block)(triangle_index(i, j)));
|
||||
}
|
||||
}
|
||||
#else
|
||||
template<typename vobj>
|
||||
static accelerator_inline vobj triangle_elem(const iImplCloverTriangle<vobj>& triangle, int block, int i, int j) {
|
||||
assert(i != j);
|
||||
if(i < j) {
|
||||
return triangle()(block)(triangle_index(i, j));
|
||||
} else { // i > j
|
||||
return conjugate(triangle()(block)(triangle_index(i, j)));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static accelerator_inline int triangle_index(int i, int j) {
|
||||
if(i == j)
|
||||
return 0;
|
||||
else if(i < j)
|
||||
return Nred * (Nred - 1) / 2 - (Nred - i) * (Nred - i - 1) / 2 + j - i - 1;
|
||||
else // i > j
|
||||
return Nred * (Nred - 1) / 2 - (Nred - j) * (Nred - j - 1) / 2 + i - j - 1;
|
||||
}
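// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): for Nred = 6 the packing above stores the
// strictly upper triangle of each 6x6 hermitian block row by row,
//   (0,1)->0 (0,2)->1 (0,3)->2 (0,4)->3 (0,5)->4
//   (1,2)->5 (1,3)->6 (1,4)->7 (1,5)->8
//   (2,3)->9 (2,4)->10 (2,5)->11
//   (3,4)->12 (3,5)->13
//   (4,5)->14
// which is the index order hard-coded in MooeeKernel_cpu below.  A quick check
// (sketch; any Impl with Nred == 6):
//
//   for (int i = 0; i < 6; i++)
//     for (int j = i + 1; j < 6; j++)
//       std::cout << "(" << i << "," << j << ") -> "
//                 << CompactWilsonCloverHelpers<WilsonImplD>::triangle_index(i, j) << std::endl;
// ---------------------------------------------------------------------------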
|
||||
|
||||
static void MooeeKernel_gpu(int Nsite,
|
||||
int Ls,
|
||||
const FermionField& in,
|
||||
FermionField& out,
|
||||
const CloverDiagonalField& diagonal,
|
||||
const CloverTriangleField& triangle) {
|
||||
autoView(diagonal_v, diagonal, AcceleratorRead);
|
||||
autoView(triangle_v, triangle, AcceleratorRead);
|
||||
autoView(in_v, in, AcceleratorRead);
|
||||
autoView(out_v, out, AcceleratorWrite);
|
||||
|
||||
typedef decltype(coalescedRead(out_v[0])) CalcSpinor;
|
||||
|
||||
const uint64_t NN = Nsite * Ls;
|
||||
|
||||
accelerator_for(ss, NN, Simd::Nsimd(), {
|
||||
int sF = ss;
|
||||
int sU = ss/Ls;
|
||||
CalcSpinor res;
|
||||
CalcSpinor in_t = in_v(sF);
|
||||
auto diagonal_t = diagonal_v(sU);
|
||||
auto triangle_t = triangle_v(sU);
|
||||
for(int block=0; block<Nhs; block++) {
|
||||
int s_start = block*Nhs;
|
||||
for(int i=0; i<Nred; i++) {
|
||||
int si = s_start + i/Nc, ci = i%Nc;
|
||||
res()(si)(ci) = diagonal_t()(block)(i) * in_t()(si)(ci);
|
||||
for(int j=0; j<Nred; j++) {
|
||||
if (j == i) continue;
|
||||
int sj = s_start + j/Nc, cj = j%Nc;
|
||||
res()(si)(ci) = res()(si)(ci) + triangle_elem(triangle_t, block, i, j) * in_t()(sj)(cj);
|
||||
};
|
||||
};
|
||||
};
|
||||
coalescedWrite(out_v[sF], res);
|
||||
});
|
||||
}
|
||||
|
||||
static void MooeeKernel_cpu(int Nsite,
|
||||
int Ls,
|
||||
const FermionField& in,
|
||||
FermionField& out,
|
||||
const CloverDiagonalField& diagonal,
|
||||
const CloverTriangleField& triangle) {
|
||||
autoView(diagonal_v, diagonal, CpuRead);
|
||||
autoView(triangle_v, triangle, CpuRead);
|
||||
autoView(in_v, in, CpuRead);
|
||||
autoView(out_v, out, CpuWrite);
|
||||
|
||||
typedef SiteSpinor CalcSpinor;
|
||||
|
||||
#if defined(A64FX) || defined(A64FXFIXEDSIZE)
|
||||
#define PREFETCH_CLOVER(BASE) { \
|
||||
uint64_t base; \
|
||||
int pf_dist_L1 = 1; \
|
||||
int pf_dist_L2 = -5; /* -> penalty -> disable */ \
|
||||
\
|
||||
if ((pf_dist_L1 >= 0) && (sU + pf_dist_L1 < Nsite)) { \
|
||||
base = (uint64_t)&diag_t()(pf_dist_L1+BASE)(0); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 0), SV_PLDL1STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 256), SV_PLDL1STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 512), SV_PLDL1STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 768), SV_PLDL1STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 1024), SV_PLDL1STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 1280), SV_PLDL1STRM); \
|
||||
} \
|
||||
\
|
||||
if ((pf_dist_L2 >= 0) && (sU + pf_dist_L2 < Nsite)) { \
|
||||
base = (uint64_t)&diag_t()(pf_dist_L2+BASE)(0); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 0), SV_PLDL2STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 256), SV_PLDL2STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 512), SV_PLDL2STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 768), SV_PLDL2STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 1024), SV_PLDL2STRM); \
|
||||
svprfd(svptrue_b64(), (int64_t*)(base + 1280), SV_PLDL2STRM); \
|
||||
} \
|
||||
}
|
||||
// TODO: Implement/generalize this for other architectures
|
||||
    // I played around a bit on KNL (see below) but it didn't bring any benefit
|
||||
// #elif defined(AVX512)
|
||||
// #define PREFETCH_CLOVER(BASE) { \
|
||||
// uint64_t base; \
|
||||
// int pf_dist_L1 = 1; \
|
||||
// int pf_dist_L2 = +4; \
|
||||
// \
|
||||
// if ((pf_dist_L1 >= 0) && (sU + pf_dist_L1 < Nsite)) { \
|
||||
// base = (uint64_t)&diag_t()(pf_dist_L1+BASE)(0); \
|
||||
// _mm_prefetch((const char*)(base + 0), _MM_HINT_T0); \
|
||||
// _mm_prefetch((const char*)(base + 64), _MM_HINT_T0); \
|
||||
// _mm_prefetch((const char*)(base + 128), _MM_HINT_T0); \
|
||||
// _mm_prefetch((const char*)(base + 192), _MM_HINT_T0); \
|
||||
// _mm_prefetch((const char*)(base + 256), _MM_HINT_T0); \
|
||||
// _mm_prefetch((const char*)(base + 320), _MM_HINT_T0); \
|
||||
// } \
|
||||
// \
|
||||
// if ((pf_dist_L2 >= 0) && (sU + pf_dist_L2 < Nsite)) { \
|
||||
// base = (uint64_t)&diag_t()(pf_dist_L2+BASE)(0); \
|
||||
// _mm_prefetch((const char*)(base + 0), _MM_HINT_T1); \
|
||||
// _mm_prefetch((const char*)(base + 64), _MM_HINT_T1); \
|
||||
// _mm_prefetch((const char*)(base + 128), _MM_HINT_T1); \
|
||||
// _mm_prefetch((const char*)(base + 192), _MM_HINT_T1); \
|
||||
// _mm_prefetch((const char*)(base + 256), _MM_HINT_T1); \
|
||||
// _mm_prefetch((const char*)(base + 320), _MM_HINT_T1); \
|
||||
// } \
|
||||
// }
|
||||
#else
|
||||
#define PREFETCH_CLOVER(BASE)
|
||||
#endif
|
||||
|
||||
const uint64_t NN = Nsite * Ls;
|
||||
|
||||
thread_for(ss, NN, {
|
||||
int sF = ss;
|
||||
int sU = ss/Ls;
|
||||
CalcSpinor res;
|
||||
CalcSpinor in_t = in_v[sF];
|
||||
auto diag_t = diagonal_v[sU]; // "diag" instead of "diagonal" here to make code below easier to read
|
||||
auto triangle_t = triangle_v[sU];
|
||||
|
||||
// upper half
|
||||
PREFETCH_CLOVER(0);
|
||||
|
||||
auto in_cc_0_0 = conjugate(in_t()(0)(0)); // Nils: reduces number
|
||||
auto in_cc_0_1 = conjugate(in_t()(0)(1)); // of conjugates from
|
||||
auto in_cc_0_2 = conjugate(in_t()(0)(2)); // 30 to 20
|
||||
auto in_cc_1_0 = conjugate(in_t()(1)(0));
|
||||
auto in_cc_1_1 = conjugate(in_t()(1)(1));
|
||||
|
||||
res()(0)(0) = diag_t()(0)( 0) * in_t()(0)(0)
|
||||
+ triangle_t()(0)( 0) * in_t()(0)(1)
|
||||
+ triangle_t()(0)( 1) * in_t()(0)(2)
|
||||
+ triangle_t()(0)( 2) * in_t()(1)(0)
|
||||
+ triangle_t()(0)( 3) * in_t()(1)(1)
|
||||
+ triangle_t()(0)( 4) * in_t()(1)(2);
|
||||
|
||||
res()(0)(1) = triangle_t()(0)( 0) * in_cc_0_0;
|
||||
res()(0)(1) = diag_t()(0)( 1) * in_t()(0)(1)
|
||||
+ triangle_t()(0)( 5) * in_t()(0)(2)
|
||||
+ triangle_t()(0)( 6) * in_t()(1)(0)
|
||||
+ triangle_t()(0)( 7) * in_t()(1)(1)
|
||||
+ triangle_t()(0)( 8) * in_t()(1)(2)
|
||||
+ conjugate( res()(0)( 1));
|
||||
|
||||
res()(0)(2) = triangle_t()(0)( 1) * in_cc_0_0
|
||||
+ triangle_t()(0)( 5) * in_cc_0_1;
|
||||
res()(0)(2) = diag_t()(0)( 2) * in_t()(0)(2)
|
||||
+ triangle_t()(0)( 9) * in_t()(1)(0)
|
||||
+ triangle_t()(0)(10) * in_t()(1)(1)
|
||||
+ triangle_t()(0)(11) * in_t()(1)(2)
|
||||
+ conjugate( res()(0)( 2));
|
||||
|
||||
res()(1)(0) = triangle_t()(0)( 2) * in_cc_0_0
|
||||
+ triangle_t()(0)( 6) * in_cc_0_1
|
||||
+ triangle_t()(0)( 9) * in_cc_0_2;
|
||||
res()(1)(0) = diag_t()(0)( 3) * in_t()(1)(0)
|
||||
+ triangle_t()(0)(12) * in_t()(1)(1)
|
||||
+ triangle_t()(0)(13) * in_t()(1)(2)
|
||||
+ conjugate( res()(1)( 0));
|
||||
|
||||
res()(1)(1) = triangle_t()(0)( 3) * in_cc_0_0
|
||||
+ triangle_t()(0)( 7) * in_cc_0_1
|
||||
+ triangle_t()(0)(10) * in_cc_0_2
|
||||
+ triangle_t()(0)(12) * in_cc_1_0;
|
||||
res()(1)(1) = diag_t()(0)( 4) * in_t()(1)(1)
|
||||
+ triangle_t()(0)(14) * in_t()(1)(2)
|
||||
+ conjugate( res()(1)( 1));
|
||||
|
||||
res()(1)(2) = triangle_t()(0)( 4) * in_cc_0_0
|
||||
+ triangle_t()(0)( 8) * in_cc_0_1
|
||||
+ triangle_t()(0)(11) * in_cc_0_2
|
||||
+ triangle_t()(0)(13) * in_cc_1_0
|
||||
+ triangle_t()(0)(14) * in_cc_1_1;
|
||||
res()(1)(2) = diag_t()(0)( 5) * in_t()(1)(2)
|
||||
+ conjugate( res()(1)( 2));
|
||||
|
||||
vstream(out_v[sF]()(0)(0), res()(0)(0));
|
||||
vstream(out_v[sF]()(0)(1), res()(0)(1));
|
||||
vstream(out_v[sF]()(0)(2), res()(0)(2));
|
||||
vstream(out_v[sF]()(1)(0), res()(1)(0));
|
||||
vstream(out_v[sF]()(1)(1), res()(1)(1));
|
||||
vstream(out_v[sF]()(1)(2), res()(1)(2));
|
||||
|
||||
// lower half
|
||||
PREFETCH_CLOVER(1);
|
||||
|
||||
auto in_cc_2_0 = conjugate(in_t()(2)(0));
|
||||
auto in_cc_2_1 = conjugate(in_t()(2)(1));
|
||||
auto in_cc_2_2 = conjugate(in_t()(2)(2));
|
||||
auto in_cc_3_0 = conjugate(in_t()(3)(0));
|
||||
auto in_cc_3_1 = conjugate(in_t()(3)(1));
|
||||
|
||||
res()(2)(0) = diag_t()(1)( 0) * in_t()(2)(0)
|
||||
+ triangle_t()(1)( 0) * in_t()(2)(1)
|
||||
+ triangle_t()(1)( 1) * in_t()(2)(2)
|
||||
+ triangle_t()(1)( 2) * in_t()(3)(0)
|
||||
+ triangle_t()(1)( 3) * in_t()(3)(1)
|
||||
+ triangle_t()(1)( 4) * in_t()(3)(2);
|
||||
|
||||
res()(2)(1) = triangle_t()(1)( 0) * in_cc_2_0;
|
||||
res()(2)(1) = diag_t()(1)( 1) * in_t()(2)(1)
|
||||
+ triangle_t()(1)( 5) * in_t()(2)(2)
|
||||
+ triangle_t()(1)( 6) * in_t()(3)(0)
|
||||
+ triangle_t()(1)( 7) * in_t()(3)(1)
|
||||
+ triangle_t()(1)( 8) * in_t()(3)(2)
|
||||
+ conjugate( res()(2)( 1));
|
||||
|
||||
res()(2)(2) = triangle_t()(1)( 1) * in_cc_2_0
|
||||
+ triangle_t()(1)( 5) * in_cc_2_1;
|
||||
res()(2)(2) = diag_t()(1)( 2) * in_t()(2)(2)
|
||||
+ triangle_t()(1)( 9) * in_t()(3)(0)
|
||||
+ triangle_t()(1)(10) * in_t()(3)(1)
|
||||
+ triangle_t()(1)(11) * in_t()(3)(2)
|
||||
+ conjugate( res()(2)( 2));
|
||||
|
||||
res()(3)(0) = triangle_t()(1)( 2) * in_cc_2_0
|
||||
+ triangle_t()(1)( 6) * in_cc_2_1
|
||||
+ triangle_t()(1)( 9) * in_cc_2_2;
|
||||
res()(3)(0) = diag_t()(1)( 3) * in_t()(3)(0)
|
||||
+ triangle_t()(1)(12) * in_t()(3)(1)
|
||||
+ triangle_t()(1)(13) * in_t()(3)(2)
|
||||
+ conjugate( res()(3)( 0));
|
||||
|
||||
res()(3)(1) = triangle_t()(1)( 3) * in_cc_2_0
|
||||
+ triangle_t()(1)( 7) * in_cc_2_1
|
||||
+ triangle_t()(1)(10) * in_cc_2_2
|
||||
+ triangle_t()(1)(12) * in_cc_3_0;
|
||||
res()(3)(1) = diag_t()(1)( 4) * in_t()(3)(1)
|
||||
+ triangle_t()(1)(14) * in_t()(3)(2)
|
||||
+ conjugate( res()(3)( 1));
|
||||
|
||||
res()(3)(2) = triangle_t()(1)( 4) * in_cc_2_0
|
||||
+ triangle_t()(1)( 8) * in_cc_2_1
|
||||
+ triangle_t()(1)(11) * in_cc_2_2
|
||||
+ triangle_t()(1)(13) * in_cc_3_0
|
||||
+ triangle_t()(1)(14) * in_cc_3_1;
|
||||
res()(3)(2) = diag_t()(1)( 5) * in_t()(3)(2)
|
||||
+ conjugate( res()(3)( 2));
|
||||
|
||||
vstream(out_v[sF]()(2)(0), res()(2)(0));
|
||||
vstream(out_v[sF]()(2)(1), res()(2)(1));
|
||||
vstream(out_v[sF]()(2)(2), res()(2)(2));
|
||||
vstream(out_v[sF]()(3)(0), res()(3)(0));
|
||||
vstream(out_v[sF]()(3)(1), res()(3)(1));
|
||||
vstream(out_v[sF]()(3)(2), res()(3)(2));
|
||||
});
|
||||
}
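// ---------------------------------------------------------------------------
// Editor's note (not part of the diff): the two-stage assignment pattern above
// exploits hermiticity of each 6x6 block.  For i > j the matrix element is
// conj(T(j,i)), so
//   sum_{j<i} conj(T(j,i)) * in(j) = conj( sum_{j<i} T(j,i) * conj(in(j)) ),
// hence every output component is first filled with T * in_cc terms and then
// wrapped in a single conjugate() before the diagonal and upper-triangle terms
// are added.  The lower triangle thus costs one conjugate per input component
// plus one per output component (10 per block) rather than one per matrix
// element (15 per block) -- the "30 to 20" remark above.
// ---------------------------------------------------------------------------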
|
||||
|
||||
static void MooeeKernel(int Nsite,
|
||||
int Ls,
|
||||
const FermionField& in,
|
||||
FermionField& out,
|
||||
const CloverDiagonalField& diagonal,
|
||||
const CloverTriangleField& triangle) {
|
||||
#if defined(GRID_CUDA) || defined(GRID_HIP)
|
||||
MooeeKernel_gpu(Nsite, Ls, in, out, diagonal, triangle);
|
||||
#else
|
||||
MooeeKernel_cpu(Nsite, Ls, in, out, diagonal, triangle);
|
||||
#endif
|
||||
}
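// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): MooeeKernel is presumably driven from
// MooeeInternal with one "site" per outer lattice site; for the 4d operator the
// fifth dimension is trivial, i.e. something along the lines of
//
//   CompactHelpers::MooeeKernel(diagonal.Grid()->oSites(), 1 /*Ls*/, in, out, diagonal, triangle);
// ---------------------------------------------------------------------------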
|
||||
|
||||
static void Invert(const CloverDiagonalField& diagonal,
|
||||
const CloverTriangleField& triangle,
|
||||
CloverDiagonalField& diagonalInv,
|
||||
CloverTriangleField& triangleInv) {
|
||||
conformable(diagonal, diagonalInv);
|
||||
conformable(triangle, triangleInv);
|
||||
conformable(diagonal, triangle);
|
||||
|
||||
diagonalInv.Checkerboard() = diagonal.Checkerboard();
|
||||
triangleInv.Checkerboard() = triangle.Checkerboard();
|
||||
|
||||
GridBase* grid = diagonal.Grid();
|
||||
|
||||
long lsites = grid->lSites();
|
||||
|
||||
typedef typename SiteCloverDiagonal::scalar_object scalar_object_diagonal;
|
||||
typedef typename SiteCloverTriangle::scalar_object scalar_object_triangle;
|
||||
|
||||
autoView(diagonal_v, diagonal, CpuRead);
|
||||
autoView(triangle_v, triangle, CpuRead);
|
||||
autoView(diagonalInv_v, diagonalInv, CpuWrite);
|
||||
autoView(triangleInv_v, triangleInv, CpuWrite);
|
||||
|
||||
thread_for(site, lsites, { // NOTE: Not on GPU because of Eigen & (peek/poke)LocalSite
|
||||
Eigen::MatrixXcd clover_inv_eigen = Eigen::MatrixXcd::Zero(Ns*Nc, Ns*Nc);
|
||||
Eigen::MatrixXcd clover_eigen = Eigen::MatrixXcd::Zero(Ns*Nc, Ns*Nc);
|
||||
|
||||
scalar_object_diagonal diagonal_tmp = Zero();
|
||||
scalar_object_diagonal diagonal_inv_tmp = Zero();
|
||||
scalar_object_triangle triangle_tmp = Zero();
|
||||
scalar_object_triangle triangle_inv_tmp = Zero();
|
||||
|
||||
Coordinate lcoor;
|
||||
grid->LocalIndexToLocalCoor(site, lcoor);
|
||||
|
||||
peekLocalSite(diagonal_tmp, diagonal_v, lcoor);
|
||||
peekLocalSite(triangle_tmp, triangle_v, lcoor);
|
||||
|
||||
// TODO: can we save time here by inverting the two 6x6 hermitian matrices separately?
|
||||
for (long s_row=0;s_row<Ns;s_row++) {
|
||||
for (long s_col=0;s_col<Ns;s_col++) {
|
||||
if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
|
||||
int block = s_row / Nhs;
|
||||
int s_row_block = s_row % Nhs;
|
||||
int s_col_block = s_col % Nhs;
|
||||
for (long c_row=0;c_row<Nc;c_row++) {
|
||||
for (long c_col=0;c_col<Nc;c_col++) {
|
||||
int i = s_row_block * Nc + c_row;
|
||||
int j = s_col_block * Nc + c_col;
|
||||
if(i == j)
|
||||
clover_eigen(s_row*Nc+c_row, s_col*Nc+c_col) = static_cast<ComplexD>(TensorRemove(diagonal_tmp()(block)(i)));
|
||||
else
|
||||
clover_eigen(s_row*Nc+c_row, s_col*Nc+c_col) = static_cast<ComplexD>(TensorRemove(triangle_elem(triangle_tmp, block, i, j)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
clover_inv_eigen = clover_eigen.inverse();
|
||||
|
||||
for (long s_row=0;s_row<Ns;s_row++) {
|
||||
for (long s_col=0;s_col<Ns;s_col++) {
|
||||
if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
|
||||
int block = s_row / Nhs;
|
||||
int s_row_block = s_row % Nhs;
|
||||
int s_col_block = s_col % Nhs;
|
||||
for (long c_row=0;c_row<Nc;c_row++) {
|
||||
for (long c_col=0;c_col<Nc;c_col++) {
|
||||
int i = s_row_block * Nc + c_row;
|
||||
int j = s_col_block * Nc + c_col;
|
||||
if(i == j)
|
||||
diagonal_inv_tmp()(block)(i) = clover_inv_eigen(s_row*Nc+c_row, s_col*Nc+c_col);
|
||||
else if(i < j)
|
||||
triangle_inv_tmp()(block)(triangle_index(i, j)) = clover_inv_eigen(s_row*Nc+c_row, s_col*Nc+c_col);
|
||||
else
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pokeLocalSite(diagonal_inv_tmp, diagonalInv_v, lcoor);
|
||||
pokeLocalSite(triangle_inv_tmp, triangleInv_v, lcoor);
|
||||
});
|
||||
}
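// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff), relating to the TODO above: because of
// the skip condition (abs(s_row - s_col) > 1 || s_row + s_col == 3) only the two
// diagonal 6x6 spin-chirality blocks of clover_eigen are populated, so the 12x12
// inverse could equally be formed blockwise:
//
//   clover_inv_eigen.setZero();
//   clover_inv_eigen.block(0, 0, 6, 6) = clover_eigen.block(0, 0, 6, 6).inverse();
//   clover_inv_eigen.block(6, 6, 6, 6) = clover_eigen.block(6, 6, 6, 6).inverse();
// ---------------------------------------------------------------------------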
|
||||
|
||||
static void ConvertLayout(const CloverField& full,
|
||||
CloverDiagonalField& diagonal,
|
||||
CloverTriangleField& triangle) {
|
||||
conformable(full, diagonal);
|
||||
conformable(full, triangle);
|
||||
|
||||
diagonal.Checkerboard() = full.Checkerboard();
|
||||
triangle.Checkerboard() = full.Checkerboard();
|
||||
|
||||
autoView(full_v, full, AcceleratorRead);
|
||||
autoView(diagonal_v, diagonal, AcceleratorWrite);
|
||||
autoView(triangle_v, triangle, AcceleratorWrite);
|
||||
|
||||
// NOTE: this function cannot be 'private' since nvcc forbids this for kernels
|
||||
accelerator_for(ss, full.Grid()->oSites(), 1, {
|
||||
for(int s_row = 0; s_row < Ns; s_row++) {
|
||||
for(int s_col = 0; s_col < Ns; s_col++) {
|
||||
if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
|
||||
int block = s_row / Nhs;
|
||||
int s_row_block = s_row % Nhs;
|
||||
int s_col_block = s_col % Nhs;
|
||||
for(int c_row = 0; c_row < Nc; c_row++) {
|
||||
for(int c_col = 0; c_col < Nc; c_col++) {
|
||||
int i = s_row_block * Nc + c_row;
|
||||
int j = s_col_block * Nc + c_col;
|
||||
if(i == j)
|
||||
diagonal_v[ss]()(block)(i) = full_v[ss]()(s_row, s_col)(c_row, c_col);
|
||||
else if(i < j)
|
||||
triangle_v[ss]()(block)(triangle_index(i, j)) = full_v[ss]()(s_row, s_col)(c_row, c_col);
|
||||
else
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
static void ConvertLayout(const CloverDiagonalField& diagonal,
|
||||
const CloverTriangleField& triangle,
|
||||
CloverField& full) {
|
||||
conformable(full, diagonal);
|
||||
conformable(full, triangle);
|
||||
|
||||
full.Checkerboard() = diagonal.Checkerboard();
|
||||
|
||||
full = Zero();
|
||||
|
||||
autoView(diagonal_v, diagonal, AcceleratorRead);
|
||||
autoView(triangle_v, triangle, AcceleratorRead);
|
||||
autoView(full_v, full, AcceleratorWrite);
|
||||
|
||||
// NOTE: this function cannot be 'private' since nvcc forbids this for kernels
|
||||
accelerator_for(ss, full.Grid()->oSites(), 1, {
|
||||
for(int s_row = 0; s_row < Ns; s_row++) {
|
||||
for(int s_col = 0; s_col < Ns; s_col++) {
|
||||
if(abs(s_row - s_col) > 1 || s_row + s_col == 3) continue;
|
||||
int block = s_row / Nhs;
|
||||
int s_row_block = s_row % Nhs;
|
||||
int s_col_block = s_col % Nhs;
|
||||
for(int c_row = 0; c_row < Nc; c_row++) {
|
||||
for(int c_col = 0; c_col < Nc; c_col++) {
|
||||
int i = s_row_block * Nc + c_row;
|
||||
int j = s_col_block * Nc + c_col;
|
||||
if(i == j)
|
||||
full_v[ss]()(s_row, s_col)(c_row, c_col) = diagonal_v[ss]()(block)(i);
|
||||
else
|
||||
full_v[ss]()(s_row, s_col)(c_row, c_col) = triangle_elem(triangle_v[ss], block, i, j);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
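// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): the two ConvertLayout overloads are
// inverses of each other on the stored components (field names assumed):
//
//   CompactHelpers::ConvertLayout(full, diagonal, triangle);   // 12x12 per site -> 2 x (6 diagonal + 15 packed)
//   CompactHelpers::ConvertLayout(diagonal, triangle, check);  // rebuild; agrees with 'full' on the two 6x6 blocks
//
// Entries outside the two chiral blocks are dropped on the way out and come back
// as zero, which is exact for a clover term since those entries vanish anyway.
// ---------------------------------------------------------------------------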
|
||||
|
||||
static void ModifyBoundaries(CloverDiagonalField& diagonal, CloverTriangleField& triangle, RealD csw_t, RealD cF, RealD diag_mass) {
|
||||
// Checks/grid
|
||||
double t0 = usecond();
|
||||
conformable(diagonal, triangle);
|
||||
GridBase* grid = diagonal.Grid();
|
||||
|
||||
// Determine the boundary coordinates/sites
|
||||
double t1 = usecond();
|
||||
int t_dir = Nd - 1;
|
||||
Lattice<iScalar<vInteger>> t_coor(grid);
|
||||
LatticeCoordinate(t_coor, t_dir);
|
||||
int T = grid->GlobalDimensions()[t_dir];
|
||||
|
||||
// Set off-diagonal parts at boundary to zero -- OK
|
||||
double t2 = usecond();
|
||||
CloverTriangleField zeroTriangle(grid);
|
||||
zeroTriangle.Checkerboard() = triangle.Checkerboard();
|
||||
zeroTriangle = Zero();
|
||||
triangle = where(t_coor == 0, zeroTriangle, triangle);
|
||||
triangle = where(t_coor == T-1, zeroTriangle, triangle);
|
||||
|
||||
// Set diagonal to unity (scaled correctly) -- OK
|
||||
double t3 = usecond();
|
||||
CloverDiagonalField tmp(grid);
|
||||
tmp.Checkerboard() = diagonal.Checkerboard();
|
||||
tmp = -1.0 * csw_t + diag_mass;
|
||||
diagonal = where(t_coor == 0, tmp, diagonal);
|
||||
diagonal = where(t_coor == T-1, tmp, diagonal);
|
||||
|
||||
// Correct values next to boundary
|
||||
double t4 = usecond();
|
||||
if(cF != 1.0) {
|
||||
tmp = cF - 1.0;
|
||||
tmp += diagonal;
|
||||
diagonal = where(t_coor == 1, tmp, diagonal);
|
||||
diagonal = where(t_coor == T-2, tmp, diagonal);
|
||||
}
|
||||
|
||||
// Report timings
|
||||
double t5 = usecond();
|
||||
#if 0
|
||||
std::cout << GridLogMessage << "CompactWilsonCloverHelpers::ModifyBoundaries timings:"
|
||||
<< " checks = " << (t1 - t0) / 1e6
|
||||
<< ", coordinate = " << (t2 - t1) / 1e6
|
||||
<< ", off-diag zero = " << (t3 - t2) / 1e6
|
||||
<< ", diagonal unity = " << (t4 - t3) / 1e6
|
||||
<< ", near-boundary = " << (t5 - t4) / 1e6
|
||||
<< ", total = " << (t5 - t0) / 1e6
|
||||
<< std::endl;
|
||||
#endif
|
||||
}
|
||||
|
||||
template<class Field, class Mask>
|
||||
static strong_inline void ApplyBoundaryMask(Field& f, const Mask& m) {
|
||||
conformable(f, m);
|
||||
auto grid = f.Grid();
|
||||
const int Nsite = grid->oSites();
|
||||
const int Nsimd = grid->Nsimd();
|
||||
autoView(f_v, f, AcceleratorWrite);
|
||||
autoView(m_v, m, AcceleratorRead);
|
||||
// NOTE: this function cannot be 'private' since nvcc forbids this for kernels
|
||||
accelerator_for(ss, Nsite, Nsimd, {
|
||||
coalescedWrite(f_v[ss], m_v(ss) * f_v(ss));
|
||||
});
|
||||
}
|
||||
|
||||
template<class MaskField>
|
||||
static void SetupMasks(MaskField& full, MaskField& even, MaskField& odd) {
|
||||
assert(even.Grid()->_isCheckerBoarded && even.Checkerboard() == Even);
|
||||
assert(odd.Grid()->_isCheckerBoarded && odd.Checkerboard() == Odd);
|
||||
assert(!full.Grid()->_isCheckerBoarded);
|
||||
|
||||
GridBase* grid = full.Grid();
|
||||
int t_dir = Nd-1;
|
||||
Lattice<iScalar<vInteger>> t_coor(grid);
|
||||
LatticeCoordinate(t_coor, t_dir);
|
||||
int T = grid->GlobalDimensions()[t_dir];
|
||||
|
||||
MaskField zeroMask(grid); zeroMask = Zero();
|
||||
full = 1.0;
|
||||
full = where(t_coor == 0, zeroMask, full);
|
||||
full = where(t_coor == T-1, zeroMask, full);
|
||||
|
||||
pickCheckerboard(Even, even, full);
|
||||
pickCheckerboard(Odd, odd, full);
|
||||
}
|
||||
};
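// ---------------------------------------------------------------------------
// Editor's sketch (not part of the diff): with open boundary conditions in time
// the masks are built once and then applied after each hop term, e.g.
// (types/names as in CompactWilsonCloverFermion, field names assumed):
//
//   MaskField BoundaryMask(UGrid), BoundaryMaskEven(UrbGrid), BoundaryMaskOdd(UrbGrid);
//   CompactWilsonCloverHelpers<WilsonImplD>::SetupMasks(BoundaryMask, BoundaryMaskEven, BoundaryMaskOdd);
//   CompactWilsonCloverHelpers<WilsonImplD>::ApplyBoundaryMask(psi, BoundaryMask);  // zeroes t = 0 and t = T-1
// ---------------------------------------------------------------------------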
NAMESPACE_END(Grid);
92
Grid/qcd/action/fermion/WilsonCloverTypes.h
Normal file
@ -0,0 +1,92 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/WilsonCloverTypes.h
|
||||
|
||||
Copyright (C) 2021 - 2022
|
||||
|
||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
|
||||
#pragma once
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
template<class Impl>
|
||||
class WilsonCloverTypes {
|
||||
public:
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
|
||||
template <typename vtype> using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;
|
||||
|
||||
typedef iImplClover<Simd> SiteClover;
|
||||
|
||||
typedef Lattice<SiteClover> CloverField;
|
||||
};
|
||||
|
||||
template<class Impl>
|
||||
class CompactWilsonCloverTypes {
|
||||
public:
|
||||
INHERIT_IMPL_TYPES(Impl);
|
||||
|
||||
static_assert(Nd == 4 && Nc == 3 && Ns == 4 && Impl::Dimension == 3, "Wrong dimensions");
|
||||
|
||||
static constexpr int Nred = Nc * Nhs; // 6
|
||||
static constexpr int Nblock = Nhs; // 2
|
||||
static constexpr int Ndiagonal = Nred; // 6
|
||||
static constexpr int Ntriangle = (Nred - 1) * Nc; // 15
|
||||
|
||||
template<typename vtype> using iImplCloverDiagonal = iScalar<iVector<iVector<vtype, Ndiagonal>, Nblock>>;
|
||||
template<typename vtype> using iImplCloverTriangle = iScalar<iVector<iVector<vtype, Ntriangle>, Nblock>>;
|
||||
|
||||
typedef iImplCloverDiagonal<Simd> SiteCloverDiagonal;
|
||||
typedef iImplCloverTriangle<Simd> SiteCloverTriangle;
|
||||
typedef iSinglet<Simd> SiteMask;
|
||||
|
||||
typedef Lattice<SiteCloverDiagonal> CloverDiagonalField;
|
||||
typedef Lattice<SiteCloverTriangle> CloverTriangleField;
|
||||
typedef Lattice<SiteMask> MaskField;
|
||||
};
|
||||
|
||||
#define INHERIT_CLOVER_TYPES(Impl) \
|
||||
typedef typename WilsonCloverTypes<Impl>::SiteClover SiteClover; \
|
||||
typedef typename WilsonCloverTypes<Impl>::CloverField CloverField;
|
||||
|
||||
#define INHERIT_COMPACT_CLOVER_TYPES(Impl) \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::SiteCloverDiagonal SiteCloverDiagonal; \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::SiteCloverTriangle SiteCloverTriangle; \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::SiteMask SiteMask; \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::CloverDiagonalField CloverDiagonalField; \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::CloverTriangleField CloverTriangleField; \
|
||||
typedef typename CompactWilsonCloverTypes<Impl>::MaskField MaskField; \
|
||||
/* ugly duplication but needed inside functionality classes */ \
|
||||
template<typename vtype> using iImplCloverDiagonal = \
|
||||
iScalar<iVector<iVector<vtype, CompactWilsonCloverTypes<Impl>::Ndiagonal>, CompactWilsonCloverTypes<Impl>::Nblock>>; \
|
||||
template<typename vtype> using iImplCloverTriangle = \
|
||||
iScalar<iVector<iVector<vtype, CompactWilsonCloverTypes<Impl>::Ntriangle>, CompactWilsonCloverTypes<Impl>::Nblock>>;
|
||||
|
||||
#define INHERIT_COMPACT_CLOVER_SIZES(Impl) \
|
||||
static constexpr int Nred = CompactWilsonCloverTypes<Impl>::Nred; \
|
||||
static constexpr int Nblock = CompactWilsonCloverTypes<Impl>::Nblock; \
|
||||
static constexpr int Ndiagonal = CompactWilsonCloverTypes<Impl>::Ndiagonal; \
|
||||
static constexpr int Ntriangle = CompactWilsonCloverTypes<Impl>::Ntriangle;
|
||||
|
||||
NAMESPACE_END(Grid);
|
@ -75,6 +75,10 @@ public:
|
||||
FermionField _tmp;
|
||||
FermionField &tmp(void) { return _tmp; }
|
||||
|
||||
int Dirichlet;
|
||||
Coordinate Block;
|
||||
|
||||
/********** Deprecate timers **********/
|
||||
void Report(void);
|
||||
void ZeroCounters(void);
|
||||
double DhopCalls;
|
||||
@ -173,7 +177,18 @@ public:
|
||||
GridCartesian &FourDimGrid,
|
||||
GridRedBlackCartesian &FourDimRedBlackGrid,
|
||||
double _M5,const ImplParams &p= ImplParams());
|
||||
|
||||
|
||||
virtual void DirichletBlock(Coordinate & block)
|
||||
{
|
||||
assert(block.size()==Nd+1);
|
||||
if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
|
||||
Dirichlet = 1;
|
||||
Block = block;
|
||||
Stencil.DirichletBlock(block);
|
||||
StencilEven.DirichletBlock(block);
|
||||
StencilOdd.DirichletBlock(block);
|
||||
}
|
||||
}
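  // Editor's sketch (not part of the diff): switching on the new Dirichlet support,
  // assuming the 5d ordering used here puts the extra (Ls) direction first; a zero
  // entry leaves that direction open.
  //
  //   Coordinate block(Nd + 1);
  //   block[0] = 0;                                   // Ls direction: no blocking
  //   for (int d = 1; d <= Nd; d++) block[d] = 4;     // illustrative 4^4 blocks
  //   Ddwf.DirichletBlock(block);                     // Ddwf: a WilsonFermion5D-derived action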
|
||||
// Constructors
|
||||
/*
|
||||
WilsonFermion5D(int simd,
|
||||
|
@ -828,6 +828,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
|
||||
|
||||
#if (!defined(GRID_HIP))
|
||||
int tshift = (mu == Nd-1) ? 1 : 0;
|
||||
unsigned int LLt = GridDefaultLatt()[Tp];
|
||||
////////////////////////////////////////////////
|
||||
// GENERAL CAYLEY CASE
|
||||
////////////////////////////////////////////////
|
||||
@ -880,7 +881,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
|
||||
}
|
||||
|
||||
std::vector<RealD> G_s(Ls,1.0);
|
||||
RealD sign = 1; // sign flip for vector/tadpole
|
||||
RealD sign = 1.0; // sign flip for vector/tadpole
|
||||
if ( curr_type == Current::Axial ) {
|
||||
for(int s=0;s<Ls/2;s++){
|
||||
G_s[s] = -1.0;
|
||||
@ -890,7 +891,7 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
|
||||
auto b=this->_b;
|
||||
auto c=this->_c;
|
||||
if ( b == 1 && c == 0 ) {
|
||||
sign = -1;
|
||||
sign = -1.0;
|
||||
}
|
||||
else {
|
||||
std::cerr << "Error: Tadpole implementation currently unavailable for non-Shamir actions." << std::endl;
|
||||
@ -934,7 +935,13 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
|
||||
tmp = Cshift(tmp,mu,-1);
|
||||
Impl::multLinkField(Utmp,this->Umu,tmp,mu+Nd); // Adjoint link
|
||||
tmp = -G_s[s]*( Utmp + gmu*Utmp );
|
||||
tmp = where((lcoor>=tmin+tshift),tmp,zz); // Mask the time
|
||||
// Mask the time
|
||||
if (tmax == LLt - 1 && tshift == 1){ // quick fix to include timeslice 0 if tmax + tshift is over the last timeslice
|
||||
unsigned int t0 = 0;
|
||||
tmp = where(((lcoor==t0) || (lcoor>=tmin+tshift)),tmp,zz);
|
||||
} else {
|
||||
tmp = where((lcoor>=tmin+tshift),tmp,zz);
|
||||
}
|
||||
L_Q += where((lcoor<=tmax+tshift),tmp,zz); // Position of current complicated
|
||||
|
||||
InsertSlice(L_Q, q_out, s , 0);
|
||||
|
@ -0,0 +1,363 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/CompactWilsonCloverFermionImplementation.h
|
||||
|
||||
Copyright (C) 2017 - 2022
|
||||
|
||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
|
||||
#include <Grid/Grid.h>
|
||||
#include <Grid/qcd/spin/Dirac.h>
|
||||
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
template<class Impl>
|
||||
CompactWilsonCloverFermion<Impl>::CompactWilsonCloverFermion(GaugeField& _Umu,
|
||||
GridCartesian& Fgrid,
|
||||
GridRedBlackCartesian& Hgrid,
|
||||
const RealD _mass,
|
||||
const RealD _csw_r,
|
||||
const RealD _csw_t,
|
||||
const RealD _cF,
|
||||
const WilsonAnisotropyCoefficients& clover_anisotropy,
|
||||
const ImplParams& impl_p)
|
||||
: WilsonBase(_Umu, Fgrid, Hgrid, _mass, impl_p, clover_anisotropy)
|
||||
, csw_r(_csw_r)
|
||||
, csw_t(_csw_t)
|
||||
, cF(_cF)
|
||||
, open_boundaries(impl_p.boundary_phases[Nd-1] == 0.0)
|
||||
, Diagonal(&Fgrid), Triangle(&Fgrid)
|
||||
, DiagonalEven(&Hgrid), TriangleEven(&Hgrid)
|
||||
, DiagonalOdd(&Hgrid), TriangleOdd(&Hgrid)
|
||||
, DiagonalInv(&Fgrid), TriangleInv(&Fgrid)
|
||||
, DiagonalInvEven(&Hgrid), TriangleInvEven(&Hgrid)
|
||||
, DiagonalInvOdd(&Hgrid), TriangleInvOdd(&Hgrid)
|
||||
, Tmp(&Fgrid)
|
||||
, BoundaryMask(&Fgrid)
|
||||
, BoundaryMaskEven(&Hgrid), BoundaryMaskOdd(&Hgrid)
|
||||
{
|
||||
csw_r *= 0.5;
|
||||
csw_t *= 0.5;
|
||||
if (clover_anisotropy.isAnisotropic)
|
||||
csw_r /= clover_anisotropy.xi_0;
|
||||
|
||||
ImportGauge(_Umu);
|
||||
if (open_boundaries)
|
||||
CompactHelpers::SetupMasks(this->BoundaryMask, this->BoundaryMaskEven, this->BoundaryMaskOdd);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::Dhop(const FermionField& in, FermionField& out, int dag) {
|
||||
WilsonBase::Dhop(in, out, dag);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::DhopOE(const FermionField& in, FermionField& out, int dag) {
|
||||
WilsonBase::DhopOE(in, out, dag);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::DhopEO(const FermionField& in, FermionField& out, int dag) {
|
||||
WilsonBase::DhopEO(in, out, dag);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::DhopDir(const FermionField& in, FermionField& out, int dir, int disp) {
|
||||
WilsonBase::DhopDir(in, out, dir, disp);
|
||||
if(this->open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::DhopDirAll(const FermionField& in, std::vector<FermionField>& out) {
|
||||
WilsonBase::DhopDirAll(in, out);
|
||||
if(this->open_boundaries) {
|
||||
for(auto& o : out) ApplyBoundaryMask(o);
|
||||
}
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::M(const FermionField& in, FermionField& out) {
|
||||
out.Checkerboard() = in.Checkerboard();
|
||||
WilsonBase::Dhop(in, out, DaggerNo); // call base to save applying bc
|
||||
Mooee(in, Tmp);
|
||||
axpy(out, 1.0, out, Tmp);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::Mdag(const FermionField& in, FermionField& out) {
|
||||
out.Checkerboard() = in.Checkerboard();
|
||||
WilsonBase::Dhop(in, out, DaggerYes); // call base to save applying bc
|
||||
MooeeDag(in, Tmp);
|
||||
axpy(out, 1.0, out, Tmp);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::Meooe(const FermionField& in, FermionField& out) {
|
||||
WilsonBase::Meooe(in, out);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MeooeDag(const FermionField& in, FermionField& out) {
|
||||
WilsonBase::MeooeDag(in, out);
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::Mooee(const FermionField& in, FermionField& out) {
|
||||
if(in.Grid()->_isCheckerBoarded) {
|
||||
if(in.Checkerboard() == Odd) {
|
||||
MooeeInternal(in, out, DiagonalOdd, TriangleOdd);
|
||||
} else {
|
||||
MooeeInternal(in, out, DiagonalEven, TriangleEven);
|
||||
}
|
||||
} else {
|
||||
MooeeInternal(in, out, Diagonal, Triangle);
|
||||
}
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MooeeDag(const FermionField& in, FermionField& out) {
|
||||
Mooee(in, out); // blocks are hermitian
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MooeeInv(const FermionField& in, FermionField& out) {
|
||||
if(in.Grid()->_isCheckerBoarded) {
|
||||
if(in.Checkerboard() == Odd) {
|
||||
MooeeInternal(in, out, DiagonalInvOdd, TriangleInvOdd);
|
||||
} else {
|
||||
MooeeInternal(in, out, DiagonalInvEven, TriangleInvEven);
|
||||
}
|
||||
} else {
|
||||
MooeeInternal(in, out, DiagonalInv, TriangleInv);
|
||||
}
|
||||
if(open_boundaries) ApplyBoundaryMask(out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MooeeInvDag(const FermionField& in, FermionField& out) {
|
||||
MooeeInv(in, out); // blocks are hermitian
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::Mdir(const FermionField& in, FermionField& out, int dir, int disp) {
|
||||
DhopDir(in, out, dir, disp);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MdirAll(const FermionField& in, std::vector<FermionField>& out) {
|
||||
DhopDirAll(in, out);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) {
|
||||
assert(!open_boundaries); // TODO check for changes required for open bc
|
||||
|
||||
// NOTE: code copied from original clover term
|
||||
conformable(X.Grid(), Y.Grid());
|
||||
conformable(X.Grid(), force.Grid());
|
||||
GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
|
||||
GaugeField clover_force(force.Grid());
|
||||
PropagatorField Lambda(force.Grid());
|
||||
|
||||
// Guido: Here we are hitting some performance issues:
|
||||
// need to extract the components of the DoubledGaugeField
|
||||
// for each call
|
||||
// Possible solution
|
||||
// Create a vector object to store them? (cons: wasting space)
|
||||
std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());
|
||||
|
||||
Impl::extractLinkField(U, this->Umu);
|
||||
|
||||
force = Zero();
|
||||
// Derivative of the Wilson hopping term
|
||||
this->DhopDeriv(force, X, Y, dag);
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Clover term derivative
|
||||
///////////////////////////////////////////////////////////
|
||||
Impl::outerProductImpl(Lambda, X, Y);
|
||||
//std::cout << "Lambda:" << Lambda << std::endl;
|
||||
|
||||
Gamma::Algebra sigma[] = {
|
||||
Gamma::Algebra::SigmaXY,
|
||||
Gamma::Algebra::SigmaXZ,
|
||||
Gamma::Algebra::SigmaXT,
|
||||
Gamma::Algebra::MinusSigmaXY,
|
||||
Gamma::Algebra::SigmaYZ,
|
||||
Gamma::Algebra::SigmaYT,
|
||||
Gamma::Algebra::MinusSigmaXZ,
|
||||
Gamma::Algebra::MinusSigmaYZ,
|
||||
Gamma::Algebra::SigmaZT,
|
||||
Gamma::Algebra::MinusSigmaXT,
|
||||
Gamma::Algebra::MinusSigmaYT,
|
||||
Gamma::Algebra::MinusSigmaZT};
|
||||
|
||||
/*
|
||||
sigma_{\mu \nu}=
|
||||
| 0 sigma[0] sigma[1] sigma[2] |
|
||||
| sigma[3] 0 sigma[4] sigma[5] |
|
||||
| sigma[6] sigma[7] 0 sigma[8] |
|
||||
| sigma[9] sigma[10] sigma[11] 0 |
|
||||
*/
|
||||
|
||||
int count = 0;
|
||||
clover_force = Zero();
|
||||
for (int mu = 0; mu < 4; mu++)
|
||||
{
|
||||
force_mu = Zero();
|
||||
for (int nu = 0; nu < 4; nu++)
|
||||
{
|
||||
if (mu == nu)
|
||||
continue;
|
||||
|
||||
RealD factor;
|
||||
if (nu == 4 || mu == 4)
|
||||
{
|
||||
factor = 2.0 * csw_t;
|
||||
}
|
||||
else
|
||||
{
|
||||
factor = 2.0 * csw_r;
|
||||
}
|
||||
PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
|
||||
Impl::TraceSpinImpl(lambda, Slambda); // traceSpin ok
|
||||
force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu); // checked
|
||||
count++;
|
||||
}
|
||||
|
||||
pokeLorentz(clover_force, U[mu] * force_mu, mu);
|
||||
}
|
||||
//clover_force *= csw;
|
||||
force += clover_force;
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<class Impl>
|
||||
void CompactWilsonCloverFermion<Impl>::MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
|
||||
assert(0);
|
||||
}
|
||||
|
||||
template<class Impl>
void CompactWilsonCloverFermion<Impl>::MooeeInternal(const FermionField&        in,
                                                     FermionField&              out,
                                                     const CloverDiagonalField& diagonal,
                                                     const CloverTriangleField& triangle) {
  assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
  out.Checkerboard() = in.Checkerboard();
  conformable(in, out);
  conformable(in, diagonal);
  conformable(in, triangle);

  CompactHelpers::MooeeKernel(diagonal.oSites(), 1, in, out, diagonal, triangle);
}

template<class Impl>
void CompactWilsonCloverFermion<Impl>::ImportGauge(const GaugeField& _Umu) {
  // NOTE: parts copied from original implementation

  // Import gauge into base class
  double t0 = usecond();
  WilsonBase::ImportGauge(_Umu); // NOTE: called here and in wilson constructor -> performed twice, but can't avoid that

  // Initialize temporary variables
  double t1 = usecond();
  conformable(_Umu.Grid(), this->GaugeGrid());
  GridBase* grid = _Umu.Grid();
  typename Impl::GaugeLinkField Bx(grid), By(grid), Bz(grid), Ex(grid), Ey(grid), Ez(grid);
  CloverField TmpOriginal(grid);

  // Compute the field strength terms mu>nu
  double t2 = usecond();
  WilsonLoops<Impl>::FieldStrength(Bx, _Umu, Zdir, Ydir);
  WilsonLoops<Impl>::FieldStrength(By, _Umu, Zdir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Bz, _Umu, Ydir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Ex, _Umu, Tdir, Xdir);
  WilsonLoops<Impl>::FieldStrength(Ey, _Umu, Tdir, Ydir);
  WilsonLoops<Impl>::FieldStrength(Ez, _Umu, Tdir, Zdir);

  // Compute the Clover Operator acting on Colour and Spin
  // multiply here by the clover coefficients for the anisotropy
  double t3 = usecond();
  TmpOriginal  = Helpers::fillCloverYZ(Bx) * csw_r;
  TmpOriginal += Helpers::fillCloverXZ(By) * csw_r;
  TmpOriginal += Helpers::fillCloverXY(Bz) * csw_r;
  TmpOriginal += Helpers::fillCloverXT(Ex) * csw_t;
  TmpOriginal += Helpers::fillCloverYT(Ey) * csw_t;
  TmpOriginal += Helpers::fillCloverZT(Ez) * csw_t;
  TmpOriginal += this->diag_mass;

  // Convert the data layout of the clover term
  double t4 = usecond();
  CompactHelpers::ConvertLayout(TmpOriginal, Diagonal, Triangle);

  // Possible modify the boundary values
  double t5 = usecond();
  if(open_boundaries) CompactHelpers::ModifyBoundaries(Diagonal, Triangle, csw_t, cF, this->diag_mass);

  // Invert the clover term in the improved layout
  double t6 = usecond();
  CompactHelpers::Invert(Diagonal, Triangle, DiagonalInv, TriangleInv);

  // Fill the remaining clover fields
  double t7 = usecond();
  pickCheckerboard(Even, DiagonalEven,    Diagonal);
  pickCheckerboard(Even, TriangleEven,    Triangle);
  pickCheckerboard(Odd,  DiagonalOdd,     Diagonal);
  pickCheckerboard(Odd,  TriangleOdd,     Triangle);
  pickCheckerboard(Even, DiagonalInvEven, DiagonalInv);
  pickCheckerboard(Even, TriangleInvEven, TriangleInv);
  pickCheckerboard(Odd,  DiagonalInvOdd,  DiagonalInv);
  pickCheckerboard(Odd,  TriangleInvOdd,  TriangleInv);

  // Report timings
  double t8 = usecond();
#if 0
  std::cout << GridLogMessage << "CompactWilsonCloverFermion::ImportGauge timings:"
            << " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
            << ", allocations = "               << (t2 - t1) / 1e6
            << ", field strength = "            << (t3 - t2) / 1e6
            << ", fill clover = "               << (t4 - t3) / 1e6
            << ", convert = "                   << (t5 - t4) / 1e6
            << ", boundaries = "                << (t6 - t5) / 1e6
            << ", inversions = "                << (t7 - t6) / 1e6
            << ", pick cbs = "                  << (t8 - t7) / 1e6
            << ", total = "                     << (t8 - t0) / 1e6
            << std::endl;
#endif
}

NAMESPACE_END(Grid);
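The constructor above takes the gauge field, the full and red-black grids, the mass, the spatial and temporal clover couplings, the boundary-improvement coefficient cF, the anisotropy coefficients and the implementation parameters. A minimal usage sketch follows; it is illustrative only, and the grid helpers, parameter values and the `WilsonImplR`/`WilsonImplParams` names are assumptions of the example rather than part of this changeset.

```
// Hedged sketch: constructing and applying a CompactWilsonCloverFermion.
// All values and helper names (GridDefaultLatt, WilsonImplR, ...) are assumptions.
typedef CompactWilsonCloverFermion<WilsonImplR> CompactClover;

GridCartesian*         UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
                                   GridDefaultSimd(Nd, vComplex::Nsimd()), GridDefaultMpi());
GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);

LatticeGaugeField Umu(UGrid);            // gauge configuration (loaded or generated elsewhere)
RealD mass = 0.1, csw_r = 1.0, csw_t = 1.0, cF = 1.0;
WilsonAnisotropyCoefficients aniso;      // default-constructed: isotropic
WilsonImplParams params;                 // boundary_phases select periodic/open bcs

CompactClover Dcc(Umu, *UGrid, *UrbGrid, mass, csw_r, csw_t, cF, aniso, params);

LatticeFermion src(UGrid), res(UGrid);
src = Zero();
Dcc.M(src, res);                         // full operator: hopping term plus compact clover term
```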
@ -2,12 +2,13 @@
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./lib/qcd/action/fermion/WilsonCloverFermion.cc
|
||||
Source file: ./lib/qcd/action/fermion/WilsonCloverFermionImplementation.h
|
||||
|
||||
Copyright (C) 2017
|
||||
Copyright (C) 2017 - 2022
|
||||
|
||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
@ -33,6 +34,45 @@
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
template<class Impl>
|
||||
WilsonCloverFermion<Impl>::WilsonCloverFermion(GaugeField& _Umu,
|
||||
GridCartesian& Fgrid,
|
||||
GridRedBlackCartesian& Hgrid,
|
||||
const RealD _mass,
|
||||
const RealD _csw_r,
|
||||
const RealD _csw_t,
|
||||
const WilsonAnisotropyCoefficients& clover_anisotropy,
|
||||
const ImplParams& impl_p)
|
||||
: WilsonFermion<Impl>(_Umu, Fgrid, Hgrid, _mass, impl_p, clover_anisotropy)
|
||||
, CloverTerm(&Fgrid)
|
||||
, CloverTermInv(&Fgrid)
|
||||
, CloverTermEven(&Hgrid)
|
||||
, CloverTermOdd(&Hgrid)
|
||||
, CloverTermInvEven(&Hgrid)
|
||||
, CloverTermInvOdd(&Hgrid)
|
||||
, CloverTermDagEven(&Hgrid)
|
||||
, CloverTermDagOdd(&Hgrid)
|
||||
, CloverTermInvDagEven(&Hgrid)
|
||||
, CloverTermInvDagOdd(&Hgrid) {
|
||||
assert(Nd == 4); // require 4 dimensions
|
||||
|
||||
if(clover_anisotropy.isAnisotropic) {
|
||||
csw_r = _csw_r * 0.5 / clover_anisotropy.xi_0;
|
||||
diag_mass = _mass + 1.0 + (Nd - 1) * (clover_anisotropy.nu / clover_anisotropy.xi_0);
|
||||
} else {
|
||||
csw_r = _csw_r * 0.5;
|
||||
diag_mass = 4.0 + _mass;
|
||||
}
|
||||
csw_t = _csw_t * 0.5;
|
||||
|
||||
if(csw_r == 0)
|
||||
std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_r = 0" << std::endl;
|
||||
if(csw_t == 0)
|
||||
std::cout << GridLogWarning << "Initializing WilsonCloverFermion with csw_t = 0" << std::endl;
|
||||
|
||||
ImportGauge(_Umu);
|
||||
}
|
||||
|
||||
// *NOT* EO
|
||||
template <class Impl>
|
||||
void WilsonCloverFermion<Impl>::M(const FermionField &in, FermionField &out)
|
||||
@ -67,10 +107,13 @@ void WilsonCloverFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
|
||||
template <class Impl>
|
||||
void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
|
||||
{
|
||||
double t0 = usecond();
|
||||
WilsonFermion<Impl>::ImportGauge(_Umu);
|
||||
double t1 = usecond();
|
||||
GridBase *grid = _Umu.Grid();
|
||||
typename Impl::GaugeLinkField Bx(grid), By(grid), Bz(grid), Ex(grid), Ey(grid), Ez(grid);
|
||||
|
||||
double t2 = usecond();
|
||||
// Compute the field strength terms mu>nu
|
||||
WilsonLoops<Impl>::FieldStrength(Bx, _Umu, Zdir, Ydir);
|
||||
WilsonLoops<Impl>::FieldStrength(By, _Umu, Zdir, Xdir);
|
||||
@ -79,19 +122,22 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
|
||||
WilsonLoops<Impl>::FieldStrength(Ey, _Umu, Tdir, Ydir);
|
||||
WilsonLoops<Impl>::FieldStrength(Ez, _Umu, Tdir, Zdir);
|
||||
|
||||
double t3 = usecond();
|
||||
// Compute the Clover Operator acting on Colour and Spin
|
||||
// multiply here by the clover coefficients for the anisotropy
|
||||
CloverTerm = fillCloverYZ(Bx) * csw_r;
|
||||
CloverTerm += fillCloverXZ(By) * csw_r;
|
||||
CloverTerm += fillCloverXY(Bz) * csw_r;
|
||||
CloverTerm += fillCloverXT(Ex) * csw_t;
|
||||
CloverTerm += fillCloverYT(Ey) * csw_t;
|
||||
CloverTerm += fillCloverZT(Ez) * csw_t;
|
||||
CloverTerm = Helpers::fillCloverYZ(Bx) * csw_r;
|
||||
CloverTerm += Helpers::fillCloverXZ(By) * csw_r;
|
||||
CloverTerm += Helpers::fillCloverXY(Bz) * csw_r;
|
||||
CloverTerm += Helpers::fillCloverXT(Ex) * csw_t;
|
||||
CloverTerm += Helpers::fillCloverYT(Ey) * csw_t;
|
||||
CloverTerm += Helpers::fillCloverZT(Ez) * csw_t;
|
||||
CloverTerm += diag_mass;
|
||||
|
||||
double t4 = usecond();
|
||||
int lvol = _Umu.Grid()->lSites();
|
||||
int DimRep = Impl::Dimension;
|
||||
|
||||
double t5 = usecond();
|
||||
{
|
||||
autoView(CTv,CloverTerm,CpuRead);
|
||||
autoView(CTIv,CloverTermInv,CpuWrite);
|
||||
@ -100,7 +146,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
|
||||
grid->LocalIndexToLocalCoor(site, lcoor);
|
||||
Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
|
||||
Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
|
||||
typename SiteCloverType::scalar_object Qx = Zero(), Qxinv = Zero();
|
||||
typename SiteClover::scalar_object Qx = Zero(), Qxinv = Zero();
|
||||
peekLocalSite(Qx, CTv, lcoor);
|
||||
//if (csw!=0){
|
||||
for (int j = 0; j < Ns; j++)
|
||||
@ -125,6 +171,7 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
|
||||
});
|
||||
}
|
||||
|
||||
double t6 = usecond();
|
||||
// Separate the even and odd parts
|
||||
pickCheckerboard(Even, CloverTermEven, CloverTerm);
|
||||
pickCheckerboard(Odd, CloverTermOdd, CloverTerm);
|
||||
@ -137,6 +184,20 @@ void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
|
||||
|
||||
pickCheckerboard(Even, CloverTermInvDagEven, adj(CloverTermInv));
|
||||
pickCheckerboard(Odd, CloverTermInvDagOdd, adj(CloverTermInv));
|
||||
double t7 = usecond();
|
||||
|
||||
#if 0
|
||||
std::cout << GridLogMessage << "WilsonCloverFermion::ImportGauge timings:"
|
||||
<< " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
|
||||
<< ", allocations = " << (t2 - t1) / 1e6
|
||||
<< ", field strength = " << (t3 - t2) / 1e6
|
||||
<< ", fill clover = " << (t4 - t3) / 1e6
|
||||
<< ", misc = " << (t5 - t4) / 1e6
|
||||
<< ", inversions = " << (t6 - t5) / 1e6
|
||||
<< ", pick cbs = " << (t7 - t6) / 1e6
|
||||
<< ", total = " << (t7 - t0) / 1e6
|
||||
<< std::endl;
|
||||
#endif
|
||||
}
|
||||
|
||||
template <class Impl>
|
||||
@ -167,7 +228,7 @@ template <class Impl>
|
||||
void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionField &out, int dag, int inv)
|
||||
{
|
||||
out.Checkerboard() = in.Checkerboard();
|
||||
CloverFieldType *Clover;
|
||||
CloverField *Clover;
|
||||
assert(in.Checkerboard() == Odd || in.Checkerboard() == Even);
|
||||
|
||||
if (dag)
|
||||
@ -182,12 +243,12 @@ void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionFie
|
||||
{
|
||||
Clover = (inv) ? &CloverTermInvDagEven : &CloverTermDagEven;
|
||||
}
|
||||
out = *Clover * in;
|
||||
Helpers::multCloverField(out, *Clover, in);
|
||||
}
|
||||
else
|
||||
{
|
||||
Clover = (inv) ? &CloverTermInv : &CloverTerm;
|
||||
out = adj(*Clover) * in;
|
||||
Helpers::multCloverField(out, *Clover, in); // don't bother with adj, hermitian anyway
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -205,18 +266,98 @@ void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionFie
|
||||
// std::cout << "Calling clover term Even" << std::endl;
|
||||
Clover = (inv) ? &CloverTermInvEven : &CloverTermEven;
|
||||
}
|
||||
out = *Clover * in;
|
||||
Helpers::multCloverField(out, *Clover, in);
|
||||
// std::cout << GridLogMessage << "*Clover.Checkerboard() " << (*Clover).Checkerboard() << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
Clover = (inv) ? &CloverTermInv : &CloverTerm;
|
||||
out = *Clover * in;
|
||||
Helpers::multCloverField(out, *Clover, in);
|
||||
}
|
||||
}
|
||||
|
||||
} // MooeeInternal
|
||||
|
||||
// Derivative parts unpreconditioned pseudofermions
|
||||
template <class Impl>
|
||||
void WilsonCloverFermion<Impl>::MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
|
||||
{
|
||||
conformable(X.Grid(), Y.Grid());
|
||||
conformable(X.Grid(), force.Grid());
|
||||
GaugeLinkField force_mu(force.Grid()), lambda(force.Grid());
|
||||
GaugeField clover_force(force.Grid());
|
||||
PropagatorField Lambda(force.Grid());
|
||||
|
||||
// Guido: Here we are hitting some performance issues:
|
||||
// need to extract the components of the DoubledGaugeField
|
||||
// for each call
|
||||
// Possible solution
|
||||
// Create a vector object to store them? (cons: wasting space)
|
||||
std::vector<GaugeLinkField> U(Nd, this->Umu.Grid());
|
||||
|
||||
Impl::extractLinkField(U, this->Umu);
|
||||
|
||||
force = Zero();
|
||||
// Derivative of the Wilson hopping term
|
||||
this->DhopDeriv(force, X, Y, dag);
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Clover term derivative
|
||||
///////////////////////////////////////////////////////////
|
||||
Impl::outerProductImpl(Lambda, X, Y);
|
||||
//std::cout << "Lambda:" << Lambda << std::endl;
|
||||
|
||||
Gamma::Algebra sigma[] = {
|
||||
Gamma::Algebra::SigmaXY,
|
||||
Gamma::Algebra::SigmaXZ,
|
||||
Gamma::Algebra::SigmaXT,
|
||||
Gamma::Algebra::MinusSigmaXY,
|
||||
Gamma::Algebra::SigmaYZ,
|
||||
Gamma::Algebra::SigmaYT,
|
||||
Gamma::Algebra::MinusSigmaXZ,
|
||||
Gamma::Algebra::MinusSigmaYZ,
|
||||
Gamma::Algebra::SigmaZT,
|
||||
Gamma::Algebra::MinusSigmaXT,
|
||||
Gamma::Algebra::MinusSigmaYT,
|
||||
Gamma::Algebra::MinusSigmaZT};
|
||||
|
||||
/*
|
||||
sigma_{\mu \nu}=
|
||||
| 0 sigma[0] sigma[1] sigma[2] |
|
||||
| sigma[3] 0 sigma[4] sigma[5] |
|
||||
| sigma[6] sigma[7] 0 sigma[8] |
|
||||
| sigma[9] sigma[10] sigma[11] 0 |
|
||||
*/
|
||||
|
||||
int count = 0;
|
||||
clover_force = Zero();
|
||||
for (int mu = 0; mu < 4; mu++)
|
||||
{
|
||||
force_mu = Zero();
|
||||
for (int nu = 0; nu < 4; nu++)
|
||||
{
|
||||
if (mu == nu)
|
||||
continue;
|
||||
|
||||
RealD factor;
|
||||
if (nu == 4 || mu == 4)
|
||||
{
|
||||
factor = 2.0 * csw_t;
|
||||
}
|
||||
else
|
||||
{
|
||||
factor = 2.0 * csw_r;
|
||||
}
|
||||
PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
|
||||
Impl::TraceSpinImpl(lambda, Slambda); // traceSpin ok
|
||||
force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu); // checked
|
||||
count++;
|
||||
}
|
||||
|
||||
pokeLorentz(clover_force, U[mu] * force_mu, mu);
|
||||
}
|
||||
//clover_force *= csw;
|
||||
force += clover_force;
|
||||
}
|
||||
|
||||
// Derivative parts
|
||||
template <class Impl>
|
||||
|
@@ -60,7 +60,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
  UmuOdd (_FourDimRedBlackGrid),
  Lebesgue(_FourDimGrid),
  LebesgueEvenOdd(_FourDimRedBlackGrid),
  _tmp(&FiveDimRedBlackGrid)
  _tmp(&FiveDimRedBlackGrid),
  Dirichlet(0)
{
  // some assertions
  assert(FiveDimGrid._ndimension==5);
@@ -218,6 +219,14 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)
{
  GaugeField HUmu(_Umu.Grid());
  HUmu = _Umu*(-0.5);
  if ( Dirichlet ) {
    std::cout << GridLogMessage << " Dirichlet BCs 5d " <<Block<<std::endl;
    Coordinate GaugeBlock(Nd);
    for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
    std::cout << GridLogMessage << " Dirichlet BCs 4d " <<GaugeBlock<<std::endl;
    DirichletFilter<GaugeField> Filter(GaugeBlock);
    Filter.applyFilter(HUmu);
  }
  Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
  pickCheckerboard(Even,UmuEven,Umu);
  pickCheckerboard(Odd ,UmuOdd,Umu);
@ -77,23 +77,23 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
#define REGISTER
|
||||
|
||||
#ifdef GRID_SIMT
|
||||
#define LOAD_CHIMU(ptype) \
|
||||
#define LOAD_CHIMU(Ptype) \
|
||||
{const SiteSpinor & ref (in[offset]); \
|
||||
Chimu_00=coalescedReadPermute<ptype>(ref()(0)(0),perm,lane); \
|
||||
Chimu_01=coalescedReadPermute<ptype>(ref()(0)(1),perm,lane); \
|
||||
Chimu_02=coalescedReadPermute<ptype>(ref()(0)(2),perm,lane); \
|
||||
Chimu_10=coalescedReadPermute<ptype>(ref()(1)(0),perm,lane); \
|
||||
Chimu_11=coalescedReadPermute<ptype>(ref()(1)(1),perm,lane); \
|
||||
Chimu_12=coalescedReadPermute<ptype>(ref()(1)(2),perm,lane); \
|
||||
Chimu_20=coalescedReadPermute<ptype>(ref()(2)(0),perm,lane); \
|
||||
Chimu_21=coalescedReadPermute<ptype>(ref()(2)(1),perm,lane); \
|
||||
Chimu_22=coalescedReadPermute<ptype>(ref()(2)(2),perm,lane); \
|
||||
Chimu_30=coalescedReadPermute<ptype>(ref()(3)(0),perm,lane); \
|
||||
Chimu_31=coalescedReadPermute<ptype>(ref()(3)(1),perm,lane); \
|
||||
Chimu_32=coalescedReadPermute<ptype>(ref()(3)(2),perm,lane); }
|
||||
Chimu_00=coalescedReadPermute<Ptype>(ref()(0)(0),perm,lane); \
|
||||
Chimu_01=coalescedReadPermute<Ptype>(ref()(0)(1),perm,lane); \
|
||||
Chimu_02=coalescedReadPermute<Ptype>(ref()(0)(2),perm,lane); \
|
||||
Chimu_10=coalescedReadPermute<Ptype>(ref()(1)(0),perm,lane); \
|
||||
Chimu_11=coalescedReadPermute<Ptype>(ref()(1)(1),perm,lane); \
|
||||
Chimu_12=coalescedReadPermute<Ptype>(ref()(1)(2),perm,lane); \
|
||||
Chimu_20=coalescedReadPermute<Ptype>(ref()(2)(0),perm,lane); \
|
||||
Chimu_21=coalescedReadPermute<Ptype>(ref()(2)(1),perm,lane); \
|
||||
Chimu_22=coalescedReadPermute<Ptype>(ref()(2)(2),perm,lane); \
|
||||
Chimu_30=coalescedReadPermute<Ptype>(ref()(3)(0),perm,lane); \
|
||||
Chimu_31=coalescedReadPermute<Ptype>(ref()(3)(1),perm,lane); \
|
||||
Chimu_32=coalescedReadPermute<Ptype>(ref()(3)(2),perm,lane); }
|
||||
#define PERMUTE_DIR(dir) ;
|
||||
#else
|
||||
#define LOAD_CHIMU(ptype) \
|
||||
#define LOAD_CHIMU(Ptype) \
|
||||
{const SiteSpinor & ref (in[offset]); \
|
||||
Chimu_00=ref()(0)(0);\
|
||||
Chimu_01=ref()(0)(1);\
|
||||
@ -109,12 +109,12 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
Chimu_32=ref()(3)(2);}
|
||||
|
||||
#define PERMUTE_DIR(dir) \
|
||||
permute##dir(Chi_00,Chi_00); \
|
||||
permute##dir(Chi_01,Chi_01);\
|
||||
permute##dir(Chi_02,Chi_02);\
|
||||
permute##dir(Chi_10,Chi_10); \
|
||||
permute##dir(Chi_11,Chi_11);\
|
||||
permute##dir(Chi_12,Chi_12);
|
||||
permute##dir(Chi_00,Chi_00); \
|
||||
permute##dir(Chi_01,Chi_01); \
|
||||
permute##dir(Chi_02,Chi_02); \
|
||||
permute##dir(Chi_10,Chi_10); \
|
||||
permute##dir(Chi_11,Chi_11); \
|
||||
permute##dir(Chi_12,Chi_12);
|
||||
|
||||
#endif
|
||||
|
||||
@ -371,88 +371,91 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||
result_32-= UChi_12;
|
||||
|
||||
#define HAND_STENCIL_LEGB(PROJ,PERM,DIR,RECON) \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
offset = SE->_offset; \
|
||||
local = SE->_is_local; \
|
||||
perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON;
|
||||
{int ptype; \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
auto offset = SE->_offset; \
|
||||
auto local = SE->_is_local; \
|
||||
auto perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; }
|
||||
|
||||
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
|
||||
SE=&st_p[DIR+8*ss]; \
|
||||
ptype=st_perm[DIR]; \
|
||||
offset = SE->_offset; \
|
||||
local = SE->_is_local; \
|
||||
perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON;
|
||||
#define HAND_STENCIL_LEG(PROJ,PERM,DIR,RECON) \
|
||||
{ SE=&st_p[DIR+8*ss]; \
|
||||
auto ptype=st_perm[DIR]; \
|
||||
auto offset = SE->_offset; \
|
||||
auto local = SE->_is_local; \
|
||||
auto perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; }
|
||||
|
||||
#define HAND_STENCIL_LEGA(PROJ,PERM,DIR,RECON) \
|
||||
SE=&st_p[DIR+8*ss]; \
|
||||
ptype=st_perm[DIR]; \
|
||||
/*SE=st.GetEntry(ptype,DIR,ss);*/ \
|
||||
offset = SE->_offset; \
|
||||
perm = SE->_permute; \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON;
|
||||
{ SE=&st_p[DIR+8*ss]; \
|
||||
auto ptype=st_perm[DIR]; \
|
||||
/*SE=st.GetEntry(ptype,DIR,ss);*/ \
|
||||
auto offset = SE->_offset; \
|
||||
auto perm = SE->_permute; \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; }
|
||||
|
||||
#define HAND_STENCIL_LEG_INT(PROJ,PERM,DIR,RECON) \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
offset = SE->_offset; \
|
||||
local = SE->_is_local; \
|
||||
perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else if ( st.same_node[DIR] ) { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
if (local || st.same_node[DIR] ) { \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; \
|
||||
} \
|
||||
acceleratorSynchronise();
|
||||
{ int ptype; \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
auto offset = SE->_offset; \
|
||||
auto local = SE->_is_local; \
|
||||
auto perm = SE->_permute; \
|
||||
if ( local ) { \
|
||||
LOAD_CHIMU(PERM); \
|
||||
PROJ; \
|
||||
if ( perm) { \
|
||||
PERMUTE_DIR(PERM); \
|
||||
} \
|
||||
} else if ( st.same_node[DIR] ) { \
|
||||
LOAD_CHI; \
|
||||
} \
|
||||
acceleratorSynchronise(); \
|
||||
if (local || st.same_node[DIR] ) { \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; \
|
||||
} \
|
||||
acceleratorSynchronise(); }
|
||||
|
||||
#define HAND_STENCIL_LEG_EXT(PROJ,PERM,DIR,RECON) \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
offset = SE->_offset; \
|
||||
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
|
||||
LOAD_CHI; \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; \
|
||||
nmu++; \
|
||||
} \
|
||||
acceleratorSynchronise();
|
||||
{ int ptype; \
|
||||
SE=st.GetEntry(ptype,DIR,ss); \
|
||||
auto offset = SE->_offset; \
|
||||
if((!SE->_is_local)&&(!st.same_node[DIR]) ) { \
|
||||
LOAD_CHI; \
|
||||
MULT_2SPIN(DIR); \
|
||||
RECON; \
|
||||
nmu++; \
|
||||
} \
|
||||
acceleratorSynchronise(); }
|
||||
|
||||
#define HAND_RESULT(ss) \
|
||||
{ \
|
||||
SiteSpinor & ref (out[ss]); \
|
||||
#define HAND_RESULT(ss) \
|
||||
{ \
|
||||
SiteSpinor & ref (out[ss]); \
|
||||
coalescedWrite(ref()(0)(0),result_00,lane); \
|
||||
coalescedWrite(ref()(0)(1),result_01,lane); \
|
||||
coalescedWrite(ref()(0)(2),result_02,lane); \
|
||||
@ -563,7 +566,6 @@ WilsonKernels<Impl>::HandDhopSiteSycl(StencilVector st_perm,StencilEntry *st_p,
|
||||
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
int offset,local,perm, ptype;
|
||||
StencilEntry *SE;
|
||||
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
|
||||
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
|
||||
@ -593,9 +595,7 @@ WilsonKernels<Impl>::HandDhopSite(StencilView &st, DoubledGaugeFieldView &U,Site
|
||||
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
int offset,local,perm, ptype;
|
||||
StencilEntry *SE;
|
||||
|
||||
HAND_STENCIL_LEG(XM_PROJ,3,Xp,XM_RECON);
|
||||
HAND_STENCIL_LEG(YM_PROJ,2,Yp,YM_RECON_ACCUM);
|
||||
HAND_STENCIL_LEG(ZM_PROJ,1,Zp,ZM_RECON_ACCUM);
|
||||
@ -623,8 +623,6 @@ void WilsonKernels<Impl>::HandDhopSiteDag(StencilView &st,DoubledGaugeFieldView
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
StencilEntry *SE;
|
||||
int offset,local,perm, ptype;
|
||||
|
||||
HAND_STENCIL_LEG(XP_PROJ,3,Xp,XP_RECON);
|
||||
HAND_STENCIL_LEG(YP_PROJ,2,Yp,YP_RECON_ACCUM);
|
||||
HAND_STENCIL_LEG(ZP_PROJ,1,Zp,ZP_RECON_ACCUM);
|
||||
@ -640,8 +638,8 @@ template<class Impl> accelerator_inline void
|
||||
WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
|
||||
{
|
||||
auto st_p = st._entries_p;
|
||||
auto st_perm = st._permute_type;
|
||||
// auto st_p = st._entries_p;
|
||||
// auto st_perm = st._permute_type;
|
||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
|
||||
typedef typename Simd::scalar_type S;
|
||||
typedef typename Simd::vector_type V;
|
||||
@ -652,7 +650,6 @@ WilsonKernels<Impl>::HandDhopSiteInt(StencilView &st,DoubledGaugeFieldView &U,Si
|
||||
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
int offset,local,perm, ptype;
|
||||
StencilEntry *SE;
|
||||
ZERO_RESULT;
|
||||
HAND_STENCIL_LEG_INT(XM_PROJ,3,Xp,XM_RECON_ACCUM);
|
||||
@ -670,8 +667,8 @@ template<class Impl> accelerator_inline
|
||||
void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
|
||||
{
|
||||
auto st_p = st._entries_p;
|
||||
auto st_perm = st._permute_type;
|
||||
// auto st_p = st._entries_p;
|
||||
// auto st_perm = st._permute_type;
|
||||
typedef typename Simd::scalar_type S;
|
||||
typedef typename Simd::vector_type V;
|
||||
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
|
||||
@ -682,7 +679,6 @@ void WilsonKernels<Impl>::HandDhopSiteDagInt(StencilView &st,DoubledGaugeFieldVi
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
StencilEntry *SE;
|
||||
int offset,local,perm, ptype;
|
||||
ZERO_RESULT;
|
||||
HAND_STENCIL_LEG_INT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
|
||||
HAND_STENCIL_LEG_INT(YP_PROJ,2,Yp,YP_RECON_ACCUM);
|
||||
@ -699,8 +695,8 @@ template<class Impl> accelerator_inline void
|
||||
WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
|
||||
{
|
||||
auto st_p = st._entries_p;
|
||||
auto st_perm = st._permute_type;
|
||||
// auto st_p = st._entries_p;
|
||||
// auto st_perm = st._permute_type;
|
||||
// T==0, Z==1, Y==2, Z==3 expect 1,2,2,2 simd layout etc...
|
||||
typedef typename Simd::scalar_type S;
|
||||
typedef typename Simd::vector_type V;
|
||||
@ -711,7 +707,7 @@ WilsonKernels<Impl>::HandDhopSiteExt(StencilView &st,DoubledGaugeFieldView &U,Si
|
||||
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
int offset, ptype;
|
||||
// int offset, ptype;
|
||||
StencilEntry *SE;
|
||||
int nmu=0;
|
||||
ZERO_RESULT;
|
||||
@ -730,8 +726,8 @@ template<class Impl> accelerator_inline
|
||||
void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldView &U,SiteHalfSpinor *buf,
|
||||
int ss,int sU,const FermionFieldView &in, FermionFieldView &out)
|
||||
{
|
||||
auto st_p = st._entries_p;
|
||||
auto st_perm = st._permute_type;
|
||||
// auto st_p = st._entries_p;
|
||||
// auto st_perm = st._permute_type;
|
||||
typedef typename Simd::scalar_type S;
|
||||
typedef typename Simd::vector_type V;
|
||||
typedef decltype( coalescedRead( in[0]()(0)(0) )) Simt;
|
||||
@ -742,7 +738,7 @@ void WilsonKernels<Impl>::HandDhopSiteDagExt(StencilView &st,DoubledGaugeFieldVi
|
||||
HAND_DECLARATIONS(Simt);
|
||||
|
||||
StencilEntry *SE;
|
||||
int offset, ptype;
|
||||
// int offset, ptype;
|
||||
int nmu=0;
|
||||
ZERO_RESULT;
|
||||
HAND_STENCIL_LEG_EXT(XP_PROJ,3,Xp,XP_RECON_ACCUM);
|
||||
|
@@ -0,0 +1,41 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/ qcd/action/fermion/instantiation/CompactWilsonCloverFermionInstantiation.cc.master

    Copyright (C) 2017 - 2022

    Author: paboyle <paboyle@ph.ed.ac.uk>
    Author: Guido Cossu <guido.cossu@ed.ac.uk>
    Author: Daniel Richtmann <daniel.richtmann@gmail.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid.h>
#include <Grid/qcd/spin/Dirac.h>
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
#include <Grid/qcd/action/fermion/implementation/CompactWilsonCloverFermionImplementation.h>

NAMESPACE_BEGIN(Grid);

#include "impl.h"
template class CompactWilsonCloverFermion<IMPLEMENTATION>;

NAMESPACE_END(Grid);
@@ -0,0 +1 @@
../CompactWilsonCloverFermionInstantiation.cc.master
@@ -0,0 +1 @@
../CompactWilsonCloverFermionInstantiation.cc.master
@@ -40,7 +40,7 @@ EOF

done

CC_LIST="WilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"
CC_LIST="WilsonCloverFermionInstantiation CompactWilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"

for impl in $WILSON_IMPL_LIST
do
102 Grid/qcd/action/filters/DDHMCFilter.h Normal file
@@ -0,0 +1,102 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);
////////////////////////////////////////////////////
// DDHMC filter with sub-block size B[mu]
////////////////////////////////////////////////////

template<typename GaugeField>
struct DDHMCFilter: public MomentumFilterBase<GaugeField>
{
  Coordinate Block;
  int Width;

  DDHMCFilter(const Coordinate &_Block,int _Width=2): Block(_Block) { Width=_Width; }

  void applyFilter(GaugeField &U) const override
  {
    GridBase *grid = U.Grid();
    Coordinate Global=grid->GlobalDimensions();
    GaugeField zzz(grid); zzz = Zero();
    LatticeInteger coor(grid);

    auto zzz_mu = PeekIndex<LorentzIndex>(zzz,0);
    ////////////////////////////////////////////////////
    // Zero BDY layers
    ////////////////////////////////////////////////////
    std::cout<<GridLogMessage<<" DDHMC Force Filter Block "<<Block<<" width " <<Width<<std::endl;
    for(int mu=0;mu<Nd;mu++) {

      Integer B1 = Block[mu];
      if ( B1 && (B1 <= Global[mu]) ) {
        LatticeCoordinate(coor,mu);

        ////////////////////////////////
        // OmegaBar - zero all links contained in slice B-1,0 and
        // mu links connecting to Omega
        ////////////////////////////////
        if ( Width==1) {
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-2),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==2) {
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-3),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
        if ( Width==3) {
          U = where(mod(coor,B1)==Integer(B1-3),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
          U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
          U = where(mod(coor,B1)==Integer(0)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(1)   ,zzz,U);
          U = where(mod(coor,B1)==Integer(2)   ,zzz,U);
          auto U_mu = PeekIndex<LorentzIndex>(U,mu);
          U_mu = where(mod(coor,B1)==Integer(B1-4),zzz_mu,U_mu);
          PokeIndex<LorentzIndex>(U, U_mu, mu);
        }
      }

    }

  }
};

NAMESPACE_END(Grid);
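Since `DDHMCFilter` derives from `MomentumFilterBase<GaugeField>`, it can be applied directly to a momentum or force field. A small illustrative sketch follows; the block size and the grid `UGrid` are assumptions of the example, not part of the changeset.

```
// Hedged sketch: zero momenta/links on the sub-block boundary layers.
Coordinate Block(Nd);
for(int mu=0;mu<Nd;mu++) Block[mu] = 4;        // illustrative sub-block size B[mu]

DDHMCFilter<LatticeGaugeField> DDFilter(Block, 2 /*Width*/);

LatticeGaugeField P(UGrid);                    // e.g. HMC momenta living on an assumed UGrid
DDFilter.applyFilter(P);                       // boundary layers of each block are set to zero
```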
71 Grid/qcd/action/filters/DirichletFilter.h Normal file
@@ -0,0 +1,71 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h

    Copyright (C) 2015

    Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution
    directory
*************************************************************************************/
/* END LEGAL */
//--------------------------------------------------------------------
#pragma once

NAMESPACE_BEGIN(Grid);

template<typename MomentaField>
struct DirichletFilter: public MomentumFilterBase<MomentaField>
{
  typedef typename MomentaField::vector_type vector_type; //SIMD-vectorized complex type
  typedef typename MomentaField::scalar_type scalar_type; //scalar complex type

  typedef iScalar<iScalar<iScalar<vector_type> > >  ScalarType; //complex phase for each site

  Coordinate Block;

  DirichletFilter(const Coordinate &_Block): Block(_Block){}

  void applyFilter(MomentaField &P) const override
  {
    GridBase *grid = P.Grid();
    typedef decltype(PeekIndex<LorentzIndex>(P, 0)) LatCM;
    ////////////////////////////////////////////////////
    // Zero strictly links crossing between domains
    ////////////////////////////////////////////////////
    LatticeInteger coor(grid);
    LatCM zz(grid); zz = Zero();
    for(int mu=0;mu<Nd;mu++) {
      if ( (Block[mu]) && (Block[mu] < grid->GlobalDimensions()[mu] ) ) {
        // If costly could provide Grid earlier and precompute masks
        std::cout << " Dirichlet in mu="<<mu<<std::endl;
        LatticeCoordinate(coor,mu);
        auto P_mu = PeekIndex<LorentzIndex>(P, mu);
        P_mu = where(mod(coor,Block[mu])==Integer(Block[mu]-1),zz,P_mu);
        PokeIndex<LorentzIndex>(P, P_mu, mu);
      }
    }
  }
};

NAMESPACE_END(Grid);
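This filter is what the `WilsonFermion5D<Impl>::ImportGauge` hunk earlier in this changeset uses to impose Dirichlet boundaries: links crossing a block boundary are zeroed before the gauge field is doubled. A condensed sketch of that pattern, with an arbitrary block size chosen for the example:

```
// Hedged sketch mirroring the ImportGauge usage above; the block size is illustrative.
Coordinate GaugeBlock(Nd);
for(int d=0;d<Nd;d++) GaugeBlock[d] = 8;     // 4d block size

DirichletFilter<LatticeGaugeField> Filter(GaugeBlock);
Filter.applyFilter(HUmu);                    // HUmu: the gauge field about to be double-stored
```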
@@ -129,18 +129,10 @@ public:
    Runner(S);
  }

  //////////////////////////////////////////////////////////////////

private:
  template <class SmearingPolicy>
  void Runner(SmearingPolicy &Smearing) {
    auto UGrid = Resources.GetCartesian();
    Resources.AddRNGs();
    Field U(UGrid);

    // Can move this outside?
    typedef IntegratorType<SmearingPolicy> TheIntegrator;
    TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
  //Use the checkpointer to initialize the RNGs and the gauge field, writing the resulting gauge field into U.
  //This is called automatically by Run but may be useful elsewhere, e.g. for integrator tuning experiments
  void initializeGaugeFieldAndRNGs(Field &U){
    if(!Resources.haveRNGs()) Resources.AddRNGs();

    if (Parameters.StartingType == "HotStart") {
      // Hot start
@@ -167,6 +159,25 @@ private:
                << "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
      exit(1);
    }
  }



  //////////////////////////////////////////////////////////////////

private:
  template <class SmearingPolicy>
  void Runner(SmearingPolicy &Smearing) {
    auto UGrid = Resources.GetCartesian();
    Field U(UGrid);

    initializeGaugeFieldAndRNGs(U);

    typedef IntegratorType<SmearingPolicy> TheIntegrator;
    TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);

    // Sets the momentum filter
    MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));

    Smearing.set_Field(U);
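As the comment notes, `initializeGaugeFieldAndRNGs` is called by `Run` but can also be invoked on its own, for example before an integrator-tuning experiment. A hedged sketch of such a use; the runner instance `TheHMC` and the gauge field type are assumptions of the example:

```
// Hedged sketch: initialize RNGs and the gauge field without running trajectories.
LatticeGaugeField U(TheHMC.Resources.GetCartesian());
TheHMC.initializeGaugeFieldAndRNGs(U);   // Hot/Cold/Tepid/Checkpoint start as configured
// ... custom integrator tuning or measurements acting on U ...
```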
@@ -34,6 +34,7 @@ directory
 * @brief Classes for Hybrid Monte Carlo update
 *
 * @author Guido Cossu
 * @author Peter Boyle
 */
//--------------------------------------------------------------------
#pragma once
@@ -115,22 +116,17 @@ private:

    random(sRNG, rn_test);

    std::cout << GridLogMessage
              << "--------------------------------------------------\n";
    std::cout << GridLogMessage << "exp(-dH) = " << prob
              << " Random = " << rn_test << "\n";
    std::cout << GridLogMessage
              << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
    std::cout << GridLogHMC << "--------------------------------------------------\n";
    std::cout << GridLogHMC << "exp(-dH) = " << prob << " Random = " << rn_test << "\n";
    std::cout << GridLogHMC << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";

    if ((prob > 1.0) || (rn_test <= prob)) {  // accepted
      std::cout << GridLogMessage << "Metropolis_test -- ACCEPTED\n";
      std::cout << GridLogMessage
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return true;
    } else {  // rejected
      std::cout << GridLogMessage << "Metropolis_test -- REJECTED\n";
      std::cout << GridLogMessage
                << "--------------------------------------------------\n";
      std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
      std::cout << GridLogHMC << "--------------------------------------------------\n";
      return false;
    }
  }
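For reference, the accept/reject logic above amounts to accepting with probability min(1, exp(-dH)). A standalone sketch of just that rule (illustrative only; `RealD` is Grid's real type, and `std::exp` requires `<cmath>`):

```
// Hedged sketch of the Metropolis acceptance rule used in metropolis_test.
bool metropolis_accept(RealD DeltaH, RealD uniform_draw /* in [0,1) */) {
  RealD prob = std::exp(-DeltaH);
  return (prob > 1.0) || (uniform_draw <= prob);   // accept with probability min(1, exp(-dH))
}
```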
@ -139,19 +135,68 @@ private:
|
||||
// Evolution
|
||||
/////////////////////////////////////////////////////////
|
||||
RealD evolve_hmc_step(Field &U) {
|
||||
TheIntegrator.refresh(U, sRNG, pRNG); // set U and initialize P and phi's
|
||||
|
||||
RealD H0 = TheIntegrator.S(U); // initial state action
|
||||
GridBase *Grid = U.Grid();
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Mainly for DDHMC perform a random translation of U modulo volume
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
std::cout << GridLogMessage << "Random shifting gauge field by [";
|
||||
for(int d=0;d<Grid->Nd();d++) {
|
||||
|
||||
int L = Grid->GlobalDimensions()[d];
|
||||
|
||||
RealD rn_uniform; random(sRNG, rn_uniform);
|
||||
|
||||
int shift = (int) (rn_uniform*L);
|
||||
|
||||
std::cout << shift;
|
||||
if(d<Grid->Nd()-1) std::cout <<",";
|
||||
else std::cout <<"]\n";
|
||||
|
||||
U = Cshift(U,d,shift);
|
||||
}
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
|
||||
TheIntegrator.reset_timer();
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// set U and initialize P and phi's
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
std::cout << GridLogMessage << "Refresh momenta and pseudofermions";
|
||||
TheIntegrator.refresh(U, sRNG, pRNG);
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// initial state action
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
std::cout << GridLogMessage << "Compute initial action";
|
||||
RealD H0 = TheIntegrator.S(U);
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
|
||||
std::streamsize current_precision = std::cout.precision();
|
||||
std::cout.precision(15);
|
||||
std::cout << GridLogMessage << "Total H before trajectory = " << H0 << "\n";
|
||||
std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
|
||||
std::cout.precision(current_precision);
|
||||
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
std::cout << GridLogMessage << " Molecular Dynamics evolution ";
|
||||
TheIntegrator.integrate(U);
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
|
||||
RealD H1 = TheIntegrator.S(U); // updated state action
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// updated state action
|
||||
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
std::cout << GridLogMessage << "Compute final action";
|
||||
RealD H1 = TheIntegrator.S(U);
|
||||
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
if(0){
|
||||
std::cout << "------------------------- Reversibility test" << std::endl;
|
||||
@ -163,17 +208,16 @@ private:
|
||||
}
|
||||
///////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
std::cout.precision(15);
|
||||
std::cout << GridLogMessage << "Total H after trajectory = " << H1
|
||||
<< " dH = " << H1 - H0 << "\n";
|
||||
|
||||
std::cout << GridLogHMC << "--------------------------------------------------\n";
|
||||
std::cout << GridLogHMC << "Total H after trajectory = " << H1 << " dH = " << H1 - H0 << "\n";
|
||||
std::cout << GridLogHMC << "--------------------------------------------------\n";
|
||||
|
||||
std::cout.precision(current_precision);
|
||||
|
||||
return (H1 - H0);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
public:
|
||||
/////////////////////////////////////////
|
||||
@ -195,10 +239,13 @@ public:
|
||||
|
||||
// Actual updates (evolve a copy Ucopy then copy back eventually)
|
||||
unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;
|
||||
|
||||
for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {
|
||||
std::cout << GridLogMessage << "-- # Trajectory = " << traj << "\n";
|
||||
|
||||
std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";
|
||||
|
||||
if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
|
||||
std::cout << GridLogMessage << "-- Thermalization" << std::endl;
|
||||
std::cout << GridLogHMC << "-- Thermalization" << std::endl;
|
||||
}
|
||||
|
||||
double t0=usecond();
|
||||
@ -207,20 +254,19 @@ public:
|
||||
DeltaH = evolve_hmc_step(Ucopy);
|
||||
// Metropolis-Hastings test
|
||||
bool accept = true;
|
||||
if (traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
|
||||
if (Params.MetropolisTest && traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
|
||||
accept = metropolis_test(DeltaH);
|
||||
} else {
|
||||
std::cout << GridLogMessage << "Skipping Metropolis test" << std::endl;
|
||||
std::cout << GridLogHMC << "Skipping Metropolis test" << std::endl;
|
||||
}
|
||||
|
||||
if (accept)
|
||||
Ucur = Ucopy;
|
||||
|
||||
|
||||
|
||||
double t1=usecond();
|
||||
std::cout << GridLogMessage << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
|
||||
std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
|
||||
|
||||
TheIntegrator.print_timer();
|
||||
|
||||
for (int obs = 0; obs < Observables.size(); obs++) {
|
||||
std::cout << GridLogDebug << "Observables # " << obs << std::endl;
|
||||
@ -228,7 +274,7 @@ public:
|
||||
std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
|
||||
Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
|
||||
}
|
||||
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
|
||||
std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -72,6 +72,8 @@ class HMCResourceManager {
  typedef HMCModuleBase< BaseHmcCheckpointer<ImplementationPolicy> > CheckpointerBaseModule;
  typedef HMCModuleBase< HmcObservable<typename ImplementationPolicy::Field> > ObservableBaseModule;
  typedef ActionModuleBase< Action<typename ImplementationPolicy::Field>, GridModule > ActionBaseModule;
  typedef typename ImplementationPolicy::Field MomentaField;
  typedef typename ImplementationPolicy::Field Field;

  // Named storage for grid pairs (std + red-black)
  std::unordered_map<std::string, GridModule> Grids;
@@ -80,6 +82,9 @@ class HMCResourceManager {
  // SmearingModule<ImplementationPolicy> Smearing;
  std::unique_ptr<CheckpointerBaseModule> CP;

  // Momentum filter
  std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> > Filter;

  // A vector of HmcObservable modules
  std::vector<std::unique_ptr<ObservableBaseModule> > ObservablesList;

@@ -90,6 +95,7 @@ class HMCResourceManager {

  bool have_RNG;
  bool have_CheckPointer;
  bool have_Filter;

  // NOTE: operator << is not overloaded for std::vector<string>
  // so this function is necessary
@@ -101,7 +107,7 @@ class HMCResourceManager {


public:
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false) {}
  HMCResourceManager() : have_RNG(false), have_CheckPointer(false), have_Filter(false) {}

  template <class ReaderClass, class vector_type = vComplex >
  void initialize(ReaderClass &Read){
@@ -129,6 +135,7 @@ public:
    RNGModuleParameters RNGpar(Read);
    SetRNGSeeds(RNGpar);


    // Observables
    auto &ObsFactory = HMC_ObservablesModuleFactory<observable_string, typename ImplementationPolicy::Field, ReaderClass>::getInstance();
    Read.push(observable_string);// here must check if existing...
@@ -208,6 +215,16 @@ public:
    AddGrid(s, Mod);
  }

  void SetMomentumFilter( MomentumFilterBase<typename ImplementationPolicy::Field> * MomFilter) {
    assert(have_Filter==false);
    Filter = std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> >(MomFilter);
    have_Filter = true;
  }
  MomentumFilterBase<typename ImplementationPolicy::Field> *GetMomentumFilter(void) {
    if ( !have_Filter)
      SetMomentumFilter(new MomentumFilterNone<typename ImplementationPolicy::Field>());
    return Filter.get();
  }

  GridCartesian* GetCartesian(std::string s = "") {
    if (s.empty()) s = Grids.begin()->first;
@@ -226,6 +243,9 @@ public:
  //////////////////////////////////////////////////////
  // Random number generators
  //////////////////////////////////////////////////////

  //Return true if the RNG objects have been instantiated
  bool haveRNGs() const{ return have_RNG; }

  void AddRNGs(std::string s = "") {
    // Couple the RNGs to the GridModule tagged by s
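With `SetMomentumFilter`/`GetMomentumFilter` in place, a filter only needs to be registered once with the resource manager and `Runner` will hand it to the integrator. A hedged sketch of the registration; the runner instance and the block size are assumptions of the example:

```
// Hedged sketch: register a momentum filter before calling TheHMC.Run().
Coordinate Block(Nd);
for(int mu=0;mu<Nd;mu++) Block[mu] = 4;
TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<LatticeGaugeField>(Block));
// If nothing is registered, GetMomentumFilter() falls back to MomentumFilterNone.
```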
@ -1,61 +1,63 @@
|
||||
Using HMC in Grid version 0.5.1
|
||||
# Using HMC in Grid
|
||||
|
||||
These are the instructions to use the Generalised HMC on Grid version 0.5.1.
|
||||
Disclaimer: GRID is still under active development so any information here can be changed in future releases.
|
||||
These are the instructions to use the Generalised HMC on Grid as of commit `749b802`.
|
||||
Disclaimer: Grid is still under active development so any information here can be changed in future releases.
|
||||
|
||||
|
||||
Command line options
|
||||
===================
|
||||
(relevant file GenericHMCrunner.h)
|
||||
## Command line options
|
||||
|
||||
(relevant file `GenericHMCrunner.h`)
|
||||
The initial configuration can be changed at the command line using
|
||||
--StartType <your choice>
|
||||
valid choices, one among these
|
||||
HotStart, ColdStart, TepidStart, CheckpointStart
|
||||
default: HotStart
|
||||
`--StartingType STARTING_TYPE`, where `STARTING_TYPE` is one of
|
||||
`HotStart`, `ColdStart`, `TepidStart`, and `CheckpointStart`.
|
||||
Default: `--StartingType HotStart`
|
||||
|
||||
example
|
||||
./My_hmc_exec --StartType HotStart
|
||||
Example:
|
||||
```
|
||||
./My_hmc_exec --StartingType HotStart
|
||||
```
|
||||
|
||||
The CheckpointStart option uses the prefix for the configurations and rng seed files defined in your executable and the initial configuration is specified by
|
||||
--StartTrajectory <integer>
|
||||
default: 0
|
||||
The `CheckpointStart` option uses the configuration and RNG seed file prefixes defined in your executable, and the initial configuration is specified by
|
||||
`--StartingTrajectory STARTING_TRAJECTORY`, where `STARTING_TRAJECTORY` is an integer.
|
||||
Default: `--StartingTrajectory 0`
|
||||
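The prefixes referred to here are the ones the executable registers with its checkpointer. A sketch of that registration, following `HMC/Mobius2p1f_DD_RHMC.cc` later in this change set (prefixes, interval and format are illustrative):
```
CheckpointerParameters CPparams;
CPparams.config_prefix = "ckpoint_EODWF_lat";
CPparams.rng_prefix    = "ckpoint_EODWF_rng";
CPparams.saveInterval  = 1;
CPparams.format        = "IEEE64BIG";
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
```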
|
||||
The number of trajectories for a specific run is specified at the command line by
|
||||
--Trajectories <integer>
|
||||
default: 1
|
||||
`--Trajectories TRAJECTORIES`, where `TRAJECTORIES` is an integer.
|
||||
Default: `--Trajectories 1`
|
||||
|
||||
The number of thermalization steps (i.e. steps when the Metropolis acceptance check is turned off) is specified by
|
||||
--Thermalizations <integer>
|
||||
default: 10
|
||||
|
||||
`--Thermalizations THERMALIZATIONS`, where `THERMALIZATIONS` is an integer.
|
||||
Default: `--Thermalizations 10`
|
||||
|
||||
Any other parameter is defined in the source for the executable.
|
||||
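Putting these options together, a restart from a saved checkpoint might look like the sketch below; the `--grid`/`--mpi` geometry flags (dot-separated lattice and MPI decompositions) are illustrative and depend on your build and job layout.
```
./My_hmc_exec --StartingType CheckpointStart --StartingTrajectory 8 \
              --Trajectories 200 --Thermalizations 0 \
              --grid 16.16.16.32 --mpi 1.1.1.2
```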
|
||||
HMC controls
|
||||
===========
|
||||
## HMC controls
|
||||
|
||||
The lines
|
||||
|
||||
```
|
||||
std::vector<int> SerSeed({1, 2, 3, 4, 5});
|
||||
std::vector<int> ParSeed({6, 7, 8, 9, 10});
|
||||
```
|
||||
|
||||
define the seeds for the serial and the parallel RNG.
|
||||
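The example runners in this repository set the same seeds through the resource manager instead; a sketch following `HMC/Mobius2p1f_DD_RHMC.cc` later in this change set:
```
RNGModuleParameters RNGpar;
RNGpar.serial_seeds   = "1 2 3 4 5";
RNGpar.parallel_seeds = "6 7 8 9 10";
TheHMC.Resources.SetRNGSeeds(RNGpar);
```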
|
||||
The line
|
||||
|
||||
```
|
||||
TheHMC.MDparameters.set(20, 1.0);// MDsteps, traj length
|
||||
```
|
||||
|
||||
declares the number of molecular dynamics steps and the total trajectory length.
|
||||
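The example runners in this change set carry the same two quantities in an `IntegratorParameters` struct handed to the runner at construction; a sketch, assuming the `HMCWrapper` typedef used in those examples:
```
IntegratorParameters MD;
MD.name    = std::string("MinimumNorm2");
MD.MDsteps = 20;   // molecular dynamics steps per trajectory
MD.trajL   = 1.0;  // total trajectory length

HMCparameters HMCparams;
HMCparams.MD = MD;
HMCWrapper TheHMC(HMCparams);
```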
|
||||
|
||||
Actions
|
||||
======
|
||||
## Actions
|
||||
|
||||
Action names are defined in the file
|
||||
lib/qcd/Actions.h
|
||||
Action names are defined in the directory `Grid/qcd/action`.
|
||||
|
||||
Gauge actions list:
|
||||
Gauge actions list (from `Grid/qcd/action/gauge/Gauge.h`):
|
||||
|
||||
```
|
||||
WilsonGaugeActionR;
|
||||
WilsonGaugeActionF;
|
||||
WilsonGaugeActionD;
|
||||
@ -68,8 +70,9 @@ IwasakiGaugeActionD;
|
||||
SymanzikGaugeActionR;
|
||||
SymanzikGaugeActionF;
|
||||
SymanzikGaugeActionD;
|
||||
```
|
||||
|
||||
|
||||
```
|
||||
ConjugateWilsonGaugeActionR;
|
||||
ConjugateWilsonGaugeActionF;
|
||||
ConjugateWilsonGaugeActionD;
|
||||
@ -82,26 +85,23 @@ ConjugateIwasakiGaugeActionD;
|
||||
ConjugateSymanzikGaugeActionR;
|
||||
ConjugateSymanzikGaugeActionF;
|
||||
ConjugateSymanzikGaugeActionD;
|
||||
```
|
||||
|
||||
Each of these actions accepts a single parameter at creation time (beta).
|
||||
Example for creating a Symanzik action with beta=4.0
|
||||
|
||||
```
|
||||
SymanzikGaugeActionR(4.0)
|
||||
```
|
||||
|
||||
Scalar actions list (from `Grid/qcd/action/scalar/Scalar.h`):
|
||||
|
||||
```
|
||||
ScalarActionR;
|
||||
ScalarActionF;
|
||||
ScalarActionD;
|
||||
```
|
||||
|
||||
|
||||
each of these action accept one single parameter at creation time (beta).
|
||||
Example for creating a Symanzik action with beta=4.0
|
||||
|
||||
SymanzikGaugeActionR(4.0)
|
||||
|
||||
The suffixes R,F,D in the action names refer to the Real
|
||||
(the precision is defined at compile time by the --enable-precision flag in the configure),
|
||||
Float and Double, that force the precision of the action to be 32, 64 bit respectively.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
The suffixes `R`, `F`, `D` in the action names refer to `Real`
|
||||
(the precision is defined at compile time by the `--enable-precision` configure flag),
|
||||
`Float`, and `Double`, which force the precision of the action to be 32 and 64 bit respectively.
|
||||
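Once constructed, an action is attached to the integrator through an `ActionLevel`. A minimal sketch following the pattern of the example runners in this repository (the `HMCWrapper` typedef and the `TheHMC` object are assumed to be set up as in those examples):
```
SymanzikGaugeActionR GaugeAction(4.0);      // beta = 4.0

ActionLevel<HMCWrapper::Field> Level1(1);   // outermost integration level
Level1.push_back(&GaugeAction);

TheHMC.TheAction.push_back(Level1);
```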
|
@ -33,7 +33,6 @@ directory
|
||||
#define INTEGRATOR_INCLUDED
|
||||
|
||||
#include <memory>
|
||||
#include "MomentumFilter.h"
|
||||
|
||||
NAMESPACE_BEGIN(Grid);
|
||||
|
||||
@ -67,6 +66,7 @@ public:
|
||||
template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy>
|
||||
class Integrator {
|
||||
protected:
|
||||
|
||||
typedef typename FieldImplementation::Field MomentaField; //for readability
|
||||
typedef typename FieldImplementation::Field Field;
|
||||
|
||||
@ -119,36 +119,58 @@ protected:
|
||||
}
|
||||
} update_P_hireps{};
|
||||
|
||||
|
||||
void update_P(MomentaField& Mom, Field& U, int level, double ep) {
|
||||
// input U actually not used in the fundamental case
|
||||
// Fundamental updates, include smearing
|
||||
|
||||
for (int a = 0; a < as[level].actions.size(); ++a) {
|
||||
|
||||
double start_full = usecond();
|
||||
Field force(U.Grid());
|
||||
conformable(U.Grid(), Mom.Grid());
|
||||
|
||||
Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
|
||||
double start_force = usecond();
|
||||
as[level].actions.at(a)->deriv_timer_start();
|
||||
as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
|
||||
as[level].actions.at(a)->deriv_timer_stop();
|
||||
|
||||
std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
|
||||
auto name = as[level].actions.at(a)->action_name();
|
||||
if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
|
||||
|
||||
force = FieldImplementation::projectForce(force); // Ta for gauge fields
|
||||
double end_force = usecond();
|
||||
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
|
||||
std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
|
||||
|
||||
MomFilter->applyFilter(force);
|
||||
std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<< std::endl;
|
||||
DumpSliceNorm("force ",force,Nd-1);
|
||||
|
||||
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
|
||||
Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
|
||||
|
||||
Real force_max = std::sqrt(maxLocalNorm2(force));
|
||||
Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;
|
||||
|
||||
as[level].actions.at(a)->deriv_log(force_abs,force_max);
|
||||
|
||||
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force average: " << force_abs <<" "<<name<<std::endl;
|
||||
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force max : " << force_max <<" "<<name<<std::endl;
|
||||
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt average : " << impulse_abs <<" "<<name<<std::endl;
|
||||
std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt max : " << impulse_max <<" "<<name<<std::endl;
|
||||
|
||||
Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
|
||||
double end_full = usecond();
|
||||
double time_full = (end_full - start_full) / 1e3;
|
||||
double time_force = (end_force - start_force) / 1e3;
|
||||
std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
|
||||
|
||||
}
|
||||
|
||||
// Force from the other representations
|
||||
as[level].apply(update_P_hireps, Representations, Mom, U, ep);
|
||||
|
||||
MomFilter->applyFilter(Mom);
|
||||
}
|
||||
|
||||
void update_U(Field& U, double ep)
|
||||
@ -162,8 +184,12 @@ protected:
|
||||
|
||||
void update_U(MomentaField& Mom, Field& U, double ep)
|
||||
{
|
||||
MomentaField MomFiltered(Mom.Grid());
|
||||
MomFiltered = Mom;
|
||||
MomFilter->applyFilter(MomFiltered);
|
||||
|
||||
// exponential of Mom*U in the gauge fields case
|
||||
FieldImplementation::update_field(Mom, U, ep);
|
||||
FieldImplementation::update_field(MomFiltered, U, ep);
|
||||
|
||||
// Update the smeared fields, can be implemented as observer
|
||||
Smearer.set_Field(U);
|
||||
@ -206,6 +232,66 @@ public:
|
||||
const MomentaField & getMomentum() const{ return P; }
|
||||
|
||||
|
||||
void reset_timer(void)
|
||||
{
|
||||
for (int level = 0; level < as.size(); ++level) {
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
as[level].actions.at(actionID)->reset_timer();
|
||||
}
|
||||
}
|
||||
}
|
||||
void print_timer(void)
|
||||
{
|
||||
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
|
||||
std::cout << GridLogMessage << " Refresh cumulative timings "<<std::endl;
|
||||
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
|
||||
for (int level = 0; level < as.size(); ++level) {
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
std::cout << GridLogMessage
|
||||
<< as[level].actions.at(actionID)->action_name()
|
||||
<<"["<<level<<"]["<< actionID<<"] "
|
||||
<< as[level].actions.at(actionID)->refresh_us*1.0e-6<<" s"<< std::endl;
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
|
||||
std::cout << GridLogMessage << " Action cumulative timings "<<std::endl;
|
||||
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
|
||||
for (int level = 0; level < as.size(); ++level) {
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
std::cout << GridLogMessage
|
||||
<< as[level].actions.at(actionID)->action_name()
|
||||
<<"["<<level<<"]["<< actionID<<"] "
|
||||
<< as[level].actions.at(actionID)->S_us*1.0e-6<<" s"<< std::endl;
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
|
||||
std::cout << GridLogMessage << " Force cumulative timings "<<std::endl;
|
||||
std::cout << GridLogMessage << "------------------------- "<<std::endl;
|
||||
for (int level = 0; level < as.size(); ++level) {
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
std::cout << GridLogMessage
|
||||
<< as[level].actions.at(actionID)->action_name()
|
||||
<<"["<<level<<"]["<< actionID<<"] "
|
||||
<< as[level].actions.at(actionID)->deriv_us*1.0e-6<<" s"<< std::endl;
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << "--------------------------- "<<std::endl;
|
||||
std::cout << GridLogMessage << " Force average size "<<std::endl;
|
||||
std::cout << GridLogMessage << "------------------------- "<<std::endl;
|
||||
for (int level = 0; level < as.size(); ++level) {
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
std::cout << GridLogMessage
|
||||
<< as[level].actions.at(actionID)->action_name()
|
||||
<<"["<<level<<"]["<< actionID<<"] : "
|
||||
<<" force max " << as[level].actions.at(actionID)->deriv_max_average()
|
||||
<<" norm " << as[level].actions.at(actionID)->deriv_norm_average()
|
||||
<<" calls " << as[level].actions.at(actionID)->deriv_num
|
||||
<< std::endl;
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
|
||||
}
|
||||
|
||||
void print_parameters()
|
||||
{
|
||||
std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
|
||||
@ -224,7 +310,6 @@ public:
|
||||
}
|
||||
}
|
||||
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
|
||||
|
||||
}
|
||||
|
||||
void reverse_momenta()
|
||||
@ -267,15 +352,19 @@ public:
|
||||
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
|
||||
// get gauge field from the SmearingPolicy and
|
||||
// based on the boolean is_smeared in actionID
|
||||
auto name = as[level].actions.at(actionID)->action_name();
|
||||
std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;
|
||||
|
||||
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
|
||||
as[level].actions.at(actionID)->refresh_timer_start();
|
||||
as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
|
||||
as[level].actions.at(actionID)->refresh_timer_stop();
|
||||
}
|
||||
|
||||
// Refresh the higher representation actions
|
||||
as[level].apply(refresh_hireps, Representations, sRNG, pRNG);
|
||||
}
|
||||
|
||||
MomFilter->applyFilter(P);
|
||||
}
|
||||
|
||||
// to be used by the actionlevel class to iterate
|
||||
@ -310,7 +399,9 @@ public:
|
||||
// based on the boolean is_smeared in actionID
|
||||
Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
|
||||
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
|
||||
as[level].actions.at(actionID)->S_timer_start();
|
||||
Hterm = as[level].actions.at(actionID)->S(Us);
|
||||
as[level].actions.at(actionID)->S_timer_stop();
|
||||
std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
|
||||
H += Hterm;
|
||||
}
|
||||
|
@ -131,8 +131,11 @@ class CartesianStencilAccelerator {
|
||||
int _checkerboard;
|
||||
int _npoints; // Move to template param?
|
||||
int _osites;
|
||||
int _dirichlet;
|
||||
StencilVector _directions;
|
||||
StencilVector _distances;
|
||||
StencilVector _comms_send;
|
||||
StencilVector _comms_recv;
|
||||
StencilVector _comm_buf_size;
|
||||
StencilVector _permute_type;
|
||||
StencilVector same_node;
|
||||
@ -226,6 +229,8 @@ public:
|
||||
void * recv_buf;
|
||||
Integer to_rank;
|
||||
Integer from_rank;
|
||||
Integer do_send;
|
||||
Integer do_recv;
|
||||
Integer bytes;
|
||||
};
|
||||
struct Merge {
|
||||
@ -240,7 +245,20 @@ public:
|
||||
cobj * mpi_p;
|
||||
Integer buffer_size;
|
||||
};
|
||||
|
||||
struct CopyReceiveBuffer {
|
||||
void * from_p;
|
||||
void * to_p;
|
||||
Integer bytes;
|
||||
};
|
||||
struct CachedTransfer {
|
||||
Integer direction;
|
||||
Integer OrthogPlane;
|
||||
Integer DestProc;
|
||||
Integer bytes;
|
||||
Integer lane;
|
||||
Integer cb;
|
||||
void *recv_buf;
|
||||
};
|
||||
|
||||
protected:
|
||||
GridBase * _grid;
|
||||
@ -271,7 +289,8 @@ public:
|
||||
std::vector<Merge> MergersSHM;
|
||||
std::vector<Decompress> Decompressions;
|
||||
std::vector<Decompress> DecompressionsSHM;
|
||||
|
||||
std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
|
||||
std::vector<CachedTransfer> CachedTransfers;
|
||||
///////////////////////////////////////////////////////////
|
||||
// Unified Comms buffers for all directions
|
||||
///////////////////////////////////////////////////////////
|
||||
@ -284,29 +303,6 @@ public:
|
||||
int u_comm_offset;
|
||||
int _unified_buffer_size;
|
||||
|
||||
/////////////////////////////////////////
|
||||
// Timing info; ugly; possibly temporary
|
||||
/////////////////////////////////////////
|
||||
double commtime;
|
||||
double mpi3synctime;
|
||||
double mpi3synctime_g;
|
||||
double shmmergetime;
|
||||
double gathertime;
|
||||
double gathermtime;
|
||||
double halogtime;
|
||||
double mergetime;
|
||||
double decompresstime;
|
||||
double comms_bytes;
|
||||
double shm_bytes;
|
||||
double splicetime;
|
||||
double nosplicetime;
|
||||
double calls;
|
||||
std::vector<double> comm_bytes_thr;
|
||||
std::vector<double> shm_bytes_thr;
|
||||
std::vector<double> comm_time_thr;
|
||||
std::vector<double> comm_enter_thr;
|
||||
std::vector<double> comm_leave_thr;
|
||||
|
||||
////////////////////////////////////////
|
||||
// Stencil query
|
||||
////////////////////////////////////////
|
||||
@ -322,8 +318,8 @@ public:
|
||||
int simd_layout = _grid->_simd_layout[dimension];
|
||||
int comm_dim = _grid->_processors[dimension] >1 ;
|
||||
|
||||
int recv_from_rank;
|
||||
int xmit_to_rank;
|
||||
// int recv_from_rank;
|
||||
// int xmit_to_rank;
|
||||
|
||||
if ( ! comm_dim ) return 1;
|
||||
if ( displacement == 0 ) return 1;
|
||||
@ -333,11 +329,12 @@ public:
|
||||
//////////////////////////////////////////
|
||||
// Comms packet queue for asynch thread
|
||||
// Use OpenMP Tasks for cleaner ???
|
||||
// must be called *inside* parallel region
|
||||
//////////////////////////////////////////
|
||||
/*
|
||||
void CommunicateThreaded()
|
||||
{
|
||||
#ifdef GRID_OMP
|
||||
// must be called in parallel region
|
||||
int mythread = omp_get_thread_num();
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
#else
|
||||
@ -346,65 +343,29 @@ public:
|
||||
#endif
|
||||
if (nthreads == -1) nthreads = 1;
|
||||
if (mythread < nthreads) {
|
||||
comm_enter_thr[mythread] = usecond();
|
||||
for (int i = mythread; i < Packets.size(); i += nthreads) {
|
||||
uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
|
||||
Packets[i].to_rank,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,
|
||||
Packets[i].bytes,i);
|
||||
comm_bytes_thr[mythread] += bytes;
|
||||
shm_bytes_thr[mythread] += 2*Packets[i].bytes-bytes; // Send + Recv.
|
||||
|
||||
}
|
||||
comm_leave_thr[mythread]= usecond();
|
||||
comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
|
||||
}
|
||||
}
|
||||
|
||||
void CollateThreads(void)
|
||||
{
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
double first=0.0;
|
||||
double last =0.0;
|
||||
|
||||
for(int t=0;t<nthreads;t++) {
|
||||
|
||||
double t0 = comm_enter_thr[t];
|
||||
double t1 = comm_leave_thr[t];
|
||||
comms_bytes+=comm_bytes_thr[t];
|
||||
shm_bytes +=shm_bytes_thr[t];
|
||||
|
||||
comm_enter_thr[t] = 0.0;
|
||||
comm_leave_thr[t] = 0.0;
|
||||
comm_time_thr[t] = 0.0;
|
||||
comm_bytes_thr[t]=0;
|
||||
shm_bytes_thr[t]=0;
|
||||
|
||||
if ( first == 0.0 ) first = t0; // first is t0
|
||||
if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen
|
||||
|
||||
if ( t1 > last ) last = t1; // max time seen
|
||||
|
||||
}
|
||||
commtime+= last-first;
|
||||
}
|
||||
*/
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Non blocking send and receive. Necessarily parallel.
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
|
||||
{
|
||||
reqs.resize(Packets.size());
|
||||
commtime-=usecond();
|
||||
for(int i=0;i<Packets.size();i++){
|
||||
uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
|
||||
Packets[i].send_buf,
|
||||
Packets[i].to_rank,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,
|
||||
Packets[i].bytes,i);
|
||||
comms_bytes+=bytes;
|
||||
shm_bytes +=2*Packets[i].bytes-bytes;
|
||||
_grid->StencilSendToRecvFromBegin(reqs[i],
|
||||
Packets[i].send_buf,
|
||||
Packets[i].to_rank,Packets[i].do_send,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,Packets[i].do_recv,
|
||||
Packets[i].bytes,i);
|
||||
}
|
||||
}
|
||||
|
||||
@ -413,7 +374,6 @@ public:
|
||||
for(int i=0;i<Packets.size();i++){
|
||||
_grid->StencilSendToRecvFromComplete(reqs[i],i);
|
||||
}
|
||||
commtime+=usecond();
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Blocking send and receive. Either sequential or parallel.
|
||||
@ -421,28 +381,27 @@ public:
|
||||
void Communicate(void)
|
||||
{
|
||||
if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
|
||||
thread_region {
|
||||
// must be called in parallel region
|
||||
int mythread = thread_num();
|
||||
int maxthreads= thread_max();
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
assert(nthreads <= maxthreads);
|
||||
if (nthreads == -1) nthreads = 1;
|
||||
if (mythread < nthreads) {
|
||||
for (int i = mythread; i < Packets.size(); i += nthreads) {
|
||||
double start = usecond();
|
||||
uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
|
||||
Packets[i].to_rank,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,
|
||||
Packets[i].bytes,i);
|
||||
comm_bytes_thr[mythread] += bytes;
|
||||
shm_bytes_thr[mythread] += Packets[i].bytes - bytes;
|
||||
comm_time_thr[mythread] += usecond() - start;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // Concurrent and non-threaded asynch calls to MPI
|
||||
/////////////////////////////////////////////////////////
|
||||
// several way threaded on different communicators.
|
||||
// Cannot combine with Dirichlet operators
|
||||
// This scheme is needed on Intel Omnipath for best performance
|
||||
// Deprecate once there are very few omnipath clusters
|
||||
/////////////////////////////////////////////////////////
|
||||
int nthreads = CartesianCommunicator::nCommThreads;
|
||||
int old = GridThread::GetThreads();
|
||||
GridThread::SetThreads(nthreads);
|
||||
thread_for(i,Packets.size(),{
|
||||
_grid->StencilSendToRecvFrom(Packets[i].send_buf,
|
||||
Packets[i].to_rank,Packets[i].do_send,
|
||||
Packets[i].recv_buf,
|
||||
Packets[i].from_rank,Packets[i].do_recv,
|
||||
Packets[i].bytes,i);
|
||||
});
|
||||
GridThread::SetThreads(old);
|
||||
} else {
|
||||
/////////////////////////////////////////////////////////
|
||||
// Concurrent and non-threaded asynch calls to MPI
|
||||
/////////////////////////////////////////////////////////
|
||||
std::vector<std::vector<CommsRequest_t> > reqs;
|
||||
this->CommunicateBegin(reqs);
|
||||
this->CommunicateComplete(reqs);
|
||||
@ -484,31 +443,23 @@ public:
|
||||
sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
|
||||
if ( sshift[0] == sshift[1] ) {
|
||||
if (splice_dim) {
|
||||
splicetime-=usecond();
|
||||
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
|
||||
auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp;
|
||||
splicetime+=usecond();
|
||||
} else {
|
||||
nosplicetime-=usecond();
|
||||
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
|
||||
auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp;
|
||||
nosplicetime+=usecond();
|
||||
}
|
||||
} else {
|
||||
if(splice_dim){
|
||||
splicetime-=usecond();
|
||||
// if checkerboard is unfavourable take two passes
|
||||
// both with block stride loop iteration
|
||||
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
|
||||
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
|
||||
auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
|
||||
auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp1 && tmp2;
|
||||
splicetime+=usecond();
|
||||
} else {
|
||||
nosplicetime-=usecond();
|
||||
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
|
||||
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
|
||||
auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
|
||||
auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
|
||||
is_same_node = is_same_node && tmp1 && tmp2;
|
||||
nosplicetime+=usecond();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -518,13 +469,10 @@ public:
|
||||
template<class compressor>
|
||||
void HaloGather(const Lattice<vobj> &source,compressor &compress)
|
||||
{
|
||||
mpi3synctime_g-=usecond();
|
||||
_grid->StencilBarrier();// Synch shared memory on a single nodes
|
||||
mpi3synctime_g+=usecond();
|
||||
|
||||
// conformable(source.Grid(),_grid);
|
||||
assert(source.Grid()==_grid);
|
||||
halogtime-=usecond();
|
||||
|
||||
u_comm_offset=0;
|
||||
|
||||
@ -538,7 +486,6 @@ public:
|
||||
assert(u_comm_offset==_unified_buffer_size);
|
||||
|
||||
accelerator_barrier();
|
||||
halogtime+=usecond();
|
||||
}
|
||||
|
||||
/////////////////////////
|
||||
@ -551,14 +498,72 @@ public:
|
||||
Mergers.resize(0);
|
||||
MergersSHM.resize(0);
|
||||
Packets.resize(0);
|
||||
calls++;
|
||||
CopyReceiveBuffers.resize(0);
|
||||
CachedTransfers.resize(0);
|
||||
}
|
||||
void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
|
||||
void AddCopy(void *from,void * to, Integer bytes)
|
||||
{
|
||||
// std::cout << "Adding CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<bytes<<std::endl;
|
||||
CopyReceiveBuffer obj;
|
||||
obj.from_p = from;
|
||||
obj.to_p = to;
|
||||
obj.bytes= bytes;
|
||||
CopyReceiveBuffers.push_back(obj);
|
||||
}
|
||||
void CommsCopy()
|
||||
{
|
||||
// These are device resident MPI buffers.
|
||||
for(int i=0;i<CopyReceiveBuffers.size();i++){
|
||||
cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
|
||||
cobj *to =(cobj *)CopyReceiveBuffers[i].to_p;
|
||||
Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);
|
||||
// std::cout << "CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<words*sizeof(cobj)<<std::endl;
|
||||
accelerator_forNB(j, words, cobj::Nsimd(), {
|
||||
coalescedWrite(to[j] ,coalescedRead(from [j]));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
|
||||
{
|
||||
CachedTransfer obj;
|
||||
obj.direction = direction;
|
||||
obj.OrthogPlane = OrthogPlane;
|
||||
obj.DestProc = DestProc;
|
||||
obj.recv_buf = recv_buf;
|
||||
obj.lane = lane;
|
||||
obj.bytes = bytes;
|
||||
obj.cb = cb;
|
||||
|
||||
for(int i=0;i<CachedTransfers.size();i++){
|
||||
if ( (CachedTransfers[i].direction ==direction)
|
||||
&&(CachedTransfers[i].OrthogPlane==OrthogPlane)
|
||||
&&(CachedTransfers[i].DestProc ==DestProc)
|
||||
&&(CachedTransfers[i].bytes ==bytes)
|
||||
&&(CachedTransfers[i].lane ==lane)
|
||||
&&(CachedTransfers[i].cb ==cb)
|
||||
){
|
||||
// std::cout << "Found duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<< " bytes "<<bytes <<std::endl;
|
||||
AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
// std::cout << "No duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<<" bytes "<<bytes<<std::endl;
|
||||
CachedTransfers.push_back(obj);
|
||||
return 0;
|
||||
}
|
||||
void AddPacket(void *xmit,void * rcv,
|
||||
Integer to, Integer do_send,
|
||||
Integer from, Integer do_recv,
|
||||
Integer bytes){
|
||||
Packet p;
|
||||
p.send_buf = xmit;
|
||||
p.recv_buf = rcv;
|
||||
p.to_rank = to;
|
||||
p.from_rank= from;
|
||||
p.do_send = do_send;
|
||||
p.do_recv = do_recv;
|
||||
p.bytes = bytes;
|
||||
Packets.push_back(p);
|
||||
}
|
||||
@ -578,22 +583,17 @@ public:
|
||||
mv.push_back(m);
|
||||
}
|
||||
template<class decompressor> void CommsMerge(decompressor decompress) {
|
||||
CommsCopy();
|
||||
CommsMerge(decompress,Mergers,Decompressions);
|
||||
}
|
||||
template<class decompressor> void CommsMergeSHM(decompressor decompress) {
|
||||
mpi3synctime-=usecond();
|
||||
_grid->StencilBarrier();// Synch shared memory on a single nodes
|
||||
mpi3synctime+=usecond();
|
||||
shmmergetime-=usecond();
|
||||
CommsMerge(decompress,MergersSHM,DecompressionsSHM);
|
||||
shmmergetime+=usecond();
|
||||
}
|
||||
|
||||
template<class decompressor>
|
||||
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
|
||||
|
||||
|
||||
mergetime-=usecond();
|
||||
void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
|
||||
{
|
||||
for(int i=0;i<mm.size();i++){
|
||||
auto mp = &mm[i].mpointer[0];
|
||||
auto vp0= &mm[i].vpointers[0][0];
|
||||
@ -603,9 +603,7 @@ public:
|
||||
decompress.Exchange(mp,vp0,vp1,type,o);
|
||||
});
|
||||
}
|
||||
mergetime+=usecond();
|
||||
|
||||
decompresstime-=usecond();
|
||||
for(int i=0;i<dd.size();i++){
|
||||
auto kp = dd[i].kernel_p;
|
||||
auto mp = dd[i].mpi_p;
|
||||
@ -613,7 +611,6 @@ public:
|
||||
decompress.Decompress(kp,mp,o);
|
||||
});
|
||||
}
|
||||
decompresstime+=usecond();
|
||||
}
|
||||
////////////////////////////////////////
|
||||
// Set up routines
|
||||
@ -650,19 +647,58 @@ public:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Introduce a block structure and switch off comms on boundaries
|
||||
void DirichletBlock(const Coordinate &dirichlet_block)
|
||||
{
|
||||
this->_dirichlet = 1;
|
||||
for(int ii=0;ii<this->_npoints;ii++){
|
||||
int dimension = this->_directions[ii];
|
||||
int displacement = this->_distances[ii];
|
||||
int shift = displacement;
|
||||
int gd = _grid->_gdimensions[dimension];
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int pd = _grid->_processors [dimension];
|
||||
int ld = gd/pd;
|
||||
int pc = _grid->_processor_coor[dimension];
|
||||
///////////////////////////////////////////
|
||||
// Figure out dirichlet send and receive
|
||||
// on this leg of stencil.
|
||||
///////////////////////////////////////////
|
||||
int comm_dim = _grid->_processors[dimension] >1 ;
|
||||
int block = dirichlet_block[dimension];
|
||||
this->_comms_send[ii] = comm_dim;
|
||||
this->_comms_recv[ii] = comm_dim;
|
||||
if ( block ) {
|
||||
assert(abs(displacement) < ld );
|
||||
|
||||
if( displacement > 0 ) {
|
||||
// High side, low side
|
||||
// | <--B--->|
|
||||
// | | |
|
||||
// noR
|
||||
// noS
|
||||
if ( (ld*(pc+1) ) % block == 0 ) this->_comms_recv[ii] = 0;
|
||||
if ( ( ld*pc ) % block == 0 ) this->_comms_send[ii] = 0;
|
||||
} else {
|
||||
// High side, low side
|
||||
// | <--B--->|
|
||||
// | | |
|
||||
// noS
|
||||
// noR
|
||||
if ( (ld*(pc+1) ) % block == 0 ) this->_comms_send[ii] = 0;
|
||||
if ( ( ld*pc ) % block == 0 ) this->_comms_recv[ii] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
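// Usage sketch (illustrative): the DD-RHMC example later in this change set
// builds a Coordinate of block extents, one entry per stencil dimension with
// zero meaning "do not touch comms in that direction", and hands it to the
// fermion action, which is expected to forward it to its stencils:
//
//   Coordinate Dirichlet(Nd+1,0);
//   Dirichlet[1] = Block4[0]; Dirichlet[2] = Block4[1];
//   Dirichlet[3] = Block4[2]; Dirichlet[4] = Block4[3];
//   FermionOp.DirichletBlock(Dirichlet);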
CartesianStencil(GridBase *grid,
|
||||
int npoints,
|
||||
int checkerboard,
|
||||
const std::vector<int> &directions,
|
||||
const std::vector<int> &distances,
|
||||
Parameters p)
|
||||
: shm_bytes_thr(npoints),
|
||||
comm_bytes_thr(npoints),
|
||||
comm_enter_thr(npoints),
|
||||
comm_leave_thr(npoints),
|
||||
comm_time_thr(npoints)
|
||||
{
|
||||
this->_dirichlet = 0;
|
||||
face_table_computed=0;
|
||||
_grid = grid;
|
||||
this->parameters=p;
|
||||
@ -675,6 +711,8 @@ public:
|
||||
this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
|
||||
this->_directions = StencilVector(directions);
|
||||
this->_distances = StencilVector(distances);
|
||||
this->_comms_send.resize(npoints);
|
||||
this->_comms_recv.resize(npoints);
|
||||
this->same_node.resize(npoints);
|
||||
|
||||
_unified_buffer_size=0;
|
||||
@ -693,24 +731,27 @@ public:
|
||||
int displacement = distances[i];
|
||||
int shift = displacement;
|
||||
|
||||
int gd = _grid->_gdimensions[dimension];
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int pd = _grid->_processors [dimension];
|
||||
int ld = gd/pd;
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
int pc = _grid->_processor_coor[dimension];
|
||||
this->_permute_type[point]=_grid->PermuteType(dimension);
|
||||
|
||||
this->_checkerboard = checkerboard;
|
||||
|
||||
//////////////////////////
|
||||
// the permute type
|
||||
//////////////////////////
|
||||
int simd_layout = _grid->_simd_layout[dimension];
|
||||
int comm_dim = _grid->_processors[dimension] >1 ;
|
||||
int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
|
||||
int rotate_dim = _grid->_simd_layout[dimension]>2;
|
||||
|
||||
this->_comms_send[ii] = comm_dim;
|
||||
this->_comms_recv[ii] = comm_dim;
|
||||
|
||||
assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
|
||||
|
||||
int sshift[2];
|
||||
|
||||
//////////////////////////
|
||||
// Underlying approach. For each local site build
|
||||
// up a table containing the npoint "neighbours" and whether they
|
||||
@ -811,6 +852,7 @@ public:
|
||||
GridBase *grid=_grid;
|
||||
const int Nsimd = grid->Nsimd();
|
||||
|
||||
int comms_recv = this->_comms_recv[point];
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int ld = _grid->_ldimensions[dimension];
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
@ -867,7 +909,9 @@ public:
|
||||
if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
|
||||
wraparound = 1;
|
||||
}
|
||||
if (!offnode) {
|
||||
|
||||
// Wrap locally dirichlet support case OR node local
|
||||
if ( (offnode==0) || (comms_recv==0) ) {
|
||||
|
||||
int permute_slice=0;
|
||||
CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
|
||||
@ -984,11 +1028,14 @@ public:
|
||||
}
|
||||
|
||||
template<class compressor>
|
||||
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
|
||||
int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
|
||||
{
|
||||
typedef typename cobj::vector_type vector_type;
|
||||
typedef typename cobj::scalar_type scalar_type;
|
||||
|
||||
int comms_send = this->_comms_send[point] ;
|
||||
int comms_recv = this->_comms_recv[point] ;
|
||||
|
||||
assert(rhs.Grid()==_grid);
|
||||
// conformable(_grid,rhs.Grid());
|
||||
|
||||
@ -1011,9 +1058,11 @@ public:
|
||||
|
||||
int sx = (x+sshift)%rd;
|
||||
int comm_proc = ((x+sshift)/rd)%pd;
|
||||
|
||||
|
||||
if (comm_proc) {
|
||||
|
||||
|
||||
|
||||
int words = buffer_size;
|
||||
if (cbmask != 0x3) words=words>>1;
|
||||
|
||||
@ -1045,44 +1094,53 @@ public:
|
||||
recv_buf=this->u_recv_buf_p;
|
||||
}
|
||||
|
||||
|
||||
cobj *send_buf;
|
||||
send_buf = this->u_send_buf_p; // Gather locally, must send
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////
|
||||
// Gather locally
|
||||
////////////////////////////////////////////////////////
|
||||
gathertime-=usecond();
|
||||
assert(send_buf!=NULL);
|
||||
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
|
||||
gathertime+=usecond();
|
||||
if ( comms_send )
|
||||
Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);
|
||||
face_idx++;
|
||||
|
||||
///////////////////////////////////////////////////////////
|
||||
// Build a list of things to do after we synchronise GPUs
|
||||
// Start comms now???
|
||||
///////////////////////////////////////////////////////////
|
||||
AddPacket((void *)&send_buf[u_comm_offset],
|
||||
(void *)&recv_buf[u_comm_offset],
|
||||
xmit_to_rank,
|
||||
recv_from_rank,
|
||||
bytes);
|
||||
int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[u_comm_offset],0,bytes,cbmask);
|
||||
if ( (!duplicate) ) { // Force comms for now
|
||||
|
||||
if ( compress.DecompressionStep() ) {
|
||||
///////////////////////////////////////////////////////////
|
||||
// Build a list of things to do after we synchronise GPUs
|
||||
// Start comms now???
|
||||
///////////////////////////////////////////////////////////
|
||||
AddPacket((void *)&send_buf[u_comm_offset],
|
||||
(void *)&recv_buf[u_comm_offset],
|
||||
xmit_to_rank, comms_send,
|
||||
recv_from_rank, comms_recv,
|
||||
bytes);
|
||||
}
|
||||
|
||||
if ( compress.DecompressionStep() ) {
|
||||
AddDecompress(&this->u_recv_buf_p[u_comm_offset],
|
||||
&recv_buf[u_comm_offset],
|
||||
words,Decompressions);
|
||||
}
|
||||
u_comm_offset+=words;
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
template<class compressor>
|
||||
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
|
||||
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
|
||||
{
|
||||
const int Nsimd = _grid->Nsimd();
|
||||
|
||||
const int maxl =2;// max layout in a direction
|
||||
|
||||
int comms_send = this->_comms_send[point] ;
|
||||
int comms_recv = this->_comms_recv[point] ;
|
||||
|
||||
int fd = _grid->_fdimensions[dimension];
|
||||
int rd = _grid->_rdimensions[dimension];
|
||||
int ld = _grid->_ldimensions[dimension];
|
||||
@ -1147,12 +1205,11 @@ public:
|
||||
&face_table[face_idx][0],
|
||||
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
||||
}
|
||||
gathermtime-=usecond();
|
||||
|
||||
// if ( comms_send )
|
||||
Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
|
||||
face_idx++;
|
||||
|
||||
gathermtime+=usecond();
|
||||
//spointers[0] -- low
|
||||
//spointers[1] -- high
|
||||
|
||||
@ -1181,8 +1238,13 @@ public:
|
||||
|
||||
rpointers[i] = rp;
|
||||
|
||||
AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);
|
||||
|
||||
int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
|
||||
if ( !duplicate ) {
|
||||
AddPacket((void *)sp,(void *)rp,
|
||||
xmit_to_rank,comms_send,
|
||||
recv_from_rank,comms_recv,
|
||||
bytes);
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
|
@ -55,7 +55,7 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
|
||||
|
||||
|
||||
// Specialisation: Cayley-Hamilton exponential for SU(3)
|
||||
#ifndef GRID_CUDA
|
||||
#ifndef GRID_ACCELERATED
|
||||
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
|
||||
accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
|
||||
{
|
||||
|
@ -47,20 +47,20 @@ NAMESPACE_BEGIN(Grid);
|
||||
class TypePair {
|
||||
public:
|
||||
T _internal[2];
|
||||
TypePair<T>& operator=(const Grid::Zero& o) {
|
||||
accelerator TypePair<T>& operator=(const Grid::Zero& o) {
|
||||
_internal[0] = Zero();
|
||||
_internal[1] = Zero();
|
||||
return *this;
|
||||
}
|
||||
|
||||
TypePair<T> operator+(const TypePair<T>& o) const {
|
||||
accelerator TypePair<T> operator+(const TypePair<T>& o) const {
|
||||
TypePair<T> r;
|
||||
r._internal[0] = _internal[0] + o._internal[0];
|
||||
r._internal[1] = _internal[1] + o._internal[1];
|
||||
return r;
|
||||
}
|
||||
|
||||
TypePair<T>& operator+=(const TypePair<T>& o) {
|
||||
accelerator TypePair<T>& operator+=(const TypePair<T>& o) {
|
||||
_internal[0] += o._internal[0];
|
||||
_internal[1] += o._internal[1];
|
||||
return *this;
|
||||
|
@ -74,29 +74,43 @@ void acceleratorInit(void)
|
||||
// GPU_PROP(singleToDoublePrecisionPerfRatio);
|
||||
}
|
||||
}
|
||||
|
||||
MemoryManager::DeviceMaxBytes = (8*totalDeviceMem)/10; // Assume 80% ours
|
||||
#undef GPU_PROP_FMT
|
||||
#undef GPU_PROP
|
||||
|
||||
#ifdef GRID_DEFAULT_GPU
|
||||
int device = 0;
|
||||
// IBM Jsrun makes cuda Device numbering screwy and not match rank
|
||||
if ( world_rank == 0 ) {
|
||||
printf("AcceleratorCudaInit: using default device \n");
|
||||
printf("AcceleratorCudaInit: assume user either uses a) IBM jsrun, or \n");
|
||||
printf("AcceleratorCudaInit: assume user either uses\n");
|
||||
printf("AcceleratorCudaInit: a) IBM jsrun, or \n");
|
||||
printf("AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
|
||||
printf("AcceleratorCudaInit: Configure options --enable-setdevice=no \n");
|
||||
}
|
||||
#else
|
||||
int device = rank;
|
||||
printf("AcceleratorCudaInit: rank %d setting device to node rank %d\n",world_rank,rank);
|
||||
printf("AcceleratorCudaInit: Configure options --enable-setdevice=yes \n");
|
||||
cudaSetDevice(rank);
|
||||
#endif
|
||||
|
||||
cudaSetDevice(device);
|
||||
cudaStreamCreate(©Stream);
|
||||
const int len=64;
|
||||
char busid[len];
|
||||
if( rank == world_rank ) {
|
||||
cudaDeviceGetPCIBusId(busid, len, device);
|
||||
printf("local rank %d device %d bus id: %s\n", rank, device, busid);
|
||||
}
|
||||
|
||||
if ( world_rank == 0 ) printf("AcceleratorCudaInit: ================================================\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef GRID_HIP
|
||||
hipDeviceProp_t *gpu_props;
|
||||
hipStream_t copyStream;
|
||||
void acceleratorInit(void)
|
||||
{
|
||||
int nDevices = 1;
|
||||
@ -154,16 +168,25 @@ void acceleratorInit(void)
|
||||
#ifdef GRID_DEFAULT_GPU
|
||||
if ( world_rank == 0 ) {
|
||||
printf("AcceleratorHipInit: using default device \n");
|
||||
printf("AcceleratorHipInit: assume user either uses a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding \n");
|
||||
printf("AcceleratorHipInit: Configure options --enable-summit, --enable-select-gpu=no \n");
|
||||
printf("AcceleratorHipInit: assume user or srun sets ROCR_VISIBLE_DEVICES and numa binding \n");
|
||||
printf("AcceleratorHipInit: Configure options --enable-setdevice=no \n");
|
||||
}
|
||||
int device = 0;
|
||||
#else
|
||||
if ( world_rank == 0 ) {
|
||||
printf("AcceleratorHipInit: rank %d setting device to node rank %d\n",world_rank,rank);
|
||||
printf("AcceleratorHipInit: Configure options --enable-select-gpu=yes \n");
|
||||
printf("AcceleratorHipInit: Configure options --enable-setdevice=yes \n");
|
||||
}
|
||||
hipSetDevice(rank);
|
||||
int device = rank;
|
||||
#endif
|
||||
hipSetDevice(device);
|
||||
hipStreamCreate(©Stream);
|
||||
const int len=64;
|
||||
char busid[len];
|
||||
if( rank == world_rank ) {
|
||||
hipDeviceGetPCIBusId(busid, len, device);
|
||||
printf("local rank %d device %d bus id: %s\n", rank, device, busid);
|
||||
}
|
||||
if ( world_rank == 0 ) printf("AcceleratorHipInit: ================================================\n");
|
||||
}
|
||||
#endif
|
||||
|
@ -95,6 +95,7 @@ void acceleratorInit(void);
|
||||
//////////////////////////////////////////////
|
||||
|
||||
#ifdef GRID_CUDA
|
||||
|
||||
#include <cuda.h>
|
||||
|
||||
#ifdef __CUDA_ARCH__
|
||||
@ -115,6 +116,14 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
|
||||
#endif
|
||||
} // CUDA specific
|
||||
|
||||
inline void cuda_mem(void)
|
||||
{
|
||||
size_t free_t,total_t,used_t;
|
||||
cudaMemGetInfo(&free_t,&total_t);
|
||||
used_t=total_t-free_t;
|
||||
std::cout << " MemoryManager : GPU used "<<used_t<<" free "<<free_t<< " total "<<total_t<<std::endl;
|
||||
}
|
||||
|
||||
#define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... ) \
|
||||
{ \
|
||||
int nt=acceleratorThreads(); \
|
||||
@ -221,6 +230,7 @@ inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes
|
||||
cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToDevice,copyStream);
|
||||
}
|
||||
inline void acceleratorCopySynchronise(void) { cudaStreamSynchronize(copyStream); };
|
||||
|
||||
inline int acceleratorIsCommunicable(void *ptr)
|
||||
{
|
||||
// int uvm=0;
|
||||
@ -297,7 +307,7 @@ inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
|
||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) {
|
||||
theGridAccelerator->memcpy(to,from,bytes);
|
||||
}
|
||||
inline void acceleratorCopySynchronise(void) { theGridAccelerator->wait(); }
|
||||
inline void acceleratorCopySynchronise(void) { theGridAccelerator->wait(); std::cout<<"acceleratorCopySynchronise() wait "<<std::endl; }
|
||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
|
||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
|
||||
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theGridAccelerator->memset(base,value,bytes); theGridAccelerator->wait();}
|
||||
@ -328,10 +338,11 @@ NAMESPACE_BEGIN(Grid);
|
||||
#define accelerator __host__ __device__
|
||||
#define accelerator_inline __host__ __device__ inline
|
||||
|
||||
extern hipStream_t copyStream;
|
||||
/*These routines define mapping from thread grid to loop & vector lane indexing */
|
||||
accelerator_inline int acceleratorSIMTlane(int Nsimd) {
|
||||
#ifdef GRID_SIMT
|
||||
return hipThreadIdx_z;
|
||||
return hipThreadIdx_x;
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
@ -345,19 +356,41 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
|
||||
{ __VA_ARGS__;} \
|
||||
}; \
|
||||
int nt=acceleratorThreads(); \
|
||||
dim3 hip_threads(nt,1,nsimd); \
|
||||
dim3 hip_blocks ((num1+nt-1)/nt,num2,1); \
|
||||
hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads, \
|
||||
0,0, \
|
||||
num1,num2,nsimd,lambda); \
|
||||
dim3 hip_threads(nsimd, nt, 1); \
|
||||
dim3 hip_blocks ((num1+nt-1)/nt,num2,1); \
|
||||
if(hip_threads.x * hip_threads.y * hip_threads.z <= 64){ \
|
||||
hipLaunchKernelGGL(LambdaApply64,hip_blocks,hip_threads, \
|
||||
0,0, \
|
||||
num1,num2,nsimd, lambda); \
|
||||
} else { \
|
||||
hipLaunchKernelGGL(LambdaApply,hip_blocks,hip_threads, \
|
||||
0,0, \
|
||||
num1,num2,nsimd, lambda); \
|
||||
} \
|
||||
}
|
||||
|
||||
|
||||
template<typename lambda> __global__
|
||||
__launch_bounds__(64,1)
|
||||
void LambdaApply64(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
|
||||
{
|
||||
// Following the same scheme as CUDA for now
|
||||
uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
|
||||
uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
|
||||
uint64_t z = threadIdx.x;
|
||||
if ( (x < numx) && (y<numy) && (z<numz) ) {
|
||||
Lambda(x,y,z);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename lambda> __global__
|
||||
__launch_bounds__(1024,1)
|
||||
void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
|
||||
{
|
||||
uint64_t x = hipThreadIdx_x + hipBlockDim_x*hipBlockIdx_x;
|
||||
uint64_t y = hipThreadIdx_y + hipBlockDim_y*hipBlockIdx_y;
|
||||
uint64_t z = hipThreadIdx_z ;//+ hipBlockDim_z*hipBlockIdx_z;
|
||||
// Following the same scheme as CUDA for now
|
||||
uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
|
||||
uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
|
||||
uint64_t z = threadIdx.x;
|
||||
if ( (x < numx) && (y<numy) && (z<numz) ) {
|
||||
Lambda(x,y,z);
|
||||
}
|
||||
@ -402,10 +435,16 @@ inline void acceleratorFreeShared(void *ptr){ hipFree(ptr);};
|
||||
inline void acceleratorFreeDevice(void *ptr){ hipFree(ptr);};
|
||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
|
||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
|
||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
|
||||
inline void acceleratorCopySynchronise(void) { }
|
||||
//inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
|
||||
//inline void acceleratorCopySynchronise(void) { }
|
||||
inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(base,value,bytes);}
|
||||
|
||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
|
||||
{
|
||||
hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);
|
||||
}
|
||||
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////
|
||||
@ -422,6 +461,8 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
|
||||
accelerator_for2dNB(iter1, num1, iter2, num2, nsimd, { __VA_ARGS__ } ); \
|
||||
accelerator_barrier(dummy);
|
||||
|
||||
#define GRID_ACCELERATED
|
||||
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////
|
||||
@ -476,18 +517,12 @@ inline void acceleratorFreeCpu (void *ptr){free(ptr);};
|
||||
///////////////////////////////////////////////////
|
||||
// Synchronise across local threads for divergence resynch
|
||||
///////////////////////////////////////////////////
|
||||
accelerator_inline void acceleratorSynchronise(void)
|
||||
accelerator_inline void acceleratorSynchronise(void) // Only Nvidia needs
|
||||
{
|
||||
#ifdef GRID_SIMT
|
||||
#ifdef GRID_CUDA
|
||||
__syncwarp();
|
||||
#endif
|
||||
#ifdef GRID_SYCL
|
||||
//cl::sycl::detail::workGroupBarrier();
|
||||
#endif
|
||||
#ifdef GRID_HIP
|
||||
__syncthreads();
|
||||
#endif
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ public:
|
||||
// Coordinate class, maxdims = 8 for now.
|
||||
////////////////////////////////////////////////////////////////
|
||||
#define GRID_MAX_LATTICE_DIMENSION (8)
|
||||
#define GRID_MAX_SIMD (16)
|
||||
#define GRID_MAX_SIMD (32)
|
||||
|
||||
static constexpr int MaxDims = GRID_MAX_LATTICE_DIMENSION;
|
||||
|
||||
|
@ -167,6 +167,13 @@ void GridCmdOptionInt(std::string &str,int & val)
|
||||
return;
|
||||
}
|
||||
|
||||
void GridCmdOptionFloat(std::string &str,float & val)
|
||||
{
|
||||
std::stringstream ss(str);
|
||||
ss>>val;
|
||||
return;
|
||||
}
|
||||
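// Minimal usage sketch for the new helper (values illustrative):
//   std::string arg("3.5");
//   float val = 0.f;
//   GridCmdOptionFloat(arg, val);   // val == 3.5f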
|
||||
|
||||
void GridParseLayout(char **argv,int argc,
|
||||
Coordinate &latt_c,
|
||||
|
@ -57,6 +57,7 @@ void GridCmdOptionCSL(std::string str,std::vector<std::string> & vec);
|
||||
template<class VectorInt>
|
||||
void GridCmdOptionIntVector(const std::string &str,VectorInt & vec);
|
||||
void GridCmdOptionInt(std::string &str,int & val);
|
||||
void GridCmdOptionFloat(std::string &str,float & val);
|
||||
|
||||
|
||||
void GridParseLayout(char **argv,int argc,
|
||||
|
232
HMC/Mobius2p1f_DD_RHMC.cc
Normal file
232
HMC/Mobius2p1f_DD_RHMC.cc
Normal file
@ -0,0 +1,232 @@
|
||||
/*************************************************************************************
|
||||
|
||||
Grid physics library, www.github.com/paboyle/Grid
|
||||
|
||||
Source file: ./tests/Test_hmc_EODWFRatio.cc
|
||||
|
||||
Copyright (C) 2015-2016
|
||||
|
||||
Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
|
||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
See the full license in the file "LICENSE" in the top level distribution
|
||||
directory
|
||||
*************************************************************************************/
|
||||
/* END LEGAL */
|
||||
#include <Grid/Grid.h>
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
using namespace Grid;
|
||||
|
||||
Grid_init(&argc, &argv);
|
||||
int threads = GridThread::GetThreads();
|
||||
|
||||
// Typedefs to simplify notation
|
||||
typedef WilsonImplR FermionImplPolicy;
|
||||
typedef MobiusFermionR FermionAction;
|
||||
typedef typename FermionAction::FermionField FermionField;
|
||||
|
||||
typedef Grid::XmlReader Serialiser;
|
||||
|
||||
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
|
||||
IntegratorParameters MD;
|
||||
// typedef GenericHMCRunner<LeapFrog> HMCWrapper;
|
||||
// MD.name = std::string("Leap Frog");
|
||||
// typedef GenericHMCRunner<ForceGradient> HMCWrapper;
|
||||
// MD.name = std::string("Force Gradient");
|
||||
typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
|
||||
MD.name = std::string("MinimumNorm2");
|
||||
MD.MDsteps = 4;
|
||||
MD.trajL = 1.0;
|
||||
|
||||
HMCparameters HMCparams;
|
||||
HMCparams.StartTrajectory = 8;
|
||||
HMCparams.Trajectories = 200;
|
||||
HMCparams.NoMetropolisUntil= 0;
|
||||
// "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
|
||||
// HMCparams.StartingType =std::string("ColdStart");
|
||||
HMCparams.StartingType =std::string("CheckpointStart");
|
||||
HMCparams.MD = MD;
|
||||
HMCWrapper TheHMC(HMCparams);
|
||||
|
||||
// Grid from the command line arguments --grid and --mpi
|
||||
TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
|
||||
|
||||
CheckpointerParameters CPparams;
|
||||
CPparams.config_prefix = "ckpoint_EODWF_lat";
|
||||
CPparams.rng_prefix = "ckpoint_EODWF_rng";
|
||||
CPparams.saveInterval = 1;
|
||||
CPparams.format = "IEEE64BIG";
|
||||
TheHMC.Resources.LoadNerscCheckpointer(CPparams);
|
||||
|
||||
RNGModuleParameters RNGpar;
|
||||
RNGpar.serial_seeds = "1 2 3 4 5";
|
||||
RNGpar.parallel_seeds = "6 7 8 9 10";
|
||||
TheHMC.Resources.SetRNGSeeds(RNGpar);
|
||||
|
||||
// Construct observables
|
||||
// here there is too much indirection
|
||||
typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
|
||||
TheHMC.Resources.AddObservable<PlaqObs>();
|
||||
//////////////////////////////////////////////
|
||||
|
||||
const int Ls = 16;
|
||||
Real beta = 2.13;
|
||||
Real light_mass = 0.01;
|
||||
Real strange_mass = 0.04;
|
||||
Real pv_mass = 1.0;
|
||||
RealD M5 = 1.8;
|
||||
RealD b = 1.0;
|
||||
RealD c = 0.0;
|
||||
|
||||
// FIXME:
|
||||
// Same in MC and MD
|
||||
// Need to mix precision too
|
||||
OneFlavourRationalParams OFRp;
|
||||
OFRp.lo = 4.0e-3;
|
||||
OFRp.hi = 30.0;
|
||||
OFRp.MaxIter = 10000;
|
||||
OFRp.tolerance= 1.0e-10;
|
||||
OFRp.degree = 16;
|
||||
OFRp.precision= 50;
|
||||
|
||||
std::vector<Real> hasenbusch({ 0.01, 0.04, 0.2 , pv_mass });
|
||||
std::vector<bool> dirichlet ({ true, true, true });
|
||||
|
||||
auto GridPtr = TheHMC.Resources.GetCartesian();
|
||||
auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Domain decomposed
|
||||
////////////////////////////////////////////////////////////////
|
||||
Coordinate latt4 = GridPtr->GlobalDimensions();
|
||||
Coordinate mpi = GridPtr->ProcessorGrid();
|
||||
Coordinate shm;
|
||||
|
||||
GlobalSharedMemory::GetShmDims(mpi,shm);
|
||||
|
||||
Coordinate CommDim(Nd);
|
||||
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
|
||||
|
||||
Coordinate Dirichlet(Nd+1,0);
|
||||
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
|
||||
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
|
||||
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
|
||||
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
|
||||
|
||||
Coordinate Block4(Nd);
|
||||
Block4[0] = Dirichlet[1];
|
||||
Block4[1] = Dirichlet[2];
|
||||
Block4[2] = Dirichlet[3];
|
||||
Block4[3] = Dirichlet[4];
|
||||
TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<WilsonImplR::Field>(Block4));
|
||||
|
||||
//////////////////////////
|
||||
// Fermion Grid
|
||||
//////////////////////////
|
||||
auto FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
|
||||
auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
|
||||
|
||||
IwasakiGaugeActionR GaugeAction(beta);
|
||||
|
||||
// temporarily need a gauge field
|
||||
LatticeGaugeField U(GridPtr);
|
||||
|
||||
// These lines are unnecessary if the BCs are all periodic
|
||||
std::vector<Complex> boundary = {1,1,1,-1};
|
||||
FermionAction::ImplParams Params(boundary);
|
||||
|
||||
double StoppingCondition = 1e-10;
|
||||
double MaxCGIterations = 30000;
|
||||
ConjugateGradient<FermionField> CG(StoppingCondition,MaxCGIterations);
|
||||
|
||||
////////////////////////////////////
|
||||
// Collect actions
|
||||
////////////////////////////////////
|
||||
ActionLevel<HMCWrapper::Field> Level1(1);
|
||||
ActionLevel<HMCWrapper::Field> Level2(2);
|
||||
ActionLevel<HMCWrapper::Field> Level3(8);
|
||||
|
||||
////////////////////////////////////
|
||||
// Strange action
|
||||
////////////////////////////////////
|
||||
FermionAction StrangeOp (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
|
||||
FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass, M5,b,c, Params);
|
||||
|
||||
OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
|
||||
// Level1.push_back(&StrangePseudoFermion);
|
||||
|
||||
////////////////////////////////////
|
||||
// up down action
|
||||
////////////////////////////////////
|
||||
std::vector<Real> light_den;
|
||||
std::vector<Real> light_num;
|
||||
std::vector<int> dirichlet_den;
|
||||
std::vector<int> dirichlet_num;
|
||||
|
||||
int n_hasenbusch = hasenbusch.size();
|
||||
light_den.push_back(light_mass);
|
||||
dirichlet_den.push_back(0);
|
||||
for(int h=0;h<n_hasenbusch;h++){
|
||||
light_den.push_back(hasenbusch[h]);
|
||||
light_num.push_back(hasenbusch[h]);
|
||||
dirichlet_num.push_back(1);
|
||||
dirichlet_den.push_back(1);
|
||||
}
|
||||
light_num.push_back(pv_mass);
|
||||
dirichlet_num.push_back(0);
|
||||
|
||||
std::vector<FermionAction *> Numerators;
|
||||
std::vector<FermionAction *> Denominators;
|
||||
std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
|
||||
|
||||
for(int h=0;h<n_hasenbusch+1;h++){
|
||||
std::cout << GridLogMessage << " 2f quotient Action "<< light_num[h]<< " (" << dirichlet_num[h]
|
||||
<<") / " << light_den[h]<< " (" << dirichlet_den[h]<<")"<< std::endl;
|
||||
Numerators.push_back (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
|
||||
Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
|
||||
Quotients.push_back (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
|
||||
if ( dirichlet_den[h]==1) Denominators[h]->DirichletBlock(Dirichlet);
|
||||
if ( dirichlet_num[h]==1) Numerators[h]->DirichletBlock(Dirichlet);
|
||||
}
|
||||
|
||||
int nquo=Quotients.size();
|
||||
Level1.push_back(Quotients[0]);
|
||||
Level1.push_back(Quotients[nquo-1]);
|
||||
for(int h=1;h<nquo-1;h++){
|
||||
Level2.push_back(Quotients[h]);
|
||||
}
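// Illustrative note (assumed integrator convention): the first and last
// quotients sit on the outermost level (Level1, multiplier 1), the remaining
// Hasenbusch ratios on Level2 (2 sub-steps per outer step), and the gauge
// force on Level3 (8 sub-steps), i.e. the cheap gauge force is integrated
// on the finest time step.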
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
// Gauge action
|
||||
/////////////////////////////////////////////////////////////
|
||||
Level3.push_back(&GaugeAction);
|
||||
TheHMC.TheAction.push_back(Level1);
|
||||
TheHMC.TheAction.push_back(Level2);
|
||||
TheHMC.TheAction.push_back(Level3);
|
||||
std::cout << GridLogMessage << " Action complete "<< std::endl;
|
||||
|
||||
/////////////////////////////////////////////////////////////
|
||||
|
||||
std::cout << GridLogMessage << " Running the HMC "<< std::endl;
|
||||
TheHMC.Run(); // no smearing
|
||||
|
||||
Grid_finalize();
|
||||
} // main
|
||||
|
||||
|
||||
|
@ -137,7 +137,7 @@ int main (int argc, char ** argv)
|
||||
|
||||
Eigen::MatrixXd mean(nVol, 4), stdDev(nVol, 4), rob(nVol, 4);
|
||||
Eigen::VectorXd avMean(4), avStdDev(4), avRob(4);
|
||||
double n = BENCH_IO_NPASS;
|
||||
// double n = BENCH_IO_NPASS;
|
||||
|
||||
stats(mean, stdDev, perf);
|
||||
stats(avMean, avStdDev, avPerf);
|
||||
@ -164,7 +164,7 @@ int main (int argc, char ** argv)
|
||||
mean(volInd(l), gWrite), stdDev(volInd(l), gWrite));
|
||||
}
|
||||
MSG << std::endl;
|
||||
MSG << "Robustness of individual results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
|
||||
MSG << "Robustness of individual results, in %. (rob = 100% - std dev / mean)" << std::endl;
|
||||
MSG << std::endl;
|
||||
grid_printf("%4s %12s %12s %12s %12s\n",
|
||||
"L", "std read", "std write", "Grid read", "Grid write");
|
||||
@ -185,7 +185,7 @@ int main (int argc, char ** argv)
|
||||
avMean(sRead), avStdDev(sRead), avMean(sWrite), avStdDev(sWrite),
|
||||
avMean(gRead), avStdDev(gRead), avMean(gWrite), avStdDev(gWrite));
|
||||
MSG << std::endl;
|
||||
MSG << "Robustness of volume-averaged results, in \%. (rob = 100\% - std dev / mean)" << std::endl;
|
||||
MSG << "Robustness of volume-averaged results, in %. (rob = 100% - std dev / mean)" << std::endl;
|
||||
MSG << std::endl;
|
||||
grid_printf("%12s %12s %12s %12s\n",
|
||||
"std read", "std write", "Grid read", "Grid write");
|
||||
|
@ -142,7 +142,7 @@ public:
|
||||
// bzero((void *)rbuf[d],lat*lat*lat*Ls*sizeof(HalfSpinColourVectorD));
|
||||
}
|
||||
|
||||
int ncomm;
|
||||
// int ncomm;
|
||||
double dbytes;
|
||||
|
||||
for(int dir=0;dir<8;dir++) {
|
||||
@ -290,7 +290,7 @@ public:
|
||||
LatticeSU4 z(&Grid); z=Zero();
|
||||
LatticeSU4 x(&Grid); x=Zero();
|
||||
LatticeSU4 y(&Grid); y=Zero();
|
||||
double a=2.0;
|
||||
// double a=2.0;
|
||||
|
||||
uint64_t Nloop=NLOOP;
|
||||
|
||||
|
@ -217,9 +217,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu);
|
||||
|
||||
comm_proc = mpi_layout[mu]-1;
|
||||
@ -228,9 +228,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu+4][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu+4][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu+4);
|
||||
|
||||
}
|
||||
@ -309,9 +309,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu);
|
||||
Grid.StencilSendToRecvFromComplete(requests,mu);
|
||||
requests.resize(0);
|
||||
@ -322,9 +322,9 @@ int main (int argc, char ** argv)
|
||||
dbytes+=
|
||||
Grid.StencilSendToRecvFromBegin(requests,
|
||||
(void *)&xbuf[mu+4][0],
|
||||
xmit_to_rank,
|
||||
xmit_to_rank,1,
|
||||
(void *)&rbuf[mu+4][0],
|
||||
recv_from_rank,
|
||||
recv_from_rank,1,
|
||||
bytes,mu+4);
|
||||
Grid.StencilSendToRecvFromComplete(requests,mu+4);
|
||||
requests.resize(0);
|
||||
@ -411,8 +411,8 @@ int main (int argc, char ** argv)
|
||||
Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
|
||||
}
|
||||
int tid = omp_get_thread_num();
|
||||
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
|
||||
(void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
|
||||
tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
|
||||
(void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);
|
||||
|
||||
thread_critical { dbytes+=tbytes; }
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ int main (int argc, char ** argv)
|
||||
|
||||
std::cout << GridLogMessage << "Number of iterations to average: "<< Nloop << std::endl;
|
||||
std::vector<double> t_time(Nloop);
|
||||
time_statistics timestat;
|
||||
// time_statistics timestat;
|
||||
|
||||
std::cout<<GridLogMessage << "===================================================================================================="<<std::endl;
|
||||
std::cout<<GridLogMessage << "= Benchmarking sequential halo exchange from host memory "<<std::endl;
|
||||
|
@ -32,18 +32,18 @@
|
||||
using namespace std;
|
||||
using namespace Grid;
|
||||
|
||||
template<class d>
|
||||
struct scal {
|
||||
d internal;
|
||||
////////////////////////
|
||||
/// Move to domains ////
|
||||
////////////////////////
|
||||
|
||||
Gamma::Algebra Gmu [] = {
|
||||
Gamma::Algebra::GammaX,
|
||||
Gamma::Algebra::GammaY,
|
||||
Gamma::Algebra::GammaZ,
|
||||
Gamma::Algebra::GammaT
|
||||
};
|
||||
|
||||
Gamma::Algebra Gmu [] = {
|
||||
Gamma::Algebra::GammaX,
|
||||
Gamma::Algebra::GammaY,
|
||||
Gamma::Algebra::GammaZ,
|
||||
Gamma::Algebra::GammaT
|
||||
};
|
||||
|
||||
void Benchmark(int Ls, Coordinate Dirichlet);
|
||||
|
||||
int main (int argc, char ** argv)
|
||||
{
|
||||
@ -52,24 +52,82 @@ int main (int argc, char ** argv)
|
||||
|
||||
int threads = GridThread::GetThreads();
|
||||
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
int Ls=16;
|
||||
for(int i=0;i<argc;i++)
|
||||
for(int i=0;i<argc;i++) {
|
||||
if(std::string(argv[i]) == "-Ls"){
|
||||
std::stringstream ss(argv[i+1]); ss >> Ls;
|
||||
}
|
||||
}
|
||||
|
||||
//////////////////
|
||||
// With comms
|
||||
//////////////////
|
||||
Coordinate Dirichlet(Nd+1,0);
|
||||
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing with full communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
//////////////////
|
||||
// Domain decomposed
|
||||
//////////////////
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
Coordinate mpi = GridDefaultMpi();
|
||||
Coordinate CommDim(Nd);
|
||||
Coordinate shm;
|
||||
GlobalSharedMemory::GetShmDims(mpi,shm);
|
||||
|
||||
|
||||
//////////////////////
|
||||
// Node level
|
||||
//////////////////////
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing without internode communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
|
||||
Dirichlet[0] = 0;
|
||||
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
|
||||
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
|
||||
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
|
||||
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
std::cout << "\n\n\n\n\n\n" <<std::endl;
|
||||
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
std::cout << GridLogMessage<< " Testing without intranode communication " <<std::endl;
|
||||
std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
|
||||
|
||||
for(int d=0;d<Nd;d++) CommDim[d]= mpi[d]>1 ? 1 : 0;
|
||||
Dirichlet[0] = 0;
|
||||
Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0];
|
||||
Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1];
|
||||
Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2];
|
||||
Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3];
|
||||
|
||||
Benchmark(Ls,Dirichlet);
|
||||
|
||||
Grid_finalize();
|
||||
exit(0);
|
||||
}
|
||||
void Benchmark(int Ls, Coordinate Dirichlet)
|
||||
{
|
||||
Coordinate latt4 = GridDefaultLatt();
|
||||
GridLogLayout();
|
||||
|
||||
long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
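// Worked example (not in the original source): for Nc=3 the count above is
//   8*Nc*(7+16*Nc) = 8*3*(7+48) = 1320 flops per site per s-slice,
// which is the per-site figure entering the mflop/s numbers reported below.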
|
||||
|
||||
|
||||
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
|
||||
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
|
||||
GridCartesian * FGrid = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
|
||||
GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
|
||||
|
||||
std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
|
||||
GridCartesian * sUGrid = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
|
||||
GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
|
||||
GridCartesian * sFGrid = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
|
||||
@ -80,9 +138,9 @@ int main (int argc, char ** argv)
|
||||
|
||||
std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
|
||||
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString(std::string("The 4D RNG"));
|
||||
|
||||
std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
|
||||
GridParallelRNG RNG5(FGrid); RNG5.SeedUniqueString(std::string("The 5D RNG"));
|
||||
std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
|
||||
|
||||
LatticeFermionF src (FGrid); random(RNG5,src);
|
||||
#if 0
|
||||
@ -100,7 +158,6 @@ int main (int argc, char ** argv)
|
||||
src = src*N2;
|
||||
#endif
|
||||
|
||||
|
||||
LatticeFermionF result(FGrid); result=Zero();
|
||||
LatticeFermionF ref(FGrid); ref=Zero();
|
||||
LatticeFermionF tmp(FGrid);
|
||||
@ -108,38 +165,31 @@ int main (int argc, char ** argv)
|
||||
|
||||
std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
|
||||
LatticeGaugeFieldF Umu(UGrid);
|
||||
LatticeGaugeFieldF UmuCopy(UGrid);
|
||||
SU<Nc>::HotConfiguration(RNG4,Umu);
|
||||
UmuCopy=Umu;
|
||||
std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
|
||||
#if 0
|
||||
Umu=1.0;
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
LatticeColourMatrixF ttmp(UGrid);
|
||||
ttmp = PeekIndex<LorentzIndex>(Umu,mu);
|
||||
// if (mu !=2 ) ttmp = 0;
|
||||
// ttmp = ttmp* pow(10.0,mu);
|
||||
PokeIndex<LorentzIndex>(Umu,ttmp,mu);
|
||||
}
|
||||
std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
|
||||
#endif
|
||||
|
||||
////////////////////////////////////
|
||||
// Apply BCs
|
||||
////////////////////////////////////
|
||||
Coordinate Block(4);
|
||||
for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];
|
||||
|
||||
std::cout << GridLogMessage << "Applying BCs for Dirichlet Block5 " << Dirichlet << std::endl;
|
||||
std::cout << GridLogMessage << "Applying BCs for Dirichlet Block4 " << Block << std::endl;
|
||||
|
||||
DirichletFilter<LatticeGaugeFieldF> Filter(Block);
|
||||
Filter.applyFilter(Umu);
|
||||
|
||||
////////////////////////////////////
|
||||
// Naive wilson implementation
|
||||
////////////////////////////////////
|
||||
// replicate across fifth dimension
|
||||
LatticeGaugeFieldF Umu5d(FGrid);
|
||||
std::vector<LatticeColourMatrixF> U(4,FGrid);
|
||||
{
|
||||
autoView( Umu5d_v, Umu5d, CpuWrite);
|
||||
autoView( Umu_v , Umu , CpuRead);
|
||||
for(int ss=0;ss<Umu.Grid()->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
Umu5d_v[Ls*ss+s] = Umu_v[ss];
|
||||
}
|
||||
}
|
||||
}
|
||||
std::vector<LatticeColourMatrixF> U(4,UGrid);
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
U[mu] = PeekIndex<LorentzIndex>(Umu5d,mu);
|
||||
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
|
||||
}
|
||||
|
||||
std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;
|
||||
|
||||
if (1)
|
||||
@ -147,10 +197,28 @@ int main (int argc, char ** argv)
|
||||
ref = Zero();
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
|
||||
tmp = U[mu]*Cshift(src,mu+1,1);
|
||||
tmp = Cshift(src,mu+1,1);
|
||||
{
|
||||
autoView( tmp_v , tmp , CpuWrite);
|
||||
autoView( U_v , U[mu] , CpuRead);
|
||||
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
tmp_v[Ls*ss+s] = U_v[ss]*tmp_v[Ls*ss+s];
|
||||
}
|
||||
}
|
||||
}
|
||||
ref=ref + tmp - Gamma(Gmu[mu])*tmp;
|
||||
|
||||
tmp =adj(U[mu])*src;
|
||||
{
|
||||
autoView( tmp_v , tmp , CpuWrite);
|
||||
autoView( U_v , U[mu] , CpuRead);
|
||||
autoView( src_v, src , CpuRead);
|
||||
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
tmp_v[Ls*ss+s] = adj(U_v[ss])*src_v[Ls*ss+s];
|
||||
}
|
||||
}
|
||||
}
|
||||
tmp =Cshift(tmp,mu+1,-1);
|
||||
ref=ref + tmp + Gamma(Gmu[mu])*tmp;
|
||||
}
|
||||
@ -182,11 +250,13 @@ int main (int argc, char ** argv)
|
||||
std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
|
||||
|
||||
DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
||||
int ncall =3000;
|
||||
Dw.DirichletBlock(Dirichlet);
|
||||
Dw.ImportGauge(Umu);
|
||||
|
||||
int ncall =300;
|
||||
|
||||
if (1) {
|
||||
FGrid->Barrier();
|
||||
Dw.ZeroCounters();
|
||||
Dw.Dhop(src,result,0);
|
||||
std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
|
||||
double t0=usecond();
|
||||
@ -211,29 +281,20 @@ int main (int argc, char ** argv)
|
||||
double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);
|
||||
|
||||
std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
|
||||
// std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
|
||||
// std::cout<<GridLogMessage << "norm ref "<< norm2(ref)<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s = "<< flops/(t1-t0)<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s per rank = "<< flops/(t1-t0)/NP<<std::endl;
|
||||
std::cout<<GridLogMessage << "mflop/s per node = "<< flops/(t1-t0)/NN<<std::endl;
|
||||
std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
|
||||
std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
|
||||
// std::cout<<GridLogMessage << "RF GiB/s (base 2) = "<< 1000000. * data_rf/((t1-t0))<<std::endl;
|
||||
// std::cout<<GridLogMessage << "mem GiB/s (base 2) = "<< 1000000. * data_mem/((t1-t0))<<std::endl;
|
||||
err = ref-result;
|
||||
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
|
||||
//exit(0);
|
||||
|
||||
if(( norm2(err)>1.0e-4) ) {
|
||||
/*
|
||||
std::cout << "RESULT\n " << result<<std::endl;
|
||||
std::cout << "REF \n " << ref <<std::endl;
|
||||
std::cout << "ERR \n " << err <<std::endl;
|
||||
*/
|
||||
std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
|
||||
FGrid->Barrier();
|
||||
exit(-1);
|
||||
}
|
||||
assert (norm2(err)< 1.0e-4 );
|
||||
Dw.Report();
|
||||
}
|
||||
|
||||
if (1)
|
||||
@ -242,16 +303,30 @@ int main (int argc, char ** argv)
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
|
||||
// ref = src - Gamma(Gamma::Algebra::GammaX)* src ; // 1+gamma_x
|
||||
tmp = U[mu]*Cshift(src,mu+1,1);
|
||||
tmp = Cshift(src,mu+1,1);
|
||||
{
|
||||
autoView( ref_v, ref, CpuWrite);
|
||||
autoView( tmp_v, tmp, CpuRead);
|
||||
for(int i=0;i<ref_v.size();i++){
|
||||
ref_v[i]+= tmp_v[i] + Gamma(Gmu[mu])*tmp_v[i]; ;
|
||||
autoView( U_v , U[mu] , CpuRead);
|
||||
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
int i=s+Ls*ss;
|
||||
ref_v[i]+= U_v[ss]*(tmp_v[i] + Gamma(Gmu[mu])*tmp_v[i]); ;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tmp =adj(U[mu])*src;
|
||||
|
||||
{
|
||||
autoView( tmp_v , tmp , CpuWrite);
|
||||
autoView( U_v , U[mu] , CpuRead);
|
||||
autoView( src_v, src , CpuRead);
|
||||
for(int ss=0;ss<U[mu].Grid()->oSites();ss++){
|
||||
for(int s=0;s<Ls;s++){
|
||||
tmp_v[Ls*ss+s] = adj(U_v[ss])*src_v[Ls*ss+s];
|
||||
}
|
||||
}
|
||||
}
|
||||
// tmp =adj(U[mu])*src;
|
||||
tmp =Cshift(tmp,mu+1,-1);
|
||||
{
|
||||
autoView( ref_v, ref, CpuWrite);
|
||||
@ -263,21 +338,20 @@ int main (int argc, char ** argv)
|
||||
}
|
||||
ref = -0.5*ref;
|
||||
}
|
||||
// dump=1;
|
||||
Dw.Dhop(src,result,1);
|
||||
|
||||
Dw.Dhop(src,result,DaggerYes);
|
||||
|
||||
std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
|
||||
std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
|
||||
std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
|
||||
|
||||
std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
|
||||
std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
|
||||
std::cout<<GridLogMessage << "norm dag ref "<< norm2(ref)<<std::endl;
|
||||
err = ref-result;
|
||||
std::cout<<GridLogMessage << "norm dag diff "<< norm2(err)<<std::endl;
|
||||
if((norm2(err)>1.0e-4)){
|
||||
/*
|
||||
std::cout<< "DAG RESULT\n " <<ref << std::endl;
|
||||
std::cout<< "DAG sRESULT\n " <<result << std::endl;
|
||||
std::cout<< "DAG ERR \n " << err <<std::endl;
|
||||
*/
|
||||
}
|
||||
assert((norm2(err)<1.0e-4));
|
||||
|
||||
LatticeFermionF src_e (FrbGrid);
|
||||
LatticeFermionF src_o (FrbGrid);
|
||||
LatticeFermionF r_e (FrbGrid);
|
||||
@ -307,7 +381,6 @@ int main (int argc, char ** argv)
|
||||
if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3 WilsonKernels" <<std::endl;
|
||||
std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
|
||||
{
|
||||
Dw.ZeroCounters();
|
||||
FGrid->Barrier();
|
||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||
double t0=usecond();
|
||||
@ -329,7 +402,6 @@ int main (int argc, char ** argv)
|
||||
std::cout<<GridLogMessage << "Deo mflop/s = "<< flops/(t1-t0)<<std::endl;
|
||||
std::cout<<GridLogMessage << "Deo mflop/s per rank "<< flops/(t1-t0)/NP<<std::endl;
|
||||
std::cout<<GridLogMessage << "Deo mflop/s per node "<< flops/(t1-t0)/NN<<std::endl;
|
||||
Dw.Report();
|
||||
}
|
||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||
Dw.DhopOE(src_e,r_o,DaggerNo);
|
||||
@ -344,13 +416,7 @@ int main (int argc, char ** argv)
|
||||
|
||||
err = r_eo-result;
|
||||
std::cout<<GridLogMessage << "norm diff "<< norm2(err)<<std::endl;
|
||||
if((norm2(err)>1.0e-4)){
|
||||
/*
|
||||
std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
|
||||
std::cout<< "Deo REF\n " <<result << std::endl;
|
||||
std::cout<< "Deo ERR \n " << err <<std::endl;
|
||||
*/
|
||||
}
|
||||
assert(norm2(err)<1.0e-4);
|
||||
|
||||
pickCheckerboard(Even,src_e,err);
|
||||
pickCheckerboard(Odd,src_o,err);
|
||||
@ -359,6 +425,4 @@ int main (int argc, char ** argv)
|
||||
|
||||
assert(norm2(src_e)<1.0e-4);
|
||||
assert(norm2(src_o)<1.0e-4);
|
||||
Grid_finalize();
|
||||
exit(0);
|
||||
}
|
||||
|
@ -184,8 +184,10 @@ int main (int argc, char ** argv)
|
||||
|
||||
double bytes=1.0*vol*Nvec*sizeof(Real);
|
||||
double flops=vol*Nvec*2;// mul,add
|
||||
std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"<<bytes<<" \t\t"<<bytes/time<<"\t\t"<<flops/time<< "\t\t"<<(stop-start)/1000./1000.<< "\t\t " <<std::endl;
|
||||
|
||||
std::cout<<GridLogMessage<<std::setprecision(3) << lat<<"\t\t"
|
||||
<<bytes<<" \t\t"<<bytes/time<<"\t\t"<<flops/time<< "\t\t"
|
||||
<<(stop-start)/1000./1000.<< "\t\t " <<std::endl;
|
||||
assert(nn==nn);
|
||||
}
|
||||
|
||||
Grid_finalize();
|
||||
|
@ -4,7 +4,7 @@ using namespace Grid;
|
||||
template<class Field>
|
||||
void SimpleConjugateGradient(LinearOperatorBase<Field> &HPDop,const Field &b, Field &x)
|
||||
{
|
||||
RealD cp, c, alpha, d, beta, ssq, qq;
|
||||
RealD cp, c, alpha, d, beta, ssq;
|
||||
RealD Tolerance=1.0e-10;
|
||||
int MaxIterations=10000;
|
||||
|
||||
|
539
examples/Example_wall_wall_3pt.cc
Normal file
@ -0,0 +1,539 @@
|
||||
/*
|
||||
* Warning: This code is illustrative only: not well tested, and not meant for production use
|
||||
* without regression / tests being applied
|
||||
*/
|
||||
|
||||
#include <Grid/Grid.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace Grid;
|
||||
typedef SpinColourMatrix Propagator;
|
||||
typedef SpinColourVector Fermion;
|
||||
typedef PeriodicGimplR GimplR;
|
||||
|
||||
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
|
||||
{
|
||||
public:
|
||||
INHERIT_GIMPL_TYPES(Gimpl);
|
||||
|
||||
GridBase *grid;
|
||||
GaugeField U;
|
||||
|
||||
CovariantLaplacianCshift(GaugeField &_U) :
|
||||
grid(_U.Grid()),
|
||||
U(_U) { };
|
||||
|
||||
virtual GridBase *Grid(void) { return grid; };
|
||||
|
||||
virtual void M (const Field &in, Field &out)
|
||||
{
|
||||
out=Zero();
|
||||
for(int mu=0;mu<Nd-1;mu++) {
|
||||
GaugeLinkField Umu = PeekIndex<LorentzIndex>(U, mu); // NB: Inefficient
|
||||
out = out - Gimpl::CovShiftForward(Umu,mu,in);
|
||||
out = out - Gimpl::CovShiftBackward(Umu,mu,in);
|
||||
out = out + 2.0*in;
|
||||
}
|
||||
};
|
||||
virtual void Mdag (const Field &in, Field &out) { M(in,out);}; // Laplacian is hermitian
|
||||
virtual void Mdiag (const Field &in, Field &out) {assert(0);}; // Unimplemented; needed only for multigrid
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);}; // Unimplemented; needed only for multigrid
virtual void MdirAll (const Field &in, std::vector<Field> &out) {assert(0);}; // Unimplemented; needed only for multigrid
|
||||
};
|
||||
|
||||
void MakePhase(Coordinate mom,LatticeComplex &phase)
|
||||
{
|
||||
GridBase *grid = phase.Grid();
|
||||
auto latt_size = grid->GlobalDimensions();
|
||||
ComplexD ci(0.0,1.0);
|
||||
phase=Zero();
|
||||
|
||||
LatticeComplex coor(phase.Grid());
|
||||
for(int mu=0;mu<Nd;mu++){
|
||||
RealD TwoPiL = M_PI * 2.0/ latt_size[mu];
|
||||
LatticeCoordinate(coor,mu);
|
||||
phase = phase + (TwoPiL * mom[mu]) * coor;
|
||||
}
|
||||
phase = exp(phase*ci);
|
||||
}
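// Illustrative usage (assumed values, not part of the original file):
//   LatticeComplex phase(UGrid);
//   MakePhase(Coordinate({1,0,0,0}), phase); // phase(x) = exp( i 2*pi*x_0/L_0 )
//   phased_prop = prop * phase;
// i.e. the convention is p_mu = 2*pi*n_mu/L_mu with integer n_mu.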
|
||||
void LinkSmear(int nstep, RealD rho,LatticeGaugeField &Uin,LatticeGaugeField &Usmr)
|
||||
{
|
||||
Smear_Stout<GimplR> Stout(rho);
|
||||
LatticeGaugeField Utmp(Uin.Grid());
|
||||
Utmp = Uin;
|
||||
for(int i=0;i<nstep;i++){
|
||||
Stout.smear(Usmr,Utmp);
|
||||
Utmp = Usmr;
|
||||
}
|
||||
}
|
||||
void PointSource(Coordinate &coor,LatticePropagator &source)
|
||||
{
|
||||
// Coordinate coor({0,0,0,0});
|
||||
source=Zero();
|
||||
SpinColourMatrix kronecker; kronecker=1.0;
|
||||
pokeSite(kronecker,source,coor);
|
||||
}
|
||||
void GFWallSource(int tslice,LatticePropagator &source)
|
||||
{
|
||||
GridBase *grid = source.Grid();
|
||||
LatticeComplex one(grid); one = ComplexD(1.0,0.0);
|
||||
LatticeComplex zz(grid); zz=Zero();
|
||||
LatticeInteger t(grid);
|
||||
LatticeCoordinate(t,Tdir);
|
||||
one = where(t==Integer(tslice), one, zz);
|
||||
source = 1.0;
|
||||
source = source * one;
|
||||
}
|
||||
|
||||
void Z2WallSource(GridParallelRNG &RNG,int tslice,LatticePropagator &source)
|
||||
{
|
||||
GridBase *grid = source.Grid();
|
||||
LatticeComplex noise(grid);
|
||||
LatticeComplex zz(grid); zz=Zero();
|
||||
LatticeInteger t(grid);
|
||||
|
||||
RealD nrm=1.0/sqrt(2);
|
||||
bernoulli(RNG, noise); // 0,1 50:50
|
||||
|
||||
noise = (2.*noise - Complex(1,1))*nrm;
|
||||
|
||||
LatticeCoordinate(t,Tdir);
|
||||
noise = where(t==Integer(tslice), noise, zz);
|
||||
|
||||
source = 1.0;
|
||||
source = source*noise;
|
||||
std::cout << " Z2 wall " << norm2(source) << std::endl;
|
||||
}
|
||||
void GaugeFix(LatticeGaugeField &U,LatticeGaugeField &Ufix)
|
||||
{
|
||||
Real alpha=0.05;
|
||||
|
||||
Real plaq=WilsonLoops<GimplR>::avgPlaquette(U);
|
||||
|
||||
std::cout << " Initial plaquette "<<plaq << std::endl;
|
||||
|
||||
LatticeColourMatrix xform(U.Grid());
|
||||
Ufix = U;
|
||||
int orthog=Nd-1;
|
||||
FourierAcceleratedGaugeFixer<GimplR>::SteepestDescentGaugeFix(Ufix,xform,alpha,100000,1.0e-14, 1.0e-14,true,orthog);
|
||||
|
||||
plaq=WilsonLoops<GimplR>::avgPlaquette(Ufix);
|
||||
|
||||
std::cout << " Final plaquette "<<plaq << std::endl;
|
||||
}
|
||||
template<class Field>
|
||||
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
|
||||
{
|
||||
typedef CovariantLaplacianCshift <GimplR,Field> Laplacian_t;
|
||||
Laplacian_t Laplacian(U);
|
||||
|
||||
Integer Iterations = 40;
|
||||
Real width = 2.0;
|
||||
Real coeff = (width*width) / Real(4*Iterations);
|
||||
|
||||
Field tmp(U.Grid());
|
||||
smeared=unsmeared;
|
||||
// chi = (1-p^2/2N)^N kronecker
|
||||
for(int n = 0; n < Iterations; ++n) {
|
||||
Laplacian.M(smeared,tmp);
|
||||
smeared = smeared - coeff*tmp;
|
||||
std::cout << " smear iter " << n<<" " <<norm2(smeared)<<std::endl;
|
||||
}
|
||||
}
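// Note (illustrative, not in the original): each iteration above applies
//   smeared <- (1 - (w^2/(4N)) * Laplacian) * smeared,
// so after N=Iterations steps the source is hit with (1 - w^2 Lap/(4N))^N,
// which approximates the Gaussian kernel exp(-w^2 Lap / 4) for large N,
// i.e. chi ~ exp(-p^2 w^2 / 4) in momentum space (width w = 2.0 here).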
|
||||
void GaussianSource(Coordinate &site,LatticeGaugeField &U,LatticePropagator &source)
|
||||
{
|
||||
LatticePropagator tmp(source.Grid());
|
||||
PointSource(site,source);
|
||||
std::cout << " GaussianSource Kronecker "<< norm2(source)<<std::endl;
|
||||
tmp = source;
|
||||
GaussianSmear(U,tmp,source);
|
||||
std::cout << " GaussianSource Smeared "<< norm2(source)<<std::endl;
|
||||
}
|
||||
void GaussianWallSource(GridParallelRNG &RNG,int tslice,LatticeGaugeField &U,LatticePropagator &source)
|
||||
{
|
||||
Z2WallSource(RNG,tslice,source);
|
||||
auto tmp = source;
|
||||
GaussianSmear(U,tmp,source);
|
||||
}
|
||||
void SequentialSource(int tslice,Coordinate &mom,LatticePropagator &spectator,LatticePropagator &source)
|
||||
{
|
||||
assert(mom.size()==Nd);
|
||||
assert(mom[Tdir] == 0);
|
||||
|
||||
GridBase * grid = spectator.Grid();
|
||||
|
||||
LatticeInteger ts(grid);
|
||||
LatticeCoordinate(ts,Tdir);
|
||||
source = Zero();
|
||||
source = where(ts==Integer(tslice),spectator,source); // Stick in a slice of the spectator, zero everywhere else
|
||||
|
||||
LatticeComplex phase(grid);
|
||||
MakePhase(mom,phase);
|
||||
|
||||
source = source *phase;
|
||||
}
|
||||
template<class Action>
|
||||
void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
|
||||
{
|
||||
GridBase *UGrid = D.GaugeGrid();
|
||||
GridBase *FGrid = D.FermionGrid();
|
||||
|
||||
LatticeFermion src4 (UGrid);
|
||||
LatticeFermion src5 (FGrid);
|
||||
LatticeFermion result5(FGrid);
|
||||
LatticeFermion result4(UGrid);
|
||||
|
||||
ConjugateGradient<LatticeFermion> CG(1.0e-12,100000);
|
||||
SchurRedBlackDiagTwoSolve<LatticeFermion> schur(CG);
|
||||
ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
|
||||
for(int s=0;s<Nd;s++){
|
||||
for(int c=0;c<Nc;c++){
|
||||
PropToFerm<Action>(src4,source,s,c);
|
||||
|
||||
D.ImportPhysicalFermionSource(src4,src5);
|
||||
|
||||
result5=Zero();
|
||||
schur(D,src5,result5,ZG);
|
||||
std::cout<<GridLogMessage
|
||||
<<"spin "<<s<<" color "<<c
|
||||
<<" norm2(src5d) " <<norm2(src5)
|
||||
<<" norm2(result5d) "<<norm2(result5)<<std::endl;
|
||||
|
||||
D.ExportPhysicalFermionSolution(result5,result4);
|
||||
|
||||
FermToProp<Action>(propagator,result4,s,c);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class MesonFile: Serializable {
|
||||
public:
|
||||
GRID_SERIALIZABLE_CLASS_MEMBERS(MesonFile, std::vector<std::vector<Complex> >, data);
|
||||
};
|
||||
|
||||
void MesonTrace(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
|
||||
{
|
||||
const int nchannel=4;
|
||||
Gamma::Algebra Gammas[nchannel][2] = {
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::Gamma5},
|
||||
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
|
||||
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaTGamma5}
|
||||
};
|
||||
|
||||
Gamma G5(Gamma::Algebra::Gamma5);
|
||||
|
||||
LatticeComplex meson_CF(q1.Grid());
|
||||
MesonFile MF;
|
||||
|
||||
for(int ch=0;ch<nchannel;ch++){
|
||||
|
||||
Gamma Gsrc(Gammas[ch][0]);
|
||||
Gamma Gsnk(Gammas[ch][1]);
|
||||
|
||||
meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));
|
||||
|
||||
std::vector<TComplex> meson_T;
|
||||
sliceSum(meson_CF,meson_T, Tdir);
|
||||
|
||||
int nt=meson_T.size();
|
||||
|
||||
std::vector<Complex> corr(nt);
|
||||
for(int t=0;t<nt;t++){
|
||||
corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly; have not figured out a workaround
|
||||
std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
|
||||
}
|
||||
MF.data.push_back(corr);
|
||||
}
|
||||
|
||||
{
|
||||
XmlWriter WR(file);
|
||||
write(WR,"MesonFile",MF);
|
||||
}
|
||||
}
|
||||
|
||||
void Meson3pt(std::string file,LatticePropagator &q1,LatticePropagator &q2,LatticeComplex &phase)
|
||||
{
|
||||
const int nchannel=4;
|
||||
Gamma::Algebra Gammas[nchannel][2] = {
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaX},
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaY},
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaZ},
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaT}
|
||||
};
|
||||
|
||||
Gamma G5(Gamma::Algebra::Gamma5);
|
||||
|
||||
LatticeComplex meson_CF(q1.Grid());
|
||||
MesonFile MF;
|
||||
|
||||
for(int ch=0;ch<nchannel;ch++){
|
||||
|
||||
Gamma Gsrc(Gammas[ch][0]);
|
||||
Gamma Gsnk(Gammas[ch][1]);
|
||||
|
||||
meson_CF = trace(G5*adj(q1)*G5*Gsnk*q2*adj(Gsrc));
|
||||
|
||||
std::vector<TComplex> meson_T;
|
||||
sliceSum(meson_CF,meson_T, Tdir);
|
||||
|
||||
int nt=meson_T.size();
|
||||
|
||||
std::vector<Complex> corr(nt);
|
||||
for(int t=0;t<nt;t++){
|
||||
corr[t] = TensorRemove(meson_T[t]); // Yes this is ugly; have not figured out a workaround
|
||||
std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
|
||||
}
|
||||
MF.data.push_back(corr);
|
||||
}
|
||||
|
||||
{
|
||||
XmlWriter WR(file);
|
||||
write(WR,"MesonFile",MF);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void WallSinkMesonTrace(std::string file,std::vector<Propagator> &q1,std::vector<Propagator> &q2)
|
||||
{
|
||||
const int nchannel=4;
|
||||
Gamma::Algebra Gammas[nchannel][2] = {
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::Gamma5},
|
||||
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::GammaTGamma5},
|
||||
{Gamma::Algebra::GammaTGamma5,Gamma::Algebra::Gamma5},
|
||||
{Gamma::Algebra::Gamma5 ,Gamma::Algebra::GammaTGamma5}
|
||||
};
|
||||
|
||||
Gamma G5(Gamma::Algebra::Gamma5);
|
||||
int nt=q1.size();
|
||||
std::vector<Complex> meson_CF(nt);
|
||||
MesonFile MF;
|
||||
|
||||
for(int ch=0;ch<nchannel;ch++){
|
||||
|
||||
Gamma Gsrc(Gammas[ch][0]);
|
||||
Gamma Gsnk(Gammas[ch][1]);
|
||||
|
||||
std::vector<Complex> corr(nt);
|
||||
for(int t=0;t<nt;t++){
|
||||
meson_CF[t] = trace(G5*adj(q1[t])*G5*Gsnk*q2[t]*adj(Gsrc));
|
||||
corr[t] = TensorRemove(meson_CF[t]); // Yes this is ugly; have not figured out a workaround
|
||||
std::cout << " channel "<<ch<<" t "<<t<<" " <<corr[t]<<std::endl;
|
||||
}
|
||||
MF.data.push_back(corr);
|
||||
}
|
||||
|
||||
{
|
||||
XmlWriter WR(file);
|
||||
write(WR,"MesonFile",MF);
|
||||
}
|
||||
}
|
||||
int make_idx(int p, int m,int nmom)
|
||||
{
|
||||
if (m==0) return p;
|
||||
assert(p==0);
|
||||
return nmom + m - 1;
|
||||
}
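// Illustrative note: with nmom=3 momenta and nmass=3 masses (as set up in
// main below) the flattened index runs
//   idx 0..2 : lightest mass (m=0) at momenta p=0,1,2
//   idx 3    : mass m=1 at zero momentum
//   idx 4    : mass m=2 at zero momentum
// so the propagator arrays are sized nmom+nmass-1 = 5.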
|
||||
|
||||
int main (int argc, char ** argv)
|
||||
{
|
||||
Grid_init(&argc,&argv);
|
||||
|
||||
// Double precision grids
|
||||
auto latt = GridDefaultLatt();
|
||||
GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(),
|
||||
GridDefaultSimd(Nd,vComplex::Nsimd()),
|
||||
GridDefaultMpi());
|
||||
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
|
||||
|
||||
|
||||
LatticeGaugeField Umu(UGrid);
|
||||
LatticeGaugeField Utmp(UGrid);
|
||||
LatticeGaugeField Usmr(UGrid);
|
||||
std::string config;
|
||||
if( argc > 1 && argv[1][0] != '-' )
|
||||
{
|
||||
std::cout<<GridLogMessage <<"Loading configuration from "<<argv[1]<<std::endl;
|
||||
FieldMetaData header;
|
||||
NerscIO::readConfiguration(Umu, header, argv[1]);
|
||||
config=argv[1];
|
||||
}
|
||||
else
|
||||
{
|
||||
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
|
||||
SU<Nc>::ColdConfiguration(Umu);
|
||||
config="ColdConfig";
|
||||
}
|
||||
// GaugeFix(Umu,Utmp);
|
||||
// Umu=Utmp;
|
||||
|
||||
int nsmr=3;
|
||||
RealD rho=0.1;
|
||||
LinkSmear(nsmr,rho,Umu,Usmr);
|
||||
|
||||
|
||||
std::vector<int> smeared_link({ 0,0,1} );
|
||||
std::vector<RealD> masses({ 0.004,0.02477,0.447} ); // u/d, s, c ??
|
||||
std::vector<RealD> M5s ({ 1.8,1.8,1.0} );
|
||||
std::vector<RealD> bs ({ 1.0,1.0,1.5} ); // DDM
|
||||
std::vector<RealD> cs ({ 0.0,0.0,0.5} ); // DDM
|
||||
std::vector<int> Ls_s ({ 16,16,12} );
|
||||
std::vector<GridCartesian *> FGrids;
|
||||
std::vector<GridRedBlackCartesian *> FrbGrids;
|
||||
|
||||
std::vector<Coordinate> momenta;
|
||||
momenta.push_back(Coordinate({0,0,0,0}));
|
||||
momenta.push_back(Coordinate({1,0,0,0}));
|
||||
momenta.push_back(Coordinate({2,0,0,0}));
|
||||
|
||||
int nmass = masses.size();
|
||||
int nmom = momenta.size();
|
||||
|
||||
std::vector<MobiusFermionR *> FermActs;
|
||||
|
||||
std::cout<<GridLogMessage <<"======================"<<std::endl;
|
||||
std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
|
||||
std::cout<<GridLogMessage <<"======================"<<std::endl;
|
||||
|
||||
std::vector<Complex> boundary = {1,1,1,-1};
|
||||
typedef MobiusFermionR FermionAction;
|
||||
FermionAction::ImplParams Params(boundary);
|
||||
|
||||
for(int m=0;m<masses.size();m++) {
|
||||
|
||||
RealD mass = masses[m];
|
||||
RealD M5 = M5s[m];
|
||||
RealD b = bs[m];
|
||||
RealD c = cs[m];
|
||||
int Ls = Ls_s[m];
|
||||
|
||||
if ( smeared_link[m] ) Utmp = Usmr;
|
||||
else Utmp = Umu;
|
||||
|
||||
FGrids.push_back(SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid));
|
||||
FrbGrids.push_back(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid));
|
||||
|
||||
FermActs.push_back(new MobiusFermionR(Utmp,*FGrids[m],*FrbGrids[m],*UGrid,*UrbGrid,mass,M5,b,c,Params));
|
||||
}
|
||||
|
||||
LatticePropagator z2wall_source(UGrid);
|
||||
LatticePropagator gfwall_source(UGrid);
|
||||
LatticePropagator phased_prop(UGrid);
|
||||
|
||||
int tslice = 0;
|
||||
int tseq=(tslice+16)%latt[Nd-1];
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// RNG seeded for Z2 wall
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// You can manage seeds however you like.
|
||||
// Recommend SeedUniqueString.
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString("Study2-Source_Z2_p_0_0_0_t_0-880");
|
||||
Z2WallSource (RNG4,tslice,z2wall_source);
|
||||
GFWallSource (tslice,gfwall_source);
|
||||
|
||||
std::vector<LatticeComplex> phase(nmom,UGrid);
|
||||
for(int m=0;m<nmom;m++){
|
||||
MakePhase(momenta[m],phase[m]);
|
||||
}
|
||||
|
||||
std::vector<LatticePropagator> Z2Props (nmom+nmass-1,UGrid);
|
||||
std::vector<LatticePropagator> GFProps (nmom+nmass-1,UGrid);
|
||||
for(int p=0;p<nmom;p++) {
|
||||
int m=0;
|
||||
int idx = make_idx(p,m,nmom);
|
||||
phased_prop = z2wall_source * phase[p];
|
||||
Solve(*FermActs[m],phased_prop ,Z2Props[idx]);
|
||||
|
||||
phased_prop = gfwall_source * phase[p];
|
||||
Solve(*FermActs[m],phased_prop ,GFProps[idx]);
|
||||
}
|
||||
for(int m=1;m<nmass;m++) {
|
||||
int p=0;
|
||||
int idx = make_idx(p,m,nmom);
|
||||
phased_prop = z2wall_source;
|
||||
Solve(*FermActs[m],phased_prop ,Z2Props[idx]);
|
||||
|
||||
phased_prop = gfwall_source;
|
||||
Solve(*FermActs[m],phased_prop ,GFProps[idx]);
|
||||
}
|
||||
|
||||
std::vector<std::vector<Propagator> > wsnk_z2Props(nmom+nmass-1);
|
||||
std::vector<std::vector<Propagator> > wsnk_gfProps(nmom+nmass-1);
|
||||
|
||||
// Non-zero momentum kaon and D two point functions
|
||||
// WW stick momentum on m1 (lighter)
|
||||
// zero momentum on m2
|
||||
for(int m1=0;m1<nmass;m1++) {
|
||||
for(int m2=m1;m2<nmass;m2++) {
|
||||
int pmax = (m1==0)? nmom:1;
|
||||
for(int p=0;p<pmax;p++){
|
||||
|
||||
std::stringstream ssg,ssz;
|
||||
std::stringstream wssg,wssz;
|
||||
|
||||
int idx1 = make_idx(p,m1,nmom);
|
||||
int idx2 = make_idx(0,m2,nmom);
|
||||
|
||||
/// Point sinks
|
||||
ssg<<config<<"_p"<<p<< "_m" << m1 << "_m"<< m2 << "_p_gf_meson.xml";
|
||||
ssz<<config<<"_p"<<p<< "_m" << m1 << "_m"<< m2 << "_p_z2_meson.xml";
|
||||
MesonTrace(ssz.str(),Z2Props[idx1],Z2Props[idx2],phase[p]); // Q1 is conjugated
|
||||
MesonTrace(ssg.str(),GFProps[idx1],GFProps[idx2],phase[p]);
|
||||
|
||||
/// Wall sinks
|
||||
wssg<<config<<"_p"<<p<< "_m" << m1 << "_m"<< m2 << "_w_gf_meson.xml";
|
||||
wssz<<config<<"_p"<<p<< "_m" << m1 << "_m"<< m2 << "_w_z2_meson.xml";
|
||||
|
||||
phased_prop = GFProps[m2] * phase[p];
|
||||
sliceSum(phased_prop,wsnk_gfProps[m1],Tdir);
|
||||
sliceSum(GFProps[m1],wsnk_gfProps[m2],Tdir);
|
||||
WallSinkMesonTrace(wssg.str(),wsnk_gfProps[m1],wsnk_gfProps[m2]);
|
||||
|
||||
phased_prop = Z2Props[m2] * phase[p];
sliceSum(phased_prop,wsnk_z2Props[m1],Tdir);
sliceSum(Z2Props[m1],wsnk_z2Props[m2],Tdir);
|
||||
WallSinkMesonTrace(wssz.str(),wsnk_z2Props[m1],wsnk_z2Props[m2]);
|
||||
}
|
||||
}}
|
||||
|
||||
|
||||
/////////////////////////////////////
|
||||
// Sequential solves
|
||||
/////////////////////////////////////
|
||||
LatticePropagator seq_wsnk_z2src(UGrid);
|
||||
LatticePropagator seq_wsnk_gfsrc(UGrid);
|
||||
LatticePropagator seq_psnk_z2src(UGrid);
|
||||
LatticePropagator seq_psnk_gfsrc(UGrid);
|
||||
LatticePropagator source(UGrid);
|
||||
for(int m=0;m<nmass-1;m++){
|
||||
int spect_idx = make_idx(0,m,nmom);
|
||||
int charm=nmass-1;
|
||||
|
||||
SequentialSource(tseq,momenta[0],GFProps[spect_idx],source);
|
||||
Solve(*FermActs[charm],source,seq_psnk_gfsrc);
|
||||
|
||||
SequentialSource(tseq,momenta[0],Z2Props[spect_idx],source);
|
||||
Solve(*FermActs[charm],source,seq_psnk_z2src);
|
||||
|
||||
// Todo need wall sequential solve
|
||||
for(int p=0;p<nmom;p++){
|
||||
int active_idx = make_idx(p,0,nmom);
|
||||
std::stringstream seq_3pt_p_z2;
|
||||
std::stringstream seq_3pt_p_gf;
|
||||
std::stringstream seq_3pt_w_z2;
|
||||
std::stringstream seq_3pt_w_gf;
|
||||
seq_3pt_p_z2 <<config<<"_3pt_p"<<p<< "_m" << m << "_p_z2_meson.xml";
|
||||
seq_3pt_p_gf <<config<<"_3pt_p"<<p<< "_m" << m << "_p_gf_meson.xml";
|
||||
seq_3pt_w_z2 <<config<<"_3pt_p"<<p<< "_m" << m << "_w_z2_meson.xml";
|
||||
seq_3pt_w_gf <<config<<"_3pt_p"<<p<< "_m" << m << "_w_gf_meson.xml";
|
||||
Meson3pt(seq_3pt_p_gf.str(),GFProps[active_idx],seq_psnk_gfsrc,phase[p]);
|
||||
Meson3pt(seq_3pt_p_z2.str(),Z2Props[active_idx],seq_psnk_z2src,phase[p]);
|
||||
}
|
||||
}
|
||||
|
||||
Grid_finalize();
|
||||
}
|
||||
|
||||
|
||||
|
@ -9,6 +9,7 @@ using namespace std;
|
||||
using namespace Grid;
|
||||
typedef SpinColourMatrix Propagator;
|
||||
typedef SpinColourVector Fermion;
|
||||
typedef PeriodicGimplR GimplR;
|
||||
|
||||
template<class Gimpl,class Field> class CovariantLaplacianCshift : public SparseMatrixBase<Field>
|
||||
{
|
||||
@ -55,6 +56,16 @@ void MakePhase(Coordinate mom,LatticeComplex &phase)
|
||||
}
|
||||
phase = exp(phase*ci);
|
||||
}
|
||||
void LinkSmear(int nstep, RealD rho,LatticeGaugeField &Uin,LatticeGaugeField &Usmr)
|
||||
{
|
||||
Smear_Stout<GimplR> Stout(rho);
|
||||
LatticeGaugeField Utmp(Uin.Grid());
|
||||
Utmp = Uin;
|
||||
for(int i=0;i<nstep;i++){
|
||||
Stout.smear(Usmr,Utmp);
|
||||
Utmp = Usmr;
|
||||
}
|
||||
}
|
||||
void PointSource(Coordinate &coor,LatticePropagator &source)
|
||||
{
|
||||
// Coordinate coor({0,0,0,0});
|
||||
@ -97,23 +108,23 @@ void GaugeFix(LatticeGaugeField &U,LatticeGaugeField &Ufix)
|
||||
{
|
||||
Real alpha=0.05;
|
||||
|
||||
Real plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(U);
|
||||
Real plaq=WilsonLoops<GimplR>::avgPlaquette(U);
|
||||
|
||||
std::cout << " Initial plaquette "<<plaq << std::endl;
|
||||
|
||||
LatticeColourMatrix xform(U.Grid());
|
||||
Ufix = U;
|
||||
int orthog=Nd-1;
|
||||
FourierAcceleratedGaugeFixer<PeriodicGimplR>::SteepestDescentGaugeFix(Ufix,xform,alpha,10000,1.0e-12, 1.0e-12,true,orthog);
|
||||
FourierAcceleratedGaugeFixer<GimplR>::SteepestDescentGaugeFix(Ufix,xform,alpha,100000,1.0e-14, 1.0e-14,true,orthog);
|
||||
|
||||
plaq=WilsonLoops<PeriodicGimplR>::avgPlaquette(Ufix);
|
||||
plaq=WilsonLoops<GimplR>::avgPlaquette(Ufix);
|
||||
|
||||
std::cout << " Final plaquette "<<plaq << std::endl;
|
||||
}
|
||||
template<class Field>
|
||||
void GaussianSmear(LatticeGaugeField &U,Field &unsmeared,Field &smeared)
|
||||
{
|
||||
typedef CovariantLaplacianCshift <PeriodicGimplR,Field> Laplacian_t;
|
||||
typedef CovariantLaplacianCshift <GimplR,Field> Laplacian_t;
|
||||
Laplacian_t Laplacian(U);
|
||||
|
||||
Integer Iterations = 40;
|
||||
@ -167,19 +178,21 @@ void Solve(Action &D,LatticePropagator &source,LatticePropagator &propagator)
|
||||
GridBase *UGrid = D.GaugeGrid();
|
||||
GridBase *FGrid = D.FermionGrid();
|
||||
|
||||
LatticeFermion src4 (UGrid);
|
||||
LatticeFermion src4 (UGrid); src4 = Zero();
|
||||
LatticeFermion src5 (FGrid);
|
||||
LatticeFermion result5(FGrid);
|
||||
LatticeFermion result4(UGrid);
|
||||
|
||||
ConjugateGradient<LatticeFermion> CG(1.0e-8,100000);
|
||||
SchurRedBlackDiagMooeeSolve<LatticeFermion> schur(CG);
|
||||
ConjugateGradient<LatticeFermion> CG(1.0e-12,100000);
|
||||
SchurRedBlackDiagTwoSolve<LatticeFermion> schur(CG);
|
||||
ZeroGuesser<LatticeFermion> ZG; // Could be a DeflatedGuesser if have eigenvectors
|
||||
std::cout<<GridLogMessage<< " source4 "<<norm2(source)<<std::endl;
|
||||
for(int s=0;s<Nd;s++){
|
||||
for(int c=0;c<Nc;c++){
|
||||
PropToFerm<Action>(src4,source,s,c);
|
||||
|
||||
std::cout<<GridLogMessage<< s<<c<<" src4 "<<norm2(src4)<<std::endl;
|
||||
D.ImportPhysicalFermionSource(src4,src5);
|
||||
std::cout<<GridLogMessage<< s<<c<<" src5 "<<norm2(src5)<<std::endl;
|
||||
|
||||
result5=Zero();
|
||||
schur(D,src5,result5,ZG);
|
||||
@ -287,15 +300,10 @@ int main (int argc, char ** argv)
|
||||
GridDefaultMpi());
|
||||
GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// You can manage seeds however you like.
|
||||
// Recommend SeedUniqueString.
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
std::vector<int> seeds4({1,2,3,4});
|
||||
GridParallelRNG RNG4(UGrid); RNG4.SeedFixedIntegers(seeds4);
|
||||
|
||||
LatticeGaugeField Umu(UGrid);
|
||||
LatticeGaugeField Ufixed(UGrid);
|
||||
LatticeGaugeField Utmp(UGrid);
|
||||
LatticeGaugeField Usmr(UGrid);
|
||||
std::string config;
|
||||
if( argc > 1 && argv[1][0] != '-' )
|
||||
{
|
||||
@ -308,13 +316,20 @@ int main (int argc, char ** argv)
|
||||
{
|
||||
std::cout<<GridLogMessage <<"Using hot configuration"<<std::endl;
|
||||
SU<Nc>::ColdConfiguration(Umu);
|
||||
// SU<Nc>::HotConfiguration(RNG4,Umu);
|
||||
config="HotConfig";
|
||||
config="ColdConfig";
|
||||
}
|
||||
GaugeFix(Umu,Ufixed);
|
||||
Umu=Ufixed;
|
||||
|
||||
// GaugeFix(Umu,Utmp);
|
||||
// Umu=Utmp;
|
||||
|
||||
int nsmr=3;
|
||||
RealD rho=0.1;
|
||||
RealD plaq_gf =WilsonLoops<GimplR>::avgPlaquette(Umu);
|
||||
LinkSmear(nsmr,rho,Umu,Usmr);
|
||||
RealD plaq_smr=WilsonLoops<GimplR>::avgPlaquette(Usmr);
|
||||
std::cout << GridLogMessage << " GF Plaquette " <<plaq_gf<<std::endl;
|
||||
std::cout << GridLogMessage << " SM Plaquette " <<plaq_smr<<std::endl;
|
||||
|
||||
std::vector<int> smeared_link({ 0,0,1} );
|
||||
std::vector<RealD> masses({ 0.004,0.02477,0.447} ); // u/d, s, c ??
|
||||
std::vector<RealD> M5s ({ 1.8,1.8,1.0} );
|
||||
std::vector<RealD> bs ({ 1.0,1.0,1.5} ); // DDM
|
||||
@ -330,6 +345,9 @@ int main (int argc, char ** argv)
|
||||
std::cout<<GridLogMessage <<"======================"<<std::endl;
|
||||
std::cout<<GridLogMessage <<"MobiusFermion action as Scaled Shamir kernel"<<std::endl;
|
||||
std::cout<<GridLogMessage <<"======================"<<std::endl;
|
||||
std::vector<Complex> boundary = {1,1,1,-1};
|
||||
typedef MobiusFermionR FermionAction;
|
||||
FermionAction::ImplParams Params(boundary);
|
||||
|
||||
for(int m=0;m<masses.size();m++) {
|
||||
|
||||
@ -339,30 +357,40 @@ int main (int argc, char ** argv)
|
||||
RealD c = cs[m];
|
||||
int Ls = Ls_s[m];
|
||||
|
||||
if ( smeared_link[m] ) Utmp = Usmr;
|
||||
else Utmp = Umu;
|
||||
|
||||
FGrids.push_back(SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid));
|
||||
FrbGrids.push_back(SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid));
|
||||
|
||||
FermActs.push_back(new MobiusFermionR(Umu,*FGrids[m],*FrbGrids[m],*UGrid,*UrbGrid,mass,M5,b,c));
|
||||
FermActs.push_back(new MobiusFermionR(Utmp,*FGrids[m],*FrbGrids[m],*UGrid,*UrbGrid,mass,M5,b,c,Params));
|
||||
}
|
||||
|
||||
LatticePropagator point_source(UGrid);
|
||||
LatticePropagator z2wall_source(UGrid);
|
||||
LatticePropagator gfwall_source(UGrid);
|
||||
|
||||
Coordinate Origin({0,0,0,0});
|
||||
PointSource (Origin,point_source);
|
||||
Z2WallSource (RNG4,0,z2wall_source);
|
||||
GFWallSource (0,gfwall_source);
|
||||
|
||||
std::vector<LatticePropagator> PointProps(nmass,UGrid);
|
||||
std::vector<LatticePropagator> GaussProps(nmass,UGrid);
|
||||
int tslice = 0;
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// RNG seeded for Z2 wall
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// You can manage seeds however you like.
|
||||
// Recommend SeedUniqueString.
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
GridParallelRNG RNG4(UGrid); RNG4.SeedUniqueString("Study2-Source_Z2_p_0_0_0_t_0-880");
|
||||
Z2WallSource (RNG4,tslice,z2wall_source);
|
||||
GFWallSource (tslice,gfwall_source);
|
||||
|
||||
std::vector<LatticePropagator> Z2Props (nmass,UGrid);
|
||||
std::vector<LatticePropagator> GFProps (nmass,UGrid);
|
||||
|
||||
for(int m=0;m<nmass;m++) {
|
||||
|
||||
std::cout << GridLogMessage << " Mass " <<m << " z2wall source "<<norm2(z2wall_source)<<std::endl;
|
||||
Solve(*FermActs[m],z2wall_source ,Z2Props[m]);
|
||||
std::cout << GridLogMessage << " Mass " <<m << " gfwall source "<<norm2(gfwall_source)<<std::endl;
|
||||
Solve(*FermActs[m],gfwall_source ,GFProps[m]);
|
||||
|
||||
std::cout << GridLogMessage << " Mass " <<m << " z2wall source "<<norm2(z2wall_source)<< " " << norm2(gfwall_source)<<std::endl;
|
||||
|
||||
}
|
||||
|
||||
@ -383,14 +411,15 @@ int main (int argc, char ** argv)
|
||||
std::stringstream wssg,wssz;
|
||||
|
||||
/// Point sinks
|
||||
ssg<<config<< "_m" << m1 << "_m"<< m2 << "p_gf_meson.xml";
|
||||
ssz<<config<< "_m" << m1 << "_m"<< m2 << "p_z2_meson.xml";
|
||||
ssg<<config<< "_m" << m1 << "_m"<< m2 << "_p_gf_meson.xml";
|
||||
ssz<<config<< "_m" << m1 << "_m"<< m2 << "_p_z2_meson.xml";
|
||||
|
||||
MesonTrace(ssz.str(),Z2Props[m1],Z2Props[m2],phase);
|
||||
MesonTrace(ssg.str(),GFProps[m1],GFProps[m2],phase);
|
||||
|
||||
/// Wall sinks
|
||||
wssg<<config<< "_m" << m1 << "_m"<< m2 << "w_gf_meson.xml";
|
||||
wssz<<config<< "_m" << m1 << "_m"<< m2 << "w_z2_meson.xml";
|
||||
wssg<<config<< "_m" << m1 << "_m"<< m2 << "_w_gf_meson.xml";
|
||||
wssz<<config<< "_m" << m1 << "_m"<< m2 << "_w_z2_meson.xml";
|
||||
|
||||
WallSinkMesonTrace(wssg.str(),wsnk_gfProps[m1],wsnk_gfProps[m2]);
|
||||
WallSinkMesonTrace(wssz.str(),wsnk_z2Props[m1],wsnk_z2Props[m2]);
|
||||
|
12
systems/Crusher/config-command
Normal file
@ -0,0 +1,12 @@
|
||||
../../configure --enable-comms=mpi-auto \
|
||||
--enable-unified=no \
|
||||
--enable-shm=nvlink \
|
||||
--enable-accelerator=hip \
|
||||
--enable-gen-simd-width=64 \
|
||||
--enable-simd=GPU \
|
||||
--disable-fermion-reps \
|
||||
--disable-gparity \
|
||||
CXX=hipcc MPICXX=mpicxx \
|
||||
CXXFLAGS="-fPIC -I/opt/rocm-4.5.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
|
||||
LDFLAGS=" -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa "
|
||||
HIPFLAGS = --amdgpu-target=gfx90a
|
30
systems/Crusher/dwf.slurm
Normal file
@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 8
|
||||
#SBATCH --exclusive
|
||||
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export OMP_NUM_THREADS=1
|
||||
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 32.32.32.256 --mpi 1.1.1.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
srun --gpus-per-task 1 -n8 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
|
||||
|
||||
|
27
systems/Crusher/dwf4.slurm
Normal file
@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 4
|
||||
#SBATCH --exclusive
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export OMP_NUM_THREADS=4
|
||||
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
PARAMS=" --accelerator-threads 8 --grid 32.32.64.64 --mpi 1.1.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
|
||||
srun --gpus-per-task 1 -n4 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
|
||||
|
48
systems/Crusher/dwf8.slurm
Normal file
@ -0,0 +1,48 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 8
|
||||
#SBATCH -n 64
|
||||
#SBATCH --exclusive
|
||||
#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPICH_OFI_NIC_POLICY=GPU
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
export OMP_NUM_THREADS=1
|
||||
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.256 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.256.8node
|
||||
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 1"
|
||||
echo $PARAMS
|
||||
srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node.shm0
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 1"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node
|
||||
|
||||
PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
echo $PARAMS
|
||||
#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node_shm0
|
||||
|
||||
|
||||
|
13
systems/Crusher/mpiwrapper.sh
Executable file
@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
lrank=$SLURM_LOCALID
|
||||
lgpu=(0 1 2 3 7 6 5 4)
|
||||
|
||||
export ROCR_VISIBLE_DEVICES=${lgpu[$lrank]}
|
||||
|
||||
echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES "
|
||||
|
||||
$*
|
||||
|
||||
|
||||
|
5
systems/Crusher/sourceme.sh
Normal file
@ -0,0 +1,5 @@
|
||||
module load PrgEnv-gnu
|
||||
module load rocm/4.5.0
|
||||
module load gmp
|
||||
module load cray-fftw
|
||||
module load craype-accel-amd-gfx90a
|
26
systems/Spock/comms.slurm
Normal file
@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -p ecp
|
||||
#SBATCH -J comms
|
||||
#SBATCH -o comms.%J
|
||||
#SBATCH -e comms.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 2
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
export OMP_NUM_THREADS=8
|
||||
|
||||
AT=8
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
PARAMS=" --accelerator-threads ${AT} --grid 64.64.32.32 --mpi 2.1.1.1 "
|
||||
srun -n2 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_comms_host_device $PARAMS
|
||||
|
14
systems/Spock/config-command
Normal file
@ -0,0 +1,14 @@
|
||||
../../configure --enable-comms=mpi-auto \
|
||||
--enable-unified=no \
|
||||
--enable-shm=nvlink \
|
||||
--enable-accelerator=hip \
|
||||
--enable-gen-simd-width=64 \
|
||||
--enable-simd=GPU \
|
||||
--disable-fermion-reps \
|
||||
--disable-gparity \
|
||||
--with-gmp=$OLCF_GMP_ROOT \
|
||||
--with-mpfr=/opt/cray/pe/gcc/mpfr/3.1.4/ \
|
||||
CXX=hipcc MPICXX=mpicxx \
|
||||
CXXFLAGS="-fPIC -I/opt/rocm-4.3.0/include/ -std=c++14 -I${MPICH_DIR}/include " \
|
||||
--prefix=/ccs/home/chulwoo/Grid \
|
||||
LDFLAGS=" -L${MPICH_DIR}/lib -lmpi -L${CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa "
|
26
systems/Spock/dwf.slurm
Normal file
@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -p ecp
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 1
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export OMP_NUM_THREADS=8
|
||||
|
||||
AT=8
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
PARAMS=" --accelerator-threads ${AT} --grid 32.32.32.32 --mpi 1.1.1.1 --comms-overlap"
|
||||
srun -n1 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
|
26
systems/Spock/dwf4.slurm
Normal file
@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
# Begin LSF Directives
|
||||
#SBATCH -A LGT104
|
||||
#SBATCH -t 01:00:00
|
||||
##SBATCH -U openmpThu
|
||||
#SBATCH -p ecp
|
||||
#SBATCH -J DWF
|
||||
#SBATCH -o DWF.%J
|
||||
#SBATCH -e DWF.%J
|
||||
#SBATCH -N 1
|
||||
#SBATCH -n 4
|
||||
|
||||
DIR=.
|
||||
module list
|
||||
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||
export MPICH_SMP_SINGLE_COPY_MODE=NONE
|
||||
#export MPICH_SMP_SINGLE_COPY_MODE=CMA
|
||||
export OMP_NUM_THREADS=8
|
||||
|
||||
AT=8
|
||||
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
|
||||
PARAMS=" --accelerator-threads ${AT} --grid 32.32.64.64 --mpi 1.1.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
|
||||
srun -n4 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
|
||||
|
24 systems/Spock/dwf8.slurm (Normal file)
@ -0,0 +1,24 @@
#!/bin/bash
# Begin LSF Directives
#SBATCH -A LGT104
#SBATCH -t 3:00:00
#SBATCH -p ecp
#SBATCH -J DWF
#SBATCH -o DWF.%J
#SBATCH -e DWF.%J
#SBATCH -N 2
#SBATCH -n 8

DIR=.
module list
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export MPICH_SMP_SINGLE_COPY_MODE=CMA

export OMP_NUM_THREADS=8

AT=8
echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
PARAMS=" --accelerator-threads ${AT} --grid 16.16.16.48 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
srun -N2 -n8 --label -c$OMP_NUM_THREADS --gpus-per-task=1 ./mpiwrapper.sh ./HMC/Mobius2p1f_DD_RHMC $PARAMS
12 systems/Spock/mpiwrapper.sh (Executable file)
@ -0,0 +1,12 @@
#!/bin/bash

lrank=$SLURM_LOCALID

export ROCR_VISIBLE_DEVICES=$SLURM_LOCALID

echo "`hostname` - $lrank device=$ROCR_VISIBLE_DEVICES binding=$BINDING"

$*
9 systems/Spock/sourceme.sh (Normal file)
@ -0,0 +1,9 @@
module load emacs
module load PrgEnv-gnu
module load rocm/4.5.0
module load gmp
module load cray-fftw
module load craype-accel-amd-gfx908
export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
export MPICH_GPU_SUPPORT_ENABLED=1
export LD_LIBRARY_PATH=/opt/cray/pe/gcc/mpfr/3.1.4/lib/:$LD_LIBRARY_PATH
179 systems/Summit/comms.4node (Normal file)
@ -0,0 +1,179 @@
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device Number : 0
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device identifier: Tesla V100-SXM2-16GB
|
||||
AcceleratorCudaInit[0]: totalGlobalMem: 16911433728
|
||||
AcceleratorCudaInit[0]: managedMemory: 1
|
||||
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
|
||||
AcceleratorCudaInit[0]: warpSize: 32
|
||||
AcceleratorCudaInit[0]: pciBusID: 4
|
||||
AcceleratorCudaInit[0]: pciDeviceID: 0
|
||||
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
|
||||
AcceleratorCudaInit: rank 0 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 0 device 0 bus id: 0004:04:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
SharedMemoryMpi: World communicator of size 24
|
||||
SharedMemoryMpi: Node communicator of size 6
|
||||
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 1073741824bytes at 0x200060000000 for comms buffers
|
||||
Setting up IPC
|
||||
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|_ | | | | | | | | | | | | _|__
|
||||
__|_ _|__
|
||||
__|_ GGGG RRRR III DDDD _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G GG RRRR I D D _|__
|
||||
__|_ G G R R I D D _|__
|
||||
__|_ GGGG R R III DDDD _|__
|
||||
__|_ _|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
| | | | | | | | | | | | | |
|
||||
|
||||
|
||||
Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
Current Grid git commit hash=7cb1ff7395a5833ded6526c43891bd07a0436290: (HEAD -> develop, origin/develop, origin/HEAD) clean
|
||||
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : MPI is initialised and logging filters activated
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : Requested 1073741824 byte stencil comms buffers
|
||||
AcceleratorCudaInit: rank 1 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 1 device 1 bus id: 0004:05:00.0
|
||||
AcceleratorCudaInit: rank 2 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 2 device 2 bus id: 0004:06:00.0
|
||||
AcceleratorCudaInit: rank 5 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 5 device 5 bus id: 0035:05:00.0
|
||||
AcceleratorCudaInit: rank 4 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 4 device 4 bus id: 0035:04:00.0
|
||||
AcceleratorCudaInit: rank 3 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 3 device 3 bus id: 0035:03:00.0
|
||||
Grid : Message : MemoryManager Cache 13529146982 bytes
|
||||
Grid : Message : MemoryManager::Init() setting up
|
||||
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
|
||||
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
|
||||
Grid : Message : MemoryManager::Init() Using cudaMalloc
|
||||
Grid : Message : 2.137929 s : Grid is setup to use 6 threads
|
||||
Grid : Message : 2.137941 s : Number of iterations to average: 250
|
||||
Grid : Message : 2.137950 s : ====================================================================================================
|
||||
Grid : Message : 2.137958 s : = Benchmarking sequential halo exchange from host memory
|
||||
Grid : Message : 2.137966 s : ====================================================================================================
|
||||
Grid : Message : 2.137974 s : L Ls bytes MB/s uni MB/s bidi
|
||||
AcceleratorCudaInit: rank 22 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 10 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 15 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 21 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 20 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 7 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 9 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 11 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 8 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 6 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 19 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 23 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 18 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 12 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 16 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 13 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 14 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 17 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
Grid : Message : 2.604949 s : 8 8 393216 89973.9 179947.8
|
||||
Grid : Message : 2.668249 s : 8 8 393216 18650.3 37300.5
|
||||
Grid : Message : 2.732288 s : 8 8 393216 18428.5 36857.1
|
||||
Grid : Message : 2.753565 s : 8 8 393216 55497.2 110994.4
|
||||
Grid : Message : 2.808960 s : 12 8 1327104 100181.5 200363.0
|
||||
Grid : Message : 3.226900 s : 12 8 1327104 20600.5 41201.0
|
||||
Grid : Message : 3.167459 s : 12 8 1327104 24104.6 48209.2
|
||||
Grid : Message : 3.227660 s : 12 8 1327104 66156.7 132313.5
|
||||
Grid : Message : 3.413570 s : 16 8 3145728 56174.4 112348.8
|
||||
Grid : Message : 3.802697 s : 16 8 3145728 24255.9 48511.7
|
||||
Grid : Message : 4.190498 s : 16 8 3145728 24336.7 48673.4
|
||||
Grid : Message : 4.385171 s : 16 8 3145728 48484.1 96968.2
|
||||
Grid : Message : 4.805284 s : 20 8 6144000 46380.5 92761.1
|
||||
Grid : Message : 5.562975 s : 20 8 6144000 24328.5 48656.9
|
||||
Grid : Message : 6.322562 s : 20 8 6144000 24266.7 48533.4
|
||||
Grid : Message : 6.773598 s : 20 8 6144000 40868.5 81736.9
|
||||
Grid : Message : 7.600999 s : 24 8 10616832 40198.3 80396.6
|
||||
Grid : Message : 8.912917 s : 24 8 10616832 24279.5 48559.1
|
||||
Grid : Message : 10.220961 s : 24 8 10616832 24350.2 48700.4
|
||||
Grid : Message : 11.728250 s : 24 8 10616832 37390.9 74781.8
|
||||
Grid : Message : 12.497258 s : 28 8 16859136 36792.2 73584.5
|
||||
Grid : Message : 14.585387 s : 28 8 16859136 24222.2 48444.3
|
||||
Grid : Message : 16.664783 s : 28 8 16859136 24323.4 48646.8
|
||||
Grid : Message : 17.955238 s : 28 8 16859136 39194.7 78389.4
|
||||
Grid : Message : 20.136479 s : 32 8 25165824 35718.3 71436.5
|
||||
Grid : Message : 23.241958 s : 32 8 25165824 24311.4 48622.9
|
||||
Grid : Message : 26.344810 s : 32 8 25165824 24331.9 48663.7
|
||||
Grid : Message : 28.384420 s : 32 8 25165824 37016.3 74032.7
|
||||
Grid : Message : 28.388879 s : ====================================================================================================
|
||||
Grid : Message : 28.388894 s : = Benchmarking sequential halo exchange from GPU memory
|
||||
Grid : Message : 28.388909 s : ====================================================================================================
|
||||
Grid : Message : 28.388924 s : L Ls bytes MB/s uni MB/s bidi
|
||||
Grid : Message : 28.553993 s : 8 8 393216 8272.4 16544.7
|
||||
Grid : Message : 28.679592 s : 8 8 393216 9395.4 18790.8
|
||||
Grid : Message : 28.811112 s : 8 8 393216 8971.0 17942.0
|
||||
Grid : Message : 28.843770 s : 8 8 393216 36145.6 72291.2
|
||||
Grid : Message : 28.981754 s : 12 8 1327104 49591.6 99183.2
|
||||
Grid : Message : 29.299764 s : 12 8 1327104 12520.8 25041.7
|
||||
Grid : Message : 29.620288 s : 12 8 1327104 12422.2 24844.4
|
||||
Grid : Message : 29.657645 s : 12 8 1327104 106637.5 213275.1
|
||||
Grid : Message : 29.952933 s : 16 8 3145728 43939.2 87878.5
|
||||
Grid : Message : 30.585411 s : 16 8 3145728 14922.1 29844.2
|
||||
Grid : Message : 31.219781 s : 16 8 3145728 14877.2 29754.4
|
||||
Grid : Message : 31.285017 s : 16 8 3145728 144724.3 289448.7
|
||||
Grid : Message : 31.706443 s : 20 8 6144000 54676.2 109352.4
|
||||
Grid : Message : 32.739205 s : 20 8 6144000 17848.0 35696.1
|
||||
Grid : Message : 33.771852 s : 20 8 6144000 17849.9 35699.7
|
||||
Grid : Message : 33.871981 s : 20 8 6144000 184141.4 368282.8
|
||||
Grid : Message : 34.536808 s : 24 8 10616832 55784.3 111568.6
|
||||
Grid : Message : 36.275648 s : 24 8 10616832 18317.6 36635.3
|
||||
Grid : Message : 37.997181 s : 24 8 10616832 18501.7 37003.4
|
||||
Grid : Message : 38.140442 s : 24 8 10616832 222383.9 444767.9
|
||||
Grid : Message : 39.177222 s : 28 8 16859136 56609.7 113219.4
|
||||
Grid : Message : 41.874755 s : 28 8 16859136 18749.9 37499.8
|
||||
Grid : Message : 44.529381 s : 28 8 16859136 19052.9 38105.8
|
||||
Grid : Message : 44.742192 s : 28 8 16859136 237717.1 475434.2
|
||||
Grid : Message : 46.184000 s : 32 8 25165824 57091.2 114182.4
|
||||
Grid : Message : 50.734740 s : 32 8 25165824 19411.0 38821.9
|
||||
Grid : Message : 53.931228 s : 32 8 25165824 19570.6 39141.2
|
||||
Grid : Message : 54.238467 s : 32 8 25165824 245765.6 491531.2
|
||||
Grid : Message : 54.268664 s : ====================================================================================================
|
||||
Grid : Message : 54.268680 s : = All done; Bye Bye
|
||||
Grid : Message : 54.268691 s : ====================================================================================================
|
14 systems/Summit/config-command (Normal file)
@ -0,0 +1,14 @@
../../configure --enable-comms=mpi \
--enable-simd=GPU \
--enable-gen-simd-width=32 \
--enable-unified=no \
--enable-shm=nvlink \
--disable-gparity \
--enable-setdevice \
--disable-fermion-reps \
--enable-accelerator=cuda \
--prefix /ccs/home/paboyle/prefix \
CXX=nvcc \
LDFLAGS=-L/ccs/home/paboyle/prefix/lib/ \
CXXFLAGS="-ccbin mpicxx -gencode arch=compute_70,code=sm_70 -I/ccs/home/paboyle/prefix/include/ -std=c++14"
206 systems/Summit/dwf.24.4node (Normal file)
@ -0,0 +1,206 @@
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device Number : 0
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device identifier: Tesla V100-SXM2-16GB
|
||||
AcceleratorCudaInit[0]: totalGlobalMem: 16911433728
|
||||
AcceleratorCudaInit[0]: managedMemory: 1
|
||||
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
|
||||
AcceleratorCudaInit[0]: warpSize: 32
|
||||
AcceleratorCudaInit[0]: pciBusID: 4
|
||||
AcceleratorCudaInit[0]: pciDeviceID: 0
|
||||
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
|
||||
AcceleratorCudaInit: rank 0 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 0 device 0 bus id: 0004:04:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
SharedMemoryMpi: World communicator of size 24
|
||||
SharedMemoryMpi: Node communicator of size 6
|
||||
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x200080000000 for comms buffers
|
||||
AcceleratorCudaInit: rank 3 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 3 device 3 bus id: 0035:03:00.0
|
||||
AcceleratorCudaInit: rank 5 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 5 device 5 bus id: 0035:05:00.0
|
||||
Setting up IPC
|
||||
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|_ | | | | | | | | | | | | _|__
|
||||
__|_ _|__
|
||||
__|_ GGGG RRRR III DDDD _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G GG RRRR I D D _|__
|
||||
__|_ G G R R I D D _|__
|
||||
__|_ GGGG R R III DDDD _|__
|
||||
__|_ _|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
| | | | | | | | | | | | | |
|
||||
|
||||
|
||||
Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
AcceleratorCudaInit: rank 4 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 4 device 4 bus id: 0035:04:00.0
|
||||
AcceleratorCudaInit: rank 1 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 1 device 1 bus id: 0004:05:00.0
|
||||
AcceleratorCudaInit: rank 2 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 2 device 2 bus id: 0004:06:00.0
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
Current Grid git commit hash=7cb1ff7395a5833ded6526c43891bd07a0436290: (HEAD -> develop, origin/develop, origin/HEAD) clean
|
||||
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : MPI is initialised and logging filters activated
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : Requested 2147483648 byte stencil comms buffers
|
||||
Grid : Message : MemoryManager Cache 8388608000 bytes
|
||||
Grid : Message : MemoryManager::Init() setting up
|
||||
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
|
||||
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
|
||||
Grid : Message : MemoryManager::Init() Using cudaMalloc
|
||||
Grid : Message : 1.731905 s : Grid Layout
|
||||
Grid : Message : 1.731915 s : Global lattice size : 48 48 48 72
|
||||
Grid : Message : 1.731928 s : OpenMP threads : 6
|
||||
Grid : Message : 1.731938 s : MPI tasks : 2 2 2 3
|
||||
AcceleratorCudaInit: rank 9 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 23 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 22 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 21 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 18 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 6 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 7 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 10 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 8 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 11 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 20 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 19 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 13 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 12 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 14 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 16 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 15 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 17 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
Grid : Message : 2.683494 s : Making s innermost grids
|
||||
Grid : Message : 2.780034 s : Initialising 4d RNG
|
||||
Grid : Message : 2.833099 s : Intialising parallel RNG with unique string 'The 4D RNG'
|
||||
Grid : Message : 2.833121 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
|
||||
Grid : Message : 2.916841 s : Initialising 5d RNG
|
||||
Grid : Message : 3.762880 s : Intialising parallel RNG with unique string 'The 5D RNG'
|
||||
Grid : Message : 3.762902 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
|
||||
Grid : Message : 5.264345 s : Initialised RNGs
|
||||
Grid : Message : 6.489904 s : Drawing gauge field
|
||||
Grid : Message : 6.729262 s : Random gauge initialised
|
||||
Grid : Message : 7.781273 s : Setting up Cshift based reference
|
||||
Grid : Message : 8.725313 s : *****************************************************************
|
||||
Grid : Message : 8.725332 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
|
||||
Grid : Message : 8.725342 s : *****************************************************************
|
||||
Grid : Message : 8.725352 s : *****************************************************************
|
||||
Grid : Message : 8.725362 s : * Benchmarking DomainWallFermionR::Dhop
|
||||
Grid : Message : 8.725372 s : * Vectorising space-time by 4
|
||||
Grid : Message : 8.725383 s : * VComplexF size is 32 B
|
||||
Grid : Message : 8.725395 s : * SINGLE precision
|
||||
Grid : Message : 8.725405 s : * Using Overlapped Comms/Compute
|
||||
Grid : Message : 8.725415 s : * Using GENERIC Nc WilsonKernels
|
||||
Grid : Message : 8.725425 s : *****************************************************************
|
||||
Grid : Message : 9.465229 s : Called warmup
|
||||
Grid : Message : 58.646066 s : Called Dw 3000 times in 4.91764e+07 us
|
||||
Grid : Message : 58.646121 s : mflop/s = 1.02592e+07
|
||||
Grid : Message : 58.646134 s : mflop/s per rank = 427468
|
||||
Grid : Message : 58.646145 s : mflop/s per node = 2.56481e+06
|
||||
Grid : Message : 58.646156 s : RF GiB/s (base 2) = 20846.5
|
||||
Grid : Message : 58.646166 s : mem GiB/s (base 2) = 13029.1
|
||||
Grid : Message : 58.648008 s : norm diff 1.04778e-13
|
||||
Grid : Message : 58.734885 s : #### Dhop calls report
|
||||
Grid : Message : 58.734897 s : WilsonFermion5D Number of DhopEO Calls : 6002
|
||||
Grid : Message : 58.734909 s : WilsonFermion5D TotalTime /Calls : 8217.71 us
|
||||
Grid : Message : 58.734922 s : WilsonFermion5D CommTime /Calls : 7109.5 us
|
||||
Grid : Message : 58.734933 s : WilsonFermion5D FaceTime /Calls : 446.623 us
|
||||
Grid : Message : 58.734943 s : WilsonFermion5D ComputeTime1/Calls : 18.0558 us
|
||||
Grid : Message : 58.734953 s : WilsonFermion5D ComputeTime2/Calls : 731.097 us
|
||||
Grid : Message : 58.734979 s : Average mflops/s per call : 4.8157e+09
|
||||
Grid : Message : 58.734989 s : Average mflops/s per call per rank : 2.00654e+08
|
||||
Grid : Message : 58.734999 s : Average mflops/s per call per node : 1.20393e+09
|
||||
Grid : Message : 58.735008 s : Average mflops/s per call (full) : 1.04183e+07
|
||||
Grid : Message : 58.735017 s : Average mflops/s per call per rank (full): 434094
|
||||
Grid : Message : 58.735026 s : Average mflops/s per call per node (full): 2.60456e+06
|
||||
Grid : Message : 58.735035 s : WilsonFermion5D Stencil
|
||||
Grid : Message : 58.735043 s : WilsonFermion5D StencilEven
|
||||
Grid : Message : 58.735051 s : WilsonFermion5D StencilOdd
|
||||
Grid : Message : 58.735059 s : WilsonFermion5D Stencil Reporti()
|
||||
Grid : Message : 58.735067 s : WilsonFermion5D StencilEven Reporti()
|
||||
Grid : Message : 58.735075 s : WilsonFermion5D StencilOdd Reporti()
|
||||
Grid : Message : 64.934380 s : Compare to naive wilson implementation Dag to verify correctness
|
||||
Grid : Message : 64.934740 s : Called DwDag
|
||||
Grid : Message : 64.934870 s : norm dag result 12.0422
|
||||
Grid : Message : 64.120756 s : norm dag ref 12.0422
|
||||
Grid : Message : 64.149389 s : norm dag diff 7.6644e-14
|
||||
Grid : Message : 64.317786 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
|
||||
Grid : Message : 64.465331 s : src_e0.499995
|
||||
Grid : Message : 64.524653 s : src_o0.500005
|
||||
Grid : Message : 64.558706 s : *********************************************************
|
||||
Grid : Message : 64.558717 s : * Benchmarking DomainWallFermionF::DhopEO
|
||||
Grid : Message : 64.558727 s : * Vectorising space-time by 4
|
||||
Grid : Message : 64.558737 s : * SINGLE precision
|
||||
Grid : Message : 64.558745 s : * Using Overlapped Comms/Compute
|
||||
Grid : Message : 64.558753 s : * Using GENERIC Nc WilsonKernels
|
||||
Grid : Message : 64.558761 s : *********************************************************
|
||||
Grid : Message : 92.702145 s : Deo mflop/s = 8.97692e+06
|
||||
Grid : Message : 92.702185 s : Deo mflop/s per rank 374038
|
||||
Grid : Message : 92.702198 s : Deo mflop/s per node 2.24423e+06
|
||||
Grid : Message : 92.702209 s : #### Dhop calls report
|
||||
Grid : Message : 92.702223 s : WilsonFermion5D Number of DhopEO Calls : 3001
|
||||
Grid : Message : 92.702240 s : WilsonFermion5D TotalTime /Calls : 9377.88 us
|
||||
Grid : Message : 92.702257 s : WilsonFermion5D CommTime /Calls : 8221.84 us
|
||||
Grid : Message : 92.702277 s : WilsonFermion5D FaceTime /Calls : 543.548 us
|
||||
Grid : Message : 92.702301 s : WilsonFermion5D ComputeTime1/Calls : 20.936 us
|
||||
Grid : Message : 92.702322 s : WilsonFermion5D ComputeTime2/Calls : 732.33 us
|
||||
Grid : Message : 92.702376 s : Average mflops/s per call : 4.13001e+09
|
||||
Grid : Message : 92.702387 s : Average mflops/s per call per rank : 1.72084e+08
|
||||
Grid : Message : 92.702397 s : Average mflops/s per call per node : 1.0325e+09
|
||||
Grid : Message : 92.702407 s : Average mflops/s per call (full) : 9.12937e+06
|
||||
Grid : Message : 92.702416 s : Average mflops/s per call per rank (full): 380391
|
||||
Grid : Message : 92.702426 s : Average mflops/s per call per node (full): 2.28234e+06
|
||||
Grid : Message : 92.702435 s : WilsonFermion5D Stencil
|
||||
Grid : Message : 92.702443 s : WilsonFermion5D StencilEven
|
||||
Grid : Message : 92.702451 s : WilsonFermion5D StencilOdd
|
||||
Grid : Message : 92.702459 s : WilsonFermion5D Stencil Reporti()
|
||||
Grid : Message : 92.702467 s : WilsonFermion5D StencilEven Reporti()
|
||||
Grid : Message : 92.702475 s : WilsonFermion5D StencilOdd Reporti()
|
||||
Grid : Message : 92.772983 s : r_e6.02121
|
||||
Grid : Message : 92.786384 s : r_o6.02102
|
||||
Grid : Message : 92.799622 s : res12.0422
|
||||
Grid : Message : 93.860500 s : norm diff 0
|
||||
Grid : Message : 93.162026 s : norm diff even 0
|
||||
Grid : Message : 93.197529 s : norm diff odd 0
|
206 systems/Summit/dwf.32.4node (Normal file)
@ -0,0 +1,206 @@
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device Number : 0
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device identifier: Tesla V100-SXM2-16GB
|
||||
AcceleratorCudaInit[0]: totalGlobalMem: 16911433728
|
||||
AcceleratorCudaInit[0]: managedMemory: 1
|
||||
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
|
||||
AcceleratorCudaInit[0]: warpSize: 32
|
||||
AcceleratorCudaInit[0]: pciBusID: 4
|
||||
AcceleratorCudaInit[0]: pciDeviceID: 0
|
||||
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
|
||||
AcceleratorCudaInit: rank 0 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 0 device 0 bus id: 0004:04:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
SharedMemoryMpi: World communicator of size 24
|
||||
SharedMemoryMpi: Node communicator of size 6
|
||||
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x200080000000 for comms buffers
|
||||
Setting up IPC
|
||||
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|_ | | | | | | | | | | | | _|__
|
||||
__|_ _|__
|
||||
__|_ GGGG RRRR III DDDD _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G R R I D D _|__
|
||||
__|_ G GG RRRR I D D _|__
|
||||
__|_ G G R R I D D _|__
|
||||
__|_ GGGG R R III DDDD _|__
|
||||
__|_ _|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
| | | | | | | | | | | | | |
|
||||
|
||||
|
||||
Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
AcceleratorCudaInit: rank 2 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 2 device 2 bus id: 0004:06:00.0
|
||||
AcceleratorCudaInit: rank 1 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 1 device 1 bus id: 0004:05:00.0
|
||||
AcceleratorCudaInit: rank 4 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 4 device 4 bus id: 0035:04:00.0
|
||||
AcceleratorCudaInit: rank 3 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 3 device 3 bus id: 0035:03:00.0
|
||||
AcceleratorCudaInit: rank 5 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
local rank 5 device 5 bus id: 0035:05:00.0
|
||||
GNU General Public License for more details.
|
||||
Current Grid git commit hash=7cb1ff7395a5833ded6526c43891bd07a0436290: (HEAD -> develop, origin/develop, origin/HEAD) clean
|
||||
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : MPI is initialised and logging filters activated
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : Requested 2147483648 byte stencil comms buffers
|
||||
Grid : Message : MemoryManager Cache 8388608000 bytes
|
||||
Grid : Message : MemoryManager::Init() setting up
|
||||
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
|
||||
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
|
||||
Grid : Message : MemoryManager::Init() Using cudaMalloc
|
||||
Grid : Message : 1.544984 s : Grid Layout
|
||||
Grid : Message : 1.544992 s : Global lattice size : 64 64 64 96
|
||||
Grid : Message : 1.545003 s : OpenMP threads : 6
|
||||
Grid : Message : 1.545011 s : MPI tasks : 2 2 2 3
|
||||
AcceleratorCudaInit: rank 8 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 6 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 11 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 16 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 17 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 13 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 12 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 21 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 23 setting device to node rank 5
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 22 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 19 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 18 setting device to node rank 0
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 7 setting device to node rank 1
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 10 setting device to node rank 4
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 9 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 14 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 15 setting device to node rank 3
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
AcceleratorCudaInit: rank 20 setting device to node rank 2
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=yes
|
||||
Grid : Message : 2.994920 s : Making s innermost grids
|
||||
Grid : Message : 2.232502 s : Initialising 4d RNG
|
||||
Grid : Message : 2.397047 s : Intialising parallel RNG with unique string 'The 4D RNG'
|
||||
Grid : Message : 2.397069 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
|
||||
Grid : Message : 2.653140 s : Initialising 5d RNG
|
||||
Grid : Message : 5.285347 s : Intialising parallel RNG with unique string 'The 5D RNG'
|
||||
Grid : Message : 5.285369 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
|
||||
Grid : Message : 9.994738 s : Initialised RNGs
|
||||
Grid : Message : 13.153426 s : Drawing gauge field
|
||||
Grid : Message : 13.825697 s : Random gauge initialised
|
||||
Grid : Message : 18.537657 s : Setting up Cshift based reference
|
||||
Grid : Message : 22.296755 s : *****************************************************************
|
||||
Grid : Message : 22.296781 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
|
||||
Grid : Message : 22.296791 s : *****************************************************************
|
||||
Grid : Message : 22.296800 s : *****************************************************************
|
||||
Grid : Message : 22.296809 s : * Benchmarking DomainWallFermionR::Dhop
|
||||
Grid : Message : 22.296818 s : * Vectorising space-time by 4
|
||||
Grid : Message : 22.296828 s : * VComplexF size is 32 B
|
||||
Grid : Message : 22.296838 s : * SINGLE precision
|
||||
Grid : Message : 22.296847 s : * Using Overlapped Comms/Compute
|
||||
Grid : Message : 22.296855 s : * Using GENERIC Nc WilsonKernels
|
||||
Grid : Message : 22.296863 s : *****************************************************************
|
||||
Grid : Message : 24.746452 s : Called warmup
|
||||
Grid : Message : 137.525756 s : Called Dw 3000 times in 1.12779e+08 us
|
||||
Grid : Message : 137.525818 s : mflop/s = 1.41383e+07
|
||||
Grid : Message : 137.525831 s : mflop/s per rank = 589097
|
||||
Grid : Message : 137.525843 s : mflop/s per node = 3.53458e+06
|
||||
Grid : Message : 137.525854 s : RF GiB/s (base 2) = 28728.7
|
||||
Grid : Message : 137.525864 s : mem GiB/s (base 2) = 17955.5
|
||||
Grid : Message : 137.693645 s : norm diff 1.04885e-13
|
||||
Grid : Message : 137.965585 s : #### Dhop calls report
|
||||
Grid : Message : 137.965598 s : WilsonFermion5D Number of DhopEO Calls : 6002
|
||||
Grid : Message : 137.965612 s : WilsonFermion5D TotalTime /Calls : 18899.7 us
|
||||
Grid : Message : 137.965624 s : WilsonFermion5D CommTime /Calls : 16041.4 us
|
||||
Grid : Message : 137.965634 s : WilsonFermion5D FaceTime /Calls : 859.705 us
|
||||
Grid : Message : 137.965644 s : WilsonFermion5D ComputeTime1/Calls : 70.5881 us
|
||||
Grid : Message : 137.965654 s : WilsonFermion5D ComputeTime2/Calls : 2094.8 us
|
||||
Grid : Message : 137.965682 s : Average mflops/s per call : 3.87638e+09
|
||||
Grid : Message : 137.965692 s : Average mflops/s per call per rank : 1.61516e+08
|
||||
Grid : Message : 137.965702 s : Average mflops/s per call per node : 9.69095e+08
|
||||
Grid : Message : 137.965712 s : Average mflops/s per call (full) : 1.43168e+07
|
||||
Grid : Message : 137.965721 s : Average mflops/s per call per rank (full): 596533
|
||||
Grid : Message : 137.965730 s : Average mflops/s per call per node (full): 3.5792e+06
|
||||
Grid : Message : 137.965740 s : WilsonFermion5D Stencil
|
||||
Grid : Message : 137.965748 s : WilsonFermion5D StencilEven
|
||||
Grid : Message : 137.965756 s : WilsonFermion5D StencilOdd
|
||||
Grid : Message : 137.965764 s : WilsonFermion5D Stencil Reporti()
|
||||
Grid : Message : 137.965772 s : WilsonFermion5D StencilEven Reporti()
|
||||
Grid : Message : 137.965780 s : WilsonFermion5D StencilOdd Reporti()
|
||||
Grid : Message : 156.554605 s : Compare to naive wilson implementation Dag to verify correctness
|
||||
Grid : Message : 156.554632 s : Called DwDag
|
||||
Grid : Message : 156.554642 s : norm dag result 12.0421
|
||||
Grid : Message : 156.639265 s : norm dag ref 12.0421
|
||||
Grid : Message : 156.888281 s : norm dag diff 7.62057e-14
|
||||
Grid : Message : 157.609797 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
|
||||
Grid : Message : 158.208630 s : src_e0.499996
|
||||
Grid : Message : 158.162447 s : src_o0.500004
|
||||
Grid : Message : 158.267780 s : *********************************************************
|
||||
Grid : Message : 158.267791 s : * Benchmarking DomainWallFermionF::DhopEO
|
||||
Grid : Message : 158.267801 s : * Vectorising space-time by 4
|
||||
Grid : Message : 158.267811 s : * SINGLE precision
|
||||
Grid : Message : 158.267820 s : * Using Overlapped Comms/Compute
|
||||
Grid : Message : 158.267828 s : * Using GENERIC Nc WilsonKernels
|
||||
Grid : Message : 158.267836 s : *********************************************************
|
||||
Grid : Message : 216.487829 s : Deo mflop/s = 1.37283e+07
|
||||
Grid : Message : 216.487869 s : Deo mflop/s per rank 572011
|
||||
Grid : Message : 216.487881 s : Deo mflop/s per node 3.43206e+06
|
||||
Grid : Message : 216.487893 s : #### Dhop calls report
|
||||
Grid : Message : 216.487903 s : WilsonFermion5D Number of DhopEO Calls : 3001
|
||||
Grid : Message : 216.487913 s : WilsonFermion5D TotalTime /Calls : 19399.6 us
|
||||
Grid : Message : 216.487923 s : WilsonFermion5D CommTime /Calls : 16475.4 us
|
||||
Grid : Message : 216.487933 s : WilsonFermion5D FaceTime /Calls : 972.393 us
|
||||
Grid : Message : 216.487943 s : WilsonFermion5D ComputeTime1/Calls : 49.8474 us
|
||||
Grid : Message : 216.487953 s : WilsonFermion5D ComputeTime2/Calls : 2089.93 us
|
||||
Grid : Message : 216.488001 s : Average mflops/s per call : 5.39682e+09
|
||||
Grid : Message : 216.488011 s : Average mflops/s per call per rank : 2.24867e+08
|
||||
Grid : Message : 216.488020 s : Average mflops/s per call per node : 1.3492e+09
|
||||
Grid : Message : 216.488030 s : Average mflops/s per call (full) : 1.39479e+07
|
||||
Grid : Message : 216.488039 s : Average mflops/s per call per rank (full): 581162
|
||||
Grid : Message : 216.488048 s : Average mflops/s per call per node (full): 3.48697e+06
|
||||
Grid : Message : 216.488057 s : WilsonFermion5D Stencil
|
||||
Grid : Message : 216.488065 s : WilsonFermion5D StencilEven
|
||||
Grid : Message : 216.488073 s : WilsonFermion5D StencilOdd
|
||||
Grid : Message : 216.488081 s : WilsonFermion5D Stencil Reporti()
|
||||
Grid : Message : 216.488089 s : WilsonFermion5D StencilEven Reporti()
|
||||
Grid : Message : 216.488097 s : WilsonFermion5D StencilOdd Reporti()
|
||||
Grid : Message : 217.384495 s : r_e6.02113
|
||||
Grid : Message : 217.426121 s : r_o6.02096
|
||||
Grid : Message : 217.472636 s : res12.0421
|
||||
Grid : Message : 218.200068 s : norm diff 0
|
||||
Grid : Message : 218.645673 s : norm diff even 0
|
||||
Grid : Message : 218.816561 s : norm diff odd 0
|
25 systems/Summit/dwf16.lsf (Normal file)
@ -0,0 +1,25 @@
#!/bin/bash
#BSUB -P LGT104
#BSUB -W 2:00
#BSUB -nnodes 16
#BSUB -J DWF

export OMP_NUM_THREADS=6
export PAMI_IBV_ADAPTER_AFFINITY=1
export PAMI_ENABLE_STRIPING=1
export OPT="--comms-concurrent --comms-overlap "

APP="./benchmarks/Benchmark_comms_host_device --mpi 4.4.4.3 "
jsrun --nrs 16 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > comms.16node.log

APP="./benchmarks/Benchmark_dwf_fp32 --grid 96.96.96.72 --mpi 4.4.4.3 --shm 2048 --shm-force-mpi 1 --device-mem 8000 --shm-force-mpi 1 $OPT "
jsrun --nrs 16 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > dwf.16node.24.log

APP="./benchmarks/Benchmark_dwf_fp32 --grid 128.128.128.96 --mpi 4.4.4.3 --shm 2048 --shm-force-mpi 1 --device-mem 8000 --shm-force-mpi 1 $OPT "
jsrun --nrs 16 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > dwf.16node.32.log
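Summit submission goes through LSF rather than Slurm (standard bsub usage, nothing repository-specific); the #BSUB directives above request 16 nodes for two hours under project LGT104, and each jsrun line redirects its output to its own log file:

bsub < dwf16.lsf             # submit the script read from stdin
bjobs -u $USER               # monitor the job
tail -f dwf.16node.32.log    # follow the 128.128.128.96 run once it starts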
25 systems/Summit/dwf4.lsf (Normal file)
@ -0,0 +1,25 @@
#!/bin/bash
#BSUB -P LGT104
#BSUB -W 2:00
#BSUB -nnodes 4
#BSUB -J DWF

export OMP_NUM_THREADS=6
export PAMI_IBV_ADAPTER_AFFINITY=1
export PAMI_ENABLE_STRIPING=1
export OPT="--comms-concurrent --comms-overlap "
#export GRID_ALLOC_NCACHE_LARGE=1
export APP="./benchmarks/Benchmark_comms_host_device --mpi 2.2.2.3 "
jsrun --nrs 4 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > comms.4node

APP="./benchmarks/Benchmark_dwf_fp32 --grid 48.48.48.72 --mpi 2.2.2.3 --shm 2048 --shm-force-mpi 1 --device-mem 8000 --shm-force-mpi 1 $OPT "
jsrun --nrs 4 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > dwf.24.4node

APP="./benchmarks/Benchmark_dwf_fp32 --grid 64.64.64.96 --mpi 2.2.2.3 --shm 2048 --shm-force-mpi 1 --device-mem 8000 --shm-force-mpi 1 $OPT "
jsrun --nrs 4 -a6 -g6 -c42 -dpacked -b packed:7 --latency_priority gpu-cpu --smpiargs=-gpu $APP > dwf.32.4node
8 systems/Summit/sourceme-cuda10.sh (Normal file)
@ -0,0 +1,8 @@
export UCX_GDR_COPY_RCACHE=no
export UCX_MEMTYPE_CACHE=n
export UCX_RNDV_SCHEME=put_zcopy
module load gcc/7.5.0
module load cuda/10.2.89
#cuda/11.4.0
export LD_LIBRARY_PATH=/ccs/home/paboyle/prefix/lib/:$LD_LIBRARY_PATH
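Putting the Summit files in this directory together, a typical session looks roughly like the sketch below (hedged: using systems/Summit as the build directory and the make step are assumptions; the three files invoked are the ones shown above):

# hypothetical end-to-end Summit session
cd systems/Summit
source sourceme-cuda10.sh    # UCX settings plus gcc/7.5.0 and cuda/10.2.89
bash ./config-command        # runs ../../configure ... CXX=nvcc for sm_70
make -j 16
bsub < dwf4.lsf              # 4-node comms and DWF benchmarks; logs land in comms.4node, dwf.24.4node, dwf.32.4node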
@ -5,7 +5,7 @@
 --enable-gen-simd-width=64 \
 --enable-accelerator=cuda \
 --with-lime=/mnt/lustre/tursafs1/home/tc002/tc002/dc-boyl1/spack/spack/opt/spack/linux-rhel8-zen/gcc-8.4.1/c-lime-2-3-9-e6wxqrid6rqmd45z7n32dxkvkykpvyez \
---disable-accelerator-cshift \
+--enable-accelerator-cshift \
 --disable-unified \
 CXX=nvcc \
 LDFLAGS="-cudart shared " \
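The only change in this hunk flips accelerator cshift support in the Tursa build from disabled to enabled. Whether an existing build tree was configured before or after the change can be read back from the standard autoconf record, since config.log stores the original configure command line (a hypothetical check, not a script from the repository):

grep -Eo -- '--(dis|en)able-accelerator-cshift' config.log | head -1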
@ -1,25 +1,25 @@
|
||||
tu-c0r0n00 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r0n00 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r0n09 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r0n00 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r0n06 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r0n06 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r0n09 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r0n09 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r0n03 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r0n06 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r0n09 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r0n00 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r0n03 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r0n03 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r0n06 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r0n03 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r3n00 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r3n00 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r3n00 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r3n00 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r3n06 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r3n06 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r3n06 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r3n06 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r3n03 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r3n03 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r3n03 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r3n03 - 3 device=3 binding=--interleave=6,7
|
||||
tu-c0r3n09 - 0 device=0 binding=--interleave=0,1
|
||||
tu-c0r3n09 - 1 device=1 binding=--interleave=2,3
|
||||
tu-c0r3n09 - 2 device=2 binding=--interleave=4,5
|
||||
tu-c0r3n09 - 3 device=3 binding=--interleave=6,7
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device Number : 0
|
||||
@ -33,11 +33,41 @@ AcceleratorCudaInit[0]: pciBusID: 3
|
||||
AcceleratorCudaInit[0]: pciDeviceID: 0
|
||||
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
AcceleratorCudaInit[0]: Device Number : 0
|
||||
AcceleratorCudaInit[0]: ========================
|
||||
@ -50,43 +80,25 @@ AcceleratorCudaInit[0]: pciBusID: 3
|
||||
AcceleratorCudaInit[0]: pciDeviceID: 0
|
||||
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: assume user either uses
|
||||
AcceleratorCudaInit: a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: Configure options --enable-setdevice=no
|
||||
local rank 1 device 0 bus id: 0000:44:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
local rank 0 device 0 bus id: 0000:03:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
OPENMPI detected
|
||||
AcceleratorCudaInit: using default device
|
||||
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
|
||||
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
|
||||
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
|
||||
AcceleratorCudaInit: ================================================
|
||||
local rank 0 device 0 bus id: 0000:03:00.0
|
||||
AcceleratorCudaInit: ================================================
|
||||
AcceleratorCudaInit: ================================================
|
||||
local rank 2 device 0 bus id: 0000:84:00.0
|
||||
SharedMemoryMpi: World communicator of size 16
|
||||
SharedMemoryMpi: Node communicator of size 4
|
||||
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x7fcd80000000 for comms buffers
|
||||
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x153960000000 for comms buffers
|
||||
Setting up IPC
|
||||
|
||||
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
|
||||
@ -116,7 +128,7 @@ This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
Current Grid git commit hash=9d2238148c56e3fbadfa95dcabf2b83d4bde14cd: (HEAD -> develop) uncommited changes
|
||||
Current Grid git commit hash=da06d15f73184ceb15d66d4e7e702b02fed7b940: (HEAD -> feature/dirichlet, develop) uncommited changes
|
||||
|
||||
Grid : Message : ================================================
|
||||
Grid : Message : MPI is initialised and logging filters activated
|
||||
@ -124,122 +136,102 @@ Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 1.198523 s : Grid Layout
Grid : Message : 1.198530 s : Global lattice size : 64 64 64 64
Grid : Message : 1.198534 s : OpenMP threads : 4
Grid : Message : 1.198535 s : MPI tasks : 2 2 2 2
Grid : Message : 1.397615 s : Making s innermost grids
Grid : Message : 1.441828 s : Initialising 4d RNG
Grid : Message : 1.547973 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 1.547998 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 1.954777 s : Initialising 5d RNG
Grid : Message : 3.633825 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 3.633869 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 12.162710 s : Initialised RNGs
Grid : Message : 15.882520 s : Drawing gauge field
Grid : Message : 15.816362 s : Random gauge initialised
Grid : Message : 17.279671 s : Setting up Cshift based reference
Grid : Message : 26.331426 s : *****************************************************************
Grid : Message : 26.331452 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 26.331454 s : *****************************************************************
Grid : Message : 26.331456 s : *****************************************************************
Grid : Message : 26.331458 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 26.331459 s : * Vectorising space-time by 8
Grid : Message : 26.331463 s : * VComplexF size is 64 B
Grid : Message : 26.331465 s : * SINGLE precision
Grid : Message : 26.331467 s : * Using Overlapped Comms/Compute
Grid : Message : 26.331468 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 26.331469 s : *****************************************************************
Grid : Message : 28.413717 s : Called warmup
Grid : Message : 56.418423 s : Called Dw 3000 times in 2.80047e+07 us
Grid : Message : 56.418476 s : mflop/s = 3.79581e+07
Grid : Message : 56.418479 s : mflop/s per rank = 2.37238e+06
Grid : Message : 56.418481 s : mflop/s per node = 9.48953e+06
Grid : Message : 56.418483 s : RF GiB/s (base 2) = 77130
Grid : Message : 56.418485 s : mem GiB/s (base 2) = 48206.3
Grid : Message : 56.422076 s : norm diff 1.03481e-13
Grid : Message : 56.456894 s : #### Dhop calls report
Grid : Message : 56.456899 s : WilsonFermion5D Number of DhopEO Calls : 6002
Grid : Message : 56.456903 s : WilsonFermion5D TotalTime /Calls : 4710.93 us
Grid : Message : 56.456905 s : WilsonFermion5D CommTime /Calls : 3196.15 us
Grid : Message : 56.456908 s : WilsonFermion5D FaceTime /Calls : 494.392 us
Grid : Message : 56.456910 s : WilsonFermion5D ComputeTime1/Calls : 44.4107 us
Grid : Message : 56.456912 s : WilsonFermion5D ComputeTime2/Calls : 1037.75 us
Grid : Message : 56.456921 s : Average mflops/s per call : 3.55691e+09
Grid : Message : 56.456925 s : Average mflops/s per call per rank : 2.22307e+08
Grid : Message : 56.456928 s : Average mflops/s per call per node : 8.89228e+08
Grid : Message : 56.456930 s : Average mflops/s per call (full) : 3.82915e+07
Grid : Message : 56.456933 s : Average mflops/s per call per rank (full): 2.39322e+06
Grid : Message : 56.456952 s : Average mflops/s per call per node (full): 9.57287e+06
Grid : Message : 56.456954 s : WilsonFermion5D Stencil
Grid : Message : 56.457016 s : Stencil calls 3001
Grid : Message : 56.457022 s : Stencil halogtime 0
Grid : Message : 56.457024 s : Stencil gathertime 55.9154
Grid : Message : 56.457026 s : Stencil gathermtime 20.1073
Grid : Message : 56.457028 s : Stencil mergetime 18.5585
Grid : Message : 56.457030 s : Stencil decompresstime 0.0639787
Grid : Message : 56.457032 s : Stencil comms_bytes 4.02653e+08
Grid : Message : 56.457034 s : Stencil commtime 6379.93
Grid : Message : 56.457036 s : Stencil 63.1124 GB/s per rank
Grid : Message : 56.457038 s : Stencil 252.45 GB/s per node
Grid : Message : 56.457040 s : WilsonFermion5D StencilEven
Grid : Message : 56.457048 s : WilsonFermion5D StencilOdd
Grid : Message : 56.457062 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 56.457065 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 56.457066 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 79.259261 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 79.259287 s : Called DwDag
Grid : Message : 79.259288 s : norm dag result 12.0421
Grid : Message : 79.271740 s : norm dag ref 12.0421
Grid : Message : 79.287759 s : norm dag diff 7.63236e-14
Grid : Message : 79.328100 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 79.955951 s : src_e0.499997
Grid : Message : 80.633620 s : src_o0.500003
Grid : Message : 80.164163 s : *********************************************************
Grid : Message : 80.164168 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 80.164170 s : * Vectorising space-time by 8
Grid : Message : 80.164172 s : * SINGLE precision
Grid : Message : 80.164174 s : * Using Overlapped Comms/Compute
Grid : Message : 80.164177 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 80.164178 s : *********************************************************
Grid : Message : 93.797635 s : Deo mflop/s = 3.93231e+07
Grid : Message : 93.797670 s : Deo mflop/s per rank 2.45769e+06
Grid : Message : 93.797672 s : Deo mflop/s per node 9.83077e+06
Grid : Message : 93.797674 s : #### Dhop calls report
Grid : Message : 93.797675 s : WilsonFermion5D Number of DhopEO Calls : 3001
Grid : Message : 93.797677 s : WilsonFermion5D TotalTime /Calls : 4542.83 us
Grid : Message : 93.797679 s : WilsonFermion5D CommTime /Calls : 2978.97 us
Grid : Message : 93.797681 s : WilsonFermion5D FaceTime /Calls : 602.287 us
Grid : Message : 93.797683 s : WilsonFermion5D ComputeTime1/Calls : 67.1416 us
Grid : Message : 93.797685 s : WilsonFermion5D ComputeTime2/Calls : 1004.07 us
Grid : Message : 93.797713 s : Average mflops/s per call : 3.30731e+09
Grid : Message : 93.797717 s : Average mflops/s per call per rank : 2.06707e+08
Grid : Message : 93.797719 s : Average mflops/s per call per node : 8.26827e+08
Grid : Message : 93.797721 s : Average mflops/s per call (full) : 3.97084e+07
Grid : Message : 93.797727 s : Average mflops/s per call per rank (full): 2.48178e+06
Grid : Message : 93.797732 s : Average mflops/s per call per node (full): 9.92711e+06
Grid : Message : 93.797735 s : WilsonFermion5D Stencil
Grid : Message : 93.797746 s : WilsonFermion5D StencilEven
Grid : Message : 93.797758 s : WilsonFermion5D StencilOdd
Grid : Message : 93.797769 s : Stencil calls 3001
Grid : Message : 93.797773 s : Stencil halogtime 0
Grid : Message : 93.797776 s : Stencil gathertime 56.7458
Grid : Message : 93.797780 s : Stencil gathermtime 22.6504
Grid : Message : 93.797782 s : Stencil mergetime 21.1913
Grid : Message : 93.797786 s : Stencil decompresstime 0.0556481
Grid : Message : 93.797788 s : Stencil comms_bytes 2.01327e+08
Grid : Message : 93.797791 s : Stencil commtime 2989.33
Grid : Message : 93.797795 s : Stencil 67.3484 GB/s per rank
Grid : Message : 93.797798 s : Stencil 269.394 GB/s per node
Grid : Message : 93.797801 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 93.797803 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 93.797805 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 93.873429 s : r_e6.02111
Grid : Message : 93.879931 s : r_o6.02102
Grid : Message : 93.885912 s : res12.0421
Grid : Message : 94.876555 s : norm diff 0
Grid : Message : 95.485643 s : norm diff even 0
Grid : Message : 95.581236 s : norm diff odd 0
Grid : Message : 1.875883 s : Grid Layout
Grid : Message : 1.875893 s : Global lattice size : 64 64 64 64
Grid : Message : 1.875897 s : OpenMP threads : 4
Grid : Message : 1.875898 s : MPI tasks : 2 2 2 2
Grid : Message : 1.993571 s : Initialising 4d RNG
Grid : Message : 2.881990 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 2.882370 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 2.495044 s : Initialising 5d RNG
Grid : Message : 4.120900 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 4.121350 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 15.268010 s : Drawing gauge field
Grid : Message : 16.234025 s : Random gauge initialised
Grid : Message : 16.234057 s : Applying BCs
Grid : Message : 16.365565 s : Setting up Cshift based reference
Grid : Message : 44.512418 s : *****************************************************************
Grid : Message : 44.512448 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 44.512450 s : *****************************************************************
Grid : Message : 44.512451 s : *****************************************************************
Grid : Message : 44.512452 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 44.512453 s : * Vectorising space-time by 8
Grid : Message : 44.512454 s : * VComplexF size is 64 B
Grid : Message : 44.512456 s : * SINGLE precision
Grid : Message : 44.512459 s : * Using Overlapped Comms/Compute
Grid : Message : 44.512460 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 44.512461 s : *****************************************************************
Grid : Message : 46.389070 s : Called warmup
Grid : Message : 49.211265 s : Called Dw 300 times in 2.82203e+06 us
Grid : Message : 49.211295 s : mflop/s = 3.76681e+07
Grid : Message : 49.211297 s : mflop/s per rank = 2.35425e+06
Grid : Message : 49.211299 s : mflop/s per node = 9.41702e+06
Grid : Message : 49.211301 s : RF GiB/s (base 2) = 76540.6
Grid : Message : 49.211308 s : mem GiB/s (base 2) = 47837.9
Grid : Message : 49.214868 s : norm diff 1.06409e-13
Grid : Message : 92.647781 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 92.647816 s : Called DwDag
Grid : Message : 92.647817 s : norm dag result 12.0421
Grid : Message : 92.801806 s : norm dag ref 12.0421
Grid : Message : 92.817724 s : norm dag diff 7.21921e-14
Grid : Message : 92.858973 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 93.210378 s : src_e0.499997
Grid : Message : 93.583286 s : src_o0.500003
Grid : Message : 93.682468 s : *********************************************************
Grid : Message : 93.682471 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 93.682472 s : * Vectorising space-time by 8
Grid : Message : 93.682473 s : * SINGLE precision
Grid : Message : 93.682475 s : * Using Overlapped Comms/Compute
Grid : Message : 93.682476 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 93.682477 s : *********************************************************
Grid : Message : 95.162342 s : Deo mflop/s = 3.92487e+07
Grid : Message : 95.162387 s : Deo mflop/s per rank 2.45305e+06
Grid : Message : 95.162389 s : Deo mflop/s per node 9.81219e+06
Grid : Message : 95.232801 s : r_e6.02111
Grid : Message : 95.240061 s : r_o6.02102
Grid : Message : 95.245975 s : res12.0421
Grid : Message : 95.833402 s : norm diff 0
Grid : Message : 96.573829 s : norm diff even 0
Grid : Message : 96.868272 s : norm diff odd 0
Dirichlet block [0 64 64 32 32]
Grid : Message : 97.756909 s : Grid Layout
Grid : Message : 97.756911 s : Global lattice size : 64 64 64 64
Grid : Message : 97.756921 s : OpenMP threads : 4
Grid : Message : 97.756922 s : MPI tasks : 2 2 2 2
Grid : Message : 97.897085 s : Initialising 4d RNG
Grid : Message : 97.965061 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 97.965097 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 98.367431 s : Initialising 5d RNG
Grid : Message : 99.752745 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 99.752790 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 111.290148 s : Drawing gauge field
Grid : Message : 112.349289 s : Random gauge initialised
Grid : Message : 112.349320 s : Applying BCs
Grid : Message : 113.948740 s : Setting up Cshift based reference
Grid : Message : 140.320415 s : *****************************************************************
Grid : Message : 140.320443 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 140.320444 s : *****************************************************************
Grid : Message : 140.320445 s : *****************************************************************
Grid : Message : 140.320446 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 140.320447 s : * Vectorising space-time by 8
Grid : Message : 140.320448 s : * VComplexF size is 64 B
Grid : Message : 140.320450 s : * SINGLE precision
Grid : Message : 140.320451 s : * Using Overlapped Comms/Compute
Grid : Message : 140.320452 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 140.320453 s : *****************************************************************
Grid : Message : 142.296150 s : Called warmup
Grid : Message : 144.397678 s : Called Dw 300 times in 2.36719e+06 us
Grid : Message : 144.397700 s : mflop/s = 4.49058e+07
Grid : Message : 144.397702 s : mflop/s per rank = 2.80661e+06
Grid : Message : 144.397704 s : mflop/s per node = 1.12265e+07
Grid : Message : 144.397706 s : RF GiB/s (base 2) = 91247.6
Grid : Message : 144.397708 s : mem GiB/s (base 2) = 57029.7
Grid : Message : 144.401269 s : norm diff 9.78944e-14
Grid : Message : 186.885460 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 186.885492 s : Called DwDag
Grid : Message : 186.885493 s : norm dag result 10.4157
Grid : Message : 186.897154 s : norm dag ref 11.2266
Grid : Message : 186.912538 s : norm dag diff 0.484633
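The mflop/s figures reported in these logs follow from Grid's conventional Dhop flop count of 1320 per 4d site per s-slice, the 64^4 global volume, and the call count and wall time printed above. A minimal stand-alone sketch (not part of this diff) reproducing the first run's number, assuming Ls = 16 (the fifth-dimension extent is not printed in this excerpt):

// Sketch only: reproduce "mflop/s = 3.79581e+07" from the log above.
// Assumed: 1320 flops per 4d site per s-slice, Ls = 16.
#include <cstdio>

int main() {
  const double flops_per_site = 1320.0;                     // Wilson Dhop flop count per 4d site per s-slice
  const double volume4d       = 64.0 * 64.0 * 64.0 * 64.0;  // "Global lattice size : 64 64 64 64"
  const double Ls             = 16.0;                       // assumed fifth-dimension extent
  const double ncall          = 3000.0;                     // "Called Dw 3000 times ..."
  const double time_us        = 2.80047e7;                  // "... in 2.80047e+07 us"

  // flops divided by microseconds gives Mflop/s directly
  const double mflops = flops_per_site * volume4d * Ls * ncall / time_us;
  std::printf("mflop/s = %g\n", mflops);                    // ~3.79581e+07, matching the log
  return 0;
}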
@ -1,14 +1,13 @@
#!/bin/bash
#SBATCH -J dslash
#SBATCH -A tc002
#SBATCH -t 2:20:00
#SBATCH --nodelist=tu-c0r0n[00,03,06,09]
#SBATCH -A dp207
#SBATCH --exclusive
#SBATCH --nodes=4
#SBATCH --ntasks=16
#SBATCH --qos=standard
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --time=12:00:00
#SBATCH --time=0:05:00
#SBATCH --partition=gpu
#SBATCH --gres=gpu:4
#SBATCH --output=%x.%j.out
@ -1,2 +1,6 @@
spack load c-lime
module load cuda/11.4.1 openmpi/4.1.1 ucx/1.10.1
module load cuda/11.4.1 openmpi/4.1.1-cuda11.4.1 ucx/1.12.0-cuda11.4.1
#module load cuda/11.4.1 openmpi/4.1.1 ucx/1.10.1
export PREFIX=/home/tc002/tc002/shared/env/prefix/
export LD_LIBRARY_PATH=$PREFIX/lib/:$LD_LIBRARY_PATH
unset SBATCH_EXPORT
226 tests/core/Test_compact_wilson_clover_speedup.cc Normal file
@ -0,0 +1,226 @@
/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./tests/core/Test_compact_wilson_clover_speedup.cc

    Copyright (C) 2020 - 2022

    Author: Daniel Richtmann <daniel.richtmann@gmail.com>
    Author: Nils Meyer <nils.meyer@ur.de>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */

#include <Grid/Grid.h>

using namespace Grid;

NAMESPACE_BEGIN(CommandlineHelpers);

static bool checkPresent(int* argc, char*** argv, const std::string& option) {
  return GridCmdOptionExists(*argv, *argv + *argc, option);
}

static std::string getContent(int* argc, char*** argv, const std::string& option) {
  return GridCmdOptionPayload(*argv, *argv + *argc, option);
}

static int readInt(int* argc, char*** argv, std::string&& option, int defaultValue) {
  std::string arg;
  int ret = defaultValue;
  if(checkPresent(argc, argv, option)) {
    arg = getContent(argc, argv, option);
    GridCmdOptionInt(arg, ret);
  }
  return ret;
}

static float readFloat(int* argc, char*** argv, std::string&& option, float defaultValue) {
  std::string arg;
  float ret = defaultValue;
  if(checkPresent(argc, argv, option)) {
    arg = getContent(argc, argv, option);
    GridCmdOptionFloat(arg, ret);
  }
  return ret;
}

NAMESPACE_END(CommandlineHelpers);


#define _grid_printf(LOGGER, ...) \
  { \
    if((LOGGER).isActive()) { /* this makes it safe to put, e.g., norm2 in the calling code w.r.t. performance */ \
      char _printf_buf[1024]; \
      std::sprintf(_printf_buf, __VA_ARGS__); \
      std::cout << (LOGGER) << _printf_buf; \
      fflush(stdout); \
    } \
  }
#define grid_printf_msg(...) _grid_printf(GridLogMessage, __VA_ARGS__)


template<typename Field>
bool resultsAgree(const Field& ref, const Field& res, const std::string& name) {
  RealD checkTolerance = (getPrecision<Field>::value == 2) ? 1e-15 : 1e-7;
  Field diff(ref.Grid());
  diff = ref - res;
  auto absDev = norm2(diff);
  auto relDev = absDev / norm2(ref);
  std::cout << GridLogMessage
            << "norm2(reference), norm2(" << name << "), abs. deviation, rel. deviation: " << norm2(ref) << " "
            << norm2(res) << " " << absDev << " " << relDev << " -> check "
            << ((relDev < checkTolerance) ? "passed" : "failed") << std::endl;

  return relDev <= checkTolerance;
}

template<typename vCoeff_t>
void runBenchmark(int* argc, char*** argv) {
  // read from command line
  const int nIter = CommandlineHelpers::readInt( argc, argv, "--niter", 1000);
  const RealD mass = CommandlineHelpers::readFloat( argc, argv, "--mass", 0.5);
  const RealD csw = CommandlineHelpers::readFloat( argc, argv, "--csw", 1.0);
  const RealD cF = CommandlineHelpers::readFloat( argc, argv, "--cF", 1.0);
  const bool antiPeriodic = CommandlineHelpers::checkPresent(argc, argv, "--antiperiodic");

  // precision
  static_assert(getPrecision<vCoeff_t>::value == 2 || getPrecision<vCoeff_t>::value == 1, "Incorrect precision"); // double or single
  std::string precision = (getPrecision<vCoeff_t>::value == 2 ? "double" : "single");

  // setup grids
  GridCartesian* UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd, vCoeff_t::Nsimd()), GridDefaultMpi());
  GridRedBlackCartesian* UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  // clang-format on

  // setup rng
  std::vector<int> seeds({1, 2, 3, 4});
  GridParallelRNG pRNG(UGrid);
  pRNG.SeedFixedIntegers(seeds);

  // type definitions
  typedef WilsonImpl<vCoeff_t, FundamentalRepresentation, CoeffReal> WImpl;
  typedef WilsonCloverFermion<WImpl> WilsonCloverOperator;
  typedef CompactWilsonCloverFermion<WImpl> CompactWilsonCloverOperator;
  typedef typename WilsonCloverOperator::FermionField Fermion;
  typedef typename WilsonCloverOperator::GaugeField Gauge;

  // setup fields
  Fermion src(UGrid); random(pRNG, src);
  Fermion ref(UGrid); ref = Zero();
  Fermion res(UGrid); res = Zero();
  Fermion hop(UGrid); hop = Zero();
  Fermion diff(UGrid); diff = Zero();
  Gauge Umu(UGrid); SU3::HotConfiguration(pRNG, Umu);

  // setup boundary phases
  typename WilsonCloverOperator::ImplParams implParams;
  std::vector<Complex> boundary_phases(Nd, 1.);
  if(antiPeriodic) boundary_phases[Nd-1] = -1.;
  implParams.boundary_phases = boundary_phases;
  WilsonAnisotropyCoefficients anisParams;

  // misc stuff needed for benchmarks
  double volume=1.0; for(int mu=0; mu<Nd; mu++) volume*=UGrid->_fdimensions[mu];

  // setup fermion operators
  WilsonCloverOperator Dwc( Umu, *UGrid, *UrbGrid, mass, csw, csw, anisParams, implParams);
  CompactWilsonCloverOperator Dwc_compact(Umu, *UGrid, *UrbGrid, mass, csw, csw, cF, anisParams, implParams);

  // now test the conversions
  typename CompactWilsonCloverOperator::CloverField tmp_ref(UGrid); tmp_ref = Dwc.CloverTerm;
  typename CompactWilsonCloverOperator::CloverField tmp_res(UGrid); tmp_res = Zero();
  typename CompactWilsonCloverOperator::CloverField tmp_diff(UGrid); tmp_diff = Zero();
  typename CompactWilsonCloverOperator::CloverDiagonalField diagonal(UGrid); diagonal = Zero();
  typename CompactWilsonCloverOperator::CloverTriangleField triangle(UGrid); triangle = Zero();
  CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(tmp_ref, diagonal, triangle);
  CompactWilsonCloverOperator::CompactHelpers::ConvertLayout(diagonal, triangle, tmp_res);
  tmp_diff = tmp_ref - tmp_res;
  std::cout << GridLogMessage << "conversion: ref, res, diff, eps"
            << " " << norm2(tmp_ref)
            << " " << norm2(tmp_res)
            << " " << norm2(tmp_diff)
            << " " << norm2(tmp_diff) / norm2(tmp_ref)
            << std::endl;

  // performance per site (use minimal values necessary)
  double hop_flop_per_site = 1320; // Rich's Talk + what Peter uses
  double hop_byte_per_site = (8 * 9 + 9 * 12) * 2 * getPrecision<vCoeff_t>::value * 4;
  double clov_flop_per_site = 504; // Rich's Talk and 1412.2629
  double clov_byte_per_site = (2 * 18 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;
  double clov_flop_per_site_performed = 1128;
  double clov_byte_per_site_performed = (12 * 12 + 12 + 12) * 2 * getPrecision<vCoeff_t>::value * 4;

  // total performance numbers
  double hop_gflop_total = volume * nIter * hop_flop_per_site / 1e9;
  double hop_gbyte_total = volume * nIter * hop_byte_per_site / 1e9;
  double clov_gflop_total = volume * nIter * clov_flop_per_site / 1e9;
  double clov_gbyte_total = volume * nIter * clov_byte_per_site / 1e9;
  double clov_gflop_performed_total = volume * nIter * clov_flop_per_site_performed / 1e9;
  double clov_gbyte_performed_total = volume * nIter * clov_byte_per_site_performed / 1e9;

  // warmup + measure dhop
  for(auto n : {1, 2, 3, 4, 5}) Dwc.Dhop(src, hop, 0);
  double t0 = usecond();
  for(int n = 0; n < nIter; n++) Dwc.Dhop(src, hop, 0);
  double t1 = usecond();
  double secs_hop = (t1-t0)/1e6;
  grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n",
                  "hop", precision.c_str(), secs_hop, hop_gflop_total/secs_hop, hop_gbyte_total/secs_hop, 0.0, secs_hop/secs_hop);

#define BENCH_CLOVER_KERNEL(KERNEL) { \
    /* warmup + measure reference clover */ \
    for(auto n : {1, 2, 3, 4, 5}) Dwc.KERNEL(src, ref); \
    double t2 = usecond(); \
    for(int n = 0; n < nIter; n++) Dwc.KERNEL(src, ref); \
    double t3 = usecond(); \
    double secs_ref = (t3-t2)/1e6; \
    grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
                    "reference_"#KERNEL, precision.c_str(), secs_ref, clov_gflop_total/secs_ref, clov_gbyte_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
    grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", /* to see how well the ET performs */ \
                    "reference_"#KERNEL"_performed", precision.c_str(), secs_ref, clov_gflop_performed_total/secs_ref, clov_gbyte_performed_total/secs_ref, secs_ref/secs_ref, secs_ref/secs_hop); \
    \
    /* warmup + measure compact clover */ \
    for(auto n : {1, 2, 3, 4, 5}) Dwc_compact.KERNEL(src, res); \
    double t4 = usecond(); \
    for(int n = 0; n < nIter; n++) Dwc_compact.KERNEL(src, res); \
    double t5 = usecond(); \
    double secs_res = (t5-t4)/1e6; \
    grid_printf_msg("Performance(%35s, %s): %2.4f s, %6.0f GFlop/s, %6.0f GByte/s, speedup vs ref = %.2f, fraction of hop = %.2f\n", \
                    "compact_"#KERNEL, precision.c_str(), secs_res, clov_gflop_total/secs_res, clov_gbyte_total/secs_res, secs_ref/secs_res, secs_res/secs_hop); \
    assert(resultsAgree(ref, res, #KERNEL)); \
  }

  BENCH_CLOVER_KERNEL(Mooee);
  BENCH_CLOVER_KERNEL(MooeeDag);
  BENCH_CLOVER_KERNEL(MooeeInv);
  BENCH_CLOVER_KERNEL(MooeeInvDag);

  grid_printf_msg("finalize %s\n", precision.c_str());
}

int main(int argc, char** argv) {
  Grid_init(&argc, &argv);

  runBenchmark<vComplexD>(&argc, &argv);
  runBenchmark<vComplexF>(&argc, &argv);

  Grid_finalize();
}
@ -235,7 +235,6 @@ void TestWhat(What & Ddwf,
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;

SchurDiagMooeeOperator<What,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
@ -215,7 +215,6 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd , chi_o, chi);
pickCheckerboard(Even, phi_e, phi);
pickCheckerboard(Odd , phi_o, phi);
RealD t1,t2;

SchurDiagMooeeOperator<DomainWallEOFAFermionR,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e, dchi_e);
@ -212,8 +212,6 @@ int main (int argc, char ** argv)
pickCheckerboard(Odd ,chi_o,chi);
pickCheckerboard(Even,phi_e,phi);
pickCheckerboard(Odd ,phi_o,phi);
RealD t1,t2;


SchurDiagMooeeOperator<DomainWallFermionR,LatticeFermion> HermOpEO(Ddwf);
HermOpEO.MpcDagMpc(chi_e,dchi_e);
Some files were not shown because too many files have changed in this diff