mirror of https://github.com/paboyle/Grid.git
synced 2025-06-24 02:32:02 +01:00

Compare commits: feature/fe ... ef820a26cd (15 commits)

ef820a26cd
5012adfebf
bb5c16b97f
0d80eeb545
b0f4eee78b
5340e50427
0f1c5b08a1
70988e43d2
aab3bcb46f
da06d15f73
e8b1251b8c
fad5a74a4b
e83f6a6ae9
6283d11d50
6616d5d090
@@ -44,22 +44,14 @@ directory
 #ifdef __NVCC__
 //disables nvcc specific warning in json.hpp
 #pragma clang diagnostic ignored "-Wdeprecated-register"
 
-#if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDACC_VER_MINOR__ >= 5)
-//disables nvcc specific warning in json.hpp
-#pragma nv_diag_suppress unsigned_compare_with_zero
-#pragma nv_diag_suppress cast_to_qualified_type
-//disables nvcc specific warning in many files
-#pragma nv_diag_suppress esa_on_defaulted_function_ignored
-#pragma nv_diag_suppress extra_semicolon
-#else
-//disables nvcc specific warning in json.hpp
 #pragma diag_suppress unsigned_compare_with_zero
 #pragma diag_suppress cast_to_qualified_type
 
 //disables nvcc specific warning in many files
 #pragma diag_suppress esa_on_defaulted_function_ignored
 #pragma diag_suppress extra_semicolon
-#endif
+//Eigen only
 #endif
 
 // Disable vectorisation in Eigen on the Power8/9 and PowerPC

@@ -14,11 +14,7 @@
 /* NVCC save and restore compile environment*/
 #ifdef __NVCC__
 #pragma push
-#if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDACC_VER_MINOR__ >= 5)
-#pragma nv_diag_suppress code_is_unreachable
-#else
 #pragma diag_suppress code_is_unreachable
-#endif
 #pragma push_macro("__CUDA_ARCH__")
 #pragma push_macro("__NVCC__")
 #pragma push_macro("__CUDACC__")
@@ -262,7 +262,7 @@ public:
     autoView( Tnp_v , (*Tnp), AcceleratorWrite);
     autoView( Tnm_v , (*Tnm), AcceleratorWrite);
     const int Nsimd = CComplex::Nsimd();
-    accelerator_for(ss, FineGrid->oSites(), Nsimd, {
+    accelerator_forNB(ss, FineGrid->oSites(), Nsimd, {
       coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
       coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
     });

@@ -264,7 +264,7 @@ public:
     auto Tnp_v = Tnp->View();
     auto Tnm_v = Tnm->View();
     constexpr int Nsimd = vector_type::Nsimd();
-    accelerator_for(ss, in.Grid()->oSites(), Nsimd, {
+    accelerator_forNB(ss, in.Grid()->oSites(), Nsimd, {
       coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
       coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
     });
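Both hunks swap the blocking accelerator_for for accelerator_forNB, Grid's non-blocking variant that queues the kernel and returns without a host-side synchronisation. A minimal sketch of the pattern, assuming Grid's accelerator_forNB/accelerator_barrier primitives (the views and grid here are illustrative, not taken from this compare):

    // Launch without waiting for completion; useful when the host has
    // independent work to overlap or several kernels can be queued back to back.
    accelerator_forNB(ss, grid->oSites(), Nsimd, {
        coalescedWrite(y_v[ss], xscale*y_v(ss)+mscale*Tn_v(ss));
        coalescedWrite(Tnp_v[ss], 2.0*y_v(ss)-Tnm_v(ss));
    });
    accelerator_barrier(); // required before the host (or a dependent kernel) consumes the results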
@@ -113,43 +113,7 @@ public:
     blockPromote(guess_coarse,guess,subspace);
     guess.Checkerboard() = src.Checkerboard();
   };
-
-  void operator()(const std::vector<FineField> &src,std::vector<FineField> &guess) {
-    int Nevec = (int)evec_coarse.size();
-    int Nsrc = (int)src.size();
-    // make temp variables
-    std::vector<CoarseField> src_coarse(Nsrc,evec_coarse[0].Grid());
-    std::vector<CoarseField> guess_coarse(Nsrc,evec_coarse[0].Grid());
-    //Preporcessing
-    std::cout << GridLogMessage << "Start BlockProject for loop" << std::endl;
-    for (int j=0;j<Nsrc;j++)
-    {
-      guess_coarse[j] = Zero();
-      std::cout << GridLogMessage << "BlockProject iter: " << j << std::endl;
-      blockProject(src_coarse[j],src[j],subspace);
-    }
-    //deflation set up for eigen vector batchsize 1 and source batch size equal number of sources
-    std::cout << GridLogMessage << "Start ProjectAccum for loop" << std::endl;
-    for (int i=0;i<Nevec;i++)
-    {
-      std::cout << GridLogMessage << "ProjectAccum Nvec: " << i << std::endl;
-      const CoarseField & tmp = evec_coarse[i];
-      for (int j=0;j<Nsrc;j++)
-      {
-        axpy(guess_coarse[j],TensorRemove(innerProduct(tmp,src_coarse[j])) / eval_coarse[i],tmp,guess_coarse[j]);
-      }
-    }
-    //postprocessing
-    std::cout << GridLogMessage << "Start BlockPromote for loop" << std::endl;
-    for (int j=0;j<Nsrc;j++)
-    {
-      std::cout << GridLogMessage << "BlockProject iter: " << j << std::endl;
-      blockPromote(guess_coarse[j],guess[j],subspace);
-      guess[j].Checkerboard() = src[j].Checkerboard();
-    }
-  };
-
 };
 
 
 
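For reference, the batched operator() deleted above implements the standard coarse-space deflated guess for each source. In the notation of the code (blockProject as the coarse projection P, blockPromote as its adjoint, coarse eigenpairs (e_i, lambda_i)), the inner axpy accumulates, per source j,

    \text{guess}_j \;=\; P^{\dagger} \sum_{i=1}^{N_{\rm evec}} \frac{\langle e_i,\, P\,\text{src}_j \rangle}{\lambda_i}\, e_i ,

after which the result is promoted back to the fine grid and the checkerboard of the source is copied over.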
@@ -40,7 +40,7 @@ void MemoryManager::PrintBytes(void)
 //////////////////////////////////////////////////////////////////////
 MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
 int MemoryManager::Victim[MemoryManager::NallocType];
-int MemoryManager::Ncache[MemoryManager::NallocType] = { 2, 8, 8, 16, 8, 16 };
+int MemoryManager::Ncache[MemoryManager::NallocType] = { 2, 8, 2, 8, 2, 8 };
 uint64_t MemoryManager::CacheBytes[MemoryManager::NallocType];
 //////////////////////////////////////////////////////////////////////
 // Actual allocation and deallocation utils

@@ -36,11 +36,6 @@ NAMESPACE_BEGIN(Grid);
 
 #define GRID_ALLOC_SMALL_LIMIT (4096)
 
-#define STRINGIFY(x) #x
-#define TOSTRING(x) STRINGIFY(x)
-#define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)
-#define AUDIT(a) MemoryManager::Audit(FILE_LINE)
-
 /*Pinning pages is costly*/
 ////////////////////////////////////////////////////////////////////////////
 // Advise the LatticeAccelerator class

@@ -97,9 +92,8 @@ private:
   static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim,uint64_t &cbytes) ;
   static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache,uint64_t &cbytes) ;
 
-public:
   static void PrintBytes(void);
-  static void Audit(std::string s);
+public:
   static void Init(void);
   static void InitMessage(void);
   static void *AcceleratorAllocate(size_t bytes);

@@ -119,8 +113,6 @@ private:
   static uint64_t DeviceToHostBytes;
   static uint64_t HostToDeviceXfer;
   static uint64_t DeviceToHostXfer;
-  static uint64_t DeviceEvictions;
-  static uint64_t DeviceDestroy;
 
 private:
 #ifndef GRID_UVM

@@ -178,7 +170,6 @@ private:
 
 public:
   static void Print(void);
-  static void PrintAll(void);
   static void PrintState( void* CpuPtr);
   static int isOpen (void* CpuPtr);
   static void ViewClose(void* CpuPtr,ViewMode mode);
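The Insert/Lookup declarations above describe a small per-type free-list: freed blocks are parked in Entries (Ncache slots per allocation type), an exact-size Lookup hit is recycled instead of reallocating, Victim tracks a round-robin slot to overwrite, and CacheBytes counts the parked bytes. A minimal sketch of the Lookup side under those assumptions — only the signatures come from the header; the entry layout and exact-match policy here are assumptions for illustration:

    #include <cstddef>
    #include <cstdint>

    struct AllocationCacheEntry { void *address; size_t bytes; int valid; };

    // Recycle an exact-size parked block if one exists; on a miss the caller
    // falls through to a fresh allocation.
    void *Lookup(size_t bytes, AllocationCacheEntry *entries, int ncache, uint64_t &cbytes)
    {
      for (int e = 0; e < ncache; e++) {
        if (entries[e].valid && (entries[e].bytes == bytes)) {
          entries[e].valid = 0;
          cbytes -= bytes;            // these bytes are no longer held by the cache
          return entries[e].address;
        }
      }
      return NULL;
    }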
@@ -3,13 +3,8 @@
 
 #warning "Using explicit device memory copies"
 NAMESPACE_BEGIN(Grid);
-#define MAXLINE 512
-static char print_buffer [ MAXLINE ];
-
-#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
-#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
-//#define dprintf(...)
+//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
+#define dprintf(...)
 
 
 ////////////////////////////////////////////////////////////

@@ -28,8 +23,6 @@ uint64_t MemoryManager::HostToDeviceBytes;
 uint64_t MemoryManager::DeviceToHostBytes;
 uint64_t MemoryManager::HostToDeviceXfer;
 uint64_t MemoryManager::DeviceToHostXfer;
-uint64_t MemoryManager::DeviceEvictions;
-uint64_t MemoryManager::DeviceDestroy;
 
 ////////////////////////////////////
 // Priority ordering for unlocked entries

@@ -111,17 +104,15 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
   ///////////////////////////////////////////////////////////
   assert(AccCache.state!=Empty);
 
-  mprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
+  dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
   assert(AccCache.accLock==0);
   assert(AccCache.cpuLock==0);
   assert(AccCache.CpuPtr!=(uint64_t)NULL);
   if(AccCache.AccPtr) {
     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
-    DeviceDestroy++;
     DeviceBytes -=AccCache.bytes;
     LRUremove(AccCache);
-    AccCache.AccPtr=(uint64_t) NULL;
-    dprintf("MemoryManager: Free(%lx) LRU %ld Total %ld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
+    dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
   }
   uint64_t CpuPtr = AccCache.CpuPtr;
   EntryErase(CpuPtr);

@@ -130,36 +121,26 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
 void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
 {
   ///////////////////////////////////////////////////////////////////////////
-  // Make CPU consistent, remove from Accelerator, remove from LRU, LEAVE CPU only entry
-  // Cannot be acclocked. If allocated must be in LRU pool.
-  //
-  // Nov 2022... Felix issue: Allocating two CpuPtrs, can have an entry in LRU-q with CPUlock.
-  // and require to evict the AccPtr copy. Eviction was a mistake in CpuViewOpen
-  // but there is a weakness where CpuLock entries are attempted for erase
-  // Take these OUT LRU queue when CPU locked?
-  // Cannot take out the table as cpuLock data is important.
+  // Make CPU consistent, remove from Accelerator, remove entry
+  // Cannot be locked. If allocated must be in LRU pool.
   ///////////////////////////////////////////////////////////////////////////
   assert(AccCache.state!=Empty);
 
-  mprintf("MemoryManager: Evict cpu %lx acc %lx cpuLock %ld accLock %ld\n",
-          (uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
-          (uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
-  assert(AccCache.accLock==0); // Cannot evict so logic bomb
-  assert(AccCache.CpuPtr!=(uint64_t)NULL);
+  dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
+  assert(AccCache.accLock==0);
+  assert(AccCache.cpuLock==0);
   if(AccCache.state==AccDirty) {
     Flush(AccCache);
   }
+  assert(AccCache.CpuPtr!=(uint64_t)NULL);
   if(AccCache.AccPtr) {
     AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
-    LRUremove(AccCache);
-    AccCache.AccPtr=(uint64_t)NULL;
-    AccCache.state=CpuDirty; // CPU primary now
     DeviceBytes -=AccCache.bytes;
-    dprintf("MemoryManager: Free(%lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
+    LRUremove(AccCache);
+    dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
   }
-  // uint64_t CpuPtr = AccCache.CpuPtr;
-  DeviceEvictions++;
-  // EntryErase(CpuPtr);
+  uint64_t CpuPtr = AccCache.CpuPtr;
+  EntryErase(CpuPtr);
 }
 void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
 {
@@ -169,7 +150,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
   assert(AccCache.AccPtr!=(uint64_t)NULL);
   assert(AccCache.CpuPtr!=(uint64_t)NULL);
   acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
-  mprintf("MemoryManager: Flush %lx -> %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
+  dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
   DeviceToHostBytes+=AccCache.bytes;
   DeviceToHostXfer++;
   AccCache.state=Consistent;

@@ -184,7 +165,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
     AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
     DeviceBytes+=AccCache.bytes;
   }
-  mprintf("MemoryManager: Clone %lx <- %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
+  dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
   acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
   HostToDeviceBytes+=AccCache.bytes;
   HostToDeviceXfer++;

@@ -210,7 +191,6 @@ void MemoryManager::CpuDiscard(AcceleratorViewEntry &AccCache)
 void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
 {
   if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
-    dprintf("AcceleratorViewClose %lx\n",(uint64_t)Ptr);
     AcceleratorViewClose((uint64_t)Ptr);
   } else if( (mode==CpuRead)||(mode==CpuWrite)){
     CpuViewClose((uint64_t)Ptr);

@@ -222,7 +202,6 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
 {
   uint64_t CpuPtr = (uint64_t)_CpuPtr;
   if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
-    dprintf("AcceleratorViewOpen %lx\n",(uint64_t)CpuPtr);
     return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
   } else if( (mode==CpuRead)||(mode==CpuWrite)){
     return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);

@@ -233,16 +212,13 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
 }
 void MemoryManager::EvictVictims(uint64_t bytes)
 {
-  assert(bytes<DeviceMaxBytes);
   while(bytes+DeviceLRUBytes > DeviceMaxBytes){
     if ( DeviceLRUBytes > 0){
       assert(LRU.size()>0);
-      uint64_t victim = LRU.back(); // From the LRU
+      uint64_t victim = LRU.back();
       auto AccCacheIterator = EntryLookup(victim);
       auto & AccCache = AccCacheIterator->second;
       Evict(AccCache);
-    } else {
-      return;
     }
   }
 }
@@ -265,12 +241,11 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
   assert(AccCache.cpuLock==0); // Programming error
 
   if(AccCache.state!=Empty) {
-    dprintf("ViewOpen found entry %lx %lx : %ld %ld accLock %ld\n",
+    dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
            (uint64_t)AccCache.CpuPtr,
            (uint64_t)CpuPtr,
            (uint64_t)AccCache.bytes,
-           (uint64_t)bytes,
-           (uint64_t)AccCache.accLock);
+           (uint64_t)bytes);
     assert(AccCache.CpuPtr == CpuPtr);
     assert(AccCache.bytes ==bytes);
   }

@@ -305,7 +280,6 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
       AccCache.state = Consistent; // Empty + AccRead => Consistent
     }
     AccCache.accLock= 1;
-    dprintf("Copied Empty entry into device accLock= %d\n",AccCache.accLock);
   } else if(AccCache.state==CpuDirty ){
     if(mode==AcceleratorWriteDiscard) {
       CpuDiscard(AccCache);

@@ -318,30 +292,28 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
       AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
     }
     AccCache.accLock++;
-    dprintf("CpuDirty entry into device ++accLock= %d\n",AccCache.accLock);
+    dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
   } else if(AccCache.state==Consistent) {
     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
       AccCache.state = AccDirty;   // Consistent + AcceleratorWrite=> AccDirty
     else
       AccCache.state = Consistent; // Consistent + AccRead => Consistent
     AccCache.accLock++;
-    dprintf("Consistent entry into device ++accLock= %d\n",AccCache.accLock);
+    dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
   } else if(AccCache.state==AccDirty) {
     if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
       AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
     else
      AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
     AccCache.accLock++;
-    dprintf("AccDirty entry ++accLock= %d\n",AccCache.accLock);
+    dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
   } else {
     assert(0);
   }
 
-  assert(AccCache.accLock>0);
-  // If view is opened on device must remove from LRU
+  // If view is opened on device remove from LRU
   if(AccCache.LRU_valid==1){
     // must possibly remove from LRU as now locked on GPU
-    dprintf("AccCache entry removed from LRU \n");
     LRUremove(AccCache);
   }
 
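The transitions spelled out in the comments above form a small state machine over {Empty, CpuDirty, AccDirty, Consistent}. A minimal, self-contained sketch of the device-open transitions under those rules — illustrative only, not a replacement for AcceleratorViewOpen:

    enum ViewState { Empty, CpuDirty, AccDirty, Consistent };

    // Next state when an accelerator view is opened on an entry.
    // "writing" covers AcceleratorWrite and AcceleratorWriteDiscard.
    ViewState nextStateOnDeviceOpen(ViewState s, bool writing)
    {
      if (s == Empty)      return writing ? AccDirty : Consistent; // Clone copies host->device first
      if (s == CpuDirty)   return writing ? AccDirty : Consistent; // WriteDiscard may skip the copy via CpuDiscard
      if (s == Consistent) return writing ? AccDirty : Consistent;
      return AccDirty;                                             // AccDirty absorbs all opens
    }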
@@ -362,12 +334,10 @@ void MemoryManager::AcceleratorViewClose(uint64_t CpuPtr)
   assert(AccCache.accLock>0);
 
   AccCache.accLock--;
 
   // Move to LRU queue if not locked and close on device
   if(AccCache.accLock==0) {
-    dprintf("AccleratorViewClose %lx AccLock decremented to %ld move to LRU queue\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
     LRUinsert(AccCache);
-  } else {
-    dprintf("AccleratorViewClose %lx AccLock decremented to %ld\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
   }
 }
 void MemoryManager::CpuViewClose(uint64_t CpuPtr)

@@ -404,10 +374,9 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
   auto AccCacheIterator = EntryLookup(CpuPtr);
   auto & AccCache = AccCacheIterator->second;
 
-  // CPU doesn't need to free space
-  // if (!AccCache.AccPtr) {
-  //   EvictVictims(bytes);
-  // }
+  if (!AccCache.AccPtr) {
+    EvictVictims(bytes);
+  }
 
   assert((mode==CpuRead)||(mode==CpuWrite));
   assert(AccCache.accLock==0); // Programming error

@@ -461,28 +430,20 @@ void MemoryManager::NotifyDeletion(void *_ptr)
 void MemoryManager::Print(void)
 {
   PrintBytes();
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
-  std::cout << GridLogMessage << "Memory Manager " << std::endl;
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
-  std::cout << GridLogMessage << DeviceBytes << " bytes allocated on device " << std::endl;
-  std::cout << GridLogMessage << DeviceLRUBytes<< " bytes evictable on device " << std::endl;
-  std::cout << GridLogMessage << DeviceMaxBytes<< " bytes max on device " << std::endl;
-  std::cout << GridLogMessage << HostToDeviceXfer << " transfers to device " << std::endl;
-  std::cout << GridLogMessage << DeviceToHostXfer << " transfers from device " << std::endl;
-  std::cout << GridLogMessage << HostToDeviceBytes<< " bytes transfered to device " << std::endl;
-  std::cout << GridLogMessage << DeviceToHostBytes<< " bytes transfered from device " << std::endl;
-  std::cout << GridLogMessage << DeviceEvictions << " Evictions from device " << std::endl;
-  std::cout << GridLogMessage << DeviceDestroy << " Destroyed vectors on device " << std::endl;
-  std::cout << GridLogMessage << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
-}
-void MemoryManager::PrintAll(void)
-{
-  Print();
-  std::cout << GridLogMessage << std::endl;
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
-  std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
+  std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
+  std::cout << GridLogDebug << "Memory Manager " << std::endl;
+  std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
+  std::cout << GridLogDebug << DeviceBytes << " bytes allocated on device " << std::endl;
+  std::cout << GridLogDebug << DeviceLRUBytes<< " bytes evictable on device " << std::endl;
+  std::cout << GridLogDebug << DeviceMaxBytes<< " bytes max on device " << std::endl;
+  std::cout << GridLogDebug << HostToDeviceXfer << " transfers to device " << std::endl;
+  std::cout << GridLogDebug << DeviceToHostXfer << " transfers from device " << std::endl;
+  std::cout << GridLogDebug << HostToDeviceBytes<< " bytes transfered to device " << std::endl;
+  std::cout << GridLogDebug << DeviceToHostBytes<< " bytes transfered from device " << std::endl;
+  std::cout << GridLogDebug << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
+  std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
+  std::cout << GridLogDebug << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
+  std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
   for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
     auto &AccCache = it->second;
 

@@ -492,13 +453,13 @@ void MemoryManager::PrintAll(void)
     if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
     if ( AccCache.state==Consistent)str = std::string("Consistent");
 
-    std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
+    std::cout << GridLogDebug << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
              << "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
              << "\t" << AccCache.cpuLock
              << "\t" << AccCache.accLock
              << "\t" << AccCache.LRU_valid<<std::endl;
   }
-  std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
+  std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
 
 };
 int MemoryManager::isOpen (void* _CpuPtr)
@@ -512,61 +473,6 @@ int MemoryManager::isOpen (void* _CpuPtr)
     return 0;
   }
 }
-void MemoryManager::Audit(std::string s)
-{
-  uint64_t CpuBytes=0;
-  uint64_t AccBytes=0;
-  uint64_t LruBytes1=0;
-  uint64_t LruBytes2=0;
-  uint64_t LruCnt=0;
-  uint64_t LockedBytes=0;
-
-  std::cout << " Memory Manager::Audit() from "<<s<<std::endl;
-  for(auto it=LRU.begin();it!=LRU.end();it++){
-    uint64_t cpuPtr = *it;
-    assert(EntryPresent(cpuPtr));
-    auto AccCacheIterator = EntryLookup(cpuPtr);
-    auto & AccCache = AccCacheIterator->second;
-    LruBytes2+=AccCache.bytes;
-    assert(AccCache.LRU_valid==1);
-    assert(AccCache.LRU_entry==it);
-  }
-  std::cout << " Memory Manager::Audit() LRU queue matches table entries "<<std::endl;
-  for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
-    auto &AccCache = it->second;
-
-    std::string str;
-    if ( AccCache.state==Empty ) str = std::string("Empty");
-    if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
-    if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
-    if ( AccCache.state==Consistent)str = std::string("Consistent");
-
-    CpuBytes+=AccCache.bytes;
-    if( AccCache.AccPtr ) AccBytes+=AccCache.bytes;
-    if( AccCache.LRU_valid ) LruBytes1+=AccCache.bytes;
-    if( AccCache.LRU_valid ) LruCnt++;
-
-    if ( AccCache.cpuLock || AccCache.accLock ) {
-      assert(AccCache.LRU_valid==0);
-      std::cout << GridLogError << s<< "\n\t 0x"<<std::hex<<AccCache.CpuPtr<<std::dec
-               << "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
-               << "\t cpuLock " << AccCache.cpuLock
-               << "\t accLock " << AccCache.accLock
-               << "\t LRUvalid " << AccCache.LRU_valid<<std::endl;
-    }
-
-    assert( AccCache.cpuLock== 0 ) ;
-    assert( AccCache.accLock== 0 ) ;
-  }
-  std::cout << " Memory Manager::Audit() no locked table entries "<<std::endl;
-  assert(LruBytes1==LruBytes2);
-  assert(LruBytes1==DeviceLRUBytes);
-  std::cout << " Memory Manager::Audit() evictable bytes matches sum over table "<<std::endl;
-  assert(AccBytes==DeviceBytes);
-  std::cout << " Memory Manager::Audit() device bytes matches sum over table "<<std::endl;
-  assert(LruCnt == LRU.size());
-  std::cout << " Memory Manager::Audit() LRU entry count matches "<<std::endl;
-}
-
 void MemoryManager::PrintState(void* _CpuPtr)
 {

@@ -583,8 +489,8 @@ void MemoryManager::PrintState(void* _CpuPtr)
   if ( AccCache.state==EvictNext) str = std::string("EvictNext");
 
   std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
-  std::cout << GridLogMessage << "\tx"<<std::hex<<AccCache.CpuPtr<<std::dec
-           << "\tx"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
+  std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
+           << "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
            << "\t" << AccCache.cpuLock
            << "\t" << AccCache.accLock
            << "\t" << AccCache.LRU_valid<<std::endl;
@@ -12,10 +12,7 @@ uint64_t MemoryManager::HostToDeviceBytes;
 uint64_t MemoryManager::DeviceToHostBytes;
 uint64_t MemoryManager::HostToDeviceXfer;
 uint64_t MemoryManager::DeviceToHostXfer;
-uint64_t MemoryManager::DeviceEvictions;
-uint64_t MemoryManager::DeviceDestroy;
 
-void MemoryManager::Audit(std::string s){};
 void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
 void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
 int MemoryManager::isOpen (void* CpuPtr) { return 0;}

@@ -24,7 +21,6 @@ void MemoryManager::PrintState(void* CpuPtr)
   std::cout << GridLogMessage << "Host<->Device memory movement not currently managed by Grid." << std::endl;
 };
 void MemoryManager::Print(void){};
-void MemoryManager::PrintAll(void){};
 void MemoryManager::NotifyDeletion(void *ptr){};
 
 NAMESPACE_END(Grid);
@@ -53,10 +53,11 @@ public:
   // Communicator should know nothing of the physics grid, only processor grid.
   ////////////////////////////////////////////
   int _Nprocessors;             // How many in all
-  Coordinate _processors;       // Which dimensions get relayed out over processors lanes.
   int _processor;               // linear processor rank
-  Coordinate _processor_coor;   // linear processor coordinate
   unsigned long _ndimension;
+  Coordinate _shm_processors;   // Which dimensions get relayed out over processors lanes.
+  Coordinate _processors;       // Which dimensions get relayed out over processors lanes.
+  Coordinate _processor_coor;   // linear processor coordinate
   static Grid_MPI_Comm communicator_world;
   Grid_MPI_Comm communicator;
   std::vector<Grid_MPI_Comm> communicator_halo;

@@ -97,6 +98,7 @@ public:
   int BossRank(void) ;
   int ThisRank(void) ;
   const Coordinate & ThisProcessorCoor(void) ;
+  const Coordinate & ShmGrid(void) { return _shm_processors; } ;
   const Coordinate & ProcessorGrid(void) ;
   int ProcessorCount(void) ;
 

@@ -142,16 +144,16 @@ public:
                        int bytes);
 
   double StencilSendToRecvFrom(void *xmit,
-                               int xmit_to_rank,
+                               int xmit_to_rank,int do_xmit,
                               void *recv,
-                               int recv_from_rank,
+                               int recv_from_rank,int do_recv,
                               int bytes,int dir);
 
   double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                    void *xmit,
-                                    int xmit_to_rank,
+                                    int xmit_to_rank,int do_xmit,
                                    void *recv,
-                                    int recv_from_rank,
+                                    int recv_from_rank,int do_recv,
                                    int bytes,int dir);
 
 
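The new do_xmit/do_recv integers gate the two legs of the halo exchange independently, so a caller can post only the directions it actually needs. A hedged usage sketch — the communicator object, buffers and ranks below are illustrative, only the widened signature comes from this compare:

    // Suppress one leg of the exchange for this stencil direction while still
    // posting the other; the return value remains the off-node byte count.
    int do_xmit = 0, do_recv = 1;
    double off_node = communicator.StencilSendToRecvFrom(xmit_buf, xmit_to_rank, do_xmit,
                                                         recv_buf, recv_from_rank, do_recv,
                                                         nbytes, dir);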
@@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
   // Remap using the shared memory optimising routine
   // The remap creates a comm which must be freed
   ////////////////////////////////////////////////////
-  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm);
+  GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm,_shm_processors);
   InitFromMPICommunicator(processors,optimal_comm);
   SetCommunicator(optimal_comm);
   ///////////////////////////////////////////////////

@@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
   int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
   Coordinate parent_processor_coor(_ndimension,0);
   Coordinate parent_processors    (_ndimension,1);
+  Coordinate shm_processors       (_ndimension,1);
   // Can make 5d grid from 4d etc...
   int pad = _ndimension-parent_ndimension;
   for(int d=0;d<parent_ndimension;d++){
     parent_processor_coor[pad+d]=parent._processor_coor[d];
     parent_processors    [pad+d]=parent._processors[d];
+    shm_processors       [pad+d]=parent._shm_processors[d];
   }
 
   //////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
       ccoor[d] = parent_processor_coor[d] % processors[d];
       scoor[d] = parent_processor_coor[d] / processors[d];
       ssize[d] = parent_processors[d]     / processors[d];
+      if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
     }
 
     // rank within subcomm ; srank is rank of subcomm within blocks of subcomms

@@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
 }
 // Basic Halo comms primitive
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int dest,
+                                                     int dest, int dox,
                                                     void *recv,
-                                                     int from,
+                                                     int from, int dor,
                                                     int bytes,int dir)
 {
   std::vector<CommsRequest_t> list;
-  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
+  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
   StencilSendToRecvFromComplete(list,dir);
   return offbytes;
 }
 
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                         void *xmit,
-                                                         int dest,
+                                                         int dest,int dox,
                                                         void *recv,
-                                                         int from,
+                                                         int from,int dor,
                                                         int bytes,int dir)
 {
   int ncomm =communicator_halo.size();
@@ -370,6 +372,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
   double off_node_bytes=0.0;
   int tag;
 
+  if ( dox ) {
   if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
     tag= dir+from*32;
     ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);

@@ -377,7 +380,9 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
     list.push_back(rrq);
     off_node_bytes+=bytes;
   }
+  }
 
+  if (dor) {
   if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
     tag= dir+_processor*32;
     ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);

@@ -391,10 +396,11 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
     // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
     acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
   }
+  }
 
-  // if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
-  //   this->StencilSendToRecvFromComplete(list,dir);
-  // }
+  if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
+    this->StencilSendToRecvFromComplete(list,dir);
+  }
 
   return off_node_bytes;
 }
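With the CommunicatorPolicySequential check uncommented, StencilSendToRecvFromBegin completes its own requests before returning under that policy. A hedged sketch of what a caller then observes (the communicator object and arguments are illustrative):

    std::vector<CommsRequest_t> list;
    double off = communicator.StencilSendToRecvFromBegin(list, xmit, dest, dox,
                                                         recv, from, dor, nbytes, dir);
    // Under CommunicatorPolicySequential the Begin call has already invoked
    // StencilSendToRecvFromComplete(list,dir), so the explicit Complete below
    // has nothing left to wait on; under other policies it performs the wait.
    communicator.StencilSendToRecvFromComplete(list, dir);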
@@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
   : CartesianCommunicator(processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   srank=0;
   SetCommunicator(communicator_world);
 }
 
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   _processors = processors;
   _ndimension = processors.size(); assert(_ndimension>=1);
   _processor_coor.resize(_ndimension);

@@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
 }
 
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int xmit_to_rank,
+                                                     int xmit_to_rank,int dox,
                                                     void *recv,
-                                                     int recv_from_rank,
+                                                     int recv_from_rank,int dor,
                                                     int bytes, int dir)
 {
   return 2.0*bytes;
 }
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                         void *xmit,
-                                                         int xmit_to_rank,
+                                                         int xmit_to_rank,int dox,
                                                         void *recv,
-                                                         int recv_from_rank,
+                                                         int recv_from_rank,int dor,
                                                         int bytes, int dir)
 {
   return 2.0*bytes;
@@ -93,9 +93,10 @@ public:
   // Create an optimal reordered communicator that makes MPI_Cart_create get it right
   //////////////////////////////////////////////////////////////////////////////////////
   static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
-  static void OptimalCommunicator           (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorHypercube  (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
+  // Turns MPI_COMM_WORLD into right layout for Cartesian
+  static void OptimalCommunicator           (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorHypercube  (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
   static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
   ///////////////////////////////////////////////////
   // Provide shared memory facilities off comm world

@@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
   }
   return log2size;
 }
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   //////////////////////////////////////////////////////////////////////////////
   // Look and see if it looks like an HPE 8600 based on hostname conventions

@@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
   gethostname(name,namelen);
   int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
 
-  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
-  else OptimalCommunicatorSharedMemory(processors,optimal_comm);
+  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
+  else OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
 }
 static inline int divides(int a,int b)
 {

@@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims)
     dim=(dim+1) %ndimension;
   }
 }
-void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Assert power of two shm_size.

@@ -294,6 +294,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
   Coordinate HyperCoor(ndimension);
 
   GetShmDims(WorldDims,ShmDims);
+  SHM = ShmDims;
 
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings

@@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
   int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
   assert(ierr==0);
 }
-void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Identify subblock of ranks on node spreading across dims

@@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
   Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);
 
   GetShmDims(WorldDims,ShmDims);
+  SHM=ShmDims;
+
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings
   ////////////////////////////////////////////////////////////////
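All OptimalCommunicator* variants now return the node-local process grid through the extra Coordinate argument; CartesianCommunicator stores it as _shm_processors and exposes it via ShmGrid(). A hedged illustration of what a caller sees inside an initialised Grid program — the concrete factorisation depends on GetShmDims and the machine, and the numbers here are assumptions:

    // Hypothetical 32-rank job laid out as a {2,2,2,4} process grid.
    CartesianCommunicator communicator(Coordinate({2,2,2,4}));
    const Coordinate &shm = communicator.ShmGrid();
    // With 8 ranks per node, GetShmDims might factor the node as {1,2,2,2},
    // i.e. the last three dimensions are split across ranks that share memory.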
@@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
   _ShmSetup=1;
 }
 
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   optimal_comm = WorldComm;
+  SHM = Coordinate(processors.size(),1);
 }
 
 ////////////////////////////////////////////////////////////////////////////////////////////
Grid/json/json.hpp: 23551 lines changed (file diff suppressed because it is too large)
@@ -129,7 +129,7 @@ public:
 
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
-    auto me = View(AcceleratorWrite);
+    auto me = View(AcceleratorWriteDiscard);
     accelerator_for(ss,me.size(),vobj::Nsimd(),{
       auto tmp = eval(ss,exprCopy);
       coalescedWrite(me[ss],tmp);

@@ -152,7 +152,7 @@ public:
 
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
-    auto me = View(AcceleratorWrite);
+    auto me = View(AcceleratorWriteDiscard);
     accelerator_for(ss,me.size(),vobj::Nsimd(),{
       auto tmp = eval(ss,exprCopy);
       coalescedWrite(me[ss],tmp);

@@ -174,7 +174,7 @@ public:
     this->checkerboard=cb;
     auto exprCopy = expr;
     ExpressionViewOpen(exprCopy);
-    auto me = View(AcceleratorWrite);
+    auto me = View(AcceleratorWriteDiscard);
     accelerator_for(ss,me.size(),vobj::Nsimd(),{
      auto tmp = eval(ss,exprCopy);
      coalescedWrite(me[ss],tmp);

@@ -245,7 +245,7 @@ public:
   ///////////////////////////////////////////
   // user defined constructor
   ///////////////////////////////////////////
-  Lattice(GridBase *grid,ViewMode mode=AcceleratorWrite) {
+  Lattice(GridBase *grid,ViewMode mode=AcceleratorWriteDiscard) {
     this->_grid = grid;
     resize(this->_grid->oSites());
     assert((((uint64_t)&this->_odata[0])&0xF) ==0);

@@ -288,7 +288,7 @@ public:
   typename std::enable_if<!std::is_same<robj,vobj>::value,int>::type i=0;
   conformable(*this,r);
   this->checkerboard = r.Checkerboard();
-  auto me = View(AcceleratorWrite);
+  auto me = View(AcceleratorWriteDiscard);
   auto him= r.View(AcceleratorRead);
   accelerator_for(ss,me.size(),vobj::Nsimd(),{
     coalescedWrite(me[ss],him(ss));

@@ -303,7 +303,7 @@ public:
  inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
   this->checkerboard = r.Checkerboard();
   conformable(*this,r);
-  auto me = View(AcceleratorWrite);
+  auto me = View(AcceleratorWriteDiscard);
   auto him= r.View(AcceleratorRead);
   accelerator_for(ss,me.size(),vobj::Nsimd(),{
     coalescedWrite(me[ss],him(ss));
@ -28,9 +28,6 @@ Author: Christoph Lehner <christoph@lhnr.de>
|
|||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
#include <Grid/lattice/Lattice_reduction_gpu.h>
|
#include <Grid/lattice/Lattice_reduction_gpu.h>
|
||||||
#endif
|
#endif
|
||||||
#if defined(GRID_SYCL)
|
|
||||||
#include <Grid/lattice/Lattice_reduction_sycl.h>
|
|
||||||
#endif
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
@ -130,7 +127,7 @@ inline Double max(const Double *arg, Integer osites)
|
|||||||
template<class vobj>
|
template<class vobj>
|
||||||
inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
|
inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
|
||||||
{
|
{
|
||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)||defined(GRID_SYCL)
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
return sum_gpu(arg,osites);
|
return sum_gpu(arg,osites);
|
||||||
#else
|
#else
|
||||||
return sum_cpu(arg,osites);
|
return sum_cpu(arg,osites);
|
||||||
@ -139,7 +136,7 @@ inline typename vobj::scalar_object sum(const vobj *arg, Integer osites)
|
|||||||
template<class vobj>
|
template<class vobj>
|
||||||
inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)
|
inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)
|
||||||
{
|
{
|
||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)||defined(GRID_SYCL)
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
return sumD_gpu(arg,osites);
|
return sumD_gpu(arg,osites);
|
||||||
#else
|
#else
|
||||||
return sumD_cpu(arg,osites);
|
return sumD_cpu(arg,osites);
|
||||||
@ -148,7 +145,7 @@ inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)
|
|||||||
template<class vobj>
|
template<class vobj>
|
||||||
inline typename vobj::scalar_objectD sumD_large(const vobj *arg, Integer osites)
|
inline typename vobj::scalar_objectD sumD_large(const vobj *arg, Integer osites)
|
||||||
{
|
{
|
||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)||defined(GRID_SYCL)
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
return sumD_gpu_large(arg,osites);
|
return sumD_gpu_large(arg,osites);
|
||||||
#else
|
#else
|
||||||
return sumD_cpu(arg,osites);
|
return sumD_cpu(arg,osites);
|
||||||
@ -158,13 +155,13 @@ inline typename vobj::scalar_objectD sumD_large(const vobj *arg, Integer osites)
|
|||||||
template<class vobj>
|
template<class vobj>
|
||||||
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
|
inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
|
||||||
{
|
{
|
||||||
Integer osites = arg.Grid()->oSites();
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)||defined(GRID_SYCL)
|
|
||||||
typename vobj::scalar_object ssum;
|
|
||||||
autoView( arg_v, arg, AcceleratorRead);
|
autoView( arg_v, arg, AcceleratorRead);
|
||||||
ssum= sum_gpu(&arg_v[0],osites);
|
Integer osites = arg.Grid()->oSites();
|
||||||
|
auto ssum= sum_gpu(&arg_v[0],osites);
|
||||||
#else
|
#else
|
||||||
autoView(arg_v, arg, CpuRead);
|
autoView(arg_v, arg, CpuRead);
|
||||||
|
Integer osites = arg.Grid()->oSites();
|
||||||
auto ssum= sum_cpu(&arg_v[0],osites);
|
auto ssum= sum_cpu(&arg_v[0],osites);
|
||||||
#endif
|
#endif
|
||||||
arg.Grid()->GlobalSum(ssum);
|
arg.Grid()->GlobalSum(ssum);
|
||||||
@ -174,7 +171,7 @@ inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
|
|||||||
template<class vobj>
|
template<class vobj>
|
||||||
inline typename vobj::scalar_object sum_large(const Lattice<vobj> &arg)
|
inline typename vobj::scalar_object sum_large(const Lattice<vobj> &arg)
|
||||||
{
|
{
|
||||||
#if defined(GRID_CUDA)||defined(GRID_HIP)||defined(GRID_SYCL)
|
#if defined(GRID_CUDA)||defined(GRID_HIP)
|
||||||
autoView( arg_v, arg, AcceleratorRead);
|
autoView( arg_v, arg, AcceleratorRead);
|
||||||
Integer osites = arg.Grid()->oSites();
|
Integer osites = arg.Grid()->oSites();
|
||||||
auto ssum= sum_gpu_large(&arg_v[0],osites);
|
auto ssum= sum_gpu_large(&arg_v[0],osites);
|
||||||
@ -238,10 +235,11 @@ inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &
|
|||||||
typedef decltype(innerProductD(vobj(),vobj())) inner_t;
|
typedef decltype(innerProductD(vobj(),vobj())) inner_t;
|
||||||
Vector<inner_t> inner_tmp(sites);
|
Vector<inner_t> inner_tmp(sites);
|
||||||
auto inner_tmp_v = &inner_tmp[0];
|
auto inner_tmp_v = &inner_tmp[0];
|
||||||
|
|
||||||
{
|
{
|
||||||
autoView( left_v , left, AcceleratorRead);
|
autoView( left_v , left, AcceleratorRead);
|
||||||
autoView( right_v,right, AcceleratorRead);
|
autoView( right_v,right, AcceleratorRead);
|
||||||
// This code could read coalesce
|
|
||||||
// GPU - SIMT lane compliance...
|
// GPU - SIMT lane compliance...
|
||||||
accelerator_for( ss, sites, 1,{
|
accelerator_for( ss, sites, 1,{
|
||||||
auto x_l = left_v[ss];
|
auto x_l = left_v[ss];
|
||||||
|
@ -1,125 +0,0 @@
|
|||||||
NAMESPACE_BEGIN(Grid);
|
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Possibly promote to double and sum
|
|
||||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_objectD sumD_gpu_tensor(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
typedef typename vobj::scalar_object sobj;
|
|
||||||
typedef typename vobj::scalar_objectD sobjD;
|
|
||||||
sobj *mysum =(sobj *) malloc_shared(sizeof(sobj),*theGridAccelerator);
|
|
||||||
sobj identity; zeroit(identity);
|
|
||||||
sobj ret ;
|
|
||||||
|
|
||||||
Integer nsimd= vobj::Nsimd();
|
|
||||||
|
|
||||||
theGridAccelerator->submit([&](cl::sycl::handler &cgh) {
|
|
||||||
auto Reduction = cl::sycl::reduction(mysum,identity,std::plus<>());
|
|
||||||
cgh.parallel_for(cl::sycl::range<1>{osites},
|
|
||||||
Reduction,
|
|
||||||
[=] (cl::sycl::id<1> item, auto &sum) {
|
|
||||||
auto osite = item[0];
|
|
||||||
sum +=Reduce(lat[osite]);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
theGridAccelerator->wait();
|
|
||||||
ret = mysum[0];
|
|
||||||
free(mysum,*theGridAccelerator);
|
|
||||||
sobjD dret; convertType(dret,ret);
|
|
||||||
return dret;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_objectD sumD_gpu_large(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
return sumD_gpu_tensor(lat,osites);
|
|
||||||
}
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
return sumD_gpu_large(lat,osites);
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
return sumD_gpu_large(lat,osites);
|
|
||||||
}
|
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Return as same precision as input performing reduction in double precision though
|
|
||||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_object sum_gpu(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
typedef typename vobj::scalar_object sobj;
|
|
||||||
sobj result;
|
|
||||||
result = sumD_gpu(lat,osites);
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_object sum_gpu_large(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
typedef typename vobj::scalar_object sobj;
|
|
||||||
sobj result;
|
|
||||||
result = sumD_gpu_large(lat,osites);
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
|
||||||
|
|
||||||
/*
|
|
||||||
template<class Double> Double svm_reduce(Double *vec,uint64_t L)
|
|
||||||
{
|
|
||||||
Double sumResult; zeroit(sumResult);
|
|
||||||
Double *d_sum =(Double *)cl::sycl::malloc_shared(sizeof(Double),*theGridAccelerator);
|
|
||||||
Double identity; zeroit(identity);
|
|
||||||
theGridAccelerator->submit([&](cl::sycl::handler &cgh) {
|
|
||||||
auto Reduction = cl::sycl::reduction(d_sum,identity,std::plus<>());
|
|
||||||
cgh.parallel_for(cl::sycl::range<1>{L},
|
|
||||||
Reduction,
|
|
||||||
[=] (cl::sycl::id<1> index, auto &sum) {
|
|
||||||
sum +=vec[index];
|
|
||||||
});
|
|
||||||
});
|
|
||||||
theGridAccelerator->wait();
|
|
||||||
Double ret = d_sum[0];
|
|
||||||
free(d_sum,*theGridAccelerator);
|
|
||||||
std::cout << " svm_reduce finished "<<L<<" sites sum = " << ret <<std::endl;
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
template <class vobj>
|
|
||||||
inline typename vobj::scalar_objectD sumD_gpu_repack(const vobj *lat, Integer osites)
|
|
||||||
{
|
|
||||||
typedef typename vobj::vector_type vector;
|
|
||||||
typedef typename vobj::scalar_type scalar;
|
|
||||||
|
|
||||||
typedef typename vobj::scalar_typeD scalarD;
|
|
||||||
typedef typename vobj::scalar_objectD sobjD;
|
|
||||||
|
|
||||||
sobjD ret;
|
|
||||||
scalarD *ret_p = (scalarD *)&ret;
|
|
||||||
|
|
||||||
const int nsimd = vobj::Nsimd();
|
|
||||||
const int words = sizeof(vobj)/sizeof(vector);
|
|
||||||
|
|
||||||
Vector<scalar> buffer(osites*nsimd);
|
|
||||||
scalar *buf = &buffer[0];
|
|
||||||
vector *dat = (vector *)lat;
|
|
||||||
|
|
||||||
for(int w=0;w<words;w++) {
|
|
||||||
|
|
||||||
accelerator_for(ss,osites,nsimd,{
|
|
||||||
int lane = acceleratorSIMTlane(nsimd);
|
|
||||||
buf[ss*nsimd+lane] = dat[ss*words+w].getlane(lane);
|
|
||||||
});
|
|
||||||
//Precision change at this point is to late to gain precision
|
|
||||||
ret_p[w] = svm_reduce(buf,nsimd*osites);
|
|
||||||
}
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
*/
|
|
@ -1,126 +0,0 @@
|
|||||||
NAMESPACE_BEGIN(Grid);
|
|
||||||
|
|
||||||
// If NOT CUDA or HIP -- we should provide
|
|
||||||
// -- atomicAdd(float *,float)
|
|
||||||
// -- atomicAdd(double *,double)
|
|
||||||
//
|
|
||||||
// Augment CUDA with complex atomics
|
|
||||||
#if !defined(GRID_HIP) || !defined(GRID_CUDA)
|
|
||||||
inline void atomicAdd(float *acc,float elem)
|
|
||||||
{
|
|
||||||
*acc += elem;
|
|
||||||
}
|
|
||||||
inline void atomicAdd(double *acc,double elem)
|
|
||||||
{
|
|
||||||
*acc += elem;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
inline void atomicAdd(ComplexD *accum,ComplexD & elem)
|
|
||||||
{
|
|
||||||
double *a_p = (double *)accum;
|
|
||||||
double *e_p = (double *)&elem;
|
|
||||||
for(int w=0;w<2;w++){
|
|
||||||
atomicAdd(&a_p[w],e_p[w]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
inline void atomicAdd(ComplexF *accum,ComplexF & elem)
|
|
||||||
{
|
|
||||||
float *a_p = (float *)accum;
|
|
||||||
float *e_p = (float *)&elem;
|
|
||||||
for(int w=0;w<2;w++){
|
|
||||||
atomicAdd(&a_p[w],e_p[w]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Augment CUDA with vobj atomics
|
|
||||||
template<class vobj> accelerator_inline void atomicAdd(vobj *accum, vobj & elem)
|
|
||||||
{
|
|
||||||
typedef typename vobj::scalar_type scalar_type;
|
|
||||||
scalar_type *a_p= (scalar_type *)accum;
|
|
||||||
scalar_type *e_p= (scalar_type *)& elem;
|
|
||||||
for(int w=0;w<vobj::Nsimd();w++){
|
|
||||||
atomicAdd(&a_p[w],e_p[w]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Atomics based slice sum
|
|
||||||
template<class vobj> inline void sliceSumGpu(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
|
|
||||||
{
|
|
||||||
typedef typename vobj::scalar_object sobj;
|
|
||||||
typedef typename vobj::scalar_object::scalar_type scalar_type;
|
|
||||||
GridBase *grid = Data.Grid();
|
|
||||||
assert(grid!=NULL);
|
|
||||||
|
|
||||||
const int Nd = grid->_ndimension;
|
|
||||||
const int Nsimd = grid->Nsimd();
|
|
||||||
|
|
||||||
assert(orthogdim >= 0);
|
|
||||||
assert(orthogdim < Nd);
|
|
||||||
|
|
||||||
int fd=grid->_fdimensions[orthogdim];
|
|
||||||
int ld=grid->_ldimensions[orthogdim];
|
|
||||||
int rd=grid->_rdimensions[orthogdim];
|
|
||||||
|
|
||||||
// Move to device memory and copy in / out
|
|
||||||
Vector<vobj> lvSum(rd); // will locally sum vectors first
|
|
||||||
Vector<sobj> lsSum(ld,Zero()); // sum across these down to scalars
|
|
||||||
ExtractBuffer<sobj> extracted(Nsimd); // splitting the SIMD
|
|
||||||
|
|
||||||
result.resize(fd); // And then global sum to return the same vector to every node
|
|
||||||
for(int r=0;r<rd;r++){
|
|
||||||
lvSum[r]=Zero();
|
|
||||||
}
|
|
||||||
|
|
||||||
int e1= grid->_slice_nblock[orthogdim];
|
|
||||||
int e2= grid->_slice_block [orthogdim];
|
|
||||||
int stride=grid->_slice_stride[orthogdim];
|
|
||||||
|
|
||||||
// sum over reduced dimension planes, breaking out orthog dir
|
|
||||||
// Parallel over orthog direction
|
|
||||||
autoView( Data_v, Data, AcceleratorRead);
|
|
||||||
auto lvSum_p=&lvSum[0];
|
|
||||||
int ostride = grid->_ostride[orthogdim];
|
|
||||||
accelerator_for( ree,rd*e1*e2,1, {
|
|
||||||
int b = ree%e2;
|
|
||||||
int re= ree/e2;
|
|
||||||
int n=re%e1;
|
|
||||||
int r=re/e1;
|
|
||||||
int so=r*ostride;
|
|
||||||
int ss=so+n*stride+b;
|
|
||||||
atomicAdd(&lvSum_p[r],Data_v[ss]);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Sum across simd lanes in the plane, breaking out orthog dir.
|
|
||||||
Coordinate icoor(Nd);
|
|
||||||
|
|
||||||
for(int rt=0;rt<rd;rt++){
|
|
||||||
|
|
||||||
extract(lvSum[rt],extracted);
|
|
||||||
|
|
||||||
for(int idx=0;idx<Nsimd;idx++){
|
|
||||||
|
|
||||||
grid->iCoorFromIindex(icoor,idx);
|
|
||||||
|
|
||||||
int ldx =rt+icoor[orthogdim]*rd;
|
|
||||||
|
|
||||||
lsSum[ldx]=lsSum[ldx]+extracted[idx];
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sum over nodes.
|
|
||||||
for(int t=0;t<fd;t++){
|
|
||||||
int pt = t/ld; // processor plane
|
|
||||||
int lt = t%ld;
|
|
||||||
if ( pt == grid->_processor_coor[orthogdim] ) {
|
|
||||||
result[t]=lsSum[lt];
|
|
||||||
} else {
|
|
||||||
result[t]=Zero();
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
scalar_type * ptr = (scalar_type *) &result[0];
|
|
||||||
int words = fd*sizeof(sobj)/sizeof(scalar_type);
|
|
||||||
grid->GlobalSumVector(ptr, words);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
|
@ -65,39 +65,31 @@ GridLogger GridLogSolver (1, "Solver", GridLogColours, "NORMAL");
|
|||||||
GridLogger GridLogError (1, "Error" , GridLogColours, "RED");
|
GridLogger GridLogError (1, "Error" , GridLogColours, "RED");
|
||||||
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
|
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
|
||||||
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
|
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
|
||||||
GridLogger GridLogMemory (1, "Memory", GridLogColours, "NORMAL");
|
|
||||||
GridLogger GridLogTracing(1, "Tracing", GridLogColours, "NORMAL");
|
|
||||||
GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
|
GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
|
||||||
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
|
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
|
||||||
GridLogger GridLogDslash (1, "Dslash", GridLogColours, "BLUE");
|
|
||||||
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
|
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
|
||||||
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
|
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
|
||||||
GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");
|
GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");
|
||||||
|
|
||||||
void GridLogConfigure(std::vector<std::string> &logstreams) {
|
void GridLogConfigure(std::vector<std::string> &logstreams) {
|
||||||
GridLogError.Active(1);
|
GridLogError.Active(0);
|
||||||
GridLogWarning.Active(0);
|
GridLogWarning.Active(0);
|
||||||
GridLogMessage.Active(1); // at least the messages should be always on
|
GridLogMessage.Active(1); // at least the messages should be always on
|
||||||
GridLogMemory.Active(0);
|
|
||||||
GridLogTracing.Active(0);
|
|
||||||
GridLogIterative.Active(0);
|
GridLogIterative.Active(0);
|
||||||
GridLogDebug.Active(0);
|
GridLogDebug.Active(0);
|
||||||
GridLogPerformance.Active(0);
|
GridLogPerformance.Active(0);
|
||||||
GridLogDslash.Active(0);
|
|
||||||
GridLogIntegrator.Active(1);
|
GridLogIntegrator.Active(1);
|
||||||
GridLogColours.Active(0);
|
GridLogColours.Active(0);
|
||||||
GridLogHMC.Active(1);
|
GridLogHMC.Active(1);
|
||||||
|
|
||||||
for (int i = 0; i < logstreams.size(); i++) {
|
for (int i = 0; i < logstreams.size(); i++) {
|
||||||
if (logstreams[i] == std::string("Tracing")) GridLogTracing.Active(1);
|
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
|
||||||
if (logstreams[i] == std::string("Memory")) GridLogMemory.Active(1);
|
|
||||||
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
|
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
|
||||||
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
|
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
|
||||||
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
|
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
|
||||||
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
|
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
|
||||||
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
|
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
|
||||||
if (logstreams[i] == std::string("Dslash")) GridLogDslash.Active(1);
|
if (logstreams[i] == std::string("NoIntegrator")) GridLogIntegrator.Active(0);
|
||||||
if (logstreams[i] == std::string("NoIntegrator"))GridLogIntegrator.Active(0);
|
|
||||||
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
|
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
|
||||||
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
|
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
|
||||||
}
|
}
|
||||||
|
@ -138,8 +138,7 @@ public:
|
|||||||
stream << std::setw(log.topWidth);
|
stream << std::setw(log.topWidth);
|
||||||
}
|
}
|
||||||
stream << log.topName << log.background()<< " : ";
|
stream << log.topName << log.background()<< " : ";
|
||||||
// stream << log.colour() << std::left;
|
stream << log.colour() << std::left;
|
||||||
stream << std::left;
|
|
||||||
if (log.chanWidth > 0)
|
if (log.chanWidth > 0)
|
||||||
{
|
{
|
||||||
stream << std::setw(log.chanWidth);
|
stream << std::setw(log.chanWidth);
|
||||||
@ -154,9 +153,9 @@ public:
|
|||||||
stream << log.evidence()
|
stream << log.evidence()
|
||||||
<< now << log.background() << " : " ;
|
<< now << log.background() << " : " ;
|
||||||
}
|
}
|
||||||
// stream << log.colour();
|
stream << log.colour();
|
||||||
stream << std::right;
|
|
||||||
stream.flags(f);
|
stream.flags(f);
|
||||||
|
|
||||||
return stream;
|
return stream;
|
||||||
} else {
|
} else {
|
||||||
return devnull;
|
return devnull;
|
||||||
@ -181,12 +180,9 @@ extern GridLogger GridLogWarning;
|
|||||||
extern GridLogger GridLogMessage;
|
extern GridLogger GridLogMessage;
|
||||||
extern GridLogger GridLogDebug ;
|
extern GridLogger GridLogDebug ;
|
||||||
extern GridLogger GridLogPerformance;
|
extern GridLogger GridLogPerformance;
|
||||||
extern GridLogger GridLogDslash;
|
|
||||||
extern GridLogger GridLogIterative ;
|
extern GridLogger GridLogIterative ;
|
||||||
extern GridLogger GridLogIntegrator ;
|
extern GridLogger GridLogIntegrator ;
|
||||||
extern GridLogger GridLogHMC;
|
extern GridLogger GridLogHMC;
|
||||||
extern GridLogger GridLogMemory;
|
|
||||||
extern GridLogger GridLogTracing;
|
|
||||||
extern Colours GridLogColours;
|
extern Colours GridLogColours;
|
||||||
|
|
||||||
std::string demangle(const char* name) ;
|
std::string demangle(const char* name) ;
|
||||||
|
@ -31,7 +31,6 @@ directory
|
|||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <string>
|
|
||||||
#include <map>
|
#include <map>
|
||||||
|
|
||||||
#include <pwd.h>
|
#include <pwd.h>
|
||||||
@ -655,8 +654,7 @@ class IldgWriter : public ScidacWriter {
|
|||||||
// Fill ILDG header data struct
|
// Fill ILDG header data struct
|
||||||
//////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////
|
||||||
ildgFormat ildgfmt ;
|
ildgFormat ildgfmt ;
|
||||||
const std::string stNC = std::to_string( Nc ) ;
|
ildgfmt.field = std::string("su3gauge");
|
||||||
ildgfmt.field = std::string("su"+stNC+"gauge");
|
|
||||||
|
|
||||||
if ( format == std::string("IEEE32BIG") ) {
|
if ( format == std::string("IEEE32BIG") ) {
|
||||||
ildgfmt.precision = 32;
|
ildgfmt.precision = 32;
|
||||||
@ -873,8 +871,7 @@ class IldgReader : public GridLimeReader {
|
|||||||
} else {
|
} else {
|
||||||
|
|
||||||
assert(found_ildgFormat);
|
assert(found_ildgFormat);
|
||||||
const std::string stNC = std::to_string( Nc ) ;
|
assert ( ildgFormat_.field == std::string("su3gauge") );
|
||||||
assert ( ildgFormat_.field == std::string("su"+stNC+"gauge") );
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////////////
|
||||||
// Populate our Grid metadata as best we can
|
// Populate our Grid metadata as best we can
|
||||||
@ -882,7 +879,7 @@ class IldgReader : public GridLimeReader {
|
|||||||
|
|
||||||
std::ostringstream vers; vers << ildgFormat_.version;
|
std::ostringstream vers; vers << ildgFormat_.version;
|
||||||
FieldMetaData_.hdr_version = vers.str();
|
FieldMetaData_.hdr_version = vers.str();
|
||||||
FieldMetaData_.data_type = std::string("4D_SU"+stNC+"_GAUGE_"+stNC+"x"+stNC);
|
FieldMetaData_.data_type = std::string("4D_SU3_GAUGE_3X3");
|
||||||
|
|
||||||
FieldMetaData_.nd=4;
|
FieldMetaData_.nd=4;
|
||||||
FieldMetaData_.dimension.resize(4);
|
FieldMetaData_.dimension.resize(4);
|
||||||
|
@ -6,8 +6,8 @@
|
|||||||
|
|
||||||
Copyright (C) 2015
|
Copyright (C) 2015
|
||||||
|
|
||||||
|
|
||||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
Author: Jamie Hudspith <renwick.james.hudspth@gmail.com>
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
This program is free software; you can redistribute it and/or modify
|
||||||
it under the terms of the GNU General Public License as published by
|
it under the terms of the GNU General Public License as published by
|
||||||
@ -182,8 +182,8 @@ class GaugeStatistics
|
|||||||
public:
|
public:
|
||||||
void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
|
void operator()(Lattice<vLorentzColourMatrixD> & data,FieldMetaData &header)
|
||||||
{
|
{
|
||||||
header.link_trace = WilsonLoops<Impl>::linkTrace(data);
|
header.link_trace=WilsonLoops<Impl>::linkTrace(data);
|
||||||
header.plaquette = WilsonLoops<Impl>::avgPlaquette(data);
|
header.plaquette =WilsonLoops<Impl>::avgPlaquette(data);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics;
|
typedef GaugeStatistics<PeriodicGimplD> PeriodicGaugeStatistics;
|
||||||
@ -203,24 +203,20 @@ template<> inline void PrepareMetaData<vLorentzColourMatrixD>(Lattice<vLorentzCo
|
|||||||
//////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////
|
||||||
inline void reconstruct3(LorentzColourMatrix & cm)
|
inline void reconstruct3(LorentzColourMatrix & cm)
|
||||||
{
|
{
|
||||||
assert( Nc < 4 && Nc > 1 ) ;
|
const int x=0;
|
||||||
|
const int y=1;
|
||||||
|
const int z=2;
|
||||||
for(int mu=0;mu<Nd;mu++){
|
for(int mu=0;mu<Nd;mu++){
|
||||||
#if Nc == 2
|
|
||||||
cm(mu)()(1,0) = -adj(cm(mu)()(0,y)) ;
|
|
||||||
cm(mu)()(1,1) = adj(cm(mu)()(0,x)) ;
|
|
||||||
#else
|
|
||||||
const int x=0 , y=1 , z=2 ; // a little disinenuous labelling
|
|
||||||
cm(mu)()(2,x) = adj(cm(mu)()(0,y)*cm(mu)()(1,z)-cm(mu)()(0,z)*cm(mu)()(1,y)); //x= yz-zy
|
cm(mu)()(2,x) = adj(cm(mu)()(0,y)*cm(mu)()(1,z)-cm(mu)()(0,z)*cm(mu)()(1,y)); //x= yz-zy
|
||||||
cm(mu)()(2,y) = adj(cm(mu)()(0,z)*cm(mu)()(1,x)-cm(mu)()(0,x)*cm(mu)()(1,z)); //y= zx-xz
|
cm(mu)()(2,y) = adj(cm(mu)()(0,z)*cm(mu)()(1,x)-cm(mu)()(0,x)*cm(mu)()(1,z)); //y= zx-xz
|
||||||
cm(mu)()(2,z) = adj(cm(mu)()(0,x)*cm(mu)()(1,y)-cm(mu)()(0,y)*cm(mu)()(1,x)); //z= xy-yx
|
cm(mu)()(2,z) = adj(cm(mu)()(0,x)*cm(mu)()(1,y)-cm(mu)()(0,y)*cm(mu)()(1,x)); //z= xy-yx
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
// Some data types for intermediate storage
|
// Some data types for intermediate storage
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, Nc-1>, Nd >;
|
template<typename vtype> using iLorentzColour2x3 = iVector<iVector<iVector<vtype, Nc>, 2>, Nd >;
|
||||||
|
|
||||||
typedef iLorentzColour2x3<Complex> LorentzColour2x3;
|
typedef iLorentzColour2x3<Complex> LorentzColour2x3;
|
||||||
typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F;
|
typedef iLorentzColour2x3<ComplexF> LorentzColour2x3F;
|
||||||
@ -282,6 +278,7 @@ struct GaugeSimpleMunger{
|
|||||||
|
|
||||||
template <class fobj, class sobj>
|
template <class fobj, class sobj>
|
||||||
struct GaugeSimpleUnmunger {
|
struct GaugeSimpleUnmunger {
|
||||||
|
|
||||||
void operator()(sobj &in, fobj &out) {
|
void operator()(sobj &in, fobj &out) {
|
||||||
for (int mu = 0; mu < Nd; mu++) {
|
for (int mu = 0; mu < Nd; mu++) {
|
||||||
for (int i = 0; i < Nc; i++) {
|
for (int i = 0; i < Nc; i++) {
|
||||||
@ -320,8 +317,8 @@ template<class fobj,class sobj>
|
|||||||
struct Gauge3x2munger{
|
struct Gauge3x2munger{
|
||||||
void operator() (fobj &in,sobj &out){
|
void operator() (fobj &in,sobj &out){
|
||||||
for(int mu=0;mu<Nd;mu++){
|
for(int mu=0;mu<Nd;mu++){
|
||||||
for(int i=0;i<Nc-1;i++){
|
for(int i=0;i<2;i++){
|
||||||
for(int j=0;j<Nc;j++){
|
for(int j=0;j<3;j++){
|
||||||
out(mu)()(i,j) = in(mu)(i)(j);
|
out(mu)()(i,j) = in(mu)(i)(j);
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
@ -333,8 +330,8 @@ template<class fobj,class sobj>
|
|||||||
struct Gauge3x2unmunger{
|
struct Gauge3x2unmunger{
|
||||||
void operator() (sobj &in,fobj &out){
|
void operator() (sobj &in,fobj &out){
|
||||||
for(int mu=0;mu<Nd;mu++){
|
for(int mu=0;mu<Nd;mu++){
|
||||||
for(int i=0;i<Nc-1;i++){
|
for(int i=0;i<2;i++){
|
||||||
for(int j=0;j<Nc;j++){
|
for(int j=0;j<3;j++){
|
||||||
out(mu)(i)(j) = in(mu)()(i,j);
|
out(mu)(i)(j) = in(mu)()(i,j);
|
||||||
}}
|
}}
|
||||||
}
|
}
|
||||||
|
@ -9,7 +9,6 @@
|
|||||||
Author: Matt Spraggs <matthew.spraggs@gmail.com>
|
Author: Matt Spraggs <matthew.spraggs@gmail.com>
|
||||||
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
Author: Jamie Hudspith <renwick.james.hudspth@gmail.com>
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
This program is free software; you can redistribute it and/or modify
|
||||||
it under the terms of the GNU General Public License as published by
|
it under the terms of the GNU General Public License as published by
|
||||||
@ -31,8 +30,6 @@
|
|||||||
#ifndef GRID_NERSC_IO_H
|
#ifndef GRID_NERSC_IO_H
|
||||||
#define GRID_NERSC_IO_H
|
#define GRID_NERSC_IO_H
|
||||||
|
|
||||||
#include <string>
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
using namespace Grid;
|
using namespace Grid;
|
||||||
@ -148,17 +145,15 @@ public:
|
|||||||
|
|
||||||
std::string format(header.floating_point);
|
std::string format(header.floating_point);
|
||||||
|
|
||||||
const int ieee32big = (format == std::string("IEEE32BIG"));
|
int ieee32big = (format == std::string("IEEE32BIG"));
|
||||||
const int ieee32 = (format == std::string("IEEE32"));
|
int ieee32 = (format == std::string("IEEE32"));
|
||||||
const int ieee64big = (format == std::string("IEEE64BIG"));
|
int ieee64big = (format == std::string("IEEE64BIG"));
|
||||||
const int ieee64 = (format == std::string("IEEE64") || \
|
int ieee64 = (format == std::string("IEEE64") || format == std::string("IEEE64LITTLE"));
|
||||||
format == std::string("IEEE64LITTLE"));
|
|
||||||
|
|
||||||
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
|
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
|
||||||
// depending on datatype, set up munger;
|
// depending on datatype, set up munger;
|
||||||
// munger is a function of <floating point, Real, data_type>
|
// munger is a function of <floating point, Real, data_type>
|
||||||
const std::string stNC = std::to_string( Nc ) ;
|
if ( header.data_type == std::string("4D_SU3_GAUGE") ) {
|
||||||
if ( header.data_type == std::string("4D_SU"+stNC+"_GAUGE") ) {
|
|
||||||
if ( ieee32 || ieee32big ) {
|
if ( ieee32 || ieee32big ) {
|
||||||
BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F>
|
BinaryIO::readLatticeObject<vLorentzColourMatrixD, LorentzColour2x3F>
|
||||||
(Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
|
(Umu,file,Gauge3x2munger<LorentzColour2x3F,LorentzColourMatrix>(), offset,format,
|
||||||
@ -169,7 +164,7 @@ public:
|
|||||||
(Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
|
(Umu,file,Gauge3x2munger<LorentzColour2x3D,LorentzColourMatrix>(),offset,format,
|
||||||
nersc_csum,scidac_csuma,scidac_csumb);
|
nersc_csum,scidac_csuma,scidac_csumb);
|
||||||
}
|
}
|
||||||
} else if ( header.data_type == std::string("4D_SU"+stNC+"_GAUGE_"+stNC+"x"+stNC) ) {
|
} else if ( header.data_type == std::string("4D_SU3_GAUGE_3x3") ) {
|
||||||
if ( ieee32 || ieee32big ) {
|
if ( ieee32 || ieee32big ) {
|
||||||
BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF>
|
BinaryIO::readLatticeObject<vLorentzColourMatrixD,LorentzColourMatrixF>
|
||||||
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
|
(Umu,file,GaugeSimpleMunger<LorentzColourMatrixF,LorentzColourMatrix>(),offset,format,
|
||||||
@ -214,29 +209,27 @@ public:
|
|||||||
template<class GaugeStats=PeriodicGaugeStatistics>
|
template<class GaugeStats=PeriodicGaugeStatistics>
|
||||||
static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
|
static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
|
||||||
std::string file,
|
std::string file,
|
||||||
std::string ens_label = std::string("DWF"),
|
std::string ens_label = std::string("DWF"))
|
||||||
std::string ens_id = std::string("UKQCD"),
|
|
||||||
unsigned int sequence_number = 1)
|
|
||||||
{
|
{
|
||||||
writeConfiguration(Umu,file,0,1,ens_label,ens_id,sequence_number);
|
writeConfiguration(Umu,file,0,1,ens_label);
|
||||||
}
|
}
|
||||||
template<class GaugeStats=PeriodicGaugeStatistics>
|
template<class GaugeStats=PeriodicGaugeStatistics>
|
||||||
static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
|
static inline void writeConfiguration(Lattice<vLorentzColourMatrixD > &Umu,
|
||||||
std::string file,
|
std::string file,
|
||||||
int two_row,
|
int two_row,
|
||||||
int bits32,
|
int bits32,
|
||||||
std::string ens_label = std::string("DWF"),
|
std::string ens_label = std::string("DWF"))
|
||||||
std::string ens_id = std::string("UKQCD"),
|
|
||||||
unsigned int sequence_number = 1)
|
|
||||||
{
|
{
|
||||||
typedef vLorentzColourMatrixD vobj;
|
typedef vLorentzColourMatrixD vobj;
|
||||||
typedef typename vobj::scalar_object sobj;
|
typedef typename vobj::scalar_object sobj;
|
||||||
|
|
||||||
FieldMetaData header;
|
FieldMetaData header;
|
||||||
header.sequence_number = sequence_number;
|
///////////////////////////////////////////
|
||||||
header.ensemble_id = ens_id;
|
// Following should become arguments
|
||||||
|
///////////////////////////////////////////
|
||||||
|
header.sequence_number = 1;
|
||||||
|
header.ensemble_id = std::string("UKQCD");
|
||||||
header.ensemble_label = ens_label;
|
header.ensemble_label = ens_label;
|
||||||
header.hdr_version = "1.0" ;
|
|
||||||
|
|
||||||
typedef LorentzColourMatrixD fobj3D;
|
typedef LorentzColourMatrixD fobj3D;
|
||||||
typedef LorentzColour2x3D fobj2D;
|
typedef LorentzColour2x3D fobj2D;
|
||||||
@ -250,14 +243,10 @@ public:
|
|||||||
|
|
||||||
uint64_t offset;
|
uint64_t offset;
|
||||||
|
|
||||||
// Sod it -- always write NcxNc double
|
// Sod it -- always write 3x3 double
|
||||||
header.floating_point = std::string("IEEE64BIG");
|
header.floating_point = std::string("IEEE64BIG");
|
||||||
const std::string stNC = std::to_string( Nc ) ;
|
header.data_type = std::string("4D_SU3_GAUGE_3x3");
|
||||||
if( two_row ) {
|
GaugeSimpleUnmunger<fobj3D,sobj> munge;
|
||||||
header.data_type = std::string("4D_SU" + stNC + "_GAUGE" );
|
|
||||||
} else {
|
|
||||||
header.data_type = std::string("4D_SU" + stNC + "_GAUGE_" + stNC + "x" + stNC );
|
|
||||||
}
|
|
||||||
if ( grid->IsBoss() ) {
|
if ( grid->IsBoss() ) {
|
||||||
truncate(file);
|
truncate(file);
|
||||||
offset = writeHeader(header,file);
|
offset = writeHeader(header,file);
|
||||||
@ -265,15 +254,8 @@ public:
|
|||||||
grid->Broadcast(0,(void *)&offset,sizeof(offset));
|
grid->Broadcast(0,(void *)&offset,sizeof(offset));
|
||||||
|
|
||||||
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
|
uint32_t nersc_csum,scidac_csuma,scidac_csumb;
|
||||||
if( two_row ) {
|
|
||||||
Gauge3x2unmunger<fobj2D,sobj> munge;
|
|
||||||
BinaryIO::writeLatticeObject<vobj,fobj2D>(Umu,file,munge,offset,header.floating_point,
|
|
||||||
nersc_csum,scidac_csuma,scidac_csumb);
|
|
||||||
} else {
|
|
||||||
GaugeSimpleUnmunger<fobj3D,sobj> munge;
|
|
||||||
BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
|
BinaryIO::writeLatticeObject<vobj,fobj3D>(Umu,file,munge,offset,header.floating_point,
|
||||||
nersc_csum,scidac_csuma,scidac_csumb);
|
nersc_csum,scidac_csuma,scidac_csumb);
|
||||||
}
|
|
||||||
header.checksum = nersc_csum;
|
header.checksum = nersc_csum;
|
||||||
if ( grid->IsBoss() ) {
|
if ( grid->IsBoss() ) {
|
||||||
writeHeader(header,file);
|
writeHeader(header,file);
|
||||||
@ -306,6 +288,7 @@ public:
|
|||||||
MachineCharacteristics(header);
|
MachineCharacteristics(header);
|
||||||
|
|
||||||
uint64_t offset;
|
uint64_t offset;
|
||||||
|
|
||||||
#ifdef RNG_RANLUX
|
#ifdef RNG_RANLUX
|
||||||
header.floating_point = std::string("UINT64");
|
header.floating_point = std::string("UINT64");
|
||||||
header.data_type = std::string("RANLUX48");
|
header.data_type = std::string("RANLUX48");
|
||||||
|
@ -27,12 +27,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
/* END LEGAL */
|
/* END LEGAL */
|
||||||
|
|
||||||
#include <Grid/GridCore.h>
|
#include <Grid/GridCore.h>
|
||||||
|
|
||||||
#include <Grid/perfmon/Timer.h>
|
|
||||||
#include <Grid/perfmon/PerfCount.h>
|
#include <Grid/perfmon/PerfCount.h>
|
||||||
NAMESPACE_BEGIN(Grid);
|
|
||||||
|
|
||||||
GridTimePoint theProgramStart = GridClock::now();
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
#define CacheControl(L,O,R) ((PERF_COUNT_HW_CACHE_##L)|(PERF_COUNT_HW_CACHE_OP_##O<<8)| (PERF_COUNT_HW_CACHE_RESULT_##R<<16))
|
#define CacheControl(L,O,R) ((PERF_COUNT_HW_CACHE_##L)|(PERF_COUNT_HW_CACHE_OP_##O<<8)| (PERF_COUNT_HW_CACHE_RESULT_##R<<16))
|
||||||
#define RawConfig(A,B) (A<<8|B)
|
#define RawConfig(A,B) (A<<8|B)
|
||||||
|
@ -30,12 +30,6 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
#ifndef GRID_PERFCOUNT_H
|
#ifndef GRID_PERFCOUNT_H
|
||||||
#define GRID_PERFCOUNT_H
|
#define GRID_PERFCOUNT_H
|
||||||
|
|
||||||
|
|
||||||
#ifndef __SSC_START
|
|
||||||
#define __SSC_START
|
|
||||||
#define __SSC_STOP
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
#include <ctime>
|
#include <ctime>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
@ -78,9 +72,17 @@ static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid,
|
|||||||
inline uint64_t cyclecount(void){
|
inline uint64_t cyclecount(void){
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
#define __SSC_MARK(mark) __asm__ __volatile__ ("movl %0, %%ebx; .byte 0x64, 0x67, 0x90 " ::"i"(mark):"%ebx")
|
||||||
|
#define __SSC_STOP __SSC_MARK(0x110)
|
||||||
|
#define __SSC_START __SSC_MARK(0x111)
|
||||||
|
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
|
#define __SSC_MARK(mark)
|
||||||
|
#define __SSC_STOP
|
||||||
|
#define __SSC_START
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* cycle counters arch dependent
|
* cycle counters arch dependent
|
||||||
*/
|
*/
|
||||||
|
@ -35,8 +35,17 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
|
|
||||||
NAMESPACE_BEGIN(Grid)
|
NAMESPACE_BEGIN(Grid)
|
||||||
|
|
||||||
//typedef std::chrono::system_clock GridClock;
|
// Dress the output; use std::chrono
|
||||||
typedef std::chrono::high_resolution_clock GridClock;
|
// C++11 time facilities better?
|
||||||
|
inline double usecond(void) {
|
||||||
|
struct timeval tv;
|
||||||
|
#ifdef TIMERS_ON
|
||||||
|
gettimeofday(&tv,NULL);
|
||||||
|
#endif
|
||||||
|
return 1.0*tv.tv_usec + 1.0e6*tv.tv_sec;
|
||||||
|
}
|
||||||
|
|
||||||
|
typedef std::chrono::system_clock GridClock;
|
||||||
typedef std::chrono::time_point<GridClock> GridTimePoint;
|
typedef std::chrono::time_point<GridClock> GridTimePoint;
|
||||||
|
|
||||||
typedef std::chrono::seconds GridSecs;
|
typedef std::chrono::seconds GridSecs;
|
||||||
@ -44,15 +53,6 @@ typedef std::chrono::milliseconds GridMillisecs;
|
|||||||
typedef std::chrono::microseconds GridUsecs;
|
typedef std::chrono::microseconds GridUsecs;
|
||||||
typedef std::chrono::microseconds GridTime;
|
typedef std::chrono::microseconds GridTime;
|
||||||
|
|
||||||
extern GridTimePoint theProgramStart;
|
|
||||||
// Dress the output; use std::chrono
|
|
||||||
// C++11 time facilities better?
|
|
||||||
inline double usecond(void) {
|
|
||||||
auto usecs = std::chrono::duration_cast<GridUsecs>(GridClock::now()-theProgramStart);
|
|
||||||
return 1.0*usecs.count();
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
|
inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
|
||||||
{
|
{
|
||||||
stream << time.count()<<" s";
|
stream << time.count()<<" s";
|
||||||
|
@ -1,70 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
|
||||||
|
|
||||||
#ifdef GRID_TRACING_NVTX
|
|
||||||
#include <nvToolsExt.h>
|
|
||||||
class GridTracer {
|
|
||||||
public:
|
|
||||||
GridTracer(const char* name) {
|
|
||||||
nvtxRangePushA(name);
|
|
||||||
}
|
|
||||||
~GridTracer() {
|
|
||||||
nvtxRangePop();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
inline void tracePush(const char *name) { nvtxRangePushA(name); }
|
|
||||||
inline void tracePop(const char *name) { nvtxRangePop(); }
|
|
||||||
inline int traceStart(const char *name) { }
|
|
||||||
inline void traceStop(int ID) { }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef GRID_TRACING_ROCTX
|
|
||||||
#include <roctracer/roctx.h>
|
|
||||||
class GridTracer {
|
|
||||||
public:
|
|
||||||
GridTracer(const char* name) {
|
|
||||||
roctxRangePushA(name);
|
|
||||||
std::cout << "roctxRangePush "<<name<<std::endl;
|
|
||||||
}
|
|
||||||
~GridTracer() {
|
|
||||||
roctxRangePop();
|
|
||||||
std::cout << "roctxRangePop "<<std::endl;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
inline void tracePush(const char *name) { roctxRangePushA(name); }
|
|
||||||
inline void tracePop(const char *name) { roctxRangePop(); }
|
|
||||||
inline int traceStart(const char *name) { roctxRangeStart(name); }
|
|
||||||
inline void traceStop(int ID) { roctxRangeStop(ID); }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef GRID_TRACING_TIMER
|
|
||||||
class GridTracer {
|
|
||||||
public:
|
|
||||||
const char *name;
|
|
||||||
double elapsed;
|
|
||||||
GridTracer(const char* _name) {
|
|
||||||
name = _name;
|
|
||||||
elapsed=-usecond();
|
|
||||||
}
|
|
||||||
~GridTracer() {
|
|
||||||
elapsed+=usecond();
|
|
||||||
std::cout << GridLogTracing << name << " took " <<elapsed<< " us" <<std::endl;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
inline void tracePush(const char *name) { }
|
|
||||||
inline void tracePop(const char *name) { }
|
|
||||||
inline int traceStart(const char *name) { return 0; }
|
|
||||||
inline void traceStop(int ID) { }
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef GRID_TRACING_NONE
|
|
||||||
#define GRID_TRACE(name)
|
|
||||||
inline void tracePush(const char *name) { }
|
|
||||||
inline void tracePop(const char *name) { }
|
|
||||||
inline int traceStart(const char *name) { return 0; }
|
|
||||||
inline void traceStop(int ID) { }
|
|
||||||
#else
|
|
||||||
#define GRID_TRACE(name) GridTracer uniq_name_using_macros##__COUNTER__(name);
|
|
||||||
#endif
|
|
||||||
NAMESPACE_END(Grid);
|
|
@ -16,12 +16,8 @@
|
|||||||
|
|
||||||
#ifdef __NVCC__
|
#ifdef __NVCC__
|
||||||
#pragma push
|
#pragma push
|
||||||
#if (__CUDACC_VER_MAJOR__ >= 11) && (__CUDACC_VER_MINOR__ >= 5)
|
|
||||||
#pragma nv_diag_suppress declared_but_not_referenced // suppress "function was declared but never referenced warning"
|
|
||||||
#else
|
|
||||||
#pragma diag_suppress declared_but_not_referenced // suppress "function was declared but never referenced warning"
|
#pragma diag_suppress declared_but_not_referenced // suppress "function was declared but never referenced warning"
|
||||||
#endif
|
#endif
|
||||||
#endif
|
|
||||||
|
|
||||||
#include "pugixml.h"
|
#include "pugixml.h"
|
||||||
|
|
||||||
|
@ -451,20 +451,9 @@ template<class vobj> void pokeLorentz(vobj &lhs,const decltype(peekIndex<Lorentz
|
|||||||
// Fermion <-> propagator assignements
|
// Fermion <-> propagator assignements
|
||||||
//////////////////////////////////////////////
|
//////////////////////////////////////////////
|
||||||
//template <class Prop, class Ferm>
|
//template <class Prop, class Ferm>
|
||||||
#define FAST_FERM_TO_PROP
|
|
||||||
template <class Fimpl>
|
template <class Fimpl>
|
||||||
void FermToProp(typename Fimpl::PropagatorField &p, const typename Fimpl::FermionField &f, const int s, const int c)
|
void FermToProp(typename Fimpl::PropagatorField &p, const typename Fimpl::FermionField &f, const int s, const int c)
|
||||||
{
|
{
|
||||||
#ifdef FAST_FERM_TO_PROP
|
|
||||||
autoView(p_v,p,CpuWrite);
|
|
||||||
autoView(f_v,f,CpuRead);
|
|
||||||
thread_for(idx,p_v.oSites(),{
|
|
||||||
for(int ss = 0; ss < Ns; ++ss) {
|
|
||||||
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
|
||||||
p_v[idx]()(ss,s)(cc,c) = f_v[idx]()(ss)(cc); // Propagator sink index is LEFT, suitable for left mult by gauge link (e.g.)
|
|
||||||
}}
|
|
||||||
});
|
|
||||||
#else
|
|
||||||
for(int j = 0; j < Ns; ++j)
|
for(int j = 0; j < Ns; ++j)
|
||||||
{
|
{
|
||||||
auto pjs = peekSpin(p, j, s);
|
auto pjs = peekSpin(p, j, s);
|
||||||
@ -476,23 +465,12 @@ void FermToProp(typename Fimpl::PropagatorField &p, const typename Fimpl::Fermio
|
|||||||
}
|
}
|
||||||
pokeSpin(p, pjs, j, s);
|
pokeSpin(p, pjs, j, s);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//template <class Prop, class Ferm>
|
//template <class Prop, class Ferm>
|
||||||
template <class Fimpl>
|
template <class Fimpl>
|
||||||
void PropToFerm(typename Fimpl::FermionField &f, const typename Fimpl::PropagatorField &p, const int s, const int c)
|
void PropToFerm(typename Fimpl::FermionField &f, const typename Fimpl::PropagatorField &p, const int s, const int c)
|
||||||
{
|
{
|
||||||
#ifdef FAST_FERM_TO_PROP
|
|
||||||
autoView(p_v,p,CpuRead);
|
|
||||||
autoView(f_v,f,CpuWrite);
|
|
||||||
thread_for(idx,p_v.oSites(),{
|
|
||||||
for(int ss = 0; ss < Ns; ++ss) {
|
|
||||||
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
|
||||||
f_v[idx]()(ss)(cc) = p_v[idx]()(ss,s)(cc,c); // LEFT index is copied across for s,c right index
|
|
||||||
}}
|
|
||||||
});
|
|
||||||
#else
|
|
||||||
for(int j = 0; j < Ns; ++j)
|
for(int j = 0; j < Ns; ++j)
|
||||||
{
|
{
|
||||||
auto pjs = peekSpin(p, j, s);
|
auto pjs = peekSpin(p, j, s);
|
||||||
@ -504,7 +482,6 @@ void PropToFerm(typename Fimpl::FermionField &f, const typename Fimpl::Propagato
|
|||||||
}
|
}
|
||||||
pokeSpin(f, fj, j);
|
pokeSpin(f, fj, j);
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
//////////////////////////////////////////////
|
//////////////////////////////////////////////
|
||||||
|
@ -40,6 +40,29 @@ class Action
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
bool is_smeared = false;
|
bool is_smeared = false;
|
||||||
|
RealD deriv_norm_sum;
|
||||||
|
RealD deriv_max_sum;
|
||||||
|
int deriv_num;
|
||||||
|
RealD deriv_us;
|
||||||
|
RealD S_us;
|
||||||
|
RealD refresh_us;
|
||||||
|
void reset_timer(void) {
|
||||||
|
deriv_us = S_us = refresh_us = 0.0;
|
||||||
|
deriv_num=0;
|
||||||
|
deriv_norm_sum = deriv_max_sum=0.0;
|
||||||
|
}
|
||||||
|
void deriv_log(RealD nrm, RealD max) { deriv_max_sum+=max; deriv_norm_sum+=nrm; deriv_num++;}
|
||||||
|
RealD deriv_max_average(void) { return deriv_max_sum/deriv_num; };
|
||||||
|
RealD deriv_norm_average(void) { return deriv_norm_sum/deriv_num; };
|
||||||
|
RealD deriv_timer(void) { return deriv_us; };
|
||||||
|
RealD S_timer(void) { return deriv_us; };
|
||||||
|
RealD refresh_timer(void) { return deriv_us; };
|
||||||
|
void deriv_timer_start(void) { deriv_us-=usecond(); }
|
||||||
|
void deriv_timer_stop(void) { deriv_us+=usecond(); }
|
||||||
|
void refresh_timer_start(void) { refresh_us-=usecond(); }
|
||||||
|
void refresh_timer_stop(void) { refresh_us+=usecond(); }
|
||||||
|
void S_timer_start(void) { S_us-=usecond(); }
|
||||||
|
void S_timer_stop(void) { S_us+=usecond(); }
|
||||||
// Heatbath?
|
// Heatbath?
|
||||||
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
|
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) = 0; // refresh pseudofermions
|
||||||
virtual RealD S(const GaugeField& U) = 0; // evaluate the action
|
virtual RealD S(const GaugeField& U) = 0; // evaluate the action
|
||||||
|
@ -37,6 +37,10 @@ NAMESPACE_CHECK(ActionSet);
|
|||||||
#include <Grid/qcd/action/ActionParams.h>
|
#include <Grid/qcd/action/ActionParams.h>
|
||||||
NAMESPACE_CHECK(ActionParams);
|
NAMESPACE_CHECK(ActionParams);
|
||||||
|
|
||||||
|
#include <Grid/qcd/action/filters/MomentumFilter.h>
|
||||||
|
#include <Grid/qcd/action/filters/DirichletFilter.h>
|
||||||
|
#include <Grid/qcd/action/filters/DDHMCFilter.h>
|
||||||
|
|
||||||
////////////////////////////////////////////
|
////////////////////////////////////////////
|
||||||
// Gauge Actions
|
// Gauge Actions
|
||||||
////////////////////////////////////////////
|
////////////////////////////////////////////
|
||||||
|
@ -68,16 +68,9 @@ public:
|
|||||||
///////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////
|
||||||
// Support for MADWF tricks
|
// Support for MADWF tricks
|
||||||
///////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////
|
||||||
RealD Mass(void) { return (mass_plus + mass_minus) / 2.0; };
|
RealD Mass(void) { return mass; };
|
||||||
RealD MassPlus(void) { return mass_plus; };
|
|
||||||
RealD MassMinus(void) { return mass_minus; };
|
|
||||||
void SetMass(RealD _mass) {
|
void SetMass(RealD _mass) {
|
||||||
mass_plus=mass_minus=_mass;
|
mass=_mass;
|
||||||
SetCoefficientsInternal(_zolo_hi,_gamma,_b,_c); // Reset coeffs
|
|
||||||
} ;
|
|
||||||
void SetMass(RealD _mass_plus, RealD _mass_minus) {
|
|
||||||
mass_plus=_mass_plus;
|
|
||||||
mass_minus=_mass_minus;
|
|
||||||
SetCoefficientsInternal(_zolo_hi,_gamma,_b,_c); // Reset coeffs
|
SetCoefficientsInternal(_zolo_hi,_gamma,_b,_c); // Reset coeffs
|
||||||
} ;
|
} ;
|
||||||
void P(const FermionField &psi, FermionField &chi);
|
void P(const FermionField &psi, FermionField &chi);
|
||||||
@ -115,7 +108,7 @@ public:
|
|||||||
void MeooeDag5D (const FermionField &in, FermionField &out);
|
void MeooeDag5D (const FermionField &in, FermionField &out);
|
||||||
|
|
||||||
// protected:
|
// protected:
|
||||||
RealD mass_plus, mass_minus;
|
RealD mass;
|
||||||
|
|
||||||
// Save arguments to SetCoefficientsInternal
|
// Save arguments to SetCoefficientsInternal
|
||||||
Vector<Coeff_t> _gamma;
|
Vector<Coeff_t> _gamma;
|
||||||
|
@ -1,333 +0,0 @@
|
|||||||
/*************************************************************************************
|
|
||||||
|
|
||||||
Grid physics library, www.github.com/paboyle/Grid
|
|
||||||
|
|
||||||
Source file: ./lib/qcd/action/fermion/WilsonCloverFermionImplementation.h
|
|
||||||
|
|
||||||
Copyright (C) 2017 - 2022
|
|
||||||
|
|
||||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
|
||||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
|
||||||
Author: Mattia Bruno <mattia.bruno@cern.ch>
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation; either version 2 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License along
|
|
||||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
|
||||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
|
||||||
|
|
||||||
See the full license in the file "LICENSE" in the top level distribution directory
|
|
||||||
*************************************************************************************/
|
|
||||||
/* END LEGAL */
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <Grid/Grid.h>
|
|
||||||
#include <Grid/qcd/spin/Dirac.h>
|
|
||||||
#include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
|
|
||||||
|
|
||||||
////////////////////////////////////////////
|
|
||||||
// Standard Clover
|
|
||||||
// (4+m0) + csw * clover_term
|
|
||||||
// Exp Clover
|
|
||||||
// (4+m0) * exp(csw/(4+m0) clover_term)
|
|
||||||
// = (4+m0) + csw * clover_term + ...
|
|
||||||
////////////////////////////////////////////
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
|
||||||
|
|
||||||
|
|
||||||
//////////////////////////////////
|
|
||||||
// Generic Standard Clover
|
|
||||||
//////////////////////////////////
|
|
||||||
|
|
||||||
template<class Impl>
|
|
||||||
class CloverHelpers: public WilsonCloverHelpers<Impl> {
|
|
||||||
public:
|
|
||||||
|
|
||||||
INHERIT_IMPL_TYPES(Impl);
|
|
||||||
INHERIT_CLOVER_TYPES(Impl);
|
|
||||||
|
|
||||||
typedef WilsonCloverHelpers<Impl> Helpers;
|
|
||||||
|
|
||||||
static void Instantiate(CloverField& CloverTerm, CloverField& CloverTermInv, RealD csw_t, RealD diag_mass) {
|
|
||||||
GridBase *grid = CloverTerm.Grid();
|
|
||||||
CloverTerm += diag_mass;
|
|
||||||
|
|
||||||
int lvol = grid->lSites();
|
|
||||||
int DimRep = Impl::Dimension;
|
|
||||||
{
|
|
||||||
autoView(CTv,CloverTerm,CpuRead);
|
|
||||||
autoView(CTIv,CloverTermInv,CpuWrite);
|
|
||||||
thread_for(site, lvol, {
|
|
||||||
Coordinate lcoor;
|
|
||||||
grid->LocalIndexToLocalCoor(site, lcoor);
|
|
||||||
Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
|
|
||||||
Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
|
|
||||||
typename SiteClover::scalar_object Qx = Zero(), Qxinv = Zero();
|
|
||||||
peekLocalSite(Qx, CTv, lcoor);
|
|
||||||
|
|
||||||
for (int j = 0; j < Ns; j++)
|
|
||||||
for (int k = 0; k < Ns; k++)
|
|
||||||
for (int a = 0; a < DimRep; a++)
|
|
||||||
for (int b = 0; b < DimRep; b++){
|
|
||||||
auto zz = Qx()(j, k)(a, b);
|
|
||||||
EigenCloverOp(a + j * DimRep, b + k * DimRep) = std::complex<double>(zz);
|
|
||||||
}
|
|
||||||
|
|
||||||
EigenInvCloverOp = EigenCloverOp.inverse();
|
|
||||||
for (int j = 0; j < Ns; j++)
|
|
||||||
for (int k = 0; k < Ns; k++)
|
|
||||||
for (int a = 0; a < DimRep; a++)
|
|
||||||
for (int b = 0; b < DimRep; b++)
|
|
||||||
Qxinv()(j, k)(a, b) = EigenInvCloverOp(a + j * DimRep, b + k * DimRep);
|
|
||||||
pokeLocalSite(Qxinv, CTIv, lcoor);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu) {
|
|
||||||
return Helpers::Cmunu(U, lambda, mu, nu);
|
|
||||||
}
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
//////////////////////////////////
|
|
||||||
// Generic Exp Clover
|
|
||||||
//////////////////////////////////
|
|
||||||
|
|
||||||
template<class Impl>
|
|
||||||
class ExpCloverHelpers: public WilsonCloverHelpers<Impl> {
|
|
||||||
public:
|
|
||||||
|
|
||||||
INHERIT_IMPL_TYPES(Impl);
|
|
||||||
INHERIT_CLOVER_TYPES(Impl);
|
|
||||||
|
|
||||||
template <typename vtype> using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;
|
|
||||||
typedef WilsonCloverHelpers<Impl> Helpers;
|
|
||||||
|
|
||||||
// Can this be avoided?
|
|
||||||
static void IdentityTimesC(const CloverField& in, RealD c) {
|
|
||||||
int DimRep = Impl::Dimension;
|
|
||||||
|
|
||||||
autoView(in_v, in, AcceleratorWrite);
|
|
||||||
|
|
||||||
accelerator_for(ss, in.Grid()->oSites(), 1, {
|
|
||||||
for (int sa=0; sa<Ns; sa++)
|
|
||||||
for (int ca=0; ca<DimRep; ca++)
|
|
||||||
in_v[ss]()(sa,sa)(ca,ca) = c;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
static int getNMAX(RealD prec, RealD R) {
|
|
||||||
/* compute stop condition for exponential */
|
|
||||||
int NMAX=1;
|
|
||||||
RealD cond=R*R/2.;
|
|
||||||
|
|
||||||
while (cond*std::exp(R)>prec) {
|
|
||||||
NMAX++;
|
|
||||||
cond*=R/(double)(NMAX+1);
|
|
||||||
}
|
|
||||||
return NMAX;
|
|
||||||
}
  static int getNMAX(Lattice<iImplClover<vComplexD>> &t, RealD R) {return getNMAX(1e-12,R);}
  static int getNMAX(Lattice<iImplClover<vComplexF>> &t, RealD R) {return getNMAX(1e-6,R);}

  static void Instantiate(CloverField& Clover, CloverField& CloverInv, RealD csw_t, RealD diag_mass) {
    GridBase* grid = Clover.Grid();
    CloverField ExpClover(grid);

    int NMAX = getNMAX(Clover, 3.*csw_t/diag_mass);

    Clover *= (1.0/diag_mass);

    // Taylor expansion, slow but generic
    // Horner scheme: a0 + a1 x + a2 x^2 + .. = a0 + x (a1 + x(...))
    // qN = cN
    // qn = cn + qn+1 X
    std::vector<RealD> cn(NMAX+1);
    cn[0] = 1.0;
    for (int i=1; i<=NMAX; i++)
      cn[i] = cn[i-1] / RealD(i);

    ExpClover = Zero();
    IdentityTimesC(ExpClover, cn[NMAX]);
    for (int i=NMAX-1; i>=0; i--)
      ExpClover = ExpClover * Clover + cn[i];

    // prepare inverse
    CloverInv = (-1.0)*Clover;

    Clover = ExpClover * diag_mass;

    ExpClover = Zero();
    IdentityTimesC(ExpClover, cn[NMAX]);
    for (int i=NMAX-1; i>=0; i--)
      ExpClover = ExpClover * CloverInv + cn[i];

    CloverInv = ExpClover * (1.0/diag_mass);
  }

  static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu) {
    assert(0);
    return lambda;
  }

};
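// A minimal scalar sketch of the Horner recursion used in Instantiate above;
// illustrative only (ExpTaylorHornerSketch is not part of the Grid API). With
// a plain double standing in for the rescaled clover matrix, the same cn
// recursion reproduces the truncated Taylor series of exp(x).
inline double ExpTaylorHornerSketch(double x, int NMAX) {
  std::vector<RealD> cn(NMAX+1);
  cn[0] = 1.0;
  for (int i=1; i<=NMAX; i++) cn[i] = cn[i-1] / RealD(i);  // cn[i] = 1/i!
  double q = cn[NMAX];                                     // qN = cN
  for (int i=NMAX-1; i>=0; i--) q = q*x + cn[i];           // qn = cn + x*qn+1
  return q;                        // matches std::exp(x) to ~prec from getNMAX
}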
//////////////////////////////////
// Compact Standard Clover
//////////////////////////////////

template<class Impl>
class CompactCloverHelpers: public CompactWilsonCloverHelpers<Impl>,
                            public WilsonCloverHelpers<Impl> {
public:

  INHERIT_IMPL_TYPES(Impl);
  INHERIT_CLOVER_TYPES(Impl);
  INHERIT_COMPACT_CLOVER_TYPES(Impl);

  typedef WilsonCloverHelpers<Impl> Helpers;
  typedef CompactWilsonCloverHelpers<Impl> CompactHelpers;

  static void InstantiateClover(CloverField& Clover, CloverField& CloverInv, RealD csw_t, RealD diag_mass) {
    Clover += diag_mass;
  }

  static void InvertClover(CloverField& InvClover,
                           const CloverDiagonalField& diagonal,
                           const CloverTriangleField& triangle,
                           CloverDiagonalField& diagonalInv,
                           CloverTriangleField& triangleInv,
                           bool fixedBoundaries) {

    CompactHelpers::Invert(diagonal, triangle, diagonalInv, triangleInv);
  }

  // TODO: implement Cmunu for better performance with the compact layout;
  // do it in WilsonCloverHelpers.h -> CompactWilsonCloverHelpers rather than here
  static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu) {
    return Helpers::Cmunu(U, lambda, mu, nu);
  }
};

//////////////////////////////////
// Compact Exp Clover
//////////////////////////////////

template<class Impl>
class CompactExpCloverHelpers: public CompactWilsonCloverHelpers<Impl> {
public:

  INHERIT_IMPL_TYPES(Impl);
  INHERIT_CLOVER_TYPES(Impl);
  INHERIT_COMPACT_CLOVER_TYPES(Impl);

  template <typename vtype> using iImplClover = iScalar<iMatrix<iMatrix<vtype, Impl::Dimension>, Ns>>;
  typedef CompactWilsonCloverHelpers<Impl> CompactHelpers;

  // Can this be avoided?
  static void IdentityTimesC(const CloverField& in, RealD c) {
    int DimRep = Impl::Dimension;

    autoView(in_v, in, AcceleratorWrite);

    accelerator_for(ss, in.Grid()->oSites(), 1, {
      for (int sa=0; sa<Ns; sa++)
        for (int ca=0; ca<DimRep; ca++)
          in_v[ss]()(sa,sa)(ca,ca) = c;
    });
  }

  static int getNMAX(RealD prec, RealD R) {
    /* compute stop condition for exponential */
    int NMAX=1;
    RealD cond=R*R/2.;

    while (cond*std::exp(R)>prec) {
      NMAX++;
      cond*=R/(double)(NMAX+1);
    }
    return NMAX;
  }

  static int getNMAX(Lattice<iImplClover<vComplexD>> &t, RealD R) {return getNMAX(1e-12,R);}
  static int getNMAX(Lattice<iImplClover<vComplexF>> &t, RealD R) {return getNMAX(1e-6,R);}

  static void InstantiateClover(CloverField& Clover, CloverField& CloverInv, RealD csw_t, RealD diag_mass) {

    GridBase* grid = Clover.Grid();
    CloverField ExpClover(grid);

    int NMAX = getNMAX(Clover, 3.*csw_t/diag_mass);

    Clover *= (1.0/diag_mass);

    // Taylor expansion, slow but generic
    // Horner scheme: a0 + a1 x + a2 x^2 + .. = a0 + x (a1 + x(...))
    // qN = cN
    // qn = cn + qn+1 X
    std::vector<RealD> cn(NMAX+1);
    cn[0] = 1.0;
    for (int i=1; i<=NMAX; i++)
      cn[i] = cn[i-1] / RealD(i);

    ExpClover = Zero();
    IdentityTimesC(ExpClover, cn[NMAX]);
    for (int i=NMAX-1; i>=0; i--)
      ExpClover = ExpClover * Clover + cn[i];

    // prepare inverse
    CloverInv = (-1.0)*Clover;

    Clover = ExpClover * diag_mass;

    ExpClover = Zero();
    IdentityTimesC(ExpClover, cn[NMAX]);
    for (int i=NMAX-1; i>=0; i--)
      ExpClover = ExpClover * CloverInv + cn[i];

    CloverInv = ExpClover * (1.0/diag_mass);
  }

  static void InvertClover(CloverField& InvClover,
                           const CloverDiagonalField& diagonal,
                           const CloverTriangleField& triangle,
                           CloverDiagonalField& diagonalInv,
                           CloverTriangleField& triangleInv,
                           bool fixedBoundaries) {

    if (fixedBoundaries)
    {
      CompactHelpers::Invert(diagonal, triangle, diagonalInv, triangleInv);
    }
    else
    {
      CompactHelpers::ConvertLayout(InvClover, diagonalInv, triangleInv);
    }
  }
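  // Note on the branch in InvertClover above: for (anti-)periodic boundaries
  // InstantiateClover has already produced the exact inverse, since
  //   Clover    = exp( X/diag_mass) * diag_mass
  //   CloverInv = exp(-X/diag_mass) / diag_mass
  // and X commutes with -X, so Clover * CloverInv = 1 and only the layout
  // conversion is needed. Fixed boundaries modify the clover term afterwards,
  // which breaks this identity and forces the explicit numerical inversion.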
  static GaugeLinkField Cmunu(std::vector<GaugeLinkField> &U, GaugeLinkField &lambda, int mu, int nu) {
    assert(0);
    return lambda;
  }

};


NAMESPACE_END(Grid);
@@ -31,7 +31,6 @@
 #include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
 #include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
-#include <Grid/qcd/action/fermion/CloverHelpers.h>
 
 NAMESPACE_BEGIN(Grid);
 
@@ -86,7 +85,7 @@ NAMESPACE_BEGIN(Grid);
 //  + (2 * 1 + 4 * 1/2) triangle parts = 4 triangle parts = 60 complex words per site
 //  = 84 complex words per site
 
-template<class Impl, class CloverHelpers>
+template<class Impl>
 class CompactWilsonCloverFermion : public WilsonFermion<Impl>,
                                    public WilsonCloverHelpers<Impl>,
                                    public CompactWilsonCloverHelpers<Impl> {
@@ -225,7 +224,7 @@ public:
   RealD csw_t;
   RealD cF;
 
-  bool fixedBoundaries;
+  bool open_boundaries;
 
   CloverDiagonalField Diagonal,    DiagonalEven,    DiagonalOdd;
   CloverDiagonalField DiagonalInv, DiagonalInvEven, DiagonalInvOdd;
@@ -138,52 +138,38 @@ typedef WilsonTMFermion<WilsonImplF> WilsonTMFermionF;
 typedef WilsonTMFermion<WilsonImplD> WilsonTMFermionD;
 
 // Clover fermions
-template <typename WImpl> using WilsonClover = WilsonCloverFermion<WImpl, CloverHelpers<WImpl>>;
-template <typename WImpl> using WilsonExpClover = WilsonCloverFermion<WImpl, ExpCloverHelpers<WImpl>>;
-
-typedef WilsonClover<WilsonImplR> WilsonCloverFermionR;
-typedef WilsonClover<WilsonImplF> WilsonCloverFermionF;
-typedef WilsonClover<WilsonImplD> WilsonCloverFermionD;
-
-typedef WilsonExpClover<WilsonImplR> WilsonExpCloverFermionR;
-typedef WilsonExpClover<WilsonImplF> WilsonExpCloverFermionF;
-typedef WilsonExpClover<WilsonImplD> WilsonExpCloverFermionD;
-
-typedef WilsonClover<WilsonAdjImplR> WilsonCloverAdjFermionR;
-typedef WilsonClover<WilsonAdjImplF> WilsonCloverAdjFermionF;
-typedef WilsonClover<WilsonAdjImplD> WilsonCloverAdjFermionD;
-
-typedef WilsonClover<WilsonTwoIndexSymmetricImplR> WilsonCloverTwoIndexSymmetricFermionR;
-typedef WilsonClover<WilsonTwoIndexSymmetricImplF> WilsonCloverTwoIndexSymmetricFermionF;
-typedef WilsonClover<WilsonTwoIndexSymmetricImplD> WilsonCloverTwoIndexSymmetricFermionD;
-
-typedef WilsonClover<WilsonTwoIndexAntiSymmetricImplR> WilsonCloverTwoIndexAntiSymmetricFermionR;
-typedef WilsonClover<WilsonTwoIndexAntiSymmetricImplF> WilsonCloverTwoIndexAntiSymmetricFermionF;
-typedef WilsonClover<WilsonTwoIndexAntiSymmetricImplD> WilsonCloverTwoIndexAntiSymmetricFermionD;
+typedef WilsonCloverFermion<WilsonImplR> WilsonCloverFermionR;
+typedef WilsonCloverFermion<WilsonImplF> WilsonCloverFermionF;
+typedef WilsonCloverFermion<WilsonImplD> WilsonCloverFermionD;
+
+typedef WilsonCloverFermion<WilsonAdjImplR> WilsonCloverAdjFermionR;
+typedef WilsonCloverFermion<WilsonAdjImplF> WilsonCloverAdjFermionF;
+typedef WilsonCloverFermion<WilsonAdjImplD> WilsonCloverAdjFermionD;
+
+typedef WilsonCloverFermion<WilsonTwoIndexSymmetricImplR> WilsonCloverTwoIndexSymmetricFermionR;
+typedef WilsonCloverFermion<WilsonTwoIndexSymmetricImplF> WilsonCloverTwoIndexSymmetricFermionF;
+typedef WilsonCloverFermion<WilsonTwoIndexSymmetricImplD> WilsonCloverTwoIndexSymmetricFermionD;
+
+typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> WilsonCloverTwoIndexAntiSymmetricFermionR;
+typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> WilsonCloverTwoIndexAntiSymmetricFermionF;
+typedef WilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> WilsonCloverTwoIndexAntiSymmetricFermionD;
 
 // Compact Clover fermions
-template <typename WImpl> using CompactWilsonClover = CompactWilsonCloverFermion<WImpl, CompactCloverHelpers<WImpl>>;
-template <typename WImpl> using CompactWilsonExpClover = CompactWilsonCloverFermion<WImpl, CompactExpCloverHelpers<WImpl>>;
-
-typedef CompactWilsonClover<WilsonImplR> CompactWilsonCloverFermionR;
-typedef CompactWilsonClover<WilsonImplF> CompactWilsonCloverFermionF;
-typedef CompactWilsonClover<WilsonImplD> CompactWilsonCloverFermionD;
-
-typedef CompactWilsonExpClover<WilsonImplR> CompactWilsonExpCloverFermionR;
-typedef CompactWilsonExpClover<WilsonImplF> CompactWilsonExpCloverFermionF;
-typedef CompactWilsonExpClover<WilsonImplD> CompactWilsonExpCloverFermionD;
-
-typedef CompactWilsonClover<WilsonAdjImplR> CompactWilsonCloverAdjFermionR;
-typedef CompactWilsonClover<WilsonAdjImplF> CompactWilsonCloverAdjFermionF;
-typedef CompactWilsonClover<WilsonAdjImplD> CompactWilsonCloverAdjFermionD;
-
-typedef CompactWilsonClover<WilsonTwoIndexSymmetricImplR> CompactWilsonCloverTwoIndexSymmetricFermionR;
-typedef CompactWilsonClover<WilsonTwoIndexSymmetricImplF> CompactWilsonCloverTwoIndexSymmetricFermionF;
-typedef CompactWilsonClover<WilsonTwoIndexSymmetricImplD> CompactWilsonCloverTwoIndexSymmetricFermionD;
-
-typedef CompactWilsonClover<WilsonTwoIndexAntiSymmetricImplR> CompactWilsonCloverTwoIndexAntiSymmetricFermionR;
-typedef CompactWilsonClover<WilsonTwoIndexAntiSymmetricImplF> CompactWilsonCloverTwoIndexAntiSymmetricFermionF;
-typedef CompactWilsonClover<WilsonTwoIndexAntiSymmetricImplD> CompactWilsonCloverTwoIndexAntiSymmetricFermionD;
+typedef CompactWilsonCloverFermion<WilsonImplR> CompactWilsonCloverFermionR;
+typedef CompactWilsonCloverFermion<WilsonImplF> CompactWilsonCloverFermionF;
+typedef CompactWilsonCloverFermion<WilsonImplD> CompactWilsonCloverFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonAdjImplR> CompactWilsonCloverAdjFermionR;
+typedef CompactWilsonCloverFermion<WilsonAdjImplF> CompactWilsonCloverAdjFermionF;
+typedef CompactWilsonCloverFermion<WilsonAdjImplD> CompactWilsonCloverAdjFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplR> CompactWilsonCloverTwoIndexSymmetricFermionR;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplF> CompactWilsonCloverTwoIndexSymmetricFermionF;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexSymmetricImplD> CompactWilsonCloverTwoIndexSymmetricFermionD;
+
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplR> CompactWilsonCloverTwoIndexAntiSymmetricFermionR;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplF> CompactWilsonCloverTwoIndexAntiSymmetricFermionF;
+typedef CompactWilsonCloverFermion<WilsonTwoIndexAntiSymmetricImplD> CompactWilsonCloverTwoIndexAntiSymmetricFermionD;
 
 // Domain Wall fermions
 typedef DomainWallFermion<WilsonImplR> DomainWallFermionR;
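Aside on the left-hand column of the typedef hunk above: it funnels every clover variant through a helper-parameterised alias template. A minimal sketch of that pattern, with hypothetical short names rather than Grid's own:

  // policy class injected as the second template parameter
  template<class Impl, class Helpers> class CloverOpSketch { /* action body */ };
  template<class Impl> class StdHelpersSketch {};
  template<class Impl> class ExpHelpersSketch {};

  // one alias per clover flavour; each implementation then needs one typedef
  template<typename WImpl> using StdCloverSketch = CloverOpSketch<WImpl, StdHelpersSketch<WImpl>>;
  template<typename WImpl> using ExpCloverSketch = CloverOpSketch<WImpl, ExpHelpersSketch<WImpl>>;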
@@ -49,6 +49,8 @@ public:
 
   virtual FermionField &tmp(void) = 0;
 
+  virtual void DirichletBlock(Coordinate & _Block) { assert(0); };
+
   GridBase * Grid(void)         { return FermionGrid(); };   // this is all the linalg routines need to know
   GridBase * RedBlackGrid(void) { return FermionRedBlackGrid(); };
 
@@ -32,7 +32,6 @@
 #include <Grid/qcd/action/fermion/WilsonCloverTypes.h>
 #include <Grid/qcd/action/fermion/WilsonCloverHelpers.h>
-#include <Grid/qcd/action/fermion/CloverHelpers.h>
 
 NAMESPACE_BEGIN(Grid);
 
@@ -52,7 +51,7 @@ NAMESPACE_BEGIN(Grid);
 // csw_r = csw_t to recover the isotropic version
 //////////////////////////////////////////////////////////////////
 
-template<class Impl, class CloverHelpers>
+template <class Impl>
 class WilsonCloverFermion : public WilsonFermion<Impl>,
                             public WilsonCloverHelpers<Impl>
 {
@@ -209,8 +209,6 @@ public:
 };
 
 
-////////////////////////////////////////////////////////
-
 template<class Impl> class CompactWilsonCloverHelpers {
 public:
 
@@ -47,6 +47,8 @@ class CompactWilsonCloverTypes {
 public:
   INHERIT_IMPL_TYPES(Impl);
 
+  static_assert(Nd == 4 && Nc == 3 && Ns == 4 && Impl::Dimension == 3, "Wrong dimensions");
+
   static constexpr int Nred      = Nc * Nhs;   // 6
   static constexpr int Nblock    = Nhs;        // 2
   static constexpr int Ndiagonal = Nred;       // 6
@@ -117,19 +117,19 @@ public:
     typedef decltype(coalescedRead(*in))   sobj;
     typedef decltype(coalescedRead(*out0)) hsobj;
 
-    constexpr unsigned int Nsimd = vobj::Nsimd();
+    unsigned int Nsimd = vobj::Nsimd();
     unsigned int mask = Nsimd >> (type + 1);
     int lane = acceleratorSIMTlane(Nsimd);
     int j0 = lane &(~mask); // inner coor zero
     int j1 = lane |(mask) ; // inner coor one
-    const vobj *vp0 = &in[k];               // out0[j] = merge low bit of type from in[k] and in[m]
-    const vobj *vp1 = &in[m];               // out1[j] = merge hi  bit of type from in[k] and in[m]
-    const vobj *vp = (lane&mask) ? vp1:vp0; // if my lane has high bit take vp1, low bit take vp0
-    auto sa = coalescedRead(*vp,j0);        // lane to read for out 0, NB 50% read coalescing
-    auto sb = coalescedRead(*vp,j1);        // lane to read for out 1
+    const vobj *vp0 = &in[k];
+    const vobj *vp1 = &in[m];
+    const vobj *vp = (lane&mask) ? vp1:vp0;
+    auto sa = coalescedRead(*vp,j0);
+    auto sb = coalescedRead(*vp,j1);
     hsobj psa, psb;
-    projector::Proj(psa,sa,mu,dag);         // spin project the result0
-    projector::Proj(psb,sb,mu,dag);         // spin project the result1
+    projector::Proj(psa,sa,mu,dag);
+    projector::Proj(psb,sb,mu,dag);
     coalescedWrite(out0[j],psa);
     coalescedWrite(out1[j],psb);
 #else
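For concreteness, the lane arithmetic in the hunk above pairs SIMD lanes across a single coordinate bit; a worked example, assuming Nsimd = 8 and type = 1:

  // mask = 8 >> (1+1) = 2 (binary 010)
  // lane 6 (110): j0 = lane & ~mask = 4 (100),  j1 = lane | mask = 6 (110)
  // lane 4 (100): j0 = 4,                       j1 = 6
  // Lanes 4 and 6 therefore form a pair: both read element j0 for out0 and j1
  // for out1, and (lane & mask) picks vp1 on the high lane of the pair, vp0
  // on the low lane.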
@@ -75,6 +75,10 @@ public:
   FermionField _tmp;
   FermionField &tmp(void) { return _tmp; }
 
+  int Dirichlet;
+  Coordinate Block;
+
+  /********** Deprecate timers **********/
   void Report(void);
   void ZeroCounters(void);
   double DhopCalls;
@@ -174,6 +178,17 @@ public:
                   GridRedBlackCartesian &FourDimRedBlackGrid,
                   double _M5,const ImplParams &p= ImplParams());
 
+  virtual void DirichletBlock(Coordinate & block)
+  {
+    assert(block.size()==Nd+1);
+    if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
+      Dirichlet = 1;
+      Block = block;
+      Stencil.DirichletBlock(block);
+      StencilEven.DirichletBlock(block);
+      StencilOdd.DirichletBlock(block);
+    }
+  }
   // Constructors
   /*
   WilsonFermion5D(int simd,
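A usage sketch for the DirichletBlock plumbing added on the right-hand side; the instance name and block extents here are illustrative only:

  // Block carries Nd+1 entries: the fifth (s) direction first, then x,y,z,t;
  // any nonzero entry flags the operator as Dirichlet and masks its stencils.
  Coordinate block({0,4,4,4,8});
  DWF.DirichletBlock(block);   // DWF: some WilsonFermion5D-derived action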
@@ -47,7 +47,7 @@ CayleyFermion5D<Impl>::CayleyFermion5D(GaugeField &_Umu,
   FiveDimRedBlackGrid,
   FourDimGrid,
   FourDimRedBlackGrid,_M5,p),
-  mass_plus(_mass), mass_minus(_mass)
+  mass(_mass)
 {
 }
 
@@ -209,8 +209,8 @@ void CayleyFermion5D<Impl>::M5D   (const FermionField &psi, FermionField &chi)
 {
   int Ls=this->Ls;
   Vector<Coeff_t> diag (Ls,1.0);
-  Vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1]=mass_minus;
-  Vector<Coeff_t> lower(Ls,-1.0); lower[0]   =mass_plus;
+  Vector<Coeff_t> upper(Ls,-1.0); upper[Ls-1]=mass;
+  Vector<Coeff_t> lower(Ls,-1.0); lower[0]   =mass;
   M5D(psi,chi,chi,lower,diag,upper);
 }
 template<class Impl>
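The diag/upper/lower vectors in the M5D hunk above assemble a tridiagonal matrix in the fifth dimension whose corner entries carry the mass term; schematically, for Ls = 4 and suppressing the P_\pm spin projectors that accompany the off-diagonal hops,

  M_5 = \begin{pmatrix} 1 & -1 & 0 & m \\ -1 & 1 & -1 & 0 \\ 0 & -1 & 1 & -1 \\ m & 0 & -1 & 1 \end{pmatrix}

with m = mass on the right-hand side (and mass_plus, mass_minus on the two corners in the left-hand column).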
@@ -220,8 +220,8 @@ void CayleyFermion5D<Impl>::Meooe5D    (const FermionField &psi, FermionField &D
   Vector<Coeff_t> diag = bs;
   Vector<Coeff_t> upper= cs;
   Vector<Coeff_t> lower= cs;
-  upper[Ls-1]=-mass_minus*upper[Ls-1];
-  lower[0]   =-mass_plus*lower[0];
+  upper[Ls-1]=-mass*upper[Ls-1];
+  lower[0]   =-mass*lower[0];
   M5D(psi,psi,Din,lower,diag,upper);
 }
 // FIXME Redunant with the above routine; check this and eliminate
@@ -235,8 +235,8 @@ template<class Impl> void CayleyFermion5D<Impl>::Meo5D     (const FermionField &
     upper[i]=-ceo[i];
     lower[i]=-ceo[i];
   }
-  upper[Ls-1]=-mass_minus*upper[Ls-1];
-  lower[0]   =-mass_plus*lower[0];
+  upper[Ls-1]=-mass*upper[Ls-1];
+  lower[0]   =-mass*lower[0];
   M5D(psi,psi,chi,lower,diag,upper);
 }
 template<class Impl>
@@ -250,8 +250,8 @@ void CayleyFermion5D<Impl>::Mooee       (const FermionField &psi, FermionField &
     upper[i]=-cee[i];
     lower[i]=-cee[i];
   }
-  upper[Ls-1]=-mass_minus*upper[Ls-1];
-  lower[0]   =-mass_plus*lower[0];
+  upper[Ls-1]=-mass*upper[Ls-1];
+  lower[0]   =-mass*lower[0];
   M5D(psi,psi,chi,lower,diag,upper);
 }
 template<class Impl>
@@ -266,9 +266,9 @@ void CayleyFermion5D<Impl>::MooeeDag    (const FermionField &psi, FermionField &
     // Assemble the 5d matrix
     if ( s==0 ) {
       upper[s] = -cee[s+1] ;
-      lower[s] = mass_minus*cee[Ls-1];
+      lower[s] = mass*cee[Ls-1];
     } else if ( s==(Ls-1)) {
-      upper[s] = mass_plus*cee[0];
+      upper[s] = mass*cee[0];
       lower[s] = -cee[s-1];
     } else {
       upper[s]=-cee[s+1];
@@ -291,8 +291,8 @@ void CayleyFermion5D<Impl>::M5Ddag (const FermionField &psi, FermionField &chi)
   Vector<Coeff_t> diag(Ls,1.0);
   Vector<Coeff_t> upper(Ls,-1.0);
   Vector<Coeff_t> lower(Ls,-1.0);
-  upper[Ls-1]=-mass_plus*upper[Ls-1];
-  lower[0]   =-mass_minus*lower[0];
+  upper[Ls-1]=-mass*upper[Ls-1];
+  lower[0]   =-mass*lower[0];
   M5Ddag(psi,chi,chi,lower,diag,upper);
 }
 
@@ -307,9 +307,9 @@ void CayleyFermion5D<Impl>::MeooeDag5D    (const FermionField &psi, FermionField
   for (int s=0;s<Ls;s++){
     if ( s== 0 ) {
       upper[s] = cs[s+1];
-      lower[s] =-mass_minus*cs[Ls-1];
+      lower[s] =-mass*cs[Ls-1];
     } else if ( s==(Ls-1) ) {
-      upper[s] =-mass_plus*cs[0];
+      upper[s] =-mass*cs[0];
       lower[s] = cs[s-1];
     } else {
       upper[s] = cs[s+1];
@@ -552,7 +552,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,Vector<Coeff_t
 
       lee[i] =-cee[i+1]/bee[i]; // sub-diag entry on the ith column
 
-      leem[i]=mass_minus*cee[Ls-1]/bee[0];
+      leem[i]=mass*cee[Ls-1]/bee[0];
       for(int j=0;j<i;j++) {
         assert(bee[j+1]!=Coeff_t(0.0));
         leem[i]*= aee[j]/bee[j+1];
@@ -560,7 +560,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,Vector<Coeff_t
 
       uee[i] =-aee[i]/bee[i];   // up-diag entry on the ith row
 
-      ueem[i]=mass_plus;
+      ueem[i]=mass;
      for(int j=1;j<=i;j++) ueem[i]*= cee[j]/bee[j];
      ueem[i]*= aee[0]/bee[0];
 
@@ -573,7 +573,7 @@ void CayleyFermion5D<Impl>::SetCoefficientsInternal(RealD zolo_hi,Vector<Coeff_t
     }
 
     {
-      Coeff_t delta_d=mass_minus*cee[Ls-1];
+      Coeff_t delta_d=mass*cee[Ls-1];
       for(int j=0;j<Ls-1;j++) {
         assert(bee[j] != Coeff_t(0.0));
         delta_d *= cee[j]/bee[j];
@@ -642,10 +642,6 @@ void CayleyFermion5D<Impl>::ContractConservedCurrent( PropagatorField &q_in_1,
                                                       Current curr_type,
                                                       unsigned int mu)
 {
-  assert(mass_plus == mass_minus);
-  RealD mass = mass_plus;
-
 #if (!defined(GRID_HIP))
   Gamma::Algebra Gmu [] = {
     Gamma::Algebra::GammaX,
@@ -781,8 +777,6 @@ void CayleyFermion5D<Impl>::SeqConservedCurrent(PropagatorField &q_in,
   assert(mu>=0);
   assert(mu<Nd);
 
-  assert(mass_plus == mass_minus);
-  RealD mass = mass_plus;
-
 #if 0
   int tshift = (mu == Nd-1) ? 1 : 0;
@@ -32,10 +32,9 @@
 #include <Grid/qcd/spin/Dirac.h>
 #include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
 
 
 NAMESPACE_BEGIN(Grid);
-template<class Impl, class CloverHelpers>
-CompactWilsonCloverFermion<Impl, CloverHelpers>::CompactWilsonCloverFermion(GaugeField& _Umu,
+template<class Impl>
+CompactWilsonCloverFermion<Impl>::CompactWilsonCloverFermion(GaugeField& _Umu,
                                                             GridCartesian& Fgrid,
                                                             GridRedBlackCartesian& Hgrid,
                                                             const RealD _mass,
@@ -48,7 +47,7 @@ CompactWilsonCloverFermion<Impl, CloverHelpers>::CompactWilsonCloverFermion(Gaug
   , csw_r(_csw_r)
   , csw_t(_csw_t)
   , cF(_cF)
-  , fixedBoundaries(impl_p.boundary_phases[Nd-1] == 0.0)
+  , open_boundaries(impl_p.boundary_phases[Nd-1] == 0.0)
   , Diagonal(&Fgrid),     Triangle(&Fgrid)
   , DiagonalEven(&Hgrid), TriangleEven(&Hgrid)
   , DiagonalOdd(&Hgrid),  TriangleOdd(&Hgrid)
@@ -59,85 +58,80 @@ CompactWilsonCloverFermion<Impl, CloverHelpers>::CompactWilsonCloverFermion(Gaug
   , BoundaryMask(&Fgrid)
   , BoundaryMaskEven(&Hgrid), BoundaryMaskOdd(&Hgrid)
 {
-  assert(Nd == 4 && Nc == 3 && Ns == 4 && Impl::Dimension == 3);
-
   csw_r *= 0.5;
   csw_t *= 0.5;
   if (clover_anisotropy.isAnisotropic)
     csw_r /= clover_anisotropy.xi_0;
 
   ImportGauge(_Umu);
-  if (fixedBoundaries) {
-    this->BoundaryMaskEven.Checkerboard() = Even;
-    this->BoundaryMaskOdd.Checkerboard() = Odd;
+  if (open_boundaries)
     CompactHelpers::SetupMasks(this->BoundaryMask, this->BoundaryMaskEven, this->BoundaryMaskOdd);
-  }
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::Dhop(const FermionField& in, FermionField& out, int dag) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::Dhop(const FermionField& in, FermionField& out, int dag) {
   WilsonBase::Dhop(in, out, dag);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::DhopOE(const FermionField& in, FermionField& out, int dag) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::DhopOE(const FermionField& in, FermionField& out, int dag) {
   WilsonBase::DhopOE(in, out, dag);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::DhopEO(const FermionField& in, FermionField& out, int dag) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::DhopEO(const FermionField& in, FermionField& out, int dag) {
   WilsonBase::DhopEO(in, out, dag);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::DhopDir(const FermionField& in, FermionField& out, int dir, int disp) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::DhopDir(const FermionField& in, FermionField& out, int dir, int disp) {
   WilsonBase::DhopDir(in, out, dir, disp);
-  if(this->fixedBoundaries) ApplyBoundaryMask(out);
+  if(this->open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::DhopDirAll(const FermionField& in, std::vector<FermionField>& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::DhopDirAll(const FermionField& in, std::vector<FermionField>& out) {
   WilsonBase::DhopDirAll(in, out);
-  if(this->fixedBoundaries) {
+  if(this->open_boundaries) {
     for(auto& o : out) ApplyBoundaryMask(o);
   }
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::M(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::M(const FermionField& in, FermionField& out) {
   out.Checkerboard() = in.Checkerboard();
   WilsonBase::Dhop(in, out, DaggerNo); // call base to save applying bc
   Mooee(in, Tmp);
   axpy(out, 1.0, out, Tmp);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::Mdag(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::Mdag(const FermionField& in, FermionField& out) {
  out.Checkerboard() = in.Checkerboard();
   WilsonBase::Dhop(in, out, DaggerYes); // call base to save applying bc
   MooeeDag(in, Tmp);
   axpy(out, 1.0, out, Tmp);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::Meooe(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::Meooe(const FermionField& in, FermionField& out) {
   WilsonBase::Meooe(in, out);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MeooeDag(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MeooeDag(const FermionField& in, FermionField& out) {
   WilsonBase::MeooeDag(in, out);
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::Mooee(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::Mooee(const FermionField& in, FermionField& out) {
   if(in.Grid()->_isCheckerBoarded) {
     if(in.Checkerboard() == Odd) {
       MooeeInternal(in, out, DiagonalOdd, TriangleOdd);
@@ -147,16 +141,16 @@ void CompactWilsonCloverFermion<Impl, CloverHelpers>::Mooee(const FermionField&
   } else {
     MooeeInternal(in, out, Diagonal, Triangle);
   }
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeDag(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MooeeDag(const FermionField& in, FermionField& out) {
   Mooee(in, out); // blocks are hermitian
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeInv(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MooeeInv(const FermionField& in, FermionField& out) {
   if(in.Grid()->_isCheckerBoarded) {
     if(in.Checkerboard() == Odd) {
       MooeeInternal(in, out, DiagonalInvOdd, TriangleInvOdd);
@@ -166,27 +160,27 @@ void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeInv(const FermionFiel
   } else {
     MooeeInternal(in, out, DiagonalInv, TriangleInv);
   }
-  if(fixedBoundaries) ApplyBoundaryMask(out);
+  if(open_boundaries) ApplyBoundaryMask(out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeInvDag(const FermionField& in, FermionField& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MooeeInvDag(const FermionField& in, FermionField& out) {
   MooeeInv(in, out); // blocks are hermitian
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::Mdir(const FermionField& in, FermionField& out, int dir, int disp) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::Mdir(const FermionField& in, FermionField& out, int dir, int disp) {
   DhopDir(in, out, dir, disp);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MdirAll(const FermionField& in, std::vector<FermionField>& out) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MdirAll(const FermionField& in, std::vector<FermionField>& out) {
   DhopDirAll(in, out);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) {
-  assert(!fixedBoundaries); // TODO check for changes required for open bc
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MDeriv(GaugeField& force, const FermionField& X, const FermionField& Y, int dag) {
+  assert(!open_boundaries); // TODO check for changes required for open bc
 
   // NOTE: code copied from original clover term
   conformable(X.Grid(), Y.Grid());
@@ -257,7 +251,7 @@ void CompactWilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField& force,
       }
       PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
       Impl::TraceSpinImpl(lambda, Slambda);                   // traceSpin ok
-      force_mu -= factor*CloverHelpers::Cmunu(U, lambda, mu, nu); // checked
+      force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu);       // checked
       count++;
     }
 
@@ -267,18 +261,18 @@ void CompactWilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField& force,
   force += clover_force;
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MooDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
   assert(0);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MeeDeriv(GaugeField& mat, const FermionField& U, const FermionField& V, int dag) {
   assert(0);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeInternal(const FermionField& in,
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::MooeeInternal(const FermionField& in,
                                                      FermionField& out,
                                                      const CloverDiagonalField& diagonal,
                                                      const CloverTriangleField& triangle) {
@@ -291,8 +285,8 @@ void CompactWilsonCloverFermion<Impl, CloverHelpers>::MooeeInternal(const Fermio
   CompactHelpers::MooeeKernel(diagonal.oSites(), 1, in, out, diagonal, triangle);
 }
 
-template<class Impl, class CloverHelpers>
-void CompactWilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeField& _Umu) {
+template<class Impl>
+void CompactWilsonCloverFermion<Impl>::ImportGauge(const GaugeField& _Umu) {
   // NOTE: parts copied from original implementation
 
   // Import gauge into base class
@@ -305,7 +299,6 @@ CompactWilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeFie
   GridBase* grid = _Umu.Grid();
   typename Impl::GaugeLinkField Bx(grid), By(grid), Bz(grid), Ex(grid), Ey(grid), Ez(grid);
   CloverField TmpOriginal(grid);
-  CloverField TmpInverse(grid);
 
   // Compute the field strength terms mu>nu
   double t2 = usecond();
@@ -325,30 +318,22 @@ CompactWilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeFie
   TmpOriginal += Helpers::fillCloverXT(Ex) * csw_t;
   TmpOriginal += Helpers::fillCloverYT(Ey) * csw_t;
   TmpOriginal += Helpers::fillCloverZT(Ez) * csw_t;
-  // Instantiate the clover term
-  // - In case of the standard clover the mass term is added
-  // - In case of the exponential clover the clover term is exponentiated
-  double t4 = usecond();
-  CloverHelpers::InstantiateClover(TmpOriginal, TmpInverse, csw_t, this->diag_mass);
+  TmpOriginal += this->diag_mass;
 
   // Convert the data layout of the clover term
-  double t5 = usecond();
+  double t4 = usecond();
   CompactHelpers::ConvertLayout(TmpOriginal, Diagonal, Triangle);
 
-  // Modify the clover term at the temporal boundaries in case of open boundary conditions
-  double t6 = usecond();
-  if(fixedBoundaries) CompactHelpers::ModifyBoundaries(Diagonal, Triangle, csw_t, cF, this->diag_mass);
+  // Possible modify the boundary values
+  double t5 = usecond();
+  if(open_boundaries) CompactHelpers::ModifyBoundaries(Diagonal, Triangle, csw_t, cF, this->diag_mass);
 
-  // Invert the Clover term
-  // In case of the exponential clover with (anti-)periodic boundary conditions exp(-Clover) saved
-  // in TmpInverse can be used. In all other cases the clover term has to be explictly inverted.
-  // TODO: For now this inversion is explictly done on the CPU
-  double t7 = usecond();
-  CloverHelpers::InvertClover(TmpInverse, Diagonal, Triangle, DiagonalInv, TriangleInv, fixedBoundaries);
+  // Invert the clover term in the improved layout
+  double t6 = usecond();
+  CompactHelpers::Invert(Diagonal, Triangle, DiagonalInv, TriangleInv);
 
   // Fill the remaining clover fields
-  double t8 = usecond();
+  double t7 = usecond();
   pickCheckerboard(Even, DiagonalEven, Diagonal);
   pickCheckerboard(Even, TriangleEven, Triangle);
   pickCheckerboard(Odd,  DiagonalOdd,  Diagonal);
@@ -359,19 +344,20 @@ CompactWilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeFie
   pickCheckerboard(Odd, TriangleInvOdd, TriangleInv);
 
   // Report timings
-  double t9 = usecond();
-  std::cout << GridLogDebug << "CompactWilsonCloverFermion::ImportGauge timings:" << std::endl;
-  std::cout << GridLogDebug << "WilsonFermion::Importgauge = " << (t1 - t0) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "allocations                = " << (t2 - t1) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "field strength             = " << (t3 - t2) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "fill clover                = " << (t4 - t3) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "instantiate clover         = " << (t5 - t4) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "convert layout             = " << (t6 - t5) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "modify boundaries          = " << (t7 - t6) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "invert clover              = " << (t8 - t7) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "pick cbs                   = " << (t9 - t8) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "total                      = " << (t9 - t0) / 1e6 << std::endl;
+  double t8 = usecond();
+#if 0
+  std::cout << GridLogMessage << "CompactWilsonCloverFermion::ImportGauge timings:"
+            << " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
+            << ", allocations = "               << (t2 - t1) / 1e6
+            << ", field strength = "            << (t3 - t2) / 1e6
+            << ", fill clover = "               << (t4 - t3) / 1e6
+            << ", convert = "                   << (t5 - t4) / 1e6
+            << ", boundaries = "                << (t6 - t5) / 1e6
+            << ", inversions = "                << (t7 - t6) / 1e6
+            << ", pick cbs = "                  << (t8 - t7) / 1e6
+            << ", total = "                     << (t8 - t0) / 1e6
+            << std::endl;
+#endif
 }
 
 NAMESPACE_END(Grid);
@@ -34,8 +34,8 @@
 
 NAMESPACE_BEGIN(Grid);
 
-template<class Impl, class CloverHelpers>
-WilsonCloverFermion<Impl, CloverHelpers>::WilsonCloverFermion(GaugeField& _Umu,
+template<class Impl>
+WilsonCloverFermion<Impl>::WilsonCloverFermion(GaugeField& _Umu,
                                                GridCartesian& Fgrid,
                                                GridRedBlackCartesian& Hgrid,
                                                const RealD _mass,
@@ -74,8 +74,8 @@ WilsonCloverFermion<Impl, CloverHelpers>::WilsonCloverFermion(GaugeField&
 }
 
 // *NOT* EO
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::M(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::M(const FermionField &in, FermionField &out)
 {
   FermionField temp(out.Grid());
 
@@ -89,8 +89,8 @@ void WilsonCloverFermion<Impl, CloverHelpers>::M(const FermionField &in, Fermion
   out += temp;
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::Mdag(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::Mdag(const FermionField &in, FermionField &out)
 {
   FermionField temp(out.Grid());
 
@@ -104,8 +104,8 @@ void WilsonCloverFermion<Impl, CloverHelpers>::Mdag(const FermionField &in, Ferm
   out += temp;
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeField &_Umu)
+template <class Impl>
+void WilsonCloverFermion<Impl>::ImportGauge(const GaugeField &_Umu)
 {
   double t0 = usecond();
   WilsonFermion<Impl>::ImportGauge(_Umu);
@@ -131,11 +131,47 @@ void WilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeField &_Um
   CloverTerm += Helpers::fillCloverXT(Ex) * csw_t;
   CloverTerm += Helpers::fillCloverYT(Ey) * csw_t;
   CloverTerm += Helpers::fillCloverZT(Ez) * csw_t;
+  CloverTerm += diag_mass;
 
   double t4 = usecond();
-  CloverHelpers::Instantiate(CloverTerm, CloverTermInv, csw_t, this->diag_mass);
+  int lvol = _Umu.Grid()->lSites();
+  int DimRep = Impl::Dimension;
 
   double t5 = usecond();
+  {
+    autoView(CTv,CloverTerm,CpuRead);
+    autoView(CTIv,CloverTermInv,CpuWrite);
+    thread_for(site, lvol, {
+      Coordinate lcoor;
+      grid->LocalIndexToLocalCoor(site, lcoor);
+      Eigen::MatrixXcd EigenCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      Eigen::MatrixXcd EigenInvCloverOp = Eigen::MatrixXcd::Zero(Ns * DimRep, Ns * DimRep);
+      typename SiteClover::scalar_object Qx = Zero(), Qxinv = Zero();
+      peekLocalSite(Qx, CTv, lcoor);
+      //if (csw!=0){
+      for (int j = 0; j < Ns; j++)
+        for (int k = 0; k < Ns; k++)
+          for (int a = 0; a < DimRep; a++)
+            for (int b = 0; b < DimRep; b++){
+              auto zz = Qx()(j, k)(a, b);
+              EigenCloverOp(a + j * DimRep, b + k * DimRep) = std::complex<double>(zz);
+            }
+      //  if (site==0) std::cout << "site =" << site << "\n" << EigenCloverOp << std::endl;
+
+      EigenInvCloverOp = EigenCloverOp.inverse();
+      //std::cout << EigenInvCloverOp << std::endl;
+      for (int j = 0; j < Ns; j++)
+        for (int k = 0; k < Ns; k++)
+          for (int a = 0; a < DimRep; a++)
+            for (int b = 0; b < DimRep; b++)
+              Qxinv()(j, k)(a, b) = EigenInvCloverOp(a + j * DimRep, b + k * DimRep);
+      //  if (site==0) std::cout << "site =" << site << "\n" << EigenInvCloverOp << std::endl;
+      // }
+      pokeLocalSite(Qxinv, CTIv, lcoor);
+    });
+  }
+
+  double t6 = usecond();
   // Separate the even and odd parts
   pickCheckerboard(Even, CloverTermEven, CloverTerm);
   pickCheckerboard(Odd,  CloverTermOdd,  CloverTerm);
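The right-hand column above inverts the clover term site by site on the host with Eigen; a self-contained sketch of one such (Ns*DimRep) x (Ns*DimRep) inversion, with placeholder matrix entries:

  #include <Eigen/Dense>
  #include <complex>
  #include <iostream>

  int main() {
    const int Ns = 4, DimRep = 3, N = Ns * DimRep;           // flat index: a + j*DimRep
    Eigen::MatrixXcd Q = Eigen::MatrixXcd::Identity(N, N);   // stand-in for one site's Qx
    Q(0, 1) = std::complex<double>(0.0,  0.25);              // one off-diagonal entry
    Q(1, 0) = std::complex<double>(0.0, -0.25);              // keep it hermitian
    Eigen::MatrixXcd Qinv = Q.inverse();                     // the same call the thread_for uses
    std::cout << (Q * Qinv - Eigen::MatrixXcd::Identity(N, N)).norm() << std::endl;
    return 0;
  }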
@@ -148,44 +184,48 @@ void WilsonCloverFermion<Impl, CloverHelpers>::ImportGauge(const GaugeField &_Um
 
   pickCheckerboard(Even, CloverTermInvDagEven, adj(CloverTermInv));
   pickCheckerboard(Odd,  CloverTermInvDagOdd,  adj(CloverTermInv));
-  double t6 = usecond();
+  double t7 = usecond();
 
-  std::cout << GridLogDebug << "WilsonCloverFermion::ImportGauge timings:" << std::endl;
-  std::cout << GridLogDebug << "WilsonFermion::Importgauge = " << (t1 - t0) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "allocations                = " << (t2 - t1) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "field strength             = " << (t3 - t2) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "fill clover                = " << (t4 - t3) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "instantiation              = " << (t5 - t4) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "pick cbs                   = " << (t6 - t5) / 1e6 << std::endl;
-  std::cout << GridLogDebug << "total                      = " << (t6 - t0) / 1e6 << std::endl;
+#if 0
+  std::cout << GridLogMessage << "WilsonCloverFermion::ImportGauge timings:"
+            << " WilsonFermion::Importgauge = " << (t1 - t0) / 1e6
+            << ", allocations = "               << (t2 - t1) / 1e6
+            << ", field strength = "            << (t3 - t2) / 1e6
+            << ", fill clover = "               << (t4 - t3) / 1e6
+            << ", misc = "                      << (t5 - t4) / 1e6
+            << ", inversions = "                << (t6 - t5) / 1e6
+            << ", pick cbs = "                  << (t7 - t6) / 1e6
+            << ", total = "                     << (t7 - t0) / 1e6
+            << std::endl;
+#endif
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::Mooee(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::Mooee(const FermionField &in, FermionField &out)
 {
   this->MooeeInternal(in, out, DaggerNo, InverseNo);
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MooeeDag(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MooeeDag(const FermionField &in, FermionField &out)
 {
   this->MooeeInternal(in, out, DaggerYes, InverseNo);
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MooeeInv(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MooeeInv(const FermionField &in, FermionField &out)
 {
   this->MooeeInternal(in, out, DaggerNo, InverseYes);
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MooeeInvDag(const FermionField &in, FermionField &out)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &out)
 {
   this->MooeeInternal(in, out, DaggerYes, InverseYes);
 }
 
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MooeeInternal(const FermionField &in, FermionField &out, int dag, int inv)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MooeeInternal(const FermionField &in, FermionField &out, int dag, int inv)
 {
   out.Checkerboard() = in.Checkerboard();
   CloverField *Clover;
@@ -238,8 +278,8 @@ void WilsonCloverFermion<Impl, CloverHelpers>::MooeeInternal(const FermionField
 } // MooeeInternal
 
 // Derivative parts unpreconditioned pseudofermions
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MDeriv(GaugeField &force, const FermionField &X, const FermionField &Y, int dag)
 {
   conformable(X.Grid(), Y.Grid());
   conformable(X.Grid(), force.Grid());
@@ -309,7 +349,7 @@ void WilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField &force, const F
       }
       PropagatorField Slambda = Gamma(sigma[count]) * Lambda; // sigma checked
       Impl::TraceSpinImpl(lambda, Slambda);                   // traceSpin ok
-      force_mu -= factor*CloverHelpers::Cmunu(U, lambda, mu, nu); // checked
+      force_mu -= factor*Helpers::Cmunu(U, lambda, mu, nu);       // checked
       count++;
     }
 
@@ -320,15 +360,15 @@ void WilsonCloverFermion<Impl, CloverHelpers>::MDeriv(GaugeField &force, const F
 }
 
 // Derivative parts
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MooDeriv(GaugeField &mat, const FermionField &X, const FermionField &Y, int dag)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MooDeriv(GaugeField &mat, const FermionField &X, const FermionField &Y, int dag)
 {
   assert(0);
 }
 
 // Derivative parts
-template<class Impl, class CloverHelpers>
-void WilsonCloverFermion<Impl, CloverHelpers>::MeeDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
+template <class Impl>
+void WilsonCloverFermion<Impl>::MeeDeriv(GaugeField &mat, const FermionField &U, const FermionField &V, int dag)
 {
   assert(0); // not implemented yet
 }
@@ -60,7 +60,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
   UmuOdd (_FourDimRedBlackGrid),
   Lebesgue(_FourDimGrid),
   LebesgueEvenOdd(_FourDimRedBlackGrid),
-  _tmp(&FiveDimRedBlackGrid)
+  _tmp(&FiveDimRedBlackGrid),
+  Dirichlet(0)
 {
   // some assertions
   assert(FiveDimGrid._ndimension==5);
@@ -218,6 +219,14 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)
 {
   GaugeField HUmu(_Umu.Grid());
   HUmu = _Umu*(-0.5);
+  if ( Dirichlet ) {
+    std::cout << GridLogMessage << " Dirichlet BCs 5d " << Block << std::endl;
+    Coordinate GaugeBlock(Nd);
+    for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
+    std::cout << GridLogMessage << " Dirichlet BCs 4d " << GaugeBlock << std::endl;
+    DirichletFilter<GaugeField> Filter(GaugeBlock);
+    Filter.applyFilter(HUmu);
+  }
   Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
   pickCheckerboard(Even,UmuEven,Umu);
   pickCheckerboard(Odd ,UmuOdd,Umu);
@@ -4,13 +4,12 @@ Grid physics library, www.github.com/paboyle/Grid
 
 Source file: ./lib/qcd/action/fermion/WilsonFermion.cc
 
-Copyright (C) 2022
+Copyright (C) 2015
 
 Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
 Author: paboyle <paboyle@ph.ed.ac.uk>
-Author: Fabian Joswig <fabian.joswig@ed.ac.uk>
 
 This program is free software; you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
@@ -600,47 +599,11 @@ void WilsonFermion<Impl>::ContractConservedCurrent(PropagatorField &q_in_1,
                                                    Current curr_type,
                                                    unsigned int mu)
 {
-  if(curr_type != Current::Vector)
-  {
-    std::cout << GridLogError << "Only the conserved vector current is implemented so far." << std::endl;
-    exit(1);
-  }
-
   Gamma g5(Gamma::Algebra::Gamma5);
   conformable(_grid, q_in_1.Grid());
   conformable(_grid, q_in_2.Grid());
   conformable(_grid, q_out.Grid());
-  auto UGrid= this->GaugeGrid();
+  assert(0);
 
-  PropagatorField tmp_shifted(UGrid);
-  PropagatorField g5Lg5(UGrid);
-  PropagatorField R(UGrid);
-  PropagatorField gmuR(UGrid);
-
-  Gamma::Algebra Gmu [] = {
-    Gamma::Algebra::GammaX,
-    Gamma::Algebra::GammaY,
-    Gamma::Algebra::GammaZ,
-    Gamma::Algebra::GammaT,
-  };
-  Gamma gmu=Gamma(Gmu[mu]);
-
-  g5Lg5=g5*q_in_1*g5;
|
|
||||||
tmp_shifted=Cshift(q_in_2,mu,1);
|
|
||||||
Impl::multLinkField(R,this->Umu,tmp_shifted,mu);
|
|
||||||
gmuR=gmu*R;
|
|
||||||
|
|
||||||
q_out=adj(g5Lg5)*R;
|
|
||||||
q_out-=adj(g5Lg5)*gmuR;
|
|
||||||
|
|
||||||
tmp_shifted=Cshift(q_in_1,mu,1);
|
|
||||||
Impl::multLinkField(g5Lg5,this->Umu,tmp_shifted,mu);
|
|
||||||
g5Lg5=g5*g5Lg5*g5;
|
|
||||||
R=q_in_2;
|
|
||||||
gmuR=gmu*R;
|
|
||||||
|
|
||||||
q_out-=adj(g5Lg5)*R;
|
|
||||||
q_out-=adj(g5Lg5)*gmuR;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -654,51 +617,9 @@ void WilsonFermion<Impl>::SeqConservedCurrent(PropagatorField &q_in,
|
|||||||
unsigned int tmax,
|
unsigned int tmax,
|
||||||
ComplexField &lattice_cmplx)
|
ComplexField &lattice_cmplx)
|
||||||
{
|
{
|
||||||
if(curr_type != Current::Vector)
|
|
||||||
{
|
|
||||||
std::cout << GridLogError << "Only the conserved vector current is implemented so far." << std::endl;
|
|
||||||
exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
int tshift = (mu == Nd-1) ? 1 : 0;
|
|
||||||
unsigned int LLt = GridDefaultLatt()[Tp];
|
|
||||||
conformable(_grid, q_in.Grid());
|
conformable(_grid, q_in.Grid());
|
||||||
conformable(_grid, q_out.Grid());
|
conformable(_grid, q_out.Grid());
|
||||||
auto UGrid= this->GaugeGrid();
|
assert(0);
|
||||||
|
|
||||||
PropagatorField tmp(UGrid);
|
|
||||||
PropagatorField Utmp(UGrid);
|
|
||||||
PropagatorField L(UGrid);
|
|
||||||
PropagatorField zz (UGrid);
|
|
||||||
zz=Zero();
|
|
||||||
LatticeInteger lcoor(UGrid); LatticeCoordinate(lcoor,Nd-1);
|
|
||||||
|
|
||||||
Gamma::Algebra Gmu [] = {
|
|
||||||
Gamma::Algebra::GammaX,
|
|
||||||
Gamma::Algebra::GammaY,
|
|
||||||
Gamma::Algebra::GammaZ,
|
|
||||||
Gamma::Algebra::GammaT,
|
|
||||||
};
|
|
||||||
Gamma gmu=Gamma(Gmu[mu]);
|
|
||||||
|
|
||||||
tmp = Cshift(q_in,mu,1);
|
|
||||||
Impl::multLinkField(Utmp,this->Umu,tmp,mu);
|
|
||||||
tmp = ( Utmp*lattice_cmplx - gmu*Utmp*lattice_cmplx ); // Forward hop
|
|
||||||
tmp = where((lcoor>=tmin),tmp,zz); // Mask the time
|
|
||||||
q_out = where((lcoor<=tmax),tmp,zz); // Position of current complicated
|
|
||||||
|
|
||||||
tmp = q_in *lattice_cmplx;
|
|
||||||
tmp = Cshift(tmp,mu,-1);
|
|
||||||
Impl::multLinkField(Utmp,this->Umu,tmp,mu+Nd); // Adjoint link
|
|
||||||
tmp = -( Utmp + gmu*Utmp );
|
|
||||||
// Mask the time
|
|
||||||
if (tmax == LLt - 1 && tshift == 1){ // quick fix to include timeslice 0 if tmax + tshift is over the last timeslice
|
|
||||||
unsigned int t0 = 0;
|
|
||||||
tmp = where(((lcoor==t0) || (lcoor>=tmin+tshift)),tmp,zz);
|
|
||||||
} else {
|
|
||||||
tmp = where((lcoor>=tmin+tshift),tmp,zz);
|
|
||||||
}
|
|
||||||
q_out+= where((lcoor<=tmax+tshift),tmp,zz); // Position of current complicated
|
|
||||||
}
|
}
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
@ -498,7 +498,6 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
|
|||||||
#ifndef GRID_CUDA
|
#ifndef GRID_CUDA
|
||||||
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDag); return;}
|
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDag); return;}
|
||||||
#endif
|
#endif
|
||||||
acceleratorFenceComputeStream();
|
|
||||||
} else if( interior ) {
|
} else if( interior ) {
|
||||||
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagInt); return;}
|
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagInt); return;}
|
||||||
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagInt); return;}
|
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagInt); return;}
|
||||||
@ -506,13 +505,11 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
|
|||||||
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagInt); return;}
|
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagInt); return;}
|
||||||
#endif
|
#endif
|
||||||
} else if( exterior ) {
|
} else if( exterior ) {
|
||||||
acceleratorFenceComputeStream();
|
|
||||||
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
|
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDagExt); return;}
|
||||||
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt); return;}
|
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDagExt); return;}
|
||||||
#ifndef GRID_CUDA
|
#ifndef GRID_CUDA
|
||||||
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagExt); return;}
|
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteDagExt); return;}
|
||||||
#endif
|
#endif
|
||||||
acceleratorFenceComputeStream();
|
|
||||||
}
|
}
|
||||||
assert(0 && " Kernel optimisation case not covered ");
|
assert(0 && " Kernel optimisation case not covered ");
|
||||||
}
|
}
|
||||||
|
@ -9,7 +9,6 @@
|
|||||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||||
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
Author: Daniel Richtmann <daniel.richtmann@gmail.com>
|
||||||
Author: Mattia Bruno <mattia.bruno@cern.ch>
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
This program is free software; you can redistribute it and/or modify
|
||||||
it under the terms of the GNU General Public License as published by
|
it under the terms of the GNU General Public License as published by
|
||||||
@ -33,12 +32,10 @@
|
|||||||
#include <Grid/qcd/spin/Dirac.h>
|
#include <Grid/qcd/spin/Dirac.h>
|
||||||
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
|
#include <Grid/qcd/action/fermion/CompactWilsonCloverFermion.h>
|
||||||
#include <Grid/qcd/action/fermion/implementation/CompactWilsonCloverFermionImplementation.h>
|
#include <Grid/qcd/action/fermion/implementation/CompactWilsonCloverFermionImplementation.h>
|
||||||
#include <Grid/qcd/action/fermion/CloverHelpers.h>
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
#include "impl.h"
|
#include "impl.h"
|
||||||
template class CompactWilsonCloverFermion<IMPLEMENTATION, CompactCloverHelpers<IMPLEMENTATION>>;
|
template class CompactWilsonCloverFermion<IMPLEMENTATION>;
|
||||||
template class CompactWilsonCloverFermion<IMPLEMENTATION, CompactExpCloverHelpers<IMPLEMENTATION>>;
|
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -8,7 +8,6 @@
|
|||||||
|
|
||||||
Author: paboyle <paboyle@ph.ed.ac.uk>
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
Author: Guido Cossu <guido.cossu@ed.ac.uk>
|
||||||
Author: Mattia Bruno <mattia.bruno@cern.ch>
|
|
||||||
|
|
||||||
This program is free software; you can redistribute it and/or modify
|
This program is free software; you can redistribute it and/or modify
|
||||||
it under the terms of the GNU General Public License as published by
|
it under the terms of the GNU General Public License as published by
|
||||||
@ -32,12 +31,10 @@
|
|||||||
#include <Grid/qcd/spin/Dirac.h>
|
#include <Grid/qcd/spin/Dirac.h>
|
||||||
#include <Grid/qcd/action/fermion/WilsonCloverFermion.h>
|
#include <Grid/qcd/action/fermion/WilsonCloverFermion.h>
|
||||||
#include <Grid/qcd/action/fermion/implementation/WilsonCloverFermionImplementation.h>
|
#include <Grid/qcd/action/fermion/implementation/WilsonCloverFermionImplementation.h>
|
||||||
#include <Grid/qcd/action/fermion/CloverHelpers.h>
|
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
#include "impl.h"
|
#include "impl.h"
|
||||||
template class WilsonCloverFermion<IMPLEMENTATION, CloverHelpers<IMPLEMENTATION>>;
|
template class WilsonCloverFermion<IMPLEMENTATION>;
|
||||||
template class WilsonCloverFermion<IMPLEMENTATION, ExpCloverHelpers<IMPLEMENTATION>>;
|
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -1 +0,0 @@
|
|||||||
../WilsonKernelsInstantiation.cc.master
|
|
@ -0,0 +1,51 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/action/fermion/WilsonKernels.cc
|
||||||
|
|
||||||
|
Copyright (C) 2015, 2020
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
|
||||||
|
Author: paboyle <paboyle@ph.ed.ac.uk>
|
||||||
|
Author: Nils Meyer <nils.meyer@ur.de> Regensburg University
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#include <Grid/qcd/action/fermion/FermionCore.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsImplementation.h>
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsHandImplementation.h>
|
||||||
|
|
||||||
|
#ifndef AVX512
|
||||||
|
#ifndef QPX
|
||||||
|
#ifndef A64FX
|
||||||
|
#ifndef A64FXFIXEDSIZE
|
||||||
|
#include <Grid/qcd/action/fermion/implementation/WilsonKernelsAsmImplementation.h>
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#include "impl.h"
|
||||||
|
template class WilsonKernels<IMPLEMENTATION>;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -18,10 +18,6 @@ WILSON_IMPL_LIST=" \
|
|||||||
GparityWilsonImplF \
|
GparityWilsonImplF \
|
||||||
GparityWilsonImplD "
|
GparityWilsonImplD "
|
||||||
|
|
||||||
COMPACT_WILSON_IMPL_LIST=" \
|
|
||||||
WilsonImplF \
|
|
||||||
WilsonImplD "
|
|
||||||
|
|
||||||
DWF_IMPL_LIST=" \
|
DWF_IMPL_LIST=" \
|
||||||
WilsonImplF \
|
WilsonImplF \
|
||||||
WilsonImplD \
|
WilsonImplD \
|
||||||
@ -44,7 +40,7 @@ EOF
|
|||||||
|
|
||||||
done
|
done
|
||||||
|
|
||||||
CC_LIST="WilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"
|
CC_LIST="WilsonCloverFermionInstantiation CompactWilsonCloverFermionInstantiation WilsonFermionInstantiation WilsonKernelsInstantiation WilsonTMFermionInstantiation"
|
||||||
|
|
||||||
for impl in $WILSON_IMPL_LIST
|
for impl in $WILSON_IMPL_LIST
|
||||||
do
|
do
|
||||||
@ -54,16 +50,6 @@ do
|
|||||||
done
|
done
|
||||||
done
|
done
|
||||||
|
|
||||||
CC_LIST="CompactWilsonCloverFermionInstantiation"
|
|
||||||
|
|
||||||
for impl in $COMPACT_WILSON_IMPL_LIST
|
|
||||||
do
|
|
||||||
for f in $CC_LIST
|
|
||||||
do
|
|
||||||
ln -f -s ../$f.cc.master $impl/$f$impl.cc
|
|
||||||
done
|
|
||||||
done
|
|
||||||
|
|
||||||
CC_LIST=" \
|
CC_LIST=" \
|
||||||
CayleyFermion5DInstantiation \
|
CayleyFermion5DInstantiation \
|
||||||
ContinuedFractionFermion5DInstantiation \
|
ContinuedFractionFermion5DInstantiation \
|
||||||
|
102
Grid/qcd/action/filters/DDHMCFilter.h
Normal file
102
Grid/qcd/action/filters/DDHMCFilter.h
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h
|
||||||
|
|
||||||
|
Copyright (C) 2015
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
//--------------------------------------------------------------------
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
// DDHMC filter with sub-block size B[mu]
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
template<typename GaugeField>
|
||||||
|
struct DDHMCFilter: public MomentumFilterBase<GaugeField>
|
||||||
|
{
|
||||||
|
Coordinate Block;
|
||||||
|
int Width;
|
||||||
|
|
||||||
|
DDHMCFilter(const Coordinate &_Block,int _Width=2): Block(_Block) { Width=_Width; }
|
||||||
|
|
||||||
|
void applyFilter(GaugeField &U) const override
|
||||||
|
{
|
||||||
|
GridBase *grid = U.Grid();
|
||||||
|
Coordinate Global=grid->GlobalDimensions();
|
||||||
|
GaugeField zzz(grid); zzz = Zero();
|
||||||
|
LatticeInteger coor(grid);
|
||||||
|
|
||||||
|
auto zzz_mu = PeekIndex<LorentzIndex>(zzz,0);
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
// Zero BDY layers
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
std::cout<<GridLogMessage<<" DDHMC Force Filter Block "<<Block<<" width " <<Width<<std::endl;
|
||||||
|
for(int mu=0;mu<Nd;mu++) {
|
||||||
|
|
||||||
|
Integer B1 = Block[mu];
|
||||||
|
if ( B1 && (B1 <= Global[mu]) ) {
|
||||||
|
LatticeCoordinate(coor,mu);
|
||||||
|
|
||||||
|
////////////////////////////////
|
||||||
|
// OmegaBar - zero all links contained in slice B-1,0 and
|
||||||
|
// mu links connecting to Omega
|
||||||
|
////////////////////////////////
|
||||||
|
if ( Width==1) {
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(0) ,zzz,U);
|
||||||
|
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
|
||||||
|
U_mu = where(mod(coor,B1)==Integer(B1-2),zzz_mu,U_mu);
|
||||||
|
PokeIndex<LorentzIndex>(U, U_mu, mu);
|
||||||
|
}
|
||||||
|
if ( Width==2) {
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(0) ,zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(1) ,zzz,U);
|
||||||
|
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
|
||||||
|
U_mu = where(mod(coor,B1)==Integer(B1-3),zzz_mu,U_mu);
|
||||||
|
PokeIndex<LorentzIndex>(U, U_mu, mu);
|
||||||
|
}
|
||||||
|
if ( Width==3) {
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-3),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-2),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(B1-1),zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(0) ,zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(1) ,zzz,U);
|
||||||
|
U = where(mod(coor,B1)==Integer(2) ,zzz,U);
|
||||||
|
auto U_mu = PeekIndex<LorentzIndex>(U,mu);
|
||||||
|
U_mu = where(mod(coor,B1)==Integer(B1-4),zzz_mu,U_mu);
|
||||||
|
PokeIndex<LorentzIndex>(U, U_mu, mu);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
||||||
|
|
71
Grid/qcd/action/filters/DirichletFilter.h
Normal file
71
Grid/qcd/action/filters/DirichletFilter.h
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h
|
||||||
|
|
||||||
|
Copyright (C) 2015
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution
|
||||||
|
directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
//--------------------------------------------------------------------
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
template<typename MomentaField>
|
||||||
|
struct DirichletFilter: public MomentumFilterBase<MomentaField>
|
||||||
|
{
|
||||||
|
typedef typename MomentaField::vector_type vector_type; //SIMD-vectorized complex type
|
||||||
|
typedef typename MomentaField::scalar_type scalar_type; //scalar complex type
|
||||||
|
|
||||||
|
typedef iScalar<iScalar<iScalar<vector_type> > > ScalarType; //complex phase for each site
|
||||||
|
|
||||||
|
Coordinate Block;
|
||||||
|
|
||||||
|
DirichletFilter(const Coordinate &_Block): Block(_Block){}
|
||||||
|
|
||||||
|
void applyFilter(MomentaField &P) const override
|
||||||
|
{
|
||||||
|
GridBase *grid = P.Grid();
|
||||||
|
typedef decltype(PeekIndex<LorentzIndex>(P, 0)) LatCM;
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
// Zero strictly links crossing between domains
|
||||||
|
////////////////////////////////////////////////////
|
||||||
|
LatticeInteger coor(grid);
|
||||||
|
LatCM zz(grid); zz = Zero();
|
||||||
|
for(int mu=0;mu<Nd;mu++) {
|
||||||
|
if ( (Block[mu]) && (Block[mu] < grid->GlobalDimensions()[mu] ) ) {
|
||||||
|
// If costly could provide Grid earlier and precompute masks
|
||||||
|
std::cout << " Dirichlet in mu="<<mu<<std::endl;
|
||||||
|
LatticeCoordinate(coor,mu);
|
||||||
|
auto P_mu = PeekIndex<LorentzIndex>(P, mu);
|
||||||
|
P_mu = where(mod(coor,Block[mu])==Integer(Block[mu]-1),zz,P_mu);
|
||||||
|
PokeIndex<LorentzIndex>(P, P_mu, mu);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
||||||
|
|
@ -49,7 +49,7 @@ NAMESPACE_BEGIN(Grid);
|
|||||||
|
|
||||||
typedef Lattice<SiteLink> LinkField;
|
typedef Lattice<SiteLink> LinkField;
|
||||||
typedef Lattice<SiteField> Field;
|
typedef Lattice<SiteField> Field;
|
||||||
typedef LinkField ComplexField;
|
typedef Field ComplexField;
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef QedGImpl<vComplex> QedGImplR;
|
typedef QedGImpl<vComplex> QedGImplR;
|
||||||
|
@ -129,18 +129,10 @@ public:
|
|||||||
Runner(S);
|
Runner(S);
|
||||||
}
|
}
|
||||||
|
|
||||||
//////////////////////////////////////////////////////////////////
|
//Use the checkpointer to initialize the RNGs and the gauge field, writing the resulting gauge field into U.
|
||||||
|
//This is called automatically by Run but may be useful elsewhere, e.g. for integrator tuning experiments
|
||||||
private:
|
void initializeGaugeFieldAndRNGs(Field &U){
|
||||||
template <class SmearingPolicy>
|
if(!Resources.haveRNGs()) Resources.AddRNGs();
|
||||||
void Runner(SmearingPolicy &Smearing) {
|
|
||||||
auto UGrid = Resources.GetCartesian();
|
|
||||||
Resources.AddRNGs();
|
|
||||||
Field U(UGrid);
|
|
||||||
|
|
||||||
// Can move this outside?
|
|
||||||
typedef IntegratorType<SmearingPolicy> TheIntegrator;
|
|
||||||
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
|
|
||||||
|
|
||||||
if (Parameters.StartingType == "HotStart") {
|
if (Parameters.StartingType == "HotStart") {
|
||||||
// Hot start
|
// Hot start
|
||||||
@ -167,6 +159,25 @@ private:
|
|||||||
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
|
<< "Valid [HotStart, ColdStart, TepidStart, CheckpointStart]\n";
|
||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
private:
|
||||||
|
template <class SmearingPolicy>
|
||||||
|
void Runner(SmearingPolicy &Smearing) {
|
||||||
|
auto UGrid = Resources.GetCartesian();
|
||||||
|
Field U(UGrid);
|
||||||
|
|
||||||
|
initializeGaugeFieldAndRNGs(U);
|
||||||
|
|
||||||
|
typedef IntegratorType<SmearingPolicy> TheIntegrator;
|
||||||
|
TheIntegrator MDynamics(UGrid, Parameters.MD, TheAction, Smearing);
|
||||||
|
|
||||||
|
// Sets the momentum filter
|
||||||
|
MDynamics.setMomentumFilter(*(Resources.GetMomentumFilter()));
|
||||||
|
|
||||||
Smearing.set_Field(U);
|
Smearing.set_Field(U);
|
||||||
|
|
||||||
|
@ -34,6 +34,7 @@ directory
|
|||||||
* @brief Classes for Hybrid Monte Carlo update
|
* @brief Classes for Hybrid Monte Carlo update
|
||||||
*
|
*
|
||||||
* @author Guido Cossu
|
* @author Guido Cossu
|
||||||
|
* @author Peter Boyle
|
||||||
*/
|
*/
|
||||||
//--------------------------------------------------------------------
|
//--------------------------------------------------------------------
|
||||||
#pragma once
|
#pragma once
|
||||||
@ -115,22 +116,17 @@ private:
|
|||||||
|
|
||||||
random(sRNG, rn_test);
|
random(sRNG, rn_test);
|
||||||
|
|
||||||
std::cout << GridLogMessage
|
std::cout << GridLogHMC << "--------------------------------------------------\n";
|
||||||
<< "--------------------------------------------------\n";
|
std::cout << GridLogHMC << "exp(-dH) = " << prob << " Random = " << rn_test << "\n";
|
||||||
std::cout << GridLogMessage << "exp(-dH) = " << prob
|
std::cout << GridLogHMC << "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
|
||||||
<< " Random = " << rn_test << "\n";
|
|
||||||
std::cout << GridLogMessage
|
|
||||||
<< "Acc. Probability = " << ((prob < 1.0) ? prob : 1.0) << "\n";
|
|
||||||
|
|
||||||
if ((prob > 1.0) || (rn_test <= prob)) { // accepted
|
if ((prob > 1.0) || (rn_test <= prob)) { // accepted
|
||||||
std::cout << GridLogMessage << "Metropolis_test -- ACCEPTED\n";
|
std::cout << GridLogHMC << "Metropolis_test -- ACCEPTED\n";
|
||||||
std::cout << GridLogMessage
|
std::cout << GridLogHMC << "--------------------------------------------------\n";
|
||||||
<< "--------------------------------------------------\n";
|
|
||||||
return true;
|
return true;
|
||||||
} else { // rejected
|
} else { // rejected
|
||||||
std::cout << GridLogMessage << "Metropolis_test -- REJECTED\n";
|
std::cout << GridLogHMC << "Metropolis_test -- REJECTED\n";
|
||||||
std::cout << GridLogMessage
|
std::cout << GridLogHMC << "--------------------------------------------------\n";
|
||||||
<< "--------------------------------------------------\n";
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -139,18 +135,67 @@ private:
|
|||||||
// Evolution
|
// Evolution
|
||||||
/////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////
|
||||||
RealD evolve_hmc_step(Field &U) {
|
RealD evolve_hmc_step(Field &U) {
|
||||||
TheIntegrator.refresh(U, sRNG, pRNG); // set U and initialize P and phi's
|
|
||||||
|
|
||||||
RealD H0 = TheIntegrator.S(U); // initial state action
|
GridBase *Grid = U.Grid();
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Mainly for DDHMC perform a random translation of U modulo volume
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
std::cout << GridLogMessage << "Random shifting gauge field by [";
|
||||||
|
for(int d=0;d<Grid->Nd();d++) {
|
||||||
|
|
||||||
|
int L = Grid->GlobalDimensions()[d];
|
||||||
|
|
||||||
|
RealD rn_uniform; random(sRNG, rn_uniform);
|
||||||
|
|
||||||
|
int shift = (int) (rn_uniform*L);
|
||||||
|
|
||||||
|
std::cout << shift;
|
||||||
|
if(d<Grid->Nd()-1) std::cout <<",";
|
||||||
|
else std::cout <<"]\n";
|
||||||
|
|
||||||
|
U = Cshift(U,d,shift);
|
||||||
|
}
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
|
||||||
|
TheIntegrator.reset_timer();
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// set U and initialize P and phi's
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
std::cout << GridLogMessage << "Refresh momenta and pseudofermions";
|
||||||
|
TheIntegrator.refresh(U, sRNG, pRNG);
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
// initial state action
|
||||||
|
//////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
std::cout << GridLogMessage << "Compute initial action";
|
||||||
|
RealD H0 = TheIntegrator.S(U);
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
|
||||||
std::streamsize current_precision = std::cout.precision();
|
std::streamsize current_precision = std::cout.precision();
|
||||||
std::cout.precision(15);
|
std::cout.precision(15);
|
||||||
std::cout << GridLogMessage << "Total H before trajectory = " << H0 << "\n";
|
std::cout << GridLogHMC << "Total H before trajectory = " << H0 << "\n";
|
||||||
std::cout.precision(current_precision);
|
std::cout.precision(current_precision);
|
||||||
|
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
std::cout << GridLogMessage << " Molecular Dynamics evolution ";
|
||||||
TheIntegrator.integrate(U);
|
TheIntegrator.integrate(U);
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------------\n";
|
||||||
|
|
||||||
|
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+// updated state action
+//////////////////////////////////////////////////////////////////////////////////////////////////////
+std::cout << GridLogMessage << "--------------------------------------------------\n";
+std::cout << GridLogMessage << "Compute final action";
+RealD H1 = TheIntegrator.S(U);
+std::cout << GridLogMessage << "--------------------------------------------------\n";
-RealD H1 = TheIntegrator.S(U); // updated state action
 
 ///////////////////////////////////////////////////////////
 if(0){
@@ -163,18 +208,17 @@ private:
 }
 ///////////////////////////////////////////////////////////
 
 
 std::cout.precision(15);
-std::cout << GridLogMessage << "Total H after trajectory = " << H1
-    << " dH = " << H1 - H0 << "\n";
+std::cout << GridLogHMC << "--------------------------------------------------\n";
+std::cout << GridLogHMC << "Total H after trajectory = " << H1 << " dH = " << H1 - H0 << "\n";
+std::cout << GridLogHMC << "--------------------------------------------------\n";
 
 std::cout.precision(current_precision);
 
 return (H1 - H0);
 }
 
 
 
 
 public:
 /////////////////////////////////////////
 // Constructor
@@ -195,10 +239,13 @@ public:
 
 // Actual updates (evolve a copy Ucopy then copy back eventually)
 unsigned int FinalTrajectory = Params.Trajectories + Params.NoMetropolisUntil + Params.StartTrajectory;
 
 for (int traj = Params.StartTrajectory; traj < FinalTrajectory; ++traj) {
-std::cout << GridLogMessage << "-- # Trajectory = " << traj << "\n";
+std::cout << GridLogHMC << "-- # Trajectory = " << traj << "\n";
 
 if (traj < Params.StartTrajectory + Params.NoMetropolisUntil) {
-std::cout << GridLogMessage << "-- Thermalization" << std::endl;
+std::cout << GridLogHMC << "-- Thermalization" << std::endl;
 }
 
 double t0=usecond();
@@ -207,20 +254,19 @@ public:
 DeltaH = evolve_hmc_step(Ucopy);
 // Metropolis-Hastings test
 bool accept = true;
-if (traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
+if (Params.MetropolisTest && traj >= Params.StartTrajectory + Params.NoMetropolisUntil) {
 accept = metropolis_test(DeltaH);
 } else {
-std::cout << GridLogMessage << "Skipping Metropolis test" << std::endl;
+std::cout << GridLogHMC << "Skipping Metropolis test" << std::endl;
 }
 
 if (accept)
 Ucur = Ucopy;
 
 
 
 double t1=usecond();
-std::cout << GridLogMessage << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
+std::cout << GridLogHMC << "Total time for trajectory (s): " << (t1-t0)/1e6 << std::endl;
 
+TheIntegrator.print_timer();
 
 for (int obs = 0; obs < Observables.size(); obs++) {
 std::cout << GridLogDebug << "Observables # " << obs << std::endl;
@@ -228,7 +274,7 @@ public:
 std::cout << GridLogDebug << "Observables pointer " << Observables[obs] << std::endl;
 Observables[obs]->TrajectoryComplete(traj + 1, Ucur, sRNG, pRNG);
 }
-std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
+std::cout << GridLogHMC << ":::::::::::::::::::::::::::::::::::::::::::" << std::endl;
 }
 }
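For reference, the accept step that metropolis_test(DeltaH) performs is the standard Metropolis accept/reject on exp(-dH). A minimal standalone sketch (std::mt19937 stands in for Grid's serial RNG; this is not Grid's actual implementation):

    #include <cmath>
    #include <random>

    // Accept downhill moves always; accept uphill moves with probability e^{-dH}.
    bool metropolis_accept_sketch(double DeltaH, std::mt19937 &rng) {
      std::uniform_real_distribution<double> uniform(0.0, 1.0);
      if (DeltaH <= 0.0) return true;
      return uniform(rng) < std::exp(-DeltaH);
    }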
@@ -72,6 +72,8 @@ class HMCResourceManager {
 typedef HMCModuleBase< BaseHmcCheckpointer<ImplementationPolicy> > CheckpointerBaseModule;
 typedef HMCModuleBase< HmcObservable<typename ImplementationPolicy::Field> > ObservableBaseModule;
 typedef ActionModuleBase< Action<typename ImplementationPolicy::Field>, GridModule > ActionBaseModule;
+typedef typename ImplementationPolicy::Field MomentaField;
+typedef typename ImplementationPolicy::Field Field;
 
 // Named storage for grid pairs (std + red-black)
 std::unordered_map<std::string, GridModule> Grids;
@@ -80,6 +82,9 @@ class HMCResourceManager {
 // SmearingModule<ImplementationPolicy> Smearing;
 std::unique_ptr<CheckpointerBaseModule> CP;
 
+// Momentum filter
+std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> > Filter;
+
 // A vector of HmcObservable modules
 std::vector<std::unique_ptr<ObservableBaseModule> > ObservablesList;
 
@@ -90,6 +95,7 @@ class HMCResourceManager {
 
 bool have_RNG;
 bool have_CheckPointer;
+bool have_Filter;
 
 // NOTE: operator << is not overloaded for std::vector<string>
 // so this function is necessary
@@ -101,7 +107,7 @@ class HMCResourceManager {
 
 
 public:
-HMCResourceManager() : have_RNG(false), have_CheckPointer(false) {}
+HMCResourceManager() : have_RNG(false), have_CheckPointer(false), have_Filter(false) {}
 
 template <class ReaderClass, class vector_type = vComplex >
 void initialize(ReaderClass &Read){
@@ -129,6 +135,7 @@ public:
 RNGModuleParameters RNGpar(Read);
 SetRNGSeeds(RNGpar);
 
+
 // Observables
 auto &ObsFactory = HMC_ObservablesModuleFactory<observable_string, typename ImplementationPolicy::Field, ReaderClass>::getInstance();
 Read.push(observable_string);// here must check if existing...
@@ -208,6 +215,16 @@ public:
 AddGrid(s, Mod);
 }
 
+void SetMomentumFilter( MomentumFilterBase<typename ImplementationPolicy::Field> * MomFilter) {
+  assert(have_Filter==false);
+  Filter = std::unique_ptr<MomentumFilterBase<typename ImplementationPolicy::Field> >(MomFilter);
+  have_Filter = true;
+}
+MomentumFilterBase<typename ImplementationPolicy::Field> *GetMomentumFilter(void) {
+  if ( !have_Filter)
+    SetMomentumFilter(new MomentumFilterNone<typename ImplementationPolicy::Field>());
+  return Filter.get();
+}
 
 GridCartesian* GetCartesian(std::string s = "") {
 if (s.empty()) s = Grids.begin()->first;
@@ -227,6 +244,9 @@ public:
 // Random number generators
 //////////////////////////////////////////////////
 
+//Return true if the RNG objects have been instantiated
+bool haveRNGs() const{ return have_RNG; }
+
 void AddRNGs(std::string s = "") {
 // Couple the RNGs to the GridModule tagged by s
 // the default is the first grid registered
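The SetMomentumFilter/GetMomentumFilter pair added above follows a lazy-default pattern: the first Get installs a no-op MomentumFilterNone so callers never receive a null pointer. A small self-contained sketch of the same pattern (the types here are illustrative stand-ins, not Grid's):

    #include <cassert>
    #include <memory>

    struct FilterBase { virtual ~FilterBase() = default; };
    struct FilterNone : FilterBase {};   // no-op default

    class Resources {
      std::unique_ptr<FilterBase> Filter;
      bool have_Filter = false;
    public:
      void SetFilter(FilterBase *f) {
        assert(!have_Filter);            // at most one filter may be registered
        Filter.reset(f);
        have_Filter = true;
      }
      FilterBase *GetFilter() {
        if (!have_Filter) SetFilter(new FilterNone()); // install default lazily
        return Filter.get();
      }
    };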
@@ -33,7 +33,6 @@ directory
 #define INTEGRATOR_INCLUDED
 
 #include <memory>
-#include "MomentumFilter.h"
 
 NAMESPACE_BEGIN(Grid);
 
@@ -67,6 +66,7 @@ public:
 template <class FieldImplementation, class SmearingPolicy, class RepresentationPolicy>
 class Integrator {
 protected:
 
 typedef typename FieldImplementation::Field MomentaField; //for readability
 typedef typename FieldImplementation::Field Field;
 
@@ -119,36 +119,58 @@ protected:
 }
 } update_P_hireps{};
 
 
 void update_P(MomentaField& Mom, Field& U, int level, double ep) {
 // input U actually not used in the fundamental case
 // Fundamental updates, include smearing
 
 for (int a = 0; a < as[level].actions.size(); ++a) {
 
 double start_full = usecond();
 Field force(U.Grid());
 conformable(U.Grid(), Mom.Grid());
 
 Field& Us = Smearer.get_U(as[level].actions.at(a)->is_smeared);
 double start_force = usecond();
+as[level].actions.at(a)->deriv_timer_start();
 as[level].actions.at(a)->deriv(Us, force); // deriv should NOT include Ta
+as[level].actions.at(a)->deriv_timer_stop();
 
 std::cout << GridLogIntegrator << "Smearing (on/off): " << as[level].actions.at(a)->is_smeared << std::endl;
+auto name = as[level].actions.at(a)->action_name();
 if (as[level].actions.at(a)->is_smeared) Smearer.smeared_force(force);
 
 force = FieldImplementation::projectForce(force); // Ta for gauge fields
 double end_force = usecond();
-Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites());
-std::cout << GridLogIntegrator << "["<<level<<"]["<<a<<"] Force average: " << force_abs << std::endl;
+MomFilter->applyFilter(force);
+std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<< std::endl;
+DumpSliceNorm("force ",force,Nd-1);
+
+Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
+Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
+
+Real force_max = std::sqrt(maxLocalNorm2(force));
+Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;
+
+as[level].actions.at(a)->deriv_log(force_abs,force_max);
+
+std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force average: " << force_abs <<" "<<name<<std::endl;
+std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Force max : " << force_max <<" "<<name<<std::endl;
+std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt average : " << impulse_abs <<" "<<name<<std::endl;
+std::cout << GridLogIntegrator<< "["<<level<<"]["<<a<<"] Fdt max : " << impulse_max <<" "<<name<<std::endl;
+
 Mom -= force * ep* HMC_MOMENTUM_DENOMINATOR;;
 double end_full = usecond();
 double time_full = (end_full - start_full) / 1e3;
 double time_force = (end_force - start_force) / 1e3;
 std::cout << GridLogMessage << "["<<level<<"]["<<a<<"] P update elapsed time: " << time_full << " ms (force: " << time_force << " ms)" << std::endl;
 
 }
 
 // Force from the other representations
 as[level].apply(update_P_hireps, Representations, Mom, U, ep);
 
-MomFilter->applyFilter(Mom);
 }
 
 void update_U(Field& U, double ep)
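The reworked update_P above logs, per action and per level, the average and maximum force and the corresponding momentum impulse Fdt. A hedged sketch of just that arithmetic, taking the already-reduced global values as plain doubles (HMC_MOMENTUM_DENOMINATOR is passed in as denom here):

    #include <cmath>

    struct ForceLog { double force_abs, force_max, impulse_abs, impulse_max; };

    // norm2_force = sum_x |F(x)|^2, max2_force = max_x |F(x)|^2, gsites = global volume.
    ForceLog force_diagnostics(double norm2_force, double max2_force,
                               double gsites, double ep, double denom) {
      ForceLog out;
      out.force_abs   = std::sqrt(norm2_force / gsites); // average per-site norm
      out.force_max   = std::sqrt(max2_force);           // worst single site
      out.impulse_abs = out.force_abs * ep * denom;      // Fdt average
      out.impulse_max = out.force_max * ep * denom;      // Fdt max
      return out;
    }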
@@ -162,8 +184,12 @@ protected:
 
 void update_U(MomentaField& Mom, Field& U, double ep)
 {
+MomentaField MomFiltered(Mom.Grid());
+MomFiltered = Mom;
+MomFilter->applyFilter(MomFiltered);
+
 // exponential of Mom*U in the gauge fields case
-FieldImplementation::update_field(Mom, U, ep);
+FieldImplementation::update_field(MomFiltered, U, ep);
 
 // Update the smeared fields, can be implemented as observer
 Smearer.set_Field(U);
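update_U now applies the momentum filter to a copy of the momenta rather than mutating P, so frozen (e.g. Dirichlet-boundary) links never move while the stored momenta stay intact. A scalar analogue of that flow, purely illustrative of MomentumFilterBase's role:

    #include <vector>

    // mask[i] is 1.0 where the field may evolve and 0.0 where it is frozen.
    void update_with_filter(std::vector<double> &U,
                            const std::vector<double> &Mom,
                            const std::vector<double> &mask, double ep) {
      std::vector<double> MomFiltered(Mom);   // copy; leave Mom untouched
      for (size_t i = 0; i < MomFiltered.size(); i++)
        MomFiltered[i] *= mask[i];            // zero momenta on frozen sites
      for (size_t i = 0; i < U.size(); i++)
        U[i] += MomFiltered[i] * ep;          // only unmasked links evolve
    }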
@@ -206,6 +232,66 @@ public:
 const MomentaField & getMomentum() const{ return P; }
 
 
+void reset_timer(void)
+{
+  for (int level = 0; level < as.size(); ++level) {
+    for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
+      as[level].actions.at(actionID)->reset_timer();
+    }
+  }
+}
+void print_timer(void)
+{
+  std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::" << std::endl;
+  std::cout << GridLogMessage << " Refresh cumulative timings "<<std::endl;
+  std::cout << GridLogMessage << "--------------------------- "<<std::endl;
+  for (int level = 0; level < as.size(); ++level) {
+    for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
+      std::cout << GridLogMessage
+                << as[level].actions.at(actionID)->action_name()
+                <<"["<<level<<"]["<< actionID<<"] "
+                << as[level].actions.at(actionID)->refresh_us*1.0e-6<<" s"<< std::endl;
+    }
+  }
+  std::cout << GridLogMessage << "--------------------------- "<<std::endl;
+  std::cout << GridLogMessage << " Action cumulative timings "<<std::endl;
+  std::cout << GridLogMessage << "--------------------------- "<<std::endl;
+  for (int level = 0; level < as.size(); ++level) {
+    for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
+      std::cout << GridLogMessage
+                << as[level].actions.at(actionID)->action_name()
+                <<"["<<level<<"]["<< actionID<<"] "
+                << as[level].actions.at(actionID)->S_us*1.0e-6<<" s"<< std::endl;
+    }
+  }
+  std::cout << GridLogMessage << "--------------------------- "<<std::endl;
+  std::cout << GridLogMessage << " Force cumulative timings "<<std::endl;
+  std::cout << GridLogMessage << "------------------------- "<<std::endl;
+  for (int level = 0; level < as.size(); ++level) {
+    for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
+      std::cout << GridLogMessage
+                << as[level].actions.at(actionID)->action_name()
+                <<"["<<level<<"]["<< actionID<<"] "
+                << as[level].actions.at(actionID)->deriv_us*1.0e-6<<" s"<< std::endl;
+    }
+  }
+  std::cout << GridLogMessage << "--------------------------- "<<std::endl;
+  std::cout << GridLogMessage << " Force average size "<<std::endl;
+  std::cout << GridLogMessage << "------------------------- "<<std::endl;
+  for (int level = 0; level < as.size(); ++level) {
+    for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
+      std::cout << GridLogMessage
+                << as[level].actions.at(actionID)->action_name()
+                <<"["<<level<<"]["<< actionID<<"] : "
+                <<" force max " << as[level].actions.at(actionID)->deriv_max_average()
+                <<" norm " << as[level].actions.at(actionID)->deriv_norm_average()
+                <<" calls " << as[level].actions.at(actionID)->deriv_num
+                << std::endl;
+    }
+  }
+  std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
+}
 
 void print_parameters()
 {
 std::cout << GridLogMessage << "[Integrator] Name : "<< integrator_name() << std::endl;
@@ -224,7 +310,6 @@ public:
 }
 }
 std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
-
 }
 
 void reverse_momenta()
@@ -267,15 +352,19 @@ public:
 for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
 // get gauge field from the SmearingPolicy and
 // based on the boolean is_smeared in actionID
+auto name = as[level].actions.at(actionID)->action_name();
+std::cout << GridLogMessage << "refresh [" << level << "][" << actionID << "] "<<name << std::endl;
+
 Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
+as[level].actions.at(actionID)->refresh_timer_start();
 as[level].actions.at(actionID)->refresh(Us, sRNG, pRNG);
+as[level].actions.at(actionID)->refresh_timer_stop();
 }
 
 // Refresh the higher representation actions
 as[level].apply(refresh_hireps, Representations, sRNG, pRNG);
 }
 
-MomFilter->applyFilter(P);
 }
 
 // to be used by the actionlevel class to iterate
@@ -310,7 +399,9 @@ public:
 // based on the boolean is_smeared in actionID
 Field& Us = Smearer.get_U(as[level].actions.at(actionID)->is_smeared);
 std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] action eval " << std::endl;
+as[level].actions.at(actionID)->S_timer_start();
 Hterm = as[level].actions.at(actionID)->S(Us);
+as[level].actions.at(actionID)->S_timer_stop();
 std::cout << GridLogMessage << "S [" << level << "][" << actionID << "] H = " << Hterm << std::endl;
 H += Hterm;
 }
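The refresh/S/deriv timer hooks and print_timer above all share one accumulation pattern: bracket each call with start/stop and add the elapsed microseconds to a per-action counter, reset once per trajectory. A minimal sketch of that pattern (Grid keeps the counters as refresh_us, S_us and deriv_us on the action base class):

    #include <chrono>

    struct CumulativeTimer {
      double us = 0;                          // cumulative microseconds
      std::chrono::steady_clock::time_point t0;
      void start() { t0 = std::chrono::steady_clock::now(); }
      void stop()  {
        auto t1 = std::chrono::steady_clock::now();
        us += std::chrono::duration<double, std::micro>(t1 - t0).count();
      }
      void reset() { us = 0; }                // once per trajectory
    };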
@@ -55,12 +55,12 @@ public:
 }
 }
 
-static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1,bool err_on_no_converge=true) {
+static void SteepestDescentGaugeFix(GaugeLorentz &Umu,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
 GridBase *grid = Umu.Grid();
 GaugeMat xform(grid);
-SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier,orthog,err_on_no_converge);
+SteepestDescentGaugeFix(Umu,xform,alpha,maxiter,Omega_tol,Phi_tol,Fourier,orthog);
 }
-static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1,bool err_on_no_converge=true) {
+static void SteepestDescentGaugeFix(GaugeLorentz &Umu,GaugeMat &xform,Real & alpha,int maxiter,Real Omega_tol, Real Phi_tol,bool Fourier=false,int orthog=-1) {
 
 GridBase *grid = Umu.Grid();
 
@@ -122,8 +122,6 @@ public:
 
 }
 }
-std::cout << GridLogError << "Gauge fixing did not converge in " << maxiter << " iterations." << std::endl;
-if (err_on_no_converge) assert(0);
 };
 static Real SteepestDescentStep(std::vector<GaugeMat> &U,GaugeMat &xform,Real & alpha, GaugeMat & dmuAmu,int orthog) {
 GridBase *grid = U[0].Grid();
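With err_on_no_converge dropped from the SteepestDescentGaugeFix signature on the new side, a caller that still wants hard failure has to police convergence itself. A generic illustration of that caller-side policy, independent of Grid's API:

    #include <cmath>
    #include <cstdio>

    // Returns true once the residual falls below tol; the caller decides what
    // to do on failure instead of the fixer asserting internally.
    bool iterate_to_tolerance(double residual, int maxiter, double tol) {
      for (int i = 0; i < maxiter; i++) {
        residual *= 0.5;                 // stand-in for one descent step
        if (std::fabs(residual) < tol) return true;
      }
      std::fprintf(stderr, "gauge fixing did not converge\n");
      return false;
    }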
@@ -125,6 +125,7 @@ public:
 return sumplaq / vol / faces / Nc; // Nd , Nc dependent... FIXME
 }
 
+
 //////////////////////////////////////////////////
 // average over all x,y,z the temporal loop
 //////////////////////////////////////////////////
@@ -164,7 +165,7 @@ public:
 
 double vol = Umu.Grid()->gSites();
 
-return p.real() / vol / (4.0 * Nc ) ;
+return p.real() / vol / 4.0 / 3.0;
 };
 
 //////////////////////////////////////////////////
@@ -26,7 +26,7 @@
 *************************************************************************************/
 /* END LEGAL */
 #include <Grid/Grid.h>
-#ifndef GRID_HIP
+#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
 
 NAMESPACE_BEGIN(Grid);
 
@@ -82,7 +82,7 @@ void JSONWriter::writeDefault(const std::string &s, const std::string &x)
 if (s.size())
 ss_ << "\""<< s << "\" : \"" << os.str() << "\" ," ;
 else
-ss_ << "\""<< os.str() << "\" ," ;
+ss_ << os.str() << " ," ;
 }
 
 // Reader implementation ///////////////////////////////////////////////////////
@@ -54,7 +54,7 @@ namespace Grid
 void pop(void);
 template <typename U>
 void writeDefault(const std::string &s, const U &x);
-#if defined(GRID_CUDA) || defined(GRID_HIP)
+#ifdef __NVCC__
 void writeDefault(const std::string &s, const Grid::ComplexD &x)
 {
 std::complex<double> z(real(x),imag(x));
@@ -101,7 +101,7 @@ namespace Grid
 void readDefault(const std::string &s, std::vector<U> &output);
 template <typename U, typename P>
 void readDefault(const std::string &s, std::pair<U,P> &output);
-#if defined(GRID_CUDA) || defined(GRID_HIP)
+#ifdef __NVCC__
 void readDefault(const std::string &s, ComplexD &output)
 {
 std::complex<double> z;
@@ -36,7 +36,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #include "BinaryIO.h"
 #include "TextIO.h"
 #include "XmlIO.h"
-#ifndef GRID_HIP
+#if (!defined(GRID_CUDA)) && (!defined(GRID_HIP))
 #include "JSON_IO.h"
 #endif
 
@@ -80,14 +80,11 @@ void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lat
 ///////////////////////////////////////////////////////////////////
 template<class cobj,class vobj,class compressor>
 void Gather_plane_exchange_table(const Lattice<vobj> &rhs,
-commVector<cobj *> pointers,
-int dimension,int plane,
-int cbmask,compressor &compress,int type) __attribute__((noinline));
+commVector<cobj *> pointers,int dimension,int plane,int cbmask,compressor &compress,int type) __attribute__((noinline));
 
 template<class cobj,class vobj,class compressor>
-void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table,
-const Lattice<vobj> &rhs,
-std::vector<cobj *> &pointers,int dimension,int plane,int cbmask,
+void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,
+Vector<cobj *> pointers,int dimension,int plane,int cbmask,
 compressor &compress,int type)
 {
 assert( (table.size()&0x1)==0);
@@ -95,12 +92,11 @@ void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table,
 int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
 
 auto rhs_v = rhs.View(AcceleratorRead);
-auto rhs_p = &rhs_v[0];
 auto p0=&pointers[0][0];
 auto p1=&pointers[1][0];
 auto tp=&table[0];
 accelerator_forNB(j, num, vobj::Nsimd(), {
-compress.CompressExchange(p0,p1, rhs_p, j,
+compress.CompressExchange(p0,p1, &rhs_v[0], j,
 so+tp[2*j ].second,
 so+tp[2*j+1].second,
 type);
@@ -135,8 +131,11 @@ class CartesianStencilAccelerator {
 int _checkerboard;
 int _npoints; // Move to template param?
 int _osites;
+int _dirichlet;
 StencilVector _directions;
 StencilVector _distances;
+StencilVector _comms_send;
+StencilVector _comms_recv;
 StencilVector _comm_buf_size;
 StencilVector _permute_type;
 StencilVector same_node;
@@ -230,12 +229,14 @@ public:
 void * recv_buf;
 Integer to_rank;
 Integer from_rank;
+Integer do_send;
+Integer do_recv;
 Integer bytes;
 };
 struct Merge {
 cobj * mpointer;
-// std::vector<scalar_object *> rpointers;
-std::vector<cobj *> vpointers;
+Vector<scalar_object *> rpointers;
+Vector<cobj *> vpointers;
 Integer buffer_size;
 Integer type;
 };
@@ -244,7 +245,20 @@ public:
 cobj * mpi_p;
 Integer buffer_size;
 };
+struct CopyReceiveBuffer {
+  void * from_p;
+  void * to_p;
+  Integer bytes;
+};
+struct CachedTransfer {
+  Integer direction;
+  Integer OrthogPlane;
+  Integer DestProc;
+  Integer bytes;
+  Integer lane;
+  Integer cb;
+  void *recv_buf;
+};
 
 protected:
 GridBase * _grid;
@@ -275,7 +289,8 @@ public:
 std::vector<Merge> MergersSHM;
 std::vector<Decompress> Decompressions;
 std::vector<Decompress> DecompressionsSHM;
+std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
+std::vector<CachedTransfer> CachedTransfers;
 ///////////////////////////////////////////////////////////
 // Unified Comms buffers for all directions
 ///////////////////////////////////////////////////////////
@@ -288,29 +303,6 @@ public:
 int u_comm_offset;
 int _unified_buffer_size;
 
-/////////////////////////////////////////
-// Timing info; ugly; possibly temporary
-/////////////////////////////////////////
-double commtime;
-double mpi3synctime;
-double mpi3synctime_g;
-double shmmergetime;
-double gathertime;
-double gathermtime;
-double halogtime;
-double mergetime;
-double decompresstime;
-double comms_bytes;
-double shm_bytes;
-double splicetime;
-double nosplicetime;
-double calls;
-std::vector<double> comm_bytes_thr;
-std::vector<double> shm_bytes_thr;
-std::vector<double> comm_time_thr;
-std::vector<double> comm_enter_thr;
-std::vector<double> comm_leave_thr;
 
 ////////////////////////////////////////
 // Stencil query
 ////////////////////////////////////////
@@ -337,11 +329,12 @@ public:
 //////////////////////////////////////////
 // Comms packet queue for asynch thread
 // Use OpenMP Tasks for cleaner ???
+// must be called *inside* parallel region
 //////////////////////////////////////////
+/*
 void CommunicateThreaded()
 {
 #ifdef GRID_OMP
-// must be called in parallel region
 int mythread = omp_get_thread_num();
 int nthreads = CartesianCommunicator::nCommThreads;
 #else
@@ -350,67 +343,30 @@ public:
 #endif
 if (nthreads == -1) nthreads = 1;
 if (mythread < nthreads) {
-comm_enter_thr[mythread] = usecond();
 for (int i = mythread; i < Packets.size(); i += nthreads) {
 uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
 Packets[i].to_rank,
 Packets[i].recv_buf,
 Packets[i].from_rank,
 Packets[i].bytes,i);
-comm_bytes_thr[mythread] += bytes;
-shm_bytes_thr[mythread] += 2*Packets[i].bytes-bytes; // Send + Recv.
 
-}
-comm_leave_thr[mythread]= usecond();
-comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
 }
 }
 
-void CollateThreads(void)
-{
-int nthreads = CartesianCommunicator::nCommThreads;
-double first=0.0;
-double last =0.0;
-
-for(int t=0;t<nthreads;t++) {
-
-double t0 = comm_enter_thr[t];
-double t1 = comm_leave_thr[t];
-comms_bytes+=comm_bytes_thr[t];
-shm_bytes +=shm_bytes_thr[t];
-
-comm_enter_thr[t] = 0.0;
-comm_leave_thr[t] = 0.0;
-comm_time_thr[t] = 0.0;
-comm_bytes_thr[t]=0;
-shm_bytes_thr[t]=0;
-
-if ( first == 0.0 ) first = t0; // first is t0
-if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen
-
-if ( t1 > last ) last = t1; // max time seen
-
-}
-commtime+= last-first;
 }
+*/
 ////////////////////////////////////////////////////////////////////////
 // Non blocking send and receive. Necessarily parallel.
 ////////////////////////////////////////////////////////////////////////
 void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
 {
 reqs.resize(Packets.size());
-commtime-=usecond();
 for(int i=0;i<Packets.size();i++){
-uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
+_grid->StencilSendToRecvFromBegin(reqs[i],
 Packets[i].send_buf,
-Packets[i].to_rank,
+Packets[i].to_rank,Packets[i].do_send,
 Packets[i].recv_buf,
-Packets[i].from_rank,
+Packets[i].from_rank,Packets[i].do_recv,
 Packets[i].bytes,i);
-comms_bytes+=bytes;
-shm_bytes +=2*Packets[i].bytes-bytes;
 }
-_grid->StencilBarrier();// Synch shared memory on a single nodes
 }
 
 void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
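The do_send/do_recv flags threaded through CommunicateBegin above gate each leg of the exchange independently, which is how Dirichlet-truncated directions drop off the wire while the matching buffer bookkeeping stays in place. Schematic only, with the actual MPI calls elided:

    struct PacketSketch { int to_rank, from_rank, do_send, do_recv, bytes; };

    void exchange(const PacketSketch &p) {
      if (p.do_send) { /* post the send of p.bytes to p.to_rank        */ }
      if (p.do_recv) { /* post the receive of p.bytes from p.from_rank */ }
      // neither flag set: this leg is fully interior or Dirichlet-masked
    }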
@@ -418,36 +374,34 @@ public:
 for(int i=0;i<Packets.size();i++){
 _grid->StencilSendToRecvFromComplete(reqs[i],i);
 }
-commtime+=usecond();
 }
 ////////////////////////////////////////////////////////////////////////
 // Blocking send and receive. Either sequential or parallel.
 ////////////////////////////////////////////////////////////////////////
 void Communicate(void)
 {
-if ( 0 ){
-thread_region {
-// must be called in parallel region
-int mythread = thread_num();
-int maxthreads= thread_max();
+if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
+/////////////////////////////////////////////////////////
+// several way threaded on different communicators.
+// Cannot combine with Dirichlet operators
+// This scheme is needed on Intel Omnipath for best performance
+// Deprecate once there are very few omnipath clusters
+/////////////////////////////////////////////////////////
 int nthreads = CartesianCommunicator::nCommThreads;
-assert(nthreads <= maxthreads);
-if (nthreads == -1) nthreads = 1;
-if (mythread < nthreads) {
-for (int i = mythread; i < Packets.size(); i += nthreads) {
-double start = usecond();
-uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
-Packets[i].to_rank,
+int old = GridThread::GetThreads();
+GridThread::SetThreads(nthreads);
+thread_for(i,Packets.size(),{
+_grid->StencilSendToRecvFrom(Packets[i].send_buf,
+Packets[i].to_rank,Packets[i].do_send,
 Packets[i].recv_buf,
-Packets[i].from_rank,
+Packets[i].from_rank,Packets[i].do_recv,
 Packets[i].bytes,i);
-comm_bytes_thr[mythread] += bytes;
-shm_bytes_thr[mythread] += Packets[i].bytes - bytes;
-comm_time_thr[mythread] += usecond() - start;
-}
-}
-}
-} else { // Concurrent and non-threaded asynch calls to MPI
+});
+GridThread::SetThreads(old);
+} else {
+/////////////////////////////////////////////////////////
+// Concurrent and non-threaded asynch calls to MPI
+/////////////////////////////////////////////////////////
 std::vector<std::vector<CommsRequest_t> > reqs;
 this->CommunicateBegin(reqs);
 this->CommunicateComplete(reqs);
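Communicate() now dispatches on CartesianCommunicator::CommunicatorPolicy: the sequential policy temporarily widens the thread pool and drives one blocking StencilSendToRecvFrom per packet from a thread_for, while the default path keeps the non-blocking Begin/Complete pair. The control flow, schematically:

    enum Policy { Sequential, Concurrent };

    void communicate_sketch(Policy policy, int npackets) {
      if (policy == Sequential) {
        // threaded blocking exchange: one packet per worker, restore threads after
        for (int i = 0; i < npackets; i++) { /* blocking send/recv of packet i */ }
      } else {
        // concurrent: post all non-blocking requests, then wait on all of them
        /* CommunicateBegin(reqs); CommunicateComplete(reqs); */
      }
    }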
@@ -489,31 +443,23 @@ public:
 sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
 if ( sshift[0] == sshift[1] ) {
 if (splice_dim) {
-splicetime-=usecond();
-auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
+auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
 is_same_node = is_same_node && tmp;
-splicetime+=usecond();
 } else {
-nosplicetime-=usecond();
-auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
+auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
 is_same_node = is_same_node && tmp;
-nosplicetime+=usecond();
 }
 } else {
 if(splice_dim){
-splicetime-=usecond();
 // if checkerboard is unfavourable take two passes
 // both with block stride loop iteration
-auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
-auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
+auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
+auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
 is_same_node = is_same_node && tmp1 && tmp2;
-splicetime+=usecond();
 } else {
-nosplicetime-=usecond();
-auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
-auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
+auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
+auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
 is_same_node = is_same_node && tmp1 && tmp2;
-nosplicetime+=usecond();
 }
 }
 }
@@ -523,13 +469,10 @@ public:
 template<class compressor>
 void HaloGather(const Lattice<vobj> &source,compressor &compress)
 {
-mpi3synctime_g-=usecond();
 _grid->StencilBarrier();// Synch shared memory on a single nodes
-mpi3synctime_g+=usecond();
 
 // conformable(source.Grid(),_grid);
 assert(source.Grid()==_grid);
-halogtime-=usecond();
 
 u_comm_offset=0;
 
@@ -543,7 +486,6 @@ public:
 assert(u_comm_offset==_unified_buffer_size);
 
 accelerator_barrier();
-halogtime+=usecond();
 }
 
 /////////////////////////
@@ -556,14 +498,72 @@ public:
 Mergers.resize(0);
 MergersSHM.resize(0);
 Packets.resize(0);
-calls++;
+CopyReceiveBuffers.resize(0);
+CachedTransfers.resize(0);
 }
-void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
+void AddCopy(void *from,void * to, Integer bytes)
+{
+  // std::cout << "Adding CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<bytes<<std::endl;
+  CopyReceiveBuffer obj;
+  obj.from_p = from;
+  obj.to_p = to;
+  obj.bytes= bytes;
+  CopyReceiveBuffers.push_back(obj);
+}
+void CommsCopy()
+{
+  // These are device resident MPI buffers.
+  for(int i=0;i<CopyReceiveBuffers.size();i++){
+    cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
+    cobj *to =(cobj *)CopyReceiveBuffers[i].to_p;
+    Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);
+    // std::cout << "CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<words*sizeof(cobj)<<std::endl;
+    accelerator_forNB(j, words, cobj::Nsimd(), {
+      coalescedWrite(to[j] ,coalescedRead(from [j]));
+    });
+  }
+}
+
+Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
+{
+  CachedTransfer obj;
+  obj.direction   = direction;
+  obj.OrthogPlane = OrthogPlane;
+  obj.DestProc    = DestProc;
+  obj.recv_buf    = recv_buf;
+  obj.lane        = lane;
+  obj.bytes       = bytes;
+  obj.cb          = cb;
+
+  for(int i=0;i<CachedTransfers.size();i++){
+    if ( (CachedTransfers[i].direction  ==direction)
+       &&(CachedTransfers[i].OrthogPlane==OrthogPlane)
+       &&(CachedTransfers[i].DestProc   ==DestProc)
+       &&(CachedTransfers[i].bytes      ==bytes)
+       &&(CachedTransfers[i].lane       ==lane)
+       &&(CachedTransfers[i].cb         ==cb)
+       ){
+      // std::cout << "Found duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<< " bytes "<<bytes <<std::endl;
+      AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
+      return 1;
+    }
+  }
+
+  // std::cout << "No duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<<" bytes "<<bytes<<std::endl;
+  CachedTransfers.push_back(obj);
+  return 0;
+}
+void AddPacket(void *xmit,void * rcv,
+               Integer to, Integer do_send,
+               Integer from, Integer do_recv,
+               Integer bytes){
 Packet p;
 p.send_buf = xmit;
 p.recv_buf = rcv;
 p.to_rank = to;
 p.from_rank= from;
+p.do_send = do_send;
+p.do_recv = do_recv;
 p.bytes = bytes;
 Packets.push_back(p);
 }
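CheckForDuplicate above is a linear cache keyed on (direction, plane, destination, bytes, lane, checkerboard): the first transfer of a plane is posted normally and remembered, and any later identical request is satisfied by a device-side copy (AddCopy, replayed by CommsCopy) instead of a second message. A compact standalone sketch of the lookup:

    #include <utility>
    #include <vector>

    struct Key { int dir, plane, proc, bytes, lane, cb; void *recv_buf; };

    // Returns true (and records a copy) if an identical transfer was already posted.
    bool check_for_duplicate(std::vector<Key> &cache, const Key &k,
                             std::vector<std::pair<void*,void*>> &copies) {
      for (const Key &c : cache)
        if (c.dir == k.dir && c.plane == k.plane && c.proc == k.proc &&
            c.bytes == k.bytes && c.lane == k.lane && c.cb == k.cb) {
          copies.push_back({c.recv_buf, k.recv_buf}); // reuse the earlier buffer
          return true;
        }
      cache.push_back(k);                             // first occurrence: post it
      return false;
    }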
@@ -574,7 +574,7 @@ public:
 d.buffer_size = buffer_size;
 dv.push_back(d);
 }
-void AddMerge(cobj *merge_p,std::vector<cobj *> &rpointers,Integer buffer_size,Integer type,std::vector<Merge> &mv) {
+void AddMerge(cobj *merge_p,Vector<cobj *> &rpointers,Integer buffer_size,Integer type,std::vector<Merge> &mv) {
 Merge m;
 m.type = type;
 m.mpointer = merge_p;
@@ -583,23 +583,17 @@ public:
 mv.push_back(m);
 }
 template<class decompressor> void CommsMerge(decompressor decompress) {
+CommsCopy();
 CommsMerge(decompress,Mergers,Decompressions);
 }
 template<class decompressor> void CommsMergeSHM(decompressor decompress) {
-mpi3synctime-=usecond();
-accelerator_barrier();
 _grid->StencilBarrier();// Synch shared memory on a single nodes
-mpi3synctime+=usecond();
-shmmergetime-=usecond();
 CommsMerge(decompress,MergersSHM,DecompressionsSHM);
-shmmergetime+=usecond();
 }
 
 template<class decompressor>
-void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
+void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
+{
 
-mergetime-=usecond();
 for(int i=0;i<mm.size();i++){
 auto mp = &mm[i].mpointer[0];
 auto vp0= &mm[i].vpointers[0][0];
@@ -609,9 +603,7 @@ public:
 decompress.Exchange(mp,vp0,vp1,type,o);
 });
 }
-mergetime+=usecond();
 
-decompresstime-=usecond();
 for(int i=0;i<dd.size();i++){
 auto kp = dd[i].kernel_p;
 auto mp = dd[i].mpi_p;
@@ -619,7 +611,6 @@ public:
 decompress.Decompress(kp,mp,o);
 });
 }
-decompresstime+=usecond();
 }
 ////////////////////////////////////////
 // Set up routines
@@ -656,19 +647,58 @@ public:
 }
 }
 }
+/// Introduce a block structure and switch off comms on boundaries
+void DirichletBlock(const Coordinate &dirichlet_block)
+{
+  this->_dirichlet = 1;
+  for(int ii=0;ii<this->_npoints;ii++){
+    int dimension    = this->_directions[ii];
+    int displacement = this->_distances[ii];
+    int shift = displacement;
+    int gd = _grid->_gdimensions[dimension];
+    int fd = _grid->_fdimensions[dimension];
+    int pd = _grid->_processors [dimension];
+    int ld = gd/pd;
+    int pc = _grid->_processor_coor[dimension];
+    ///////////////////////////////////////////
+    // Figure out dirichlet send and receive
+    // on this leg of stencil.
+    ///////////////////////////////////////////
+    int comm_dim = _grid->_processors[dimension] >1 ;
+    int block    = dirichlet_block[dimension];
+    this->_comms_send[ii] = comm_dim;
+    this->_comms_recv[ii] = comm_dim;
+    if ( block ) {
+      assert(abs(displacement) < ld );
+
+      if( displacement > 0 ) {
+        // High side, low side
+        // | <--B--->|
+        // | | |
+        // noR
+        // noS
+        if ( (ld*(pc+1) ) % block == 0 ) this->_comms_recv[ii] = 0;
+        if ( ( ld*pc    ) % block == 0 ) this->_comms_send[ii] = 0;
+      } else {
+        // High side, low side
+        // | <--B--->|
+        // | | |
+        // noS
+        // noR
+        if ( (ld*(pc+1) ) % block == 0 ) this->_comms_send[ii] = 0;
+        if ( ( ld*pc    ) % block == 0 ) this->_comms_recv[ii] = 0;
+      }
+    }
+  }
+}
 CartesianStencil(GridBase *grid,
 int npoints,
 int checkerboard,
 const std::vector<int> &directions,
 const std::vector<int> &distances,
 Parameters p)
-: shm_bytes_thr(npoints),
-comm_bytes_thr(npoints),
-comm_enter_thr(npoints),
-comm_leave_thr(npoints),
-comm_time_thr(npoints)
 {
+this->_dirichlet = 0;
 face_table_computed=0;
 _grid = grid;
 this->parameters=p;
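DirichletBlock switches a leg's comms off exactly when the halo it would carry lies on a block boundary: with local extent ld and processor coordinate pc, the rank's slab spans [ld*pc, ld*(pc+1)), so for a positive displacement the receive is dropped when ld*(pc+1) is a multiple of the block size and the send when ld*pc is. A worked check of that arithmetic:

    #include <cstdio>

    int main() {
      int ld = 8, block = 16;                 // 8 local sites, Dirichlet block of 16
      for (int pc = 0; pc < 4; pc++) {        // 4 ranks in this dimension
        int noR = ((ld * (pc + 1)) % block == 0); // upper face sits on a block boundary
        int noS = ((ld * pc)       % block == 0); // lower face sits on a block boundary
        std::printf("rank %d: comms_recv=%d comms_send=%d\n", pc, !noR, !noS);
      }
      return 0;  // recv switched off at pc=1,3; send switched off at pc=0,2
    }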
@@ -681,6 +711,8 @@ public:
 this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
 this->_directions = StencilVector(directions);
 this->_distances = StencilVector(distances);
+this->_comms_send.resize(npoints);
+this->_comms_recv.resize(npoints);
 this->same_node.resize(npoints);
 
 _unified_buffer_size=0;
@@ -699,24 +731,27 @@ public:
 int displacement = distances[i];
 int shift = displacement;
 
+int gd = _grid->_gdimensions[dimension];
 int fd = _grid->_fdimensions[dimension];
+int pd = _grid->_processors [dimension];
+int ld = gd/pd;
 int rd = _grid->_rdimensions[dimension];
+int pc = _grid->_processor_coor[dimension];
 this->_permute_type[point]=_grid->PermuteType(dimension);
 
 this->_checkerboard = checkerboard;
 
-//////////////////////////
-// the permute type
-//////////////////////////
 int simd_layout = _grid->_simd_layout[dimension];
 int comm_dim = _grid->_processors[dimension] >1 ;
 int splice_dim = _grid->_simd_layout[dimension]>1 && (comm_dim);
 int rotate_dim = _grid->_simd_layout[dimension]>2;
 
+this->_comms_send[ii] = comm_dim;
+this->_comms_recv[ii] = comm_dim;
+
 assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
 
 int sshift[2];
 
 //////////////////////////
 // Underlying approach. For each local site build
 // up a table containing the npoint "neighbours" and whether they
@@ -817,6 +852,7 @@ public:
 GridBase *grid=_grid;
 const int Nsimd = grid->Nsimd();
 
+int comms_recv = this->_comms_recv[point];
 int fd = _grid->_fdimensions[dimension];
 int ld = _grid->_ldimensions[dimension];
 int rd = _grid->_rdimensions[dimension];
@@ -873,7 +909,9 @@ public:
 if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
 wraparound = 1;
 }
-if (!offnode) {
+
+// Wrap locally dirichlet support case OR node local
+if ( (offnode==0) || (comms_recv==0) ) {
 
 int permute_slice=0;
 CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
@@ -990,11 +1028,14 @@ public:
 }
 
 template<class compressor>
-int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
+int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
 {
 typedef typename cobj::vector_type vector_type;
 typedef typename cobj::scalar_type scalar_type;
 
+int comms_send = this->_comms_send[point] ;
+int comms_recv = this->_comms_recv[point] ;
+
 assert(rhs.Grid()==_grid);
 // conformable(_grid,rhs.Grid());
 
@@ -1020,6 +1061,8 @@ public:
 
 if (comm_proc) {
 
+
+
 int words = buffer_size;
 if (cbmask != 0x3) words=words>>1;
 
@@ -1051,16 +1094,20 @@ public:
 recv_buf=this->u_recv_buf_p;
 }
 
 
 cobj *send_buf;
 send_buf = this->u_send_buf_p; // Gather locally, must send
 
 ////////////////////////////////////////////////////////
 // Gather locally
 ////////////////////////////////////////////////////////
-gathertime-=usecond();
 assert(send_buf!=NULL);
-Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
-gathertime+=usecond();
+if ( comms_send )
+  Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);
+face_idx++;
+
+int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[u_comm_offset],0,bytes,cbmask);
+if ( (!duplicate) ) { // Force comms for now
 
 ///////////////////////////////////////////////////////////
 // Build a list of things to do after we synchronise GPUs
@@ -1068,9 +1115,10 @@ public:
 ///////////////////////////////////////////////////////////
 AddPacket((void *)&send_buf[u_comm_offset],
 (void *)&recv_buf[u_comm_offset],
-xmit_to_rank,
-recv_from_rank,
+xmit_to_rank, comms_send,
+recv_from_rank, comms_recv,
 bytes);
+}
 
 if ( compress.DecompressionStep() ) {
 AddDecompress(&this->u_recv_buf_p[u_comm_offset],
||||||
@ -1084,11 +1132,15 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
template<class compressor>
|
template<class compressor>
|
||||||
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
|
int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
|
||||||
{
|
{
|
||||||
const int Nsimd = _grid->Nsimd();
|
const int Nsimd = _grid->Nsimd();
|
||||||
|
|
||||||
const int maxl =2;// max layout in a direction
|
const int maxl =2;// max layout in a direction
|
||||||
|
|
||||||
|
int comms_send = this->_comms_send[point] ;
|
||||||
|
int comms_recv = this->_comms_recv[point] ;
|
||||||
|
|
||||||
int fd = _grid->_fdimensions[dimension];
|
int fd = _grid->_fdimensions[dimension];
|
||||||
int rd = _grid->_rdimensions[dimension];
|
int rd = _grid->_rdimensions[dimension];
|
||||||
int ld = _grid->_ldimensions[dimension];
|
int ld = _grid->_ldimensions[dimension];
|
||||||
@ -1120,8 +1172,8 @@ public:
|
|||||||
int bytes = (reduced_buffer_size*datum_bytes)/simd_layout;
|
int bytes = (reduced_buffer_size*datum_bytes)/simd_layout;
|
||||||
assert(bytes*simd_layout == reduced_buffer_size*datum_bytes);
|
assert(bytes*simd_layout == reduced_buffer_size*datum_bytes);
|
||||||
|
|
||||||
std::vector<cobj *> rpointers(maxl);
|
Vector<cobj *> rpointers(maxl);
|
||||||
std::vector<cobj *> spointers(maxl);
|
Vector<cobj *> spointers(maxl);
|
||||||
|
|
||||||
///////////////////////////////////////////
|
///////////////////////////////////////////
|
||||||
// Work out what to send where
|
// Work out what to send where
|
||||||
@ -1153,12 +1205,11 @@ public:
|
|||||||
&face_table[face_idx][0],
|
&face_table[face_idx][0],
|
||||||
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
face_table[face_idx].size()*sizeof(face_table_host[0]));
|
||||||
}
|
}
|
||||||
gathermtime-=usecond();
|
|
||||||
|
|
||||||
|
// if ( comms_send )
|
||||||
Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
|
Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
|
||||||
face_idx++;
|
face_idx++;
|
||||||
|
|
||||||
gathermtime+=usecond();
|
|
||||||
//spointers[0] -- low
|
//spointers[0] -- low
|
||||||
//spointers[1] -- high
|
//spointers[1] -- high
|
||||||
|
|
||||||
@ -1187,8 +1238,13 @@ public:
|
|||||||
|
|
||||||
rpointers[i] = rp;
|
rpointers[i] = rp;
|
||||||
|
|
||||||
AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);
|
int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
|
||||||
|
if ( !duplicate ) {
|
||||||
|
AddPacket((void *)sp,(void *)rp,
|
||||||
|
xmit_to_rank,comms_send,
|
||||||
|
recv_from_rank,comms_recv,
|
||||||
|
bytes);
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
|
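The hunks above thread a stencil-point index into the gather path so that each direction can consult per-point _comms_send/_comms_recv masks before gathering a face or posting a packet. A minimal standalone sketch of that gating pattern follows; CommsMask and gather_point are illustrative stand-ins, not Grid API:

#include <cstdio>
#include <vector>

// Stand-in for the per-point communication masks: for each stencil point
// record whether the halo face must be sent and/or received.
struct CommsMask { std::vector<int> send, recv; };

// Hypothetical gather step: only do the (expensive) face gather when the
// point really sends, and only post a receive when the point really receives.
void gather_point(const CommsMask &m, int point) {
  int comms_send = m.send[point];
  int comms_recv = m.recv[point];
  if (comms_send) std::printf("point %d: gather and send face\n", point);
  if (comms_recv) std::printf("point %d: post receive\n", point);
  if (!comms_send && !comms_recv)
    std::printf("point %d: locally Dirichlet, no comms\n", point);
}

int main() {
  CommsMask m{{1,0,1},{1,1,0}};
  for (int p = 0; p < 3; p++) gather_point(m, p);
}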
@@ -55,7 +55,7 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
 
 
 // Specialisation: Cayley-Hamilton exponential for SU(3)
-#ifndef GRID_CUDA
+#ifndef GRID_ACCELERATED
 template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>
 accelerator_inline iMatrix<vtype,3> Exponentiate(const iMatrix<vtype,3> &arg, RealD alpha , Integer Nexp = DEFAULT_MAT_EXP )
 {
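The guard change above only alters which compilation targets take the Cayley-Hamilton specialisation; the mathematics behind it is standard: a 3x3 matrix satisfies its own characteristic polynomial, so for traceless Q the exponential series collapses to a quadratic polynomial in Q. This is textbook background, not a transcription of Grid's kernel:

\[
Q^3 = c_1\,Q + c_0\,\mathbb{1},
\qquad c_1 = \tfrac{1}{2}\,\mathrm{tr}\,Q^2,
\qquad c_0 = \det Q = \tfrac{1}{3}\,\mathrm{tr}\,Q^3 ,
\]
so that
\[
\exp(\alpha Q) \;=\; f_0(\alpha;c_0,c_1)\,\mathbb{1} \;+\; f_1(\alpha;c_0,c_1)\,Q \;+\; f_2(\alpha;c_0,c_1)\,Q^2 ,
\]
with scalar coefficient functions determined by the two invariants.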
@@ -195,15 +195,12 @@ void acceleratorInit(void)
 #ifdef GRID_SYCL
 
 cl::sycl::queue *theGridAccelerator;
-cl::sycl::queue *theCopyAccelerator;
 void acceleratorInit(void)
 {
   int nDevices = 1;
   cl::sycl::gpu_selector selector;
   cl::sycl::device selectedDevice { selector };
   theGridAccelerator = new sycl::queue (selectedDevice);
-  //  theCopyAccelerator = new sycl::queue (selectedDevice);
-  theCopyAccelerator = theGridAccelerator; // Should proceed concurrenlty anyway.
 
 #ifdef GRID_SYCL_LEVEL_ZERO_IPC
   zeInit(0);
@@ -247,6 +247,7 @@ inline int acceleratorIsCommunicable(void *ptr)
 //////////////////////////////////////////////
 // SyCL acceleration
 //////////////////////////////////////////////
 
 #ifdef GRID_SYCL
 NAMESPACE_END(Grid);
 #include <CL/sycl.hpp>
@@ -261,7 +262,6 @@ NAMESPACE_END(Grid);
 NAMESPACE_BEGIN(Grid);
 
 extern cl::sycl::queue *theGridAccelerator;
-extern cl::sycl::queue *theCopyAccelerator;
 
 #ifdef __SYCL_DEVICE_ONLY__
 #define GRID_SIMT
@@ -289,7 +289,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
      cgh.parallel_for(                                  \
          cl::sycl::nd_range<3>(global,local),           \
          [=] (cl::sycl::nd_item<3> item) /*mutable*/    \
-          [[intel::reqd_sub_group_size(16)]]            \
+          [[intel::reqd_sub_group_size(8)]]             \
          {                                              \
            auto iter1    = item.get_global_id(0);       \
            auto iter2    = item.get_global_id(1);       \
@@ -298,19 +298,19 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
          });                                            \
     });
 
-#define accelerator_barrier(dummy) { theGridAccelerator->wait(); }
+#define accelerator_barrier(dummy) theGridAccelerator->wait();
 
 inline void *acceleratorAllocShared(size_t bytes){ return malloc_shared(bytes,*theGridAccelerator);};
 inline void *acceleratorAllocDevice(size_t bytes){ return malloc_device(bytes,*theGridAccelerator);};
 inline void acceleratorFreeShared(void *ptr){free(ptr,*theGridAccelerator);};
 inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
-inline void acceleratorCopySynchronise(void) { theCopyAccelerator->wait(); }
-inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes);}
-inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
-inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
-inline void acceleratorMemSet(void *base,int value,size_t bytes) { theCopyAccelerator->memset(base,value,bytes); theCopyAccelerator->wait();}
+inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) {
+  theGridAccelerator->memcpy(to,from,bytes);
+}
+inline void acceleratorCopySynchronise(void) {  theGridAccelerator->wait(); std::cout<<"acceleratorCopySynchronise() wait "<<std::endl; }
+inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes)  { theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
+inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theGridAccelerator->memcpy(to,from,bytes); theGridAccelerator->wait();}
+inline void acceleratorMemSet(void *base,int value,size_t bytes) { theGridAccelerator->memset(base,value,bytes); theGridAccelerator->wait();}
 inline int  acceleratorIsCommunicable(void *ptr)
 {
 #if 0
@@ -441,7 +441,7 @@ inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(bas
 
 inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
 {
-  hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToDevice,copyStream);
+  hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);
 }
 inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
 
@@ -461,6 +461,8 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
     accelerator_for2dNB(iter1, num1, iter2, num2, nsimd, { __VA_ARGS__ } ); \
     accelerator_barrier(dummy);
 
+#define GRID_ACCELERATED
 
 #endif
 
 //////////////////////////////////////////////
@@ -511,16 +513,7 @@ inline void *acceleratorAllocCpu(size_t bytes){return memalign(GRID_ALLOC_ALIGN,
 inline void acceleratorFreeCpu  (void *ptr){free(ptr);};
 #endif
 
-//////////////////////////////////////////////
-// Fencing needed ONLY for SYCL
-//////////////////////////////////////////////
 
-#ifdef GRID_SYCL
-inline void acceleratorFenceComputeStream(void){ accelerator_barrier();};
-#else
-// Ordering within a stream guaranteed on Nvidia & AMD
-inline void acceleratorFenceComputeStream(void){ };
-#endif
 
 ///////////////////////////////////////////////////
 // Synchronise across local threads for divergence resynch
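The revised SYCL wrappers above route every transfer through the single compute queue, theGridAccelerator, and block with wait(). The same pattern in plain SYCL 2020, as a self-contained sketch in which a local queue q stands in for the global one:

#include <sycl/sycl.hpp>
#include <cassert>

int main() {
  sycl::queue q{sycl::default_selector_v};
  const size_t N = 16;
  int host[16];
  for (size_t i = 0; i < N; i++) host[i] = int(i);
  int *dev  = sycl::malloc_device<int>(N, q);
  int *dev2 = sycl::malloc_device<int>(N, q);
  // Synchronous host-to-device copy: enqueue on the one queue, then wait,
  // mirroring acceleratorCopyToDevice above.
  q.memcpy(dev, host, N * sizeof(int)); q.wait();
  // A device-to-device "asynch" copy is just an enqueue without the wait;
  // a later acceleratorCopySynchronise()-style q.wait() orders it.
  q.memcpy(dev2, dev, N * sizeof(int));
  q.wait();
  q.memcpy(host, dev2, N * sizeof(int)); q.wait();
  assert(host[5] == 5);
  sycl::free(dev, q); sycl::free(dev2, q);
}

Funnelling copies through the compute queue serialises them against kernels on the same queue, which trades copy/compute overlap for ordering guarantees.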
@@ -27,7 +27,6 @@
 /* END LEGAL */
 extern "C" {
 #include <openssl/sha.h>
-#include <openssl/evp.h>
 }
 #ifdef USE_IPP
 #include "ipp.h"
@@ -71,8 +70,10 @@ public:
   static inline std::vector<unsigned char> sha256(const void *data,size_t bytes)
   {
     std::vector<unsigned char> hash(SHA256_DIGEST_LENGTH);
-    auto digest = EVP_get_digestbyname("SHA256");
-    EVP_Digest(data, bytes, &hash[0], NULL, digest, NULL);
+    SHA256_CTX sha256;
+    SHA256_Init  (&sha256);
+    SHA256_Update(&sha256, data,bytes);
+    SHA256_Final (&hash[0], &sha256);
     return hash;
   }
   static inline std::vector<int> sha256_seeds(const std::string &s)
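Both hashing routes touched above are standard OpenSSL: the removed lines used the one-shot EVP interface, while the restored lines use the older streaming SHA256_* interface (deprecated since OpenSSL 3.0 but still available). A self-contained check that the two agree; EVP_sha256() is used here in place of the name lookup, and this is plain OpenSSL, not Grid code:

#include <openssl/evp.h>
#include <openssl/sha.h>
#include <cstring>
#include <cassert>

int main() {
  const char *msg = "Grid";
  unsigned char h1[SHA256_DIGEST_LENGTH], h2[SHA256_DIGEST_LENGTH];

  // One-shot EVP interface (the removed variant).
  EVP_Digest(msg, std::strlen(msg), h1, nullptr, EVP_sha256(), nullptr);

  // Low-level streaming interface (the variant restored by this commit).
  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  SHA256_Update(&ctx, msg, std::strlen(msg));
  SHA256_Final(h2, &ctx);

  assert(std::memcmp(h1, h2, SHA256_DIGEST_LENGTH) == 0);
}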
HMC/Mobius2p1f_DD_RHMC.cc (new file, 232 lines)
@@ -0,0 +1,232 @@
+/*************************************************************************************
+
+Grid physics library, www.github.com/paboyle/Grid
+
+Source file: ./tests/Test_hmc_EODWFRatio.cc
+
+Copyright (C) 2015-2016
+
+Author: Peter Boyle <pabobyle@ph.ed.ac.uk>
+Author: Guido Cossu <guido.cossu@ed.ac.uk>
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+
+See the full license in the file "LICENSE" in the top level distribution
+directory
+*************************************************************************************/
+/*  END LEGAL */
+#include <Grid/Grid.h>
+
+int main(int argc, char **argv) {
+  using namespace Grid;
+
+  Grid_init(&argc, &argv);
+  int threads = GridThread::GetThreads();
+
+  // Typedefs to simplify notation
+  typedef WilsonImplR FermionImplPolicy;
+  typedef MobiusFermionR FermionAction;
+  typedef typename FermionAction::FermionField FermionField;
+
+  typedef Grid::XmlReader Serialiser;
+
+  //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+  IntegratorParameters MD;
+  //  typedef GenericHMCRunner<LeapFrog> HMCWrapper;
+  //  MD.name    = std::string("Leap Frog");
+  //  typedef GenericHMCRunner<ForceGradient> HMCWrapper;
+  //  MD.name    = std::string("Force Gradient");
+  typedef GenericHMCRunner<MinimumNorm2> HMCWrapper;
+  MD.name    = std::string("MinimumNorm2");
+  MD.MDsteps = 4;
+  MD.trajL   = 1.0;
+
+  HMCparameters HMCparams;
+  HMCparams.StartTrajectory  = 8;
+  HMCparams.Trajectories     = 200;
+  HMCparams.NoMetropolisUntil= 0;
+  // "[HotStart, ColdStart, TepidStart, CheckpointStart]\n";
+  //  HMCparams.StartingType     =std::string("ColdStart");
+  HMCparams.StartingType     =std::string("CheckpointStart");
+  HMCparams.MD = MD;
+  HMCWrapper TheHMC(HMCparams);
+
+  // Grid from the command line arguments --grid and --mpi
+  TheHMC.Resources.AddFourDimGrid("gauge"); // use default simd lanes decomposition
+
+  CheckpointerParameters CPparams;
+  CPparams.config_prefix = "ckpoint_EODWF_lat";
+  CPparams.rng_prefix    = "ckpoint_EODWF_rng";
+  CPparams.saveInterval  = 1;
+  CPparams.format        = "IEEE64BIG";
+  TheHMC.Resources.LoadNerscCheckpointer(CPparams);
+
+  RNGModuleParameters RNGpar;
+  RNGpar.serial_seeds   = "1 2 3 4 5";
+  RNGpar.parallel_seeds = "6 7 8 9 10";
+  TheHMC.Resources.SetRNGSeeds(RNGpar);
+
+  // Construct observables
+  // here there is too much indirection
+  typedef PlaquetteMod<HMCWrapper::ImplPolicy> PlaqObs;
+  TheHMC.Resources.AddObservable<PlaqObs>();
+  //////////////////////////////////////////////
+
+  const int Ls      = 16;
+  Real beta         = 2.13;
+  Real light_mass   = 0.01;
+  Real strange_mass = 0.04;
+  Real pv_mass      = 1.0;
+  RealD M5  = 1.8;
+  RealD b   = 1.0;
+  RealD c   = 0.0;
+
+  // FIXME:
+  // Same in MC and MD
+  // Need to mix precision too
+  OneFlavourRationalParams OFRp;
+  OFRp.lo       = 4.0e-3;
+  OFRp.hi       = 30.0;
+  OFRp.MaxIter  = 10000;
+  OFRp.tolerance= 1.0e-10;
+  OFRp.degree   = 16;
+  OFRp.precision= 50;
+
+  std::vector<Real> hasenbusch({ 0.01, 0.04, 0.2 , pv_mass });
+  std::vector<bool> dirichlet ({ true, true, true });
+
+  auto GridPtr   = TheHMC.Resources.GetCartesian();
+  auto GridRBPtr = TheHMC.Resources.GetRBCartesian();
+
+  ////////////////////////////////////////////////////////////////
+  // Domain decomposed
+  ////////////////////////////////////////////////////////////////
+  Coordinate latt4  = GridPtr->GlobalDimensions();
+  Coordinate mpi    = GridPtr->ProcessorGrid();
+  Coordinate shm;
+
+  GlobalSharedMemory::GetShmDims(mpi,shm);
+
+  Coordinate CommDim(Nd);
+  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
+
+  Coordinate Dirichlet(Nd+1,0);
+  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
+  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
+  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
+  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
+
+  Coordinate Block4(Nd);
+  Block4[0] = Dirichlet[1];
+  Block4[1] = Dirichlet[2];
+  Block4[2] = Dirichlet[3];
+  Block4[3] = Dirichlet[4];
+  TheHMC.Resources.SetMomentumFilter(new DDHMCFilter<WilsonImplR::Field>(Block4));
+
+  //////////////////////////
+  // Fermion Grid
+  //////////////////////////
+  auto FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,GridPtr);
+  auto FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,GridPtr);
+
+  IwasakiGaugeActionR GaugeAction(beta);
+
+  // temporarily need a gauge field
+  LatticeGaugeField U(GridPtr);
+
+  // These lines are unecessary if BC are all periodic
+  std::vector<Complex> boundary = {1,1,1,-1};
+  FermionAction::ImplParams Params(boundary);
+
+  double StoppingCondition = 1e-10;
+  double MaxCGIterations = 30000;
+  ConjugateGradient<FermionField>  CG(StoppingCondition,MaxCGIterations);
+
+  ////////////////////////////////////
+  // Collect actions
+  ////////////////////////////////////
+  ActionLevel<HMCWrapper::Field> Level1(1);
+  ActionLevel<HMCWrapper::Field> Level2(2);
+  ActionLevel<HMCWrapper::Field> Level3(8);
+
+  ////////////////////////////////////
+  // Strange action
+  ////////////////////////////////////
+  FermionAction StrangeOp            (U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,strange_mass,M5,b,c, Params);
+  FermionAction StrangePauliVillarsOp(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,pv_mass,     M5,b,c, Params);
+
+  OneFlavourEvenOddRatioRationalPseudoFermionAction<FermionImplPolicy> StrangePseudoFermion(StrangePauliVillarsOp,StrangeOp,OFRp);
+  //  Level1.push_back(&StrangePseudoFermion);
+
+  ////////////////////////////////////
+  // up down action
+  ////////////////////////////////////
+  std::vector<Real> light_den;
+  std::vector<Real> light_num;
+  std::vector<int>  dirichlet_den;
+  std::vector<int>  dirichlet_num;
+
+  int n_hasenbusch = hasenbusch.size();
+  light_den.push_back(light_mass);
+  dirichlet_den.push_back(0);
+  for(int h=0;h<n_hasenbusch;h++){
+    light_den.push_back(hasenbusch[h]);
+    light_num.push_back(hasenbusch[h]);
+    dirichlet_num.push_back(1);
+    dirichlet_den.push_back(1);
+  }
+  light_num.push_back(pv_mass);
+  dirichlet_num.push_back(0);
+
+  std::vector<FermionAction *> Numerators;
+  std::vector<FermionAction *> Denominators;
+  std::vector<TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy> *> Quotients;
+
+  for(int h=0;h<n_hasenbusch+1;h++){
+    std::cout << GridLogMessage << " 2f quotient Action  "<< light_num[h]<< " (" << dirichlet_num[h]
+              <<") / " << light_den[h]<< " (" << dirichlet_den[h]<<")"<< std::endl;
+    Numerators.push_back  (new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_num[h],M5,b,c, Params));
+    Denominators.push_back(new FermionAction(U,*FGrid,*FrbGrid,*GridPtr,*GridRBPtr,light_den[h],M5,b,c, Params));
+    Quotients.push_back   (new TwoFlavourEvenOddRatioPseudoFermionAction<FermionImplPolicy>(*Numerators[h],*Denominators[h],CG,CG));
+    if ( dirichlet_den[h]==1) Denominators[h]->DirichletBlock(Dirichlet);
+    if ( dirichlet_num[h]==1) Numerators[h]->DirichletBlock(Dirichlet);
+  }
+
+  int nquo=Quotients.size();
+  Level1.push_back(Quotients[0]);
+  Level1.push_back(Quotients[nquo-1]);
+  for(int h=1;h<nquo-1;h++){
+    Level2.push_back(Quotients[h]);
+  }
+
+  /////////////////////////////////////////////////////////////
+  // Gauge action
+  /////////////////////////////////////////////////////////////
+  Level3.push_back(&GaugeAction);
+  TheHMC.TheAction.push_back(Level1);
+  TheHMC.TheAction.push_back(Level2);
+  TheHMC.TheAction.push_back(Level3);
+  std::cout << GridLogMessage << " Action complete "<< std::endl;
+
+  /////////////////////////////////////////////////////////////
+
+  std::cout << GridLogMessage << " Running the HMC "<< std::endl;
+  TheHMC.Run();  // no smearing
+
+  Grid_finalize();
+} // main
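Reading off the vectors built in the new file above (light_den = {0.01, 0.01, 0.04, 0.2}, light_num = {0.01, 0.04, 0.2, 1.0}, with the Dirichlet block applied to every factor except the first denominator and the last numerator), the Hasenbusch chain telescopes: each intermediate Dirichlet-blocked determinant appears once upstairs and once downstairs. Writing \(D_\Omega\) for the Dirichlet-blocked operator, and up to the usual two-flavour convention for which factor sits in the numerator of the pseudofermion weight:

\[
\prod_{h=0}^{3}\frac{\det D^{(h)}_{\rm num}}{\det D^{(h)}_{\rm den}}
=\frac{\det D_\Omega(0.01)}{\det D(0.01)}\cdot
 \frac{\det D_\Omega(0.04)}{\det D_\Omega(0.01)}\cdot
 \frac{\det D_\Omega(0.2)}{\det D_\Omega(0.04)}\cdot
 \frac{\det D(1.0)}{\det D_\Omega(0.2)}
=\frac{\det D(1.0)}{\det D(0.01)}\,,
\]

so the product of quotient actions reproduces the target light-over-Pauli-Villars ratio while each individual factor stays cheap to invert.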
@@ -148,7 +148,7 @@ If you want to build all the tests at once just use `make tests`.
 - `--enable-mkl[=<path>]`: use Intel MKL for FFT (and LAPACK if enabled) routines. A UNIX prefix containing the library can be specified (optional).
 - `--enable-numa`: enable NUMA first touch optimisation
 - `--enable-simd=<code>`: setup Grid for the SIMD target `<code>` (default: `GEN`). A list of possible SIMD targets is detailed in a section below.
-- `--enable-gen-simd-width=<size>`: select the size (in bytes) of the generic SIMD vector type (default: 64 bytes).
+- `--enable-gen-simd-width=<size>`: select the size (in bytes) of the generic SIMD vector type (default: 32 bytes).
 - `--enable-comms=<comm>`: Use `<comm>` for message passing (default: `none`). A list of possible SIMD targets is detailed in a section below.
 - `--enable-rng={sitmo|ranlux48|mt19937}`: choose the RNG (default: `sitmo `).
 - `--disable-timers`: disable system dependent high-resolution timers.
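To make the documented width concrete: a generic SIMD word of W bytes packs W/8 single-precision complex numbers, so 32 bytes gives 4 ComplexF lanes (and 2 ComplexD lanes), where 64 bytes gave 8 and 4. A toy version of that arithmetic; GEN_SIMD_WIDTH here is a local stand-in for the configure-time value, not Grid's macro:

#include <complex>
#include <cstdio>

// Stand-in for the value chosen via --enable-gen-simd-width=32.
constexpr int GEN_SIMD_WIDTH = 32; // bytes

int main() {
  constexpr int lanesF = GEN_SIMD_WIDTH / sizeof(std::complex<float>);  // 32/8  = 4
  constexpr int lanesD = GEN_SIMD_WIDTH / sizeof(std::complex<double>); // 32/16 = 2
  std::printf("ComplexF lanes: %d, ComplexD lanes: %d\n", lanesF, lanesD);
}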
@@ -217,9 +217,9 @@ int main (int argc, char ** argv)
        dbytes+=
          Grid.StencilSendToRecvFromBegin(requests,
                                          (void *)&xbuf[mu][0],
-                                          xmit_to_rank,
+                                          xmit_to_rank,1,
                                          (void *)&rbuf[mu][0],
-                                          recv_from_rank,
+                                          recv_from_rank,1,
                                          bytes,mu);
 
        comm_proc = mpi_layout[mu]-1;
@@ -228,9 +228,9 @@ int main (int argc, char ** argv)
        dbytes+=
          Grid.StencilSendToRecvFromBegin(requests,
                                          (void *)&xbuf[mu+4][0],
-                                          xmit_to_rank,
+                                          xmit_to_rank,1,
                                          (void *)&rbuf[mu+4][0],
-                                          recv_from_rank,
+                                          recv_from_rank,1,
                                          bytes,mu+4);
 
      }
@@ -309,9 +309,9 @@ int main (int argc, char ** argv)
        dbytes+=
          Grid.StencilSendToRecvFromBegin(requests,
                                          (void *)&xbuf[mu][0],
-                                          xmit_to_rank,
+                                          xmit_to_rank,1,
                                          (void *)&rbuf[mu][0],
-                                          recv_from_rank,
+                                          recv_from_rank,1,
                                          bytes,mu);
        Grid.StencilSendToRecvFromComplete(requests,mu);
        requests.resize(0);
@@ -322,9 +322,9 @@ int main (int argc, char ** argv)
        dbytes+=
          Grid.StencilSendToRecvFromBegin(requests,
                                          (void *)&xbuf[mu+4][0],
-                                          xmit_to_rank,
+                                          xmit_to_rank,1,
                                          (void *)&rbuf[mu+4][0],
-                                          recv_from_rank,
+                                          recv_from_rank,1,
                                          bytes,mu+4);
        Grid.StencilSendToRecvFromComplete(requests,mu+4);
        requests.resize(0);
@@ -411,8 +411,8 @@ int main (int argc, char ** argv)
          Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
        }
        int tid = omp_get_thread_num();
-        tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-                                           (void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
+        tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
+                                           (void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);
 
        thread_critical { dbytes+=tbytes; }
      }
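The added `1` arguments track a widened StencilSendToRecvFrom* signature which, consistent with the comms_send/comms_recv plumbing earlier in this diff, appears to take per-call do-send and do-recv gates; the benchmarks simply pass 1 for both so all traffic stays on. A schematic of such a signature change (illustrative only, not the Grid declaration):

#include <cstdio>

// Hypothetical widened signature: per-call do-send (dox) and do-recv (dor)
// gates, mirroring the extra ",1," arguments in the hunks above.
double SendToRecvFrom(void *xmit, int to, int dox,
                      void *recv, int from, int dor, int bytes) {
  (void)xmit; (void)recv;
  double moved = 0;
  if (dox) { std::printf("send %d bytes to rank %d\n",   bytes, to);   moved += bytes; }
  if (dor) { std::printf("recv %d bytes from rank %d\n", bytes, from); moved += bytes; }
  return moved; // bytes actually moved, as the benchmark accumulates into dbytes
}

int main() {
  char x[8], r[8];
  // The benchmark always passes 1,1: full traffic, nothing gated off.
  double dbytes = SendToRecvFrom(x, 1, 1, r, 3, 1, (int)sizeof(x));
  std::printf("dbytes = %.0f\n", dbytes);
}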
@@ -32,18 +32,18 @@
 using namespace std;
 using namespace Grid;
 
-template<class d>
-struct scal {
-  d internal;
-};
+////////////////////////
+/// Move to domains ////
+////////////////////////
 
 Gamma::Algebra Gmu [] = {
   Gamma::Algebra::GammaX,
   Gamma::Algebra::GammaY,
   Gamma::Algebra::GammaZ,
   Gamma::Algebra::GammaT
 };
 
+void Benchmark(int Ls, Coordinate Dirichlet);
 
 int main (int argc, char ** argv)
 {
@@ -52,24 +52,82 @@ int main (int argc, char ** argv)
 
   int threads = GridThread::GetThreads();
 
-  Coordinate latt4 = GridDefaultLatt();
   int Ls=16;
-  for(int i=0;i<argc;i++)
+  for(int i=0;i<argc;i++) {
     if(std::string(argv[i]) == "-Ls"){
       std::stringstream ss(argv[i+1]); ss >> Ls;
     }
+  }
 
+  //////////////////
+  // With comms
+  //////////////////
+  Coordinate Dirichlet(Nd+1,0);
+
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing with full communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+
+  Benchmark(Ls,Dirichlet);
+
+  //////////////////
+  // Domain decomposed
+  //////////////////
+  Coordinate latt4 = GridDefaultLatt();
+  Coordinate mpi   = GridDefaultMpi();
+  Coordinate CommDim(Nd);
+  Coordinate shm;
+  GlobalSharedMemory::GetShmDims(mpi,shm);
+
+
+  //////////////////////
+  // Node level
+  //////////////////////
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing without internode communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+
+  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
+  Dirichlet[0] = 0;
+  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
+  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
+  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
+  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
+
+  Benchmark(Ls,Dirichlet);
+
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing without intranode communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+
+  for(int d=0;d<Nd;d++) CommDim[d]= mpi[d]>1 ? 1 : 0;
+  Dirichlet[0] = 0;
+  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0];
+  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1];
+  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2];
+  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3];
+
+  Benchmark(Ls,Dirichlet);
+
+  Grid_finalize();
+  exit(0);
+}
+void Benchmark(int Ls, Coordinate Dirichlet)
+{
+  Coordinate latt4 = GridDefaultLatt();
  GridLogLayout();
 
  long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
 
 
  GridCartesian * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
  GridRedBlackCartesian * UrbGrid   = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
 
-  std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
  GridCartesian         * sUGrid   = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
  GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
  GridCartesian         * sFGrid   = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
@@ -80,9 +138,9 @@ int main (int argc, char ** argv)
 
  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
  GridParallelRNG          RNG4(UGrid);  RNG4.SeedUniqueString(std::string("The 4D RNG"));
 
  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
  GridParallelRNG          RNG5(FGrid);  RNG5.SeedUniqueString(std::string("The 5D RNG"));
-  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
 
  LatticeFermionF src   (FGrid); random(RNG5,src);
 #if 0
@@ -100,7 +158,6 @@ int main (int argc, char ** argv)
  src = src*N2;
 #endif
 
 
  LatticeFermionF result(FGrid); result=Zero();
  LatticeFermionF    ref(FGrid);    ref=Zero();
  LatticeFermionF    tmp(FGrid);
@@ -108,29 +165,31 @@ int main (int argc, char ** argv)
 
  std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
  LatticeGaugeFieldF Umu(UGrid);
+  LatticeGaugeFieldF UmuCopy(UGrid);
  SU<Nc>::HotConfiguration(RNG4,Umu);
+  UmuCopy=Umu;
  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
-#if 0
-  Umu=1.0;
-  for(int mu=0;mu<Nd;mu++){
-    LatticeColourMatrixF ttmp(UGrid);
-    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
-    //    if (mu !=2 ) ttmp = 0;
-    //    ttmp = ttmp* pow(10.0,mu);
-    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
-  }
-  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
-#endif
+  ////////////////////////////////////
+  // Apply BCs
+  ////////////////////////////////////
+  Coordinate Block(4);
+  for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];
+
+  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block5 " << Dirichlet << std::endl;
+  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block4 " << Block << std::endl;
+
+  DirichletFilter<LatticeGaugeFieldF> Filter(Block);
+  Filter.applyFilter(Umu);
 
  ////////////////////////////////////
  // Naive wilson implementation
  ////////////////////////////////////
-  // replicate across fifth dimension
-  //  LatticeGaugeFieldF Umu5d(FGrid);
  std::vector<LatticeColourMatrixF> U(4,UGrid);
  for(int mu=0;mu<Nd;mu++){
    U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
  }
 
  std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;
 
  if (1)
@@ -191,11 +250,13 @@ int main (int argc, char ** argv)
  std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
 
  DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
+  Dw.DirichletBlock(Dirichlet);
+  Dw.ImportGauge(Umu);
 
  int ncall =300;
 
  if (1) {
    FGrid->Barrier();
-    Dw.ZeroCounters();
    Dw.Dhop(src,result,0);
    std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
    double t0=usecond();
@@ -220,29 +281,20 @@ int main (int argc, char ** argv)
    double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);
 
    std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-    //    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl;
    std::cout<<GridLogMessage << "mflop/s per node =  "<< flops/(t1-t0)/NN<<std::endl;
-    std::cout<<GridLogMessage << "RF  GiB/s (base 2) =   "<< 1000000. * data_rf/((t1-t0))<<std::endl;
-    std::cout<<GridLogMessage << "mem GiB/s (base 2) =   "<< 1000000. * data_mem/((t1-t0))<<std::endl;
+    //  std::cout<<GridLogMessage << "RF  GiB/s (base 2) =   "<< 1000000. * data_rf/((t1-t0))<<std::endl;
+    //  std::cout<<GridLogMessage << "mem GiB/s (base 2) =   "<< 1000000. * data_mem/((t1-t0))<<std::endl;
    err = ref-result;
    std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
-    //exit(0);
 
    if(( norm2(err)>1.0e-4) ) {
-      /*
-      std::cout << "RESULT\n " << result<<std::endl;
-      std::cout << "REF   \n " << ref <<std::endl;
-      std::cout << "ERR   \n " << err <<std::endl;
-      */
      std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
      FGrid->Barrier();
      exit(-1);
    }
    assert (norm2(err)< 1.0e-4 );
-    Dw.Report();
  }
 
  if (1)
@@ -286,21 +338,20 @@ int main (int argc, char ** argv)
      }
      ref = -0.5*ref;
    }
-    //    dump=1;
-    Dw.Dhop(src,result,1);
+    Dw.Dhop(src,result,DaggerYes);
 
+    std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
    std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
+    std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
 
    std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
    std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
    std::cout<<GridLogMessage << "norm dag ref    "<< norm2(ref)<<std::endl;
    err = ref-result;
    std::cout<<GridLogMessage << "norm dag diff   "<< norm2(err)<<std::endl;
-    if((norm2(err)>1.0e-4)){
-      /*
-      std::cout<< "DAG RESULT\n "  <<ref << std::endl;
-      std::cout<< "DAG sRESULT\n " <<result << std::endl;
-      std::cout<< "DAG ERR   \n "  << err <<std::endl;
-      */
-    }
+    assert((norm2(err)<1.0e-4));
+
    LatticeFermionF src_e (FrbGrid);
    LatticeFermionF src_o (FrbGrid);
    LatticeFermionF r_e   (FrbGrid);
@@ -330,7 +381,6 @@ int main (int argc, char ** argv)
    if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl;
    std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
    {
-      Dw.ZeroCounters();
      FGrid->Barrier();
      Dw.DhopEO(src_o,r_e,DaggerNo);
      double t0=usecond();
@@ -352,7 +402,6 @@ int main (int argc, char ** argv)
      std::cout<<GridLogMessage << "Deo mflop/s =   "<< flops/(t1-t0)<<std::endl;
      std::cout<<GridLogMessage << "Deo mflop/s per rank   "<< flops/(t1-t0)/NP<<std::endl;
      std::cout<<GridLogMessage << "Deo mflop/s per node   "<< flops/(t1-t0)/NN<<std::endl;
-      Dw.Report();
    }
    Dw.DhopEO(src_o,r_e,DaggerNo);
    Dw.DhopOE(src_e,r_o,DaggerNo);
@@ -367,13 +416,7 @@ int main (int argc, char ** argv)
 
    err = r_eo-result;
    std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
-    if((norm2(err)>1.0e-4)){
-      /*
-      std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
-      std::cout<< "Deo REF\n    " <<result << std::endl;
-      std::cout<< "Deo ERR   \n " << err <<std::endl;
-      */
-    }
+    assert(norm2(err)<1.0e-4);
 
    pickCheckerboard(Even,src_e,err);
    pickCheckerboard(Odd,src_o,err);
@@ -382,6 +425,4 @@ int main (int argc, char ** argv)
 
    assert(norm2(src_e)<1.0e-4);
    assert(norm2(src_o)<1.0e-4);
-    Grid_finalize();
-    exit(0);
 }
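As a concrete check of the two masks built above, take the geometry used by the 8-node script later in this diff, global lattice 64.64.64.256 with MPI decomposition 2.2.2.8, and assume two ranks per node stacked in the time direction (shm = 1.1.1.2; the node shape is an assumption here). The internode pass then blocks at {32,32,32,64} while the intranode pass blocks at {32,32,32,32}:

#include <cstdio>

int main() {
  // Assumed job geometry (the shm split is illustrative, not taken from the diff).
  int latt[4] = {64,64,64,256};
  int mpi [4] = {2,2,2,8};
  int shm [4] = {1,1,1,2};

  int internode[5] = {0}, intranode[5] = {0};
  for (int d = 0; d < 4; d++) {
    int commNode = (mpi[d]/shm[d]) > 1;  // does this dim cross a node boundary?
    int commRank =  mpi[d]         > 1;  // does this dim cross any rank boundary?
    internode[d+1] = commNode * latt[d]/mpi[d] * shm[d]; // node-local block size
    intranode[d+1] = commRank * latt[d]/mpi[d];          // rank-local block size
  }
  // Expect internode = {0,32,32,32,64}, intranode = {0,32,32,32,32}.
  for (int d = 0; d < 5; d++)
    std::printf("dim %d: internode %d intranode %d\n", d, internode[d], intranode[d]);
}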
@@ -1,131 +0,0 @@
-/*************************************************************************************
-Grid physics library, www.github.com/paboyle/Grid
-Source file: ./benchmarks/Benchmark_dwf.cc
-Copyright (C) 2015
-
-Author: Peter Boyle <paboyle@ph.ed.ac.uk>
-Author: paboyle <paboyle@ph.ed.ac.uk>
-
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-You should have received a copy of the GNU General Public License along
-with this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-See the full license in the file "LICENSE" in the top level distribution directory
-*************************************************************************************/
-/*  END LEGAL */
-#include <Grid/Grid.h>
-#ifdef GRID_CUDA
-#define CUDA_PROFILE
-#endif
-
-#ifdef CUDA_PROFILE
-#include <cuda_profiler_api.h>
-#endif
-
-using namespace std;
-using namespace Grid;
-
-template<class d>
-struct scal {
-  d internal;
-};
-
-Gamma::Algebra Gmu [] = {
-  Gamma::Algebra::GammaX,
-  Gamma::Algebra::GammaY,
-  Gamma::Algebra::GammaZ,
-  Gamma::Algebra::GammaT
-};
-
-
-int main (int argc, char ** argv)
-{
-  Grid_init(&argc,&argv);
-
-  Coordinate latt4= GridDefaultLatt();
-  Coordinate mpi  = GridDefaultMpi();
-  Coordinate simd = GridDefaultSimd(Nd,vComplexF::Nsimd());
-
-  GridLogLayout();
-
-  int Ls=16;
-  for(int i=0;i<argc;i++)
-    if(std::string(argv[i]) == "-Ls"){
-      std::stringstream ss(argv[i+1]); ss >> Ls;
-    }
-
-
-  GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(latt4,simd ,mpi);
-  GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
-  GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
-  GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
-
-  std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
-  GridCartesian         * sUGrid   = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
-  GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
-  GridCartesian         * sFGrid   = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
-  GridRedBlackCartesian * sFrbGrid = SpaceTimeGrid::makeFiveDimDWFRedBlackGrid(Ls,UGrid);
-
-  std::vector<int> seeds4({1,2,3,4});
-  std::vector<int> seeds5({5,6,7,8});
-
-  std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
-  GridParallelRNG          RNG4(UGrid);  RNG4.SeedUniqueString(std::string("The 4D RNG"));
-  std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
-  GridParallelRNG          RNG5(FGrid);  RNG5.SeedUniqueString(std::string("The 5D RNG"));
-  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
-
-  LatticeFermionF src   (FGrid); random(RNG5,src);
-  RealD N2 = 1.0/::sqrt(norm2(src));
-  src = src*N2;
-
-  std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
-  LatticeGaugeFieldF Umu(UGrid);
-  SU<Nc>::HotConfiguration(RNG4,Umu);
-  std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
-
-  RealD mass=0.1;
-  RealD M5  =1.8;
-
-  RealD NP = UGrid->_Nprocessors;
-  RealD NN = UGrid->NodeCount();
-
-  DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
-
-  const int ncall = 500;
-  std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
-  std::cout << GridLogMessage<< "* Benchmarking DomainWallFermionF::HaloGatherOpt         "<<std::endl;
-  std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
-  {
-    typename DomainWallFermionF::Compressor compressor(0);
-    FGrid->Barrier();
-    Dw.Stencil.HaloExchangeOptGather(src,compressor);
-    double t0=usecond();
-    for(int i=0;i<ncall;i++){
-      Dw.Stencil.HaloExchangeOptGather(src,compressor);
-    }
-    double t1=usecond();
-    FGrid->Barrier();
-
-    double bytes=0.0;
-    if(mpi[0]) bytes+=latt4[1]*latt4[2]*latt4[3];
-    if(mpi[1]) bytes+=latt4[0]*latt4[2]*latt4[3];
-    if(mpi[2]) bytes+=latt4[0]*latt4[1]*latt4[3];
-    if(mpi[3]) bytes+=latt4[0]*latt4[1]*latt4[2];
-    bytes = bytes * Ls * 8.* (24.+12.)* 2.0;
-
-    std::cout<<GridLogMessage << "Gather us /call =   "<< (t1-t0)/ncall<<std::endl;
-    std::cout<<GridLogMessage << "Gather MBs /call =   "<< bytes*ncall/(t1-t0)<<std::endl;
-
-  }
-
-  Grid_finalize();
-  exit(0);
-}
@@ -81,8 +81,8 @@ int main (int argc, char ** argv)
  Vector<Coeff_t> diag = Dw.bs;
  Vector<Coeff_t> upper= Dw.cs;
  Vector<Coeff_t> lower= Dw.cs;
-  upper[Ls-1]=-Dw.mass_minus*upper[Ls-1];
-  lower[0]   =-Dw.mass_plus*lower[0];
+  upper[Ls-1]=-Dw.mass*upper[Ls-1];
+  lower[0]   =-Dw.mass*lower[0];
 
  LatticeFermion r_eo(FGrid);
  LatticeFermion src_e (FrbGrid);
@@ -44,13 +44,6 @@ void bench_wilson (
                   double const     volume,
                   int const        dag );
 
-void bench_wilson_eo (
-                   LatticeFermion & src,
-                   LatticeFermion & result,
-                   WilsonFermionR & Dw,
-                   double const     volume,
-                   int const        dag );
 
 int main (int argc, char ** argv)
 {
   Grid_init(&argc,&argv);
@@ -117,8 +110,8 @@ int main (int argc, char ** argv)
          bench_wilson(src,result,Dw,volume,DaggerYes);
          std::cout << "\t";
          // EO
-          bench_wilson_eo(src_o,result_e,Dw,volume,DaggerNo);
-          bench_wilson_eo(src_o,result_e,Dw,volume,DaggerYes);
+          bench_wilson(src,result,Dw,volume,DaggerNo);
+          bench_wilson(src,result,Dw,volume,DaggerYes);
          std::cout << std::endl;
        }
    }
@@ -159,7 +159,7 @@ case ${ac_ZMOBIUS} in
 esac
 
 ############### Nc
 AC_ARG_ENABLE([Nc],
-              [AC_HELP_STRING([--enable-Nc=2|3|4|5], [enable number of colours])],
+              [AC_HELP_STRING([--enable-Nc=2|3|4], [enable number of colours])],
              [ac_Nc=${enable_Nc}], [ac_Nc=3])
 
 case ${ac_Nc} in
@@ -3,28 +3,28 @@
 #SBATCH -A LGT104
 #SBATCH -t 01:00:00
 ##SBATCH -U openmpThu
-##SBATCH -p ecp
 #SBATCH -J DWF
 #SBATCH -o DWF.%J
 #SBATCH -e DWF.%J
 #SBATCH -N 1
-#SBATCH -n 1
+#SBATCH -n 8
 #SBATCH --exclusive
+#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
 
 DIR=.
 module list
-#export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
+export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
 export MPICH_GPU_SUPPORT_ENABLED=1
-export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
-#export MPICH_SMP_SINGLE_COPY_MODE=NONE
+#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
+export MPICH_SMP_SINGLE_COPY_MODE=NONE
 #export MPICH_SMP_SINGLE_COPY_MODE=CMA
 export OMP_NUM_THREADS=1
 
-AT=8
 echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
 
-PARAMS=" --accelerator-threads ${AT} --grid 24.24.24.24 --shm-mpi 0 --mpi 1.1.1.1"
-srun --gpus-per-task 1 -n1 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
+PARAMS=" --accelerator-threads 16 --grid 32.32.32.256 --mpi 1.1.1.8 --comms-overlap --shm 2048 --shm-mpi 0"
+echo $PARAMS
+srun --gpus-per-task 1 -n8 ./benchmarks/Benchmark_dwf_fp32 $PARAMS
 
@@ -6,22 +6,43 @@
 #SBATCH -J DWF
 #SBATCH -o DWF.%J
 #SBATCH -e DWF.%J
-#SBATCH -N 1
-#SBATCH -n 8
+#SBATCH -N 8
+#SBATCH -n 64
 #SBATCH --exclusive
+#SBATCH --gpu-bind=map_gpu:0,1,2,3,7,6,5,4
 
 DIR=.
 module list
+export MPICH_OFI_NIC_POLICY=GPU
 export MPIR_CVAR_GPU_EAGER_DEVICE_MEM=0
 export MPICH_GPU_SUPPORT_ENABLED=1
-export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
-#export MPICH_SMP_SINGLE_COPY_MODE=NONE
+#export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
 #export MPICH_SMP_SINGLE_COPY_MODE=CMA
+export MPICH_SMP_SINGLE_COPY_MODE=NONE
 export OMP_NUM_THREADS=1
 
 echo MPICH_SMP_SINGLE_COPY_MODE $MPICH_SMP_SINGLE_COPY_MODE
-PARAMS=" --accelerator-threads 8 --grid 32.64.64.64 --mpi 1.2.2.2 --comms-overlap --shm 2048 --shm-mpi 0"
 
-srun --gpus-per-task 1 -n8 ./mpiwrapper.sh ./benchmarks/Benchmark_dwf_fp32 $PARAMS
+PARAMS=" --accelerator-threads 16 --grid 64.64.64.256 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
+echo $PARAMS
+#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.256.8node
+
+
+PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 1"
+echo $PARAMS
+srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node
+
+PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 4.4.4.1 --comms-overlap --shm 2048 --shm-mpi 0"
+echo $PARAMS
+#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_dwf_fp32 $PARAMS > dwf.64.64.64.32.8node.shm0
+
+PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 1"
+echo $PARAMS
+#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node
+
+PARAMS=" --accelerator-threads 16 --grid 64.64.64.32 --mpi 2.2.2.8 --comms-overlap --shm 2048 --shm-mpi 0"
+echo $PARAMS
+#srun --gpus-per-task 1 -N8 -n64 ./benchmarks/Benchmark_ITT $PARAMS > itt.8node_shm0
Some files were not shown because too many files have changed in this diff.