mirror of
https://github.com/paboyle/Grid.git
synced 2025-06-14 22:07:05 +01:00
Compare commits
15 Commits
feature/fe
...
feature/fe
Author | SHA1 | Date | |
---|---|---|---|
8b91b61b61 | |||
4ca1bf7cca | |||
2ff868f7a5 | |||
ede02b6883 | |||
1822ced302 | |||
37ba32776f | |||
99b3697b03 | |||
43a45ec97b | |||
b00a4142e5 | |||
3791bc527b | |||
d8c29f5fcf | |||
281f8101fe | |||
07acfe89f2 | |||
40234f531f | |||
d49694f38f |
@ -40,7 +40,7 @@ void MemoryManager::PrintBytes(void)
|
|||||||
//////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////
|
||||||
MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
|
MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
|
||||||
int MemoryManager::Victim[MemoryManager::NallocType];
|
int MemoryManager::Victim[MemoryManager::NallocType];
|
||||||
int MemoryManager::Ncache[MemoryManager::NallocType] = { 2, 8, 2, 8, 2, 8 };
|
int MemoryManager::Ncache[MemoryManager::NallocType] = { 2, 8, 8, 16, 8, 16 };
|
||||||
uint64_t MemoryManager::CacheBytes[MemoryManager::NallocType];
|
uint64_t MemoryManager::CacheBytes[MemoryManager::NallocType];
|
||||||
//////////////////////////////////////////////////////////////////////
|
//////////////////////////////////////////////////////////////////////
|
||||||
// Actual allocation and deallocation utils
|
// Actual allocation and deallocation utils
|
||||||
|
@ -36,6 +36,11 @@ NAMESPACE_BEGIN(Grid);
|
|||||||
|
|
||||||
#define GRID_ALLOC_SMALL_LIMIT (4096)
|
#define GRID_ALLOC_SMALL_LIMIT (4096)
|
||||||
|
|
||||||
|
#define STRINGIFY(x) #x
|
||||||
|
#define TOSTRING(x) STRINGIFY(x)
|
||||||
|
#define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)
|
||||||
|
#define AUDIT(a) MemoryManager::Audit(FILE_LINE)
|
||||||
|
|
||||||
/*Pinning pages is costly*/
|
/*Pinning pages is costly*/
|
||||||
////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////
|
||||||
// Advise the LatticeAccelerator class
|
// Advise the LatticeAccelerator class
|
||||||
@ -92,8 +97,9 @@ private:
|
|||||||
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim,uint64_t &cbytes) ;
|
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim,uint64_t &cbytes) ;
|
||||||
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache,uint64_t &cbytes) ;
|
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache,uint64_t &cbytes) ;
|
||||||
|
|
||||||
static void PrintBytes(void);
|
|
||||||
public:
|
public:
|
||||||
|
static void PrintBytes(void);
|
||||||
|
static void Audit(std::string s);
|
||||||
static void Init(void);
|
static void Init(void);
|
||||||
static void InitMessage(void);
|
static void InitMessage(void);
|
||||||
static void *AcceleratorAllocate(size_t bytes);
|
static void *AcceleratorAllocate(size_t bytes);
|
||||||
@ -113,6 +119,8 @@ private:
|
|||||||
static uint64_t DeviceToHostBytes;
|
static uint64_t DeviceToHostBytes;
|
||||||
static uint64_t HostToDeviceXfer;
|
static uint64_t HostToDeviceXfer;
|
||||||
static uint64_t DeviceToHostXfer;
|
static uint64_t DeviceToHostXfer;
|
||||||
|
static uint64_t DeviceEvictions;
|
||||||
|
static uint64_t DeviceDestroy;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
#ifndef GRID_UVM
|
#ifndef GRID_UVM
|
||||||
@ -170,6 +178,7 @@ private:
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
static void Print(void);
|
static void Print(void);
|
||||||
|
static void PrintAll(void);
|
||||||
static void PrintState( void* CpuPtr);
|
static void PrintState( void* CpuPtr);
|
||||||
static int isOpen (void* CpuPtr);
|
static int isOpen (void* CpuPtr);
|
||||||
static void ViewClose(void* CpuPtr,ViewMode mode);
|
static void ViewClose(void* CpuPtr,ViewMode mode);
|
||||||
|
@ -3,8 +3,13 @@
|
|||||||
|
|
||||||
#warning "Using explicit device memory copies"
|
#warning "Using explicit device memory copies"
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
//#define dprintf(...) printf ( __VA_ARGS__ ); fflush(stdout);
|
|
||||||
#define dprintf(...)
|
#define MAXLINE 512
|
||||||
|
static char print_buffer [ MAXLINE ];
|
||||||
|
|
||||||
|
#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
|
||||||
|
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
|
||||||
|
//#define dprintf(...)
|
||||||
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
@ -23,6 +28,8 @@ uint64_t MemoryManager::HostToDeviceBytes;
|
|||||||
uint64_t MemoryManager::DeviceToHostBytes;
|
uint64_t MemoryManager::DeviceToHostBytes;
|
||||||
uint64_t MemoryManager::HostToDeviceXfer;
|
uint64_t MemoryManager::HostToDeviceXfer;
|
||||||
uint64_t MemoryManager::DeviceToHostXfer;
|
uint64_t MemoryManager::DeviceToHostXfer;
|
||||||
|
uint64_t MemoryManager::DeviceEvictions;
|
||||||
|
uint64_t MemoryManager::DeviceDestroy;
|
||||||
|
|
||||||
////////////////////////////////////
|
////////////////////////////////////
|
||||||
// Priority ordering for unlocked entries
|
// Priority ordering for unlocked entries
|
||||||
@ -104,15 +111,17 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
///////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////
|
||||||
assert(AccCache.state!=Empty);
|
assert(AccCache.state!=Empty);
|
||||||
|
|
||||||
dprintf("MemoryManager: Discard(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
|
mprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
|
||||||
assert(AccCache.accLock==0);
|
assert(AccCache.accLock==0);
|
||||||
assert(AccCache.cpuLock==0);
|
assert(AccCache.cpuLock==0);
|
||||||
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
||||||
if(AccCache.AccPtr) {
|
if(AccCache.AccPtr) {
|
||||||
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
|
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
|
||||||
|
DeviceDestroy++;
|
||||||
DeviceBytes -=AccCache.bytes;
|
DeviceBytes -=AccCache.bytes;
|
||||||
LRUremove(AccCache);
|
LRUremove(AccCache);
|
||||||
dprintf("MemoryManager: Free(%llx) LRU %lld Total %lld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
|
AccCache.AccPtr=(uint64_t) NULL;
|
||||||
|
dprintf("MemoryManager: Free(%lx) LRU %ld Total %ld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
|
||||||
}
|
}
|
||||||
uint64_t CpuPtr = AccCache.CpuPtr;
|
uint64_t CpuPtr = AccCache.CpuPtr;
|
||||||
EntryErase(CpuPtr);
|
EntryErase(CpuPtr);
|
||||||
@ -121,26 +130,36 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
|
void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
|
||||||
{
|
{
|
||||||
///////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////
|
||||||
// Make CPU consistent, remove from Accelerator, remove entry
|
// Make CPU consistent, remove from Accelerator, remove from LRU, LEAVE CPU only entry
|
||||||
// Cannot be locked. If allocated must be in LRU pool.
|
// Cannot be acclocked. If allocated must be in LRU pool.
|
||||||
|
//
|
||||||
|
// Nov 2022... Felix issue: Allocating two CpuPtrs, can have an entry in LRU-q with CPUlock.
|
||||||
|
// and require to evict the AccPtr copy. Eviction was a mistake in CpuViewOpen
|
||||||
|
// but there is a weakness where CpuLock entries are attempted for erase
|
||||||
|
// Take these OUT LRU queue when CPU locked?
|
||||||
|
// Cannot take out the table as cpuLock data is important.
|
||||||
///////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////
|
||||||
assert(AccCache.state!=Empty);
|
assert(AccCache.state!=Empty);
|
||||||
|
|
||||||
dprintf("MemoryManager: Evict(%llx) %llx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
|
mprintf("MemoryManager: Evict cpu %lx acc %lx cpuLock %ld accLock %ld\n",
|
||||||
assert(AccCache.accLock==0);
|
(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
|
||||||
assert(AccCache.cpuLock==0);
|
(uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
|
||||||
|
assert(AccCache.accLock==0); // Cannot evict so logic bomb
|
||||||
|
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
||||||
if(AccCache.state==AccDirty) {
|
if(AccCache.state==AccDirty) {
|
||||||
Flush(AccCache);
|
Flush(AccCache);
|
||||||
}
|
}
|
||||||
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
|
||||||
if(AccCache.AccPtr) {
|
if(AccCache.AccPtr) {
|
||||||
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
|
AcceleratorFree((void *)AccCache.AccPtr,AccCache.bytes);
|
||||||
DeviceBytes -=AccCache.bytes;
|
|
||||||
LRUremove(AccCache);
|
LRUremove(AccCache);
|
||||||
dprintf("MemoryManager: Free(%llx) footprint now %lld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
|
AccCache.AccPtr=(uint64_t)NULL;
|
||||||
|
AccCache.state=CpuDirty; // CPU primary now
|
||||||
|
DeviceBytes -=AccCache.bytes;
|
||||||
|
dprintf("MemoryManager: Free(%lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
|
||||||
}
|
}
|
||||||
uint64_t CpuPtr = AccCache.CpuPtr;
|
// uint64_t CpuPtr = AccCache.CpuPtr;
|
||||||
EntryErase(CpuPtr);
|
DeviceEvictions++;
|
||||||
|
// EntryErase(CpuPtr);
|
||||||
}
|
}
|
||||||
void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
|
void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
|
||||||
{
|
{
|
||||||
@ -150,7 +169,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
|
|||||||
assert(AccCache.AccPtr!=(uint64_t)NULL);
|
assert(AccCache.AccPtr!=(uint64_t)NULL);
|
||||||
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
||||||
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
|
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
|
||||||
dprintf("MemoryManager: Flush %llx -> %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
mprintf("MemoryManager: Flush %lx -> %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
||||||
DeviceToHostBytes+=AccCache.bytes;
|
DeviceToHostBytes+=AccCache.bytes;
|
||||||
DeviceToHostXfer++;
|
DeviceToHostXfer++;
|
||||||
AccCache.state=Consistent;
|
AccCache.state=Consistent;
|
||||||
@ -165,7 +184,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
|
|||||||
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
|
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
|
||||||
DeviceBytes+=AccCache.bytes;
|
DeviceBytes+=AccCache.bytes;
|
||||||
}
|
}
|
||||||
dprintf("MemoryManager: Clone %llx <- %llx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
mprintf("MemoryManager: Clone %lx <- %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
||||||
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
|
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
|
||||||
HostToDeviceBytes+=AccCache.bytes;
|
HostToDeviceBytes+=AccCache.bytes;
|
||||||
HostToDeviceXfer++;
|
HostToDeviceXfer++;
|
||||||
@ -191,6 +210,7 @@ void MemoryManager::CpuDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
|
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
|
||||||
{
|
{
|
||||||
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
||||||
|
dprintf("AcceleratorViewClose %lx\n",(uint64_t)Ptr);
|
||||||
AcceleratorViewClose((uint64_t)Ptr);
|
AcceleratorViewClose((uint64_t)Ptr);
|
||||||
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
||||||
CpuViewClose((uint64_t)Ptr);
|
CpuViewClose((uint64_t)Ptr);
|
||||||
@ -202,6 +222,7 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvis
|
|||||||
{
|
{
|
||||||
uint64_t CpuPtr = (uint64_t)_CpuPtr;
|
uint64_t CpuPtr = (uint64_t)_CpuPtr;
|
||||||
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
||||||
|
dprintf("AcceleratorViewOpen %lx\n",(uint64_t)CpuPtr);
|
||||||
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
|
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
|
||||||
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
||||||
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
|
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
|
||||||
@ -212,13 +233,16 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvis
|
|||||||
}
|
}
|
||||||
void MemoryManager::EvictVictims(uint64_t bytes)
|
void MemoryManager::EvictVictims(uint64_t bytes)
|
||||||
{
|
{
|
||||||
|
assert(bytes<DeviceMaxBytes);
|
||||||
while(bytes+DeviceLRUBytes > DeviceMaxBytes){
|
while(bytes+DeviceLRUBytes > DeviceMaxBytes){
|
||||||
if ( DeviceLRUBytes > 0){
|
if ( DeviceLRUBytes > 0){
|
||||||
assert(LRU.size()>0);
|
assert(LRU.size()>0);
|
||||||
uint64_t victim = LRU.back();
|
uint64_t victim = LRU.back(); // From the LRU
|
||||||
auto AccCacheIterator = EntryLookup(victim);
|
auto AccCacheIterator = EntryLookup(victim);
|
||||||
auto & AccCache = AccCacheIterator->second;
|
auto & AccCache = AccCacheIterator->second;
|
||||||
Evict(AccCache);
|
Evict(AccCache);
|
||||||
|
} else {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -241,11 +265,12 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
assert(AccCache.cpuLock==0); // Programming error
|
assert(AccCache.cpuLock==0); // Programming error
|
||||||
|
|
||||||
if(AccCache.state!=Empty) {
|
if(AccCache.state!=Empty) {
|
||||||
dprintf("ViewOpen found entry %llx %llx : %lld %lld\n",
|
dprintf("ViewOpen found entry %lx %lx : %ld %ld accLock %ld\n",
|
||||||
(uint64_t)AccCache.CpuPtr,
|
(uint64_t)AccCache.CpuPtr,
|
||||||
(uint64_t)CpuPtr,
|
(uint64_t)CpuPtr,
|
||||||
(uint64_t)AccCache.bytes,
|
(uint64_t)AccCache.bytes,
|
||||||
(uint64_t)bytes);
|
(uint64_t)bytes,
|
||||||
|
(uint64_t)AccCache.accLock);
|
||||||
assert(AccCache.CpuPtr == CpuPtr);
|
assert(AccCache.CpuPtr == CpuPtr);
|
||||||
assert(AccCache.bytes ==bytes);
|
assert(AccCache.bytes ==bytes);
|
||||||
}
|
}
|
||||||
@ -280,6 +305,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
AccCache.state = Consistent; // Empty + AccRead => Consistent
|
AccCache.state = Consistent; // Empty + AccRead => Consistent
|
||||||
}
|
}
|
||||||
AccCache.accLock= 1;
|
AccCache.accLock= 1;
|
||||||
|
dprintf("Copied Empty entry into device accLock= %d\n",AccCache.accLock);
|
||||||
} else if(AccCache.state==CpuDirty ){
|
} else if(AccCache.state==CpuDirty ){
|
||||||
if(mode==AcceleratorWriteDiscard) {
|
if(mode==AcceleratorWriteDiscard) {
|
||||||
CpuDiscard(AccCache);
|
CpuDiscard(AccCache);
|
||||||
@ -292,28 +318,30 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
|
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
|
||||||
}
|
}
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("Copied CpuDirty entry into device accLock %d\n",AccCache.accLock);
|
dprintf("CpuDirty entry into device ++accLock= %d\n",AccCache.accLock);
|
||||||
} else if(AccCache.state==Consistent) {
|
} else if(AccCache.state==Consistent) {
|
||||||
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
||||||
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
|
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
|
||||||
else
|
else
|
||||||
AccCache.state = Consistent; // Consistent + AccRead => Consistent
|
AccCache.state = Consistent; // Consistent + AccRead => Consistent
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("Consistent entry into device accLock %d\n",AccCache.accLock);
|
dprintf("Consistent entry into device ++accLock= %d\n",AccCache.accLock);
|
||||||
} else if(AccCache.state==AccDirty) {
|
} else if(AccCache.state==AccDirty) {
|
||||||
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
||||||
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
|
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
|
||||||
else
|
else
|
||||||
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
|
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("AccDirty entry into device accLock %d\n",AccCache.accLock);
|
dprintf("AccDirty entry ++accLock= %d\n",AccCache.accLock);
|
||||||
} else {
|
} else {
|
||||||
assert(0);
|
assert(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If view is opened on device remove from LRU
|
assert(AccCache.accLock>0);
|
||||||
|
// If view is opened on device must remove from LRU
|
||||||
if(AccCache.LRU_valid==1){
|
if(AccCache.LRU_valid==1){
|
||||||
// must possibly remove from LRU as now locked on GPU
|
// must possibly remove from LRU as now locked on GPU
|
||||||
|
dprintf("AccCache entry removed from LRU \n");
|
||||||
LRUremove(AccCache);
|
LRUremove(AccCache);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -334,10 +362,12 @@ void MemoryManager::AcceleratorViewClose(uint64_t CpuPtr)
|
|||||||
assert(AccCache.accLock>0);
|
assert(AccCache.accLock>0);
|
||||||
|
|
||||||
AccCache.accLock--;
|
AccCache.accLock--;
|
||||||
|
|
||||||
// Move to LRU queue if not locked and close on device
|
// Move to LRU queue if not locked and close on device
|
||||||
if(AccCache.accLock==0) {
|
if(AccCache.accLock==0) {
|
||||||
|
dprintf("AccleratorViewClose %lx AccLock decremented to %ld move to LRU queue\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
||||||
LRUinsert(AccCache);
|
LRUinsert(AccCache);
|
||||||
|
} else {
|
||||||
|
dprintf("AccleratorViewClose %lx AccLock decremented to %ld\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
|
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
|
||||||
@ -374,9 +404,10 @@ uint64_t MemoryManager::CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,V
|
|||||||
auto AccCacheIterator = EntryLookup(CpuPtr);
|
auto AccCacheIterator = EntryLookup(CpuPtr);
|
||||||
auto & AccCache = AccCacheIterator->second;
|
auto & AccCache = AccCacheIterator->second;
|
||||||
|
|
||||||
if (!AccCache.AccPtr) {
|
// CPU doesn't need to free space
|
||||||
EvictVictims(bytes);
|
// if (!AccCache.AccPtr) {
|
||||||
}
|
// EvictVictims(bytes);
|
||||||
|
// }
|
||||||
|
|
||||||
assert((mode==CpuRead)||(mode==CpuWrite));
|
assert((mode==CpuRead)||(mode==CpuWrite));
|
||||||
assert(AccCache.accLock==0); // Programming error
|
assert(AccCache.accLock==0); // Programming error
|
||||||
@ -430,20 +461,28 @@ void MemoryManager::NotifyDeletion(void *_ptr)
|
|||||||
void MemoryManager::Print(void)
|
void MemoryManager::Print(void)
|
||||||
{
|
{
|
||||||
PrintBytes();
|
PrintBytes();
|
||||||
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
std::cout << GridLogDebug << "Memory Manager " << std::endl;
|
std::cout << GridLogMessage << "Memory Manager " << std::endl;
|
||||||
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
std::cout << GridLogDebug << DeviceBytes << " bytes allocated on device " << std::endl;
|
std::cout << GridLogMessage << DeviceBytes << " bytes allocated on device " << std::endl;
|
||||||
std::cout << GridLogDebug << DeviceLRUBytes<< " bytes evictable on device " << std::endl;
|
std::cout << GridLogMessage << DeviceLRUBytes<< " bytes evictable on device " << std::endl;
|
||||||
std::cout << GridLogDebug << DeviceMaxBytes<< " bytes max on device " << std::endl;
|
std::cout << GridLogMessage << DeviceMaxBytes<< " bytes max on device " << std::endl;
|
||||||
std::cout << GridLogDebug << HostToDeviceXfer << " transfers to device " << std::endl;
|
std::cout << GridLogMessage << HostToDeviceXfer << " transfers to device " << std::endl;
|
||||||
std::cout << GridLogDebug << DeviceToHostXfer << " transfers from device " << std::endl;
|
std::cout << GridLogMessage << DeviceToHostXfer << " transfers from device " << std::endl;
|
||||||
std::cout << GridLogDebug << HostToDeviceBytes<< " bytes transfered to device " << std::endl;
|
std::cout << GridLogMessage << HostToDeviceBytes<< " bytes transfered to device " << std::endl;
|
||||||
std::cout << GridLogDebug << DeviceToHostBytes<< " bytes transfered from device " << std::endl;
|
std::cout << GridLogMessage << DeviceToHostBytes<< " bytes transfered from device " << std::endl;
|
||||||
std::cout << GridLogDebug << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
|
std::cout << GridLogMessage << DeviceEvictions << " Evictions from device " << std::endl;
|
||||||
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
|
std::cout << GridLogMessage << DeviceDestroy << " Destroyed vectors on device " << std::endl;
|
||||||
std::cout << GridLogDebug << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
|
std::cout << GridLogMessage << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
|
||||||
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
|
}
|
||||||
|
void MemoryManager::PrintAll(void)
|
||||||
|
{
|
||||||
|
Print();
|
||||||
|
std::cout << GridLogMessage << std::endl;
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
|
std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
|
||||||
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
|
for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
|
||||||
auto &AccCache = it->second;
|
auto &AccCache = it->second;
|
||||||
|
|
||||||
@ -453,13 +492,13 @@ void MemoryManager::Print(void)
|
|||||||
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
|
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
|
||||||
if ( AccCache.state==Consistent)str = std::string("Consistent");
|
if ( AccCache.state==Consistent)str = std::string("Consistent");
|
||||||
|
|
||||||
std::cout << GridLogDebug << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
|
std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
|
||||||
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
|
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
|
||||||
<< "\t" << AccCache.cpuLock
|
<< "\t" << AccCache.cpuLock
|
||||||
<< "\t" << AccCache.accLock
|
<< "\t" << AccCache.accLock
|
||||||
<< "\t" << AccCache.LRU_valid<<std::endl;
|
<< "\t" << AccCache.LRU_valid<<std::endl;
|
||||||
}
|
}
|
||||||
std::cout << GridLogDebug << "--------------------------------------------" << std::endl;
|
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
|
||||||
|
|
||||||
};
|
};
|
||||||
int MemoryManager::isOpen (void* _CpuPtr)
|
int MemoryManager::isOpen (void* _CpuPtr)
|
||||||
@ -473,6 +512,61 @@ int MemoryManager::isOpen (void* _CpuPtr)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
void MemoryManager::Audit(std::string s)
|
||||||
|
{
|
||||||
|
uint64_t CpuBytes=0;
|
||||||
|
uint64_t AccBytes=0;
|
||||||
|
uint64_t LruBytes1=0;
|
||||||
|
uint64_t LruBytes2=0;
|
||||||
|
uint64_t LruCnt=0;
|
||||||
|
uint64_t LockedBytes=0;
|
||||||
|
|
||||||
|
std::cout << " Memory Manager::Audit() from "<<s<<std::endl;
|
||||||
|
for(auto it=LRU.begin();it!=LRU.end();it++){
|
||||||
|
uint64_t cpuPtr = *it;
|
||||||
|
assert(EntryPresent(cpuPtr));
|
||||||
|
auto AccCacheIterator = EntryLookup(cpuPtr);
|
||||||
|
auto & AccCache = AccCacheIterator->second;
|
||||||
|
LruBytes2+=AccCache.bytes;
|
||||||
|
assert(AccCache.LRU_valid==1);
|
||||||
|
assert(AccCache.LRU_entry==it);
|
||||||
|
}
|
||||||
|
std::cout << " Memory Manager::Audit() LRU queue matches table entries "<<std::endl;
|
||||||
|
for(auto it=AccViewTable.begin();it!=AccViewTable.end();it++){
|
||||||
|
auto &AccCache = it->second;
|
||||||
|
|
||||||
|
std::string str;
|
||||||
|
if ( AccCache.state==Empty ) str = std::string("Empty");
|
||||||
|
if ( AccCache.state==CpuDirty ) str = std::string("CpuDirty");
|
||||||
|
if ( AccCache.state==AccDirty ) str = std::string("AccDirty");
|
||||||
|
if ( AccCache.state==Consistent)str = std::string("Consistent");
|
||||||
|
|
||||||
|
CpuBytes+=AccCache.bytes;
|
||||||
|
if( AccCache.AccPtr ) AccBytes+=AccCache.bytes;
|
||||||
|
if( AccCache.LRU_valid ) LruBytes1+=AccCache.bytes;
|
||||||
|
if( AccCache.LRU_valid ) LruCnt++;
|
||||||
|
|
||||||
|
if ( AccCache.cpuLock || AccCache.accLock ) {
|
||||||
|
assert(AccCache.LRU_valid==0);
|
||||||
|
std::cout << GridLogError << s<< "\n\t 0x"<<std::hex<<AccCache.CpuPtr<<std::dec
|
||||||
|
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
|
||||||
|
<< "\t cpuLock " << AccCache.cpuLock
|
||||||
|
<< "\t accLock " << AccCache.accLock
|
||||||
|
<< "\t LRUvalid " << AccCache.LRU_valid<<std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
assert( AccCache.cpuLock== 0 ) ;
|
||||||
|
assert( AccCache.accLock== 0 ) ;
|
||||||
|
}
|
||||||
|
std::cout << " Memory Manager::Audit() no locked table entries "<<std::endl;
|
||||||
|
assert(LruBytes1==LruBytes2);
|
||||||
|
assert(LruBytes1==DeviceLRUBytes);
|
||||||
|
std::cout << " Memory Manager::Audit() evictable bytes matches sum over table "<<std::endl;
|
||||||
|
assert(AccBytes==DeviceBytes);
|
||||||
|
std::cout << " Memory Manager::Audit() device bytes matches sum over table "<<std::endl;
|
||||||
|
assert(LruCnt == LRU.size());
|
||||||
|
std::cout << " Memory Manager::Audit() LRU entry count matches "<<std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
void MemoryManager::PrintState(void* _CpuPtr)
|
void MemoryManager::PrintState(void* _CpuPtr)
|
||||||
{
|
{
|
||||||
@ -489,8 +583,8 @@ void MemoryManager::PrintState(void* _CpuPtr)
|
|||||||
if ( AccCache.state==EvictNext) str = std::string("EvictNext");
|
if ( AccCache.state==EvictNext) str = std::string("EvictNext");
|
||||||
|
|
||||||
std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
|
std::cout << GridLogMessage << "CpuAddr\t\tAccAddr\t\tState\t\tcpuLock\taccLock\tLRU_valid "<<std::endl;
|
||||||
std::cout << GridLogMessage << "0x"<<std::hex<<AccCache.CpuPtr<<std::dec
|
std::cout << GridLogMessage << "\tx"<<std::hex<<AccCache.CpuPtr<<std::dec
|
||||||
<< "\t0x"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
|
<< "\tx"<<std::hex<<AccCache.AccPtr<<std::dec<<"\t" <<str
|
||||||
<< "\t" << AccCache.cpuLock
|
<< "\t" << AccCache.cpuLock
|
||||||
<< "\t" << AccCache.accLock
|
<< "\t" << AccCache.accLock
|
||||||
<< "\t" << AccCache.LRU_valid<<std::endl;
|
<< "\t" << AccCache.LRU_valid<<std::endl;
|
||||||
|
@ -12,7 +12,10 @@ uint64_t MemoryManager::HostToDeviceBytes;
|
|||||||
uint64_t MemoryManager::DeviceToHostBytes;
|
uint64_t MemoryManager::DeviceToHostBytes;
|
||||||
uint64_t MemoryManager::HostToDeviceXfer;
|
uint64_t MemoryManager::HostToDeviceXfer;
|
||||||
uint64_t MemoryManager::DeviceToHostXfer;
|
uint64_t MemoryManager::DeviceToHostXfer;
|
||||||
|
uint64_t MemoryManager::DeviceEvictions;
|
||||||
|
uint64_t MemoryManager::DeviceDestroy;
|
||||||
|
|
||||||
|
void MemoryManager::Audit(std::string s){};
|
||||||
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
|
void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
|
||||||
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
|
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
|
||||||
int MemoryManager::isOpen (void* CpuPtr) { return 0;}
|
int MemoryManager::isOpen (void* CpuPtr) { return 0;}
|
||||||
@ -21,6 +24,7 @@ void MemoryManager::PrintState(void* CpuPtr)
|
|||||||
std::cout << GridLogMessage << "Host<->Device memory movement not currently managed by Grid." << std::endl;
|
std::cout << GridLogMessage << "Host<->Device memory movement not currently managed by Grid." << std::endl;
|
||||||
};
|
};
|
||||||
void MemoryManager::Print(void){};
|
void MemoryManager::Print(void){};
|
||||||
|
void MemoryManager::PrintAll(void){};
|
||||||
void MemoryManager::NotifyDeletion(void *ptr){};
|
void MemoryManager::NotifyDeletion(void *ptr){};
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
@ -46,3 +46,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
#include <Grid/lattice/Lattice_unary.h>
|
#include <Grid/lattice/Lattice_unary.h>
|
||||||
#include <Grid/lattice/Lattice_transfer.h>
|
#include <Grid/lattice/Lattice_transfer.h>
|
||||||
#include <Grid/lattice/Lattice_basis.h>
|
#include <Grid/lattice/Lattice_basis.h>
|
||||||
|
#include <Grid/lattice/Lattice_crc.h>
|
||||||
|
@ -129,7 +129,7 @@ public:
|
|||||||
|
|
||||||
auto exprCopy = expr;
|
auto exprCopy = expr;
|
||||||
ExpressionViewOpen(exprCopy);
|
ExpressionViewOpen(exprCopy);
|
||||||
auto me = View(AcceleratorWriteDiscard);
|
auto me = View(AcceleratorWrite);
|
||||||
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
||||||
auto tmp = eval(ss,exprCopy);
|
auto tmp = eval(ss,exprCopy);
|
||||||
coalescedWrite(me[ss],tmp);
|
coalescedWrite(me[ss],tmp);
|
||||||
@ -152,7 +152,7 @@ public:
|
|||||||
|
|
||||||
auto exprCopy = expr;
|
auto exprCopy = expr;
|
||||||
ExpressionViewOpen(exprCopy);
|
ExpressionViewOpen(exprCopy);
|
||||||
auto me = View(AcceleratorWriteDiscard);
|
auto me = View(AcceleratorWrite);
|
||||||
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
||||||
auto tmp = eval(ss,exprCopy);
|
auto tmp = eval(ss,exprCopy);
|
||||||
coalescedWrite(me[ss],tmp);
|
coalescedWrite(me[ss],tmp);
|
||||||
@ -174,7 +174,7 @@ public:
|
|||||||
this->checkerboard=cb;
|
this->checkerboard=cb;
|
||||||
auto exprCopy = expr;
|
auto exprCopy = expr;
|
||||||
ExpressionViewOpen(exprCopy);
|
ExpressionViewOpen(exprCopy);
|
||||||
auto me = View(AcceleratorWriteDiscard);
|
auto me = View(AcceleratorWrite);
|
||||||
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
||||||
auto tmp = eval(ss,exprCopy);
|
auto tmp = eval(ss,exprCopy);
|
||||||
coalescedWrite(me[ss],tmp);
|
coalescedWrite(me[ss],tmp);
|
||||||
@ -245,7 +245,7 @@ public:
|
|||||||
///////////////////////////////////////////
|
///////////////////////////////////////////
|
||||||
// user defined constructor
|
// user defined constructor
|
||||||
///////////////////////////////////////////
|
///////////////////////////////////////////
|
||||||
Lattice(GridBase *grid,ViewMode mode=AcceleratorWriteDiscard) {
|
Lattice(GridBase *grid,ViewMode mode=AcceleratorWrite) {
|
||||||
this->_grid = grid;
|
this->_grid = grid;
|
||||||
resize(this->_grid->oSites());
|
resize(this->_grid->oSites());
|
||||||
assert((((uint64_t)&this->_odata[0])&0xF) ==0);
|
assert((((uint64_t)&this->_odata[0])&0xF) ==0);
|
||||||
@ -288,7 +288,7 @@ public:
|
|||||||
typename std::enable_if<!std::is_same<robj,vobj>::value,int>::type i=0;
|
typename std::enable_if<!std::is_same<robj,vobj>::value,int>::type i=0;
|
||||||
conformable(*this,r);
|
conformable(*this,r);
|
||||||
this->checkerboard = r.Checkerboard();
|
this->checkerboard = r.Checkerboard();
|
||||||
auto me = View(AcceleratorWriteDiscard);
|
auto me = View(AcceleratorWrite);
|
||||||
auto him= r.View(AcceleratorRead);
|
auto him= r.View(AcceleratorRead);
|
||||||
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
||||||
coalescedWrite(me[ss],him(ss));
|
coalescedWrite(me[ss],him(ss));
|
||||||
@ -303,7 +303,7 @@ public:
|
|||||||
inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
|
inline Lattice<vobj> & operator = (const Lattice<vobj> & r){
|
||||||
this->checkerboard = r.Checkerboard();
|
this->checkerboard = r.Checkerboard();
|
||||||
conformable(*this,r);
|
conformable(*this,r);
|
||||||
auto me = View(AcceleratorWriteDiscard);
|
auto me = View(AcceleratorWrite);
|
||||||
auto him= r.View(AcceleratorRead);
|
auto him= r.View(AcceleratorRead);
|
||||||
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
accelerator_for(ss,me.size(),vobj::Nsimd(),{
|
||||||
coalescedWrite(me[ss],him(ss));
|
coalescedWrite(me[ss],him(ss));
|
||||||
|
55
Grid/lattice/Lattice_crc.h
Normal file
55
Grid/lattice/Lattice_crc.h
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
/*************************************************************************************
|
||||||
|
|
||||||
|
Grid physics library, www.github.com/paboyle/Grid
|
||||||
|
|
||||||
|
Source file: ./lib/lattice/Lattice_crc.h
|
||||||
|
|
||||||
|
Copyright (C) 2021
|
||||||
|
|
||||||
|
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
See the full license in the file "LICENSE" in the top level distribution directory
|
||||||
|
*************************************************************************************/
|
||||||
|
/* END LEGAL */
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
|
||||||
|
{
|
||||||
|
auto ff = localNorm2(f);
|
||||||
|
if ( mu==-1 ) mu = f.Grid()->Nd()-1;
|
||||||
|
typedef typename vobj::tensor_reduced normtype;
|
||||||
|
typedef typename normtype::scalar_object scalar;
|
||||||
|
std::vector<scalar> sff;
|
||||||
|
sliceSum(ff,sff,mu);
|
||||||
|
for(int t=0;t<sff.size();t++){
|
||||||
|
std::cout << s<<" "<<t<<" "<<sff[t]<<std::endl;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class vobj> uint32_t crc(Lattice<vobj> & buf)
|
||||||
|
{
|
||||||
|
autoView( buf_v , buf, CpuRead);
|
||||||
|
return ::crc32(0L,(unsigned char *)&buf_v[0],(size_t)sizeof(vobj)*buf.oSites());
|
||||||
|
}
|
||||||
|
|
||||||
|
#define CRC(U) std::cout << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
||||||
|
|
||||||
|
|
126
Grid/lattice/Lattice_slice_gpu.h
Normal file
126
Grid/lattice/Lattice_slice_gpu.h
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
// If NOT CUDA or HIP -- we should provide
|
||||||
|
// -- atomicAdd(float *,float)
|
||||||
|
// -- atomicAdd(double *,double)
|
||||||
|
//
|
||||||
|
// Augment CUDA with complex atomics
|
||||||
|
#if !defined(GRID_HIP) || !defined(GRID_CUDA)
|
||||||
|
inline void atomicAdd(float *acc,float elem)
|
||||||
|
{
|
||||||
|
*acc += elem;
|
||||||
|
}
|
||||||
|
inline void atomicAdd(double *acc,double elem)
|
||||||
|
{
|
||||||
|
*acc += elem;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
inline void atomicAdd(ComplexD *accum,ComplexD & elem)
|
||||||
|
{
|
||||||
|
double *a_p = (double *)accum;
|
||||||
|
double *e_p = (double *)&elem;
|
||||||
|
for(int w=0;w<2;w++){
|
||||||
|
atomicAdd(&a_p[w],e_p[w]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
inline void atomicAdd(ComplexF *accum,ComplexF & elem)
|
||||||
|
{
|
||||||
|
float *a_p = (float *)accum;
|
||||||
|
float *e_p = (float *)&elem;
|
||||||
|
for(int w=0;w<2;w++){
|
||||||
|
atomicAdd(&a_p[w],e_p[w]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Augment CUDA with vobj atomics
|
||||||
|
template<class vobj> accelerator_inline void atomicAdd(vobj *accum, vobj & elem)
|
||||||
|
{
|
||||||
|
typedef typename vobj::scalar_type scalar_type;
|
||||||
|
scalar_type *a_p= (scalar_type *)accum;
|
||||||
|
scalar_type *e_p= (scalar_type *)& elem;
|
||||||
|
for(int w=0;w<vobj::Nsimd();w++){
|
||||||
|
atomicAdd(&a_p[w],e_p[w]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Atomics based slice sum
|
||||||
|
template<class vobj> inline void sliceSumGpu(const Lattice<vobj> &Data,std::vector<typename vobj::scalar_object> &result,int orthogdim)
|
||||||
|
{
|
||||||
|
typedef typename vobj::scalar_object sobj;
|
||||||
|
typedef typename vobj::scalar_object::scalar_type scalar_type;
|
||||||
|
GridBase *grid = Data.Grid();
|
||||||
|
assert(grid!=NULL);
|
||||||
|
|
||||||
|
const int Nd = grid->_ndimension;
|
||||||
|
const int Nsimd = grid->Nsimd();
|
||||||
|
|
||||||
|
assert(orthogdim >= 0);
|
||||||
|
assert(orthogdim < Nd);
|
||||||
|
|
||||||
|
int fd=grid->_fdimensions[orthogdim];
|
||||||
|
int ld=grid->_ldimensions[orthogdim];
|
||||||
|
int rd=grid->_rdimensions[orthogdim];
|
||||||
|
|
||||||
|
// Move to device memory and copy in / out
|
||||||
|
Vector<vobj> lvSum(rd); // will locally sum vectors first
|
||||||
|
Vector<sobj> lsSum(ld,Zero()); // sum across these down to scalars
|
||||||
|
ExtractBuffer<sobj> extracted(Nsimd); // splitting the SIMD
|
||||||
|
|
||||||
|
result.resize(fd); // And then global sum to return the same vector to every node
|
||||||
|
for(int r=0;r<rd;r++){
|
||||||
|
lvSum[r]=Zero();
|
||||||
|
}
|
||||||
|
|
||||||
|
int e1= grid->_slice_nblock[orthogdim];
|
||||||
|
int e2= grid->_slice_block [orthogdim];
|
||||||
|
int stride=grid->_slice_stride[orthogdim];
|
||||||
|
|
||||||
|
// sum over reduced dimension planes, breaking out orthog dir
|
||||||
|
// Parallel over orthog direction
|
||||||
|
autoView( Data_v, Data, AcceleratorRead);
|
||||||
|
auto lvSum_p=&lvSum[0];
|
||||||
|
int ostride = grid->_ostride[orthogdim];
|
||||||
|
accelerator_for( ree,rd*e1*e2,1, {
|
||||||
|
int b = ree%e2;
|
||||||
|
int re= ree/e2;
|
||||||
|
int n=re%e1;
|
||||||
|
int r=re/e1;
|
||||||
|
int so=r*ostride;
|
||||||
|
int ss=so+n*stride+b;
|
||||||
|
atomicAdd(&lvSum_p[r],Data_v[ss]);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Sum across simd lanes in the plane, breaking out orthog dir.
|
||||||
|
Coordinate icoor(Nd);
|
||||||
|
|
||||||
|
for(int rt=0;rt<rd;rt++){
|
||||||
|
|
||||||
|
extract(lvSum[rt],extracted);
|
||||||
|
|
||||||
|
for(int idx=0;idx<Nsimd;idx++){
|
||||||
|
|
||||||
|
grid->iCoorFromIindex(icoor,idx);
|
||||||
|
|
||||||
|
int ldx =rt+icoor[orthogdim]*rd;
|
||||||
|
|
||||||
|
lsSum[ldx]=lsSum[ldx]+extracted[idx];
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sum over nodes.
|
||||||
|
for(int t=0;t<fd;t++){
|
||||||
|
int pt = t/ld; // processor plane
|
||||||
|
int lt = t%ld;
|
||||||
|
if ( pt == grid->_processor_coor[orthogdim] ) {
|
||||||
|
result[t]=lsSum[lt];
|
||||||
|
} else {
|
||||||
|
result[t]=Zero();
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
scalar_type * ptr = (scalar_type *) &result[0];
|
||||||
|
int words = fd*sizeof(sobj)/sizeof(scalar_type);
|
||||||
|
grid->GlobalSumVector(ptr, words);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
NAMESPACE_END(Grid);
|
@ -65,29 +65,40 @@ GridLogger GridLogSolver (1, "Solver", GridLogColours, "NORMAL");
|
|||||||
GridLogger GridLogError (1, "Error" , GridLogColours, "RED");
|
GridLogger GridLogError (1, "Error" , GridLogColours, "RED");
|
||||||
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
|
GridLogger GridLogWarning(1, "Warning", GridLogColours, "YELLOW");
|
||||||
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
|
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL");
|
||||||
|
GridLogger GridLogMemory (1, "Memory", GridLogColours, "NORMAL");
|
||||||
|
GridLogger GridLogTracing(1, "Tracing", GridLogColours, "NORMAL");
|
||||||
GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
|
GridLogger GridLogDebug (1, "Debug", GridLogColours, "PURPLE");
|
||||||
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
|
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN");
|
||||||
|
GridLogger GridLogDslash (1, "Dslash", GridLogColours, "BLUE");
|
||||||
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
|
GridLogger GridLogIterative (1, "Iterative", GridLogColours, "BLUE");
|
||||||
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
|
GridLogger GridLogIntegrator (1, "Integrator", GridLogColours, "BLUE");
|
||||||
|
GridLogger GridLogHMC (1, "HMC", GridLogColours, "BLUE");
|
||||||
|
|
||||||
void GridLogConfigure(std::vector<std::string> &logstreams) {
|
void GridLogConfigure(std::vector<std::string> &logstreams) {
|
||||||
GridLogError.Active(0);
|
GridLogError.Active(1);
|
||||||
GridLogWarning.Active(0);
|
GridLogWarning.Active(0);
|
||||||
GridLogMessage.Active(1); // at least the messages should be always on
|
GridLogMessage.Active(1); // at least the messages should be always on
|
||||||
|
GridLogMemory.Active(0);
|
||||||
|
GridLogTracing.Active(0);
|
||||||
GridLogIterative.Active(0);
|
GridLogIterative.Active(0);
|
||||||
GridLogDebug.Active(0);
|
GridLogDebug.Active(0);
|
||||||
GridLogPerformance.Active(0);
|
GridLogPerformance.Active(0);
|
||||||
|
GridLogDslash.Active(0);
|
||||||
GridLogIntegrator.Active(1);
|
GridLogIntegrator.Active(1);
|
||||||
GridLogColours.Active(0);
|
GridLogColours.Active(0);
|
||||||
|
GridLogHMC.Active(1);
|
||||||
|
|
||||||
for (int i = 0; i < logstreams.size(); i++) {
|
for (int i = 0; i < logstreams.size(); i++) {
|
||||||
if (logstreams[i] == std::string("Error")) GridLogError.Active(1);
|
if (logstreams[i] == std::string("Tracing")) GridLogTracing.Active(1);
|
||||||
|
if (logstreams[i] == std::string("Memory")) GridLogMemory.Active(1);
|
||||||
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
|
if (logstreams[i] == std::string("Warning")) GridLogWarning.Active(1);
|
||||||
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
|
if (logstreams[i] == std::string("NoMessage")) GridLogMessage.Active(0);
|
||||||
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
|
if (logstreams[i] == std::string("Iterative")) GridLogIterative.Active(1);
|
||||||
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
|
if (logstreams[i] == std::string("Debug")) GridLogDebug.Active(1);
|
||||||
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
|
if (logstreams[i] == std::string("Performance")) GridLogPerformance.Active(1);
|
||||||
if (logstreams[i] == std::string("Integrator")) GridLogIntegrator.Active(1);
|
if (logstreams[i] == std::string("Dslash")) GridLogDslash.Active(1);
|
||||||
|
if (logstreams[i] == std::string("NoIntegrator"))GridLogIntegrator.Active(0);
|
||||||
|
if (logstreams[i] == std::string("NoHMC")) GridLogHMC.Active(0);
|
||||||
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
|
if (logstreams[i] == std::string("Colours")) GridLogColours.Active(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -138,7 +138,8 @@ public:
|
|||||||
stream << std::setw(log.topWidth);
|
stream << std::setw(log.topWidth);
|
||||||
}
|
}
|
||||||
stream << log.topName << log.background()<< " : ";
|
stream << log.topName << log.background()<< " : ";
|
||||||
stream << log.colour() << std::left;
|
// stream << log.colour() << std::left;
|
||||||
|
stream << std::left;
|
||||||
if (log.chanWidth > 0)
|
if (log.chanWidth > 0)
|
||||||
{
|
{
|
||||||
stream << std::setw(log.chanWidth);
|
stream << std::setw(log.chanWidth);
|
||||||
@ -153,9 +154,9 @@ public:
|
|||||||
stream << log.evidence()
|
stream << log.evidence()
|
||||||
<< now << log.background() << " : " ;
|
<< now << log.background() << " : " ;
|
||||||
}
|
}
|
||||||
stream << log.colour();
|
// stream << log.colour();
|
||||||
|
stream << std::right;
|
||||||
stream.flags(f);
|
stream.flags(f);
|
||||||
|
|
||||||
return stream;
|
return stream;
|
||||||
} else {
|
} else {
|
||||||
return devnull;
|
return devnull;
|
||||||
@ -180,8 +181,12 @@ extern GridLogger GridLogWarning;
|
|||||||
extern GridLogger GridLogMessage;
|
extern GridLogger GridLogMessage;
|
||||||
extern GridLogger GridLogDebug ;
|
extern GridLogger GridLogDebug ;
|
||||||
extern GridLogger GridLogPerformance;
|
extern GridLogger GridLogPerformance;
|
||||||
|
extern GridLogger GridLogDslash;
|
||||||
extern GridLogger GridLogIterative ;
|
extern GridLogger GridLogIterative ;
|
||||||
extern GridLogger GridLogIntegrator ;
|
extern GridLogger GridLogIntegrator ;
|
||||||
|
extern GridLogger GridLogHMC;
|
||||||
|
extern GridLogger GridLogMemory;
|
||||||
|
extern GridLogger GridLogTracing;
|
||||||
extern Colours GridLogColours;
|
extern Colours GridLogColours;
|
||||||
|
|
||||||
std::string demangle(const char* name) ;
|
std::string demangle(const char* name) ;
|
||||||
|
@ -27,10 +27,13 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
/* END LEGAL */
|
/* END LEGAL */
|
||||||
|
|
||||||
#include <Grid/GridCore.h>
|
#include <Grid/GridCore.h>
|
||||||
#include <Grid/perfmon/PerfCount.h>
|
|
||||||
|
|
||||||
|
#include <Grid/perfmon/Timer.h>
|
||||||
|
#include <Grid/perfmon/PerfCount.h>
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
GridTimePoint theProgramStart = GridClock::now();
|
||||||
|
|
||||||
#define CacheControl(L,O,R) ((PERF_COUNT_HW_CACHE_##L)|(PERF_COUNT_HW_CACHE_OP_##O<<8)| (PERF_COUNT_HW_CACHE_RESULT_##R<<16))
|
#define CacheControl(L,O,R) ((PERF_COUNT_HW_CACHE_##L)|(PERF_COUNT_HW_CACHE_OP_##O<<8)| (PERF_COUNT_HW_CACHE_RESULT_##R<<16))
|
||||||
#define RawConfig(A,B) (A<<8|B)
|
#define RawConfig(A,B) (A<<8|B)
|
||||||
const PerformanceCounter::PerformanceCounterConfig PerformanceCounter::PerformanceCounterConfigs [] = {
|
const PerformanceCounter::PerformanceCounterConfig PerformanceCounter::PerformanceCounterConfigs [] = {
|
||||||
|
@ -30,6 +30,12 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
#ifndef GRID_PERFCOUNT_H
|
#ifndef GRID_PERFCOUNT_H
|
||||||
#define GRID_PERFCOUNT_H
|
#define GRID_PERFCOUNT_H
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef __SSC_START
|
||||||
|
#define __SSC_START
|
||||||
|
#define __SSC_STOP
|
||||||
|
#endif
|
||||||
|
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
#include <ctime>
|
#include <ctime>
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
@ -72,17 +78,9 @@ static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid,
|
|||||||
inline uint64_t cyclecount(void){
|
inline uint64_t cyclecount(void){
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
#define __SSC_MARK(mark) __asm__ __volatile__ ("movl %0, %%ebx; .byte 0x64, 0x67, 0x90 " ::"i"(mark):"%ebx")
|
|
||||||
#define __SSC_STOP __SSC_MARK(0x110)
|
|
||||||
#define __SSC_START __SSC_MARK(0x111)
|
|
||||||
|
|
||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
#define __SSC_MARK(mark)
|
|
||||||
#define __SSC_STOP
|
|
||||||
#define __SSC_START
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* cycle counters arch dependent
|
* cycle counters arch dependent
|
||||||
*/
|
*/
|
||||||
|
@ -35,17 +35,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
|
|
||||||
NAMESPACE_BEGIN(Grid)
|
NAMESPACE_BEGIN(Grid)
|
||||||
|
|
||||||
// Dress the output; use std::chrono
|
//typedef std::chrono::system_clock GridClock;
|
||||||
// C++11 time facilities better?
|
typedef std::chrono::high_resolution_clock GridClock;
|
||||||
inline double usecond(void) {
|
|
||||||
struct timeval tv;
|
|
||||||
#ifdef TIMERS_ON
|
|
||||||
gettimeofday(&tv,NULL);
|
|
||||||
#endif
|
|
||||||
return 1.0*tv.tv_usec + 1.0e6*tv.tv_sec;
|
|
||||||
}
|
|
||||||
|
|
||||||
typedef std::chrono::system_clock GridClock;
|
|
||||||
typedef std::chrono::time_point<GridClock> GridTimePoint;
|
typedef std::chrono::time_point<GridClock> GridTimePoint;
|
||||||
|
|
||||||
typedef std::chrono::seconds GridSecs;
|
typedef std::chrono::seconds GridSecs;
|
||||||
@ -53,6 +44,15 @@ typedef std::chrono::milliseconds GridMillisecs;
|
|||||||
typedef std::chrono::microseconds GridUsecs;
|
typedef std::chrono::microseconds GridUsecs;
|
||||||
typedef std::chrono::microseconds GridTime;
|
typedef std::chrono::microseconds GridTime;
|
||||||
|
|
||||||
|
extern GridTimePoint theProgramStart;
|
||||||
|
// Dress the output; use std::chrono
|
||||||
|
// C++11 time facilities better?
|
||||||
|
inline double usecond(void) {
|
||||||
|
auto usecs = std::chrono::duration_cast<GridUsecs>(GridClock::now()-theProgramStart);
|
||||||
|
return 1.0*usecs.count();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
|
inline std::ostream& operator<< (std::ostream & stream, const GridSecs & time)
|
||||||
{
|
{
|
||||||
stream << time.count()<<" s";
|
stream << time.count()<<" s";
|
||||||
|
70
Grid/perfmon/Tracing.h
Normal file
70
Grid/perfmon/Tracing.h
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
#ifdef GRID_TRACING_NVTX
|
||||||
|
#include <nvToolsExt.h>
|
||||||
|
class GridTracer {
|
||||||
|
public:
|
||||||
|
GridTracer(const char* name) {
|
||||||
|
nvtxRangePushA(name);
|
||||||
|
}
|
||||||
|
~GridTracer() {
|
||||||
|
nvtxRangePop();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
inline void tracePush(const char *name) { nvtxRangePushA(name); }
|
||||||
|
inline void tracePop(const char *name) { nvtxRangePop(); }
|
||||||
|
inline int traceStart(const char *name) { }
|
||||||
|
inline void traceStop(int ID) { }
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef GRID_TRACING_ROCTX
|
||||||
|
#include <roctracer/roctx.h>
|
||||||
|
class GridTracer {
|
||||||
|
public:
|
||||||
|
GridTracer(const char* name) {
|
||||||
|
roctxRangePushA(name);
|
||||||
|
std::cout << "roctxRangePush "<<name<<std::endl;
|
||||||
|
}
|
||||||
|
~GridTracer() {
|
||||||
|
roctxRangePop();
|
||||||
|
std::cout << "roctxRangePop "<<std::endl;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
inline void tracePush(const char *name) { roctxRangePushA(name); }
|
||||||
|
inline void tracePop(const char *name) { roctxRangePop(); }
|
||||||
|
inline int traceStart(const char *name) { roctxRangeStart(name); }
|
||||||
|
inline void traceStop(int ID) { roctxRangeStop(ID); }
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef GRID_TRACING_TIMER
|
||||||
|
class GridTracer {
|
||||||
|
public:
|
||||||
|
const char *name;
|
||||||
|
double elapsed;
|
||||||
|
GridTracer(const char* _name) {
|
||||||
|
name = _name;
|
||||||
|
elapsed=-usecond();
|
||||||
|
}
|
||||||
|
~GridTracer() {
|
||||||
|
elapsed+=usecond();
|
||||||
|
std::cout << GridLogTracing << name << " took " <<elapsed<< " us" <<std::endl;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
inline void tracePush(const char *name) { }
|
||||||
|
inline void tracePop(const char *name) { }
|
||||||
|
inline int traceStart(const char *name) { return 0; }
|
||||||
|
inline void traceStop(int ID) { }
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef GRID_TRACING_NONE
|
||||||
|
#define GRID_TRACE(name)
|
||||||
|
inline void tracePush(const char *name) { }
|
||||||
|
inline void tracePop(const char *name) { }
|
||||||
|
inline int traceStart(const char *name) { return 0; }
|
||||||
|
inline void traceStop(int ID) { }
|
||||||
|
#else
|
||||||
|
#define GRID_TRACE(name) GridTracer uniq_name_using_macros##__COUNTER__(name);
|
||||||
|
#endif
|
||||||
|
NAMESPACE_END(Grid);
|
@ -456,9 +456,9 @@ template <class Fimpl>
|
|||||||
void FermToProp(typename Fimpl::PropagatorField &p, const typename Fimpl::FermionField &f, const int s, const int c)
|
void FermToProp(typename Fimpl::PropagatorField &p, const typename Fimpl::FermionField &f, const int s, const int c)
|
||||||
{
|
{
|
||||||
#ifdef FAST_FERM_TO_PROP
|
#ifdef FAST_FERM_TO_PROP
|
||||||
autoView(p_v,p,AcceleratorWrite);
|
autoView(p_v,p,CpuWrite);
|
||||||
autoView(f_v,f,AcceleratorRead);
|
autoView(f_v,f,CpuRead);
|
||||||
accelerator_for(idx,p_v.oSites(),1,{
|
thread_for(idx,p_v.oSites(),{
|
||||||
for(int ss = 0; ss < Ns; ++ss) {
|
for(int ss = 0; ss < Ns; ++ss) {
|
||||||
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
||||||
p_v[idx]()(ss,s)(cc,c) = f_v[idx]()(ss)(cc); // Propagator sink index is LEFT, suitable for left mult by gauge link (e.g.)
|
p_v[idx]()(ss,s)(cc,c) = f_v[idx]()(ss)(cc); // Propagator sink index is LEFT, suitable for left mult by gauge link (e.g.)
|
||||||
@ -484,9 +484,9 @@ template <class Fimpl>
|
|||||||
void PropToFerm(typename Fimpl::FermionField &f, const typename Fimpl::PropagatorField &p, const int s, const int c)
|
void PropToFerm(typename Fimpl::FermionField &f, const typename Fimpl::PropagatorField &p, const int s, const int c)
|
||||||
{
|
{
|
||||||
#ifdef FAST_FERM_TO_PROP
|
#ifdef FAST_FERM_TO_PROP
|
||||||
autoView(p_v,p,AcceleratorWrite);
|
autoView(p_v,p,CpuRead);
|
||||||
autoView(f_v,f,AcceleratorRead);
|
autoView(f_v,f,CpuWrite);
|
||||||
accelerator_for(idx,p_v.oSites(),1,{
|
thread_for(idx,p_v.oSites(),{
|
||||||
for(int ss = 0; ss < Ns; ++ss) {
|
for(int ss = 0; ss < Ns; ++ss) {
|
||||||
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
for(int cc = 0; cc < Fimpl::Dimension; ++cc) {
|
||||||
f_v[idx]()(ss)(cc) = p_v[idx]()(ss,s)(cc,c); // LEFT index is copied across for s,c right index
|
f_v[idx]()(ss)(cc) = p_v[idx]()(ss,s)(cc,c); // LEFT index is copied across for s,c right index
|
||||||
|
@ -1 +1 @@
|
|||||||
CXX=mpicxx-openmpi-mp CXXFLAGS=-I/opt/local/include/ LDFLAGS=-L/opt/local/lib/ ../../configure --enable-simd=GEN --enable-debug --enable-comms=mpi
|
CXX=mpicxx-openmpi-mp CXXFLAGS=-I/opt/local/include/ LDFLAGS=-L/opt/local/lib/ ../../configure --enable-simd=GEN --enable-debug --enable-comms=mpi --enable-unified=no
|
||||||
|
270
tests/core/Test_fft_matt.cc
Normal file
270
tests/core/Test_fft_matt.cc
Normal file
@ -0,0 +1,270 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_cshift.cc

Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace Grid;

Gamma::Algebra Gmu [] = {
Gamma::Algebra::GammaX,
Gamma::Algebra::GammaY,
Gamma::Algebra::GammaZ,
Gamma::Algebra::GammaT,
Gamma::Algebra::Gamma5
};

int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);

int threads = GridThread::GetThreads();
std::cout<<GridLogMessage << "Grid is setup to use "<<threads<<" threads"<<std::endl;

Coordinate latt_size = GridDefaultLatt();
Coordinate simd_layout = GridDefaultSimd(Nd,vComplexD::Nsimd());
Coordinate mpi_layout = GridDefaultMpi();

int vol = 1;
for(int d=0;d<latt_size.size();d++){
vol = vol * latt_size[d];
}
GridCartesian GRID(latt_size,simd_layout,mpi_layout);
GridRedBlackCartesian RBGRID(&GRID);

LatticeComplexD coor(&GRID);
ComplexD ci(0.0,1.0);

std::vector<int> seeds({1,2,3,4});
GridSerialRNG sRNG; sRNG.SeedFixedIntegers(seeds); // naughty seeding
GridParallelRNG pRNG(&GRID);
pRNG.SeedFixedIntegers(seeds);

LatticeGaugeFieldD Umu(&GRID);
SU<Nc>::ColdConfiguration(pRNG,Umu); // Unit gauge

////////////////////////////////////////////////////
// Wilson test
////////////////////////////////////////////////////
{
LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD src_p(&GRID);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD result(&GRID);

RealD mass=0.1;
WilsonFermionD Dw(Umu,GRID,RBGRID,mass);

Dw.M(src,ref);
std::cout << "Norm src "<<norm2(src)<<std::endl;
std::cout << "Norm Dw x src "<<norm2(ref)<<std::endl;
{
FFT theFFT(&GRID);

////////////////
// operator in Fourier space
////////////////
tmp =ref;
theFFT.FFT_all_dim(result,tmp,FFT::forward);
std::cout<<"FFT[ Dw x src ] "<< norm2(result)<<std::endl;

tmp = src;
theFFT.FFT_all_dim(src_p,tmp,FFT::forward);
std::cout<<"FFT[ src ] "<< norm2(src_p)<<std::endl;

/////////////////////////////////////////////////////////////////
// work out the predicted FT from Fourier
/////////////////////////////////////////////////////////////////
auto FGrid = &GRID;
LatticeFermionD Kinetic(FGrid); Kinetic = Zero();
LatticeComplexD kmu(FGrid);
LatticeInteger scoor(FGrid);
LatticeComplexD sk (FGrid); sk = Zero();
LatticeComplexD sk2(FGrid); sk2= Zero();
LatticeComplexD W(FGrid); W= Zero();
LatticeComplexD one(FGrid); one =ComplexD(1.0,0.0);
ComplexD ci(0.0,1.0);

for(int mu=0;mu<Nd;mu++) {

RealD TwoPiL = M_PI * 2.0/ latt_size[mu];

LatticeCoordinate(kmu,mu);

kmu = TwoPiL * kmu;

sk2 = sk2 + 2.0*sin(kmu*0.5)*sin(kmu*0.5);
sk = sk + sin(kmu) *sin(kmu);

// -1/2 Dw -> 1/2 gmu (eip - emip) = i sinp gmu
Kinetic = Kinetic + sin(kmu)*ci*(Gamma(Gmu[mu])*src_p);

}

W = mass + sk2;
Kinetic = Kinetic + W * src_p;

std::cout<<"Momentum space src "<< norm2(src_p)<<std::endl;
std::cout<<"Momentum space Dw x src "<< norm2(Kinetic)<<std::endl;
std::cout<<"FT[Coordinate space Dw] "<< norm2(result)<<std::endl;

result = result - Kinetic;
std::cout<<"diff "<< norm2(result)<<std::endl;

}

std::cout << " =======================================" <<std::endl;
std::cout << " Checking FourierFreePropagator x Dw = 1" <<std::endl;
std::cout << " =======================================" <<std::endl;
std::cout << "Dw src = " <<norm2(src)<<std::endl;
std::cout << "Dw tmp = " <<norm2(tmp)<<std::endl;
Dw.M(src,tmp);
Dw.FreePropagator(tmp,ref,mass);

std::cout << "Dw ref = " <<norm2(ref)<<std::endl;

ref = ref - src;

std::cout << "Dw ref-src = " <<norm2(ref)<<std::endl;
}

////////////////////////////////////////////////////
// Wilson prop
////////////////////////////////////////////////////
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Wilson Mom space 4d propagator \n";
std::cout<<"****************************************"<<std::endl;

LatticeFermionD src(&GRID); gaussian(pRNG,src);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);

src=Zero();
Coordinate point(4,0); // 0,0,0,0
SpinColourVectorD ferm;
ferm=Zero();
ferm()(0)(0) = ComplexD(1.0);
pokeSite(ferm,src,point);

RealD mass=0.1;
WilsonFermionD Dw(Umu,GRID,RBGRID,mass);

// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
Dw.FreePropagator(src,ref,mass) ;

Gamma G5(Gamma::Algebra::Gamma5);

LatticeFermionD result(&GRID);
const int sdir=0;

////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dw.Mdag(src,tmp);
src=tmp;
MdagMLinearOperator<WilsonFermionD,LatticeFermionD> HermOp(Dw);
ConjugateGradient<LatticeFermionD> CG(1.0e-10,10000);
CG(HermOp,src,result);

////////////////////////////////////////////////////////////////////////
std::cout << " Taking difference" <<std::endl;
std::cout << "Dw result "<<norm2(result)<<std::endl;
std::cout << "Dw ref "<<norm2(ref)<<std::endl;

diff = ref - result;
std::cout << "result - ref "<<norm2(diff)<<std::endl;

DumpSliceNorm("Slice Norm Solution ",result,Nd-1);
}

////////////////////////////////////////////////////
//Gauge invariance test
////////////////////////////////////////////////////
{
std::cout<<"****************************************"<<std::endl;
std::cout << "Gauge invariance test \n";
std::cout<<"****************************************"<<std::endl;
LatticeGaugeField U_GT(&GRID); // Gauge transformed field
LatticeColourMatrix g(&GRID); // local Gauge xform matrix
U_GT = Umu;
// Make a random xform to the gauge field
SU<Nc>::RandomGaugeTransform(pRNG,U_GT,g); // Unit gauge

LatticeFermionD src(&GRID);
LatticeFermionD tmp(&GRID);
LatticeFermionD ref(&GRID);
LatticeFermionD diff(&GRID);

// could loop over colors
src=Zero();
Coordinate point(4,0); // 0,0,0,0
SpinColourVectorD ferm;
ferm=Zero();
ferm()(0)(0) = ComplexD(1.0);
pokeSite(ferm,src,point);

RealD mass=0.1;
WilsonFermionD Dw(U_GT,GRID,RBGRID,mass);

// Momentum space prop
std::cout << " Solving by FFT and Feynman rules" <<std::endl;
Dw.FreePropagator(src,ref,mass) ;

Gamma G5(Gamma::Algebra::Gamma5);

LatticeFermionD result(&GRID);
const int sdir=0;

////////////////////////////////////////////////////////////////////////
// Conjugate gradient on normal equations system
////////////////////////////////////////////////////////////////////////
std::cout << " Solving by Conjugate Gradient (CGNE)" <<std::endl;
Dw.Mdag(src,tmp);
src=tmp;
MdagMLinearOperator<WilsonFermionD,LatticeFermionD> HermOp(Dw);
ConjugateGradient<LatticeFermionD> CG(1.0e-10,10000);
CG(HermOp,src,result);

////////////////////////////////////////////////////////////////////////
std::cout << " Taking difference" <<std::endl;
std::cout << "Dw result "<<norm2(result)<<std::endl;
std::cout << "Dw ref "<<norm2(ref)<<std::endl;

diff = ref - result;
std::cout << "result - ref "<<norm2(diff)<<std::endl;

DumpSliceNorm("Slice Norm Solution ",result,Nd-1);
}

Grid_finalize();
}
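For reference (added illustration, not in the committed file): with k_mu = 2*pi*n_mu/L_mu, the mu loop above assembles the standard free Wilson operator in momentum space,

D_W(k) = i \sum_\mu \gamma_\mu \sin k_\mu + m + 2 \sum_\mu \sin^2(k_\mu/2),

so Kinetic is D_W(k) acting on the Fourier-transformed source (sk2 supplies the 2 sin^2(k/2) term, W = mass + sk2 the diagonal part), and the final result - Kinetic difference should vanish to rounding.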
110 tests/core/Test_memory_manager.cc Normal file
@ -0,0 +1,110 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_memory_manager.cc

Copyright (C) 2022

Author: Peter Boyle <pboyle@bnl.gov>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>

using namespace std;
using namespace Grid;

void MemoryTest(GridCartesian * FGrid,int N);

int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);

GridCartesian * UGrid = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());

int N=100;
for(int i=0;i<N;i++){
std::cout << "============================"<<std::endl;
std::cout << "Epoch "<<i<<"/"<<N<<std::endl;
std::cout << "============================"<<std::endl;
MemoryTest(UGrid,256);
MemoryManager::Print();
AUDIT();
}
Grid_finalize();
}

void MemoryTest(GridCartesian * FGrid, int N)
{
LatticeComplexD zero(FGrid); zero=Zero();
std::vector<LatticeComplexD> A(N,zero);//FGrid);

std::vector<ComplexD> B(N,ComplexD(0.0)); // Update sequentially on host

for(int v=0;v<N;v++) A[v] = Zero();

uint64_t counter = 0;
for(int epoch = 0;epoch<10000;epoch++){

int v = random() %N; // Which vec
int w = random() %2; // Write or read
int e = random() %3; // expression or for loop
int dev= random() %2; // On device?
// int e=1;
ComplexD zc = counter++;

if ( w ) {
B[v] = B[v] + zc;
if ( e == 0 ) {
A[v] = A[v] + zc - A[v] + A[v];
} else {
if ( dev ) {
autoView(A_v,A[v],AcceleratorWrite);
accelerator_for(ss,FGrid->oSites(),1,{
A_v[ss] = A_v[ss] + zc;
});
} else {
autoView(A_v,A[v],CpuWrite);
thread_for(ss,FGrid->oSites(),{
A_v[ss] = A_v[ss] + zc;
});
}
}
} else {
if ( e == 0 ) {
A[v] = A[v] + A[v] - A[v];
} else {
if ( dev ) {
autoView(A_v,A[v],AcceleratorRead);
accelerator_for(ss,FGrid->oSites(),1,{
assert(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on GPU"<<B[v]<<std::endl;
} else {
autoView(A_v,A[v],CpuRead);
thread_for(ss,FGrid->oSites(),{
assert(B[v]==A_v[ss]()()().getlane(0));
});
// std::cout << "["<<v<<"] checked on CPU"<<B[v]<<std::endl;
}
}
}
}

}
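A condensed sketch of the contract the random epochs above stress (illustration only, using the same calls as the test): write a field through a device view, then read it back through a host view and check the value survived the transfer.

// Illustration only: device-side write followed by a host-side check.
void CoherenceSketch(GridCartesian *FGrid)
{
  LatticeComplexD A(FGrid); A = Zero();
  ComplexD shift(1.0, 0.0);
  {
    autoView(A_d, A, AcceleratorWrite);
    accelerator_for(ss, FGrid->oSites(), 1, { A_d[ss] = A_d[ss] + shift; }); // update on device
  }
  {
    autoView(A_h, A, CpuRead); // the memory manager must make the device data visible on the host
    thread_for(ss, FGrid->oSites(), { assert(A_h[ss]()()().getlane(0) == shift); });
  }
}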
73 tests/core/Test_slicesum.cc Normal file
@ -0,0 +1,73 @@
/*************************************************************************************

Grid physics library, www.github.com/paboyle/Grid

Source file: ./tests/Test_poisson_fft.cc

Copyright (C) 2015

Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/Grid.h>
#include <Grid/lattice/Lattice_slice_gpu.h>

using namespace Grid;

int main (int argc, char ** argv)
{
Grid_init(&argc,&argv);

int N=16;
std::vector<int> latt_size ({N,N,N,N});
std::vector<int> simd_layout({vComplexD::Nsimd(),1,1,1});
std::vector<int> mpi_layout ({1,1,1,1});

GridCartesian GRID(latt_size,simd_layout,mpi_layout);

LatticeComplexD rn(&GRID);

GridParallelRNG RNG(&GRID);
RNG.SeedFixedIntegers(std::vector<int>({45,12,81,9}));
gaussian(RNG,rn);

std::vector<TComplex> reduced_ref;
std::vector<TComplex> reduced_gpu;
for(int d=0;d<4;d++){
{
RealD t=-usecond();
sliceSum(rn,reduced_ref,d);
t+=usecond();
std::cout << " sliceSum took "<<t<<" usecs"<<std::endl;
}
{
RealD t=-usecond();
sliceSumGpu(rn,reduced_gpu,d);
t+=usecond();
std::cout << " sliceSumGpu took "<<t<<" usecs"<<std::endl;
}
for(int t=0;t<reduced_ref.size();t++){
std::cout << t<<" ref "<< reduced_ref[t] <<" opt " << reduced_gpu[t] << " diff "<<reduced_ref[t]-reduced_gpu[t]<<std::endl;
TComplex diff = reduced_ref[t]-reduced_gpu[t];
assert(abs(TensorRemove(diff)) < 1e-8 );
}
}
Grid_finalize();
}
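sliceSum returns one tensor-valued sum per coordinate along the chosen direction; a short post-processing sketch (illustration only; Tdir is Grid's time-direction index, and rn is the field defined in the test above):

// Illustration only: convert per-slice sums into plain complex numbers, e.g. for a correlator C(t).
std::vector<TComplex> sliced;
sliceSum(rn, sliced, Tdir);            // reduce over every slice orthogonal to the time direction
std::vector<ComplexD> Ct(sliced.size());
for (int t = 0; t < (int)sliced.size(); t++) {
  Ct[t] = TensorRemove(sliced[t]);     // strip the tensor wrapper down to a ComplexD
  std::cout << "C(" << t << ") = " << Ct[t] << std::endl;
}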