Mirror of https://github.com/paboyle/Grid.git (synced 2024-11-10 07:55:35 +00:00)
Reorg memory manager for O(1) hash table
commit 32237895bd
parent 1d252d0922
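The reorganisation this commit describes replaces a fixed-size, linearly scanned view cache with a hash table keyed by the CPU pointer, so locating an existing device view becomes O(1) on average rather than a scan over the cache array. A minimal standalone sketch of that idea (field set and names simplified, not the actual Grid code):

#include <cstdint>
#include <cstddef>
#include <cassert>
#include <unordered_map>

// Much-reduced stand-in for an accelerator view entry.
struct ViewEntry {
  uint64_t CpuPtr;
  uint64_t AccPtr;
  size_t   bytes;
};

// Keyed by the CPU pointer: find/insert/erase are O(1) on average,
// replacing a linear scan over a fixed AccCache[NaccCache] array.
static std::unordered_map<uint64_t, ViewEntry> ViewTable;

int main() {
  uint64_t cpu = 0x4000;
  ViewTable[cpu] = ViewEntry{cpu, 0, 1024};       // create an entry
  assert(ViewTable.find(cpu) != ViewTable.end()); // O(1) lookup by pointer
  ViewTable.erase(cpu);                           // O(1) removal
  return 0;
}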
@@ -55,7 +55,7 @@ public:
profilerAllocate(bytes);
_Tp *ptr = (_Tp*) AllocationCache::CpuAllocate(bytes);
_Tp *ptr = (_Tp*) MemoryManager::CpuAllocate(bytes);
assert( ( (_Tp*)ptr != (_Tp *)NULL ) );
@@ -68,7 +68,7 @@ public:
profilerFree(bytes);
AllocationCache::CpuFree((void *)__p,bytes);
MemoryManager::CpuFree((void *)__p,bytes);
}

// FIXME: hack for the copy constructor, eventually it must be avoided
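For context, the hunk above only swaps the backend name inside Grid's aligned allocator; the allocator itself keeps the usual STL shape and simply delegates allocate/deallocate to a pooled backend. A rough sketch of that pattern, with a trivial malloc/free backend standing in for MemoryManager::CpuAllocate/CpuFree (the profiler hooks and alignment details of the real class are omitted):

#include <cstdlib>
#include <cstddef>
#include <new>
#include <vector>

// Stand-in backend; in Grid this role is played by MemoryManager::CpuAllocate/CpuFree.
struct PoolBackend {
  static void *Allocate(size_t bytes)      { return std::malloc(bytes); }
  static void  Free(void *p, size_t /*b*/) { std::free(p); }
};

template <typename T>
class pooledAllocator {
public:
  typedef T value_type;
  pooledAllocator() = default;
  template <class U> pooledAllocator(const pooledAllocator<U> &) {}

  T *allocate(size_t n) {
    T *p = static_cast<T *>(PoolBackend::Allocate(n * sizeof(T)));
    if (!p) throw std::bad_alloc();
    return p;
  }
  void deallocate(T *p, size_t n) { PoolBackend::Free(p, n * sizeof(T)); }
};
template <class A, class B>
bool operator==(const pooledAllocator<A>&, const pooledAllocator<B>&) { return true; }
template <class A, class B>
bool operator!=(const pooledAllocator<A>&, const pooledAllocator<B>&) { return false; }

int main() {
  std::vector<double, pooledAllocator<double>> v(16, 1.0); // allocations go through the pool
  return v.size() == 16 ? 0 : 1;
}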
@@ -1,4 +1,4 @@
#pragma once
#include <Grid/allocator/MemoryStats.h>
#include <Grid/allocator/AllocationCache.h>
#include <Grid/allocator/MemoryManager.h>
#include <Grid/allocator/AlignedAllocator.h>
@@ -1,28 +0,0 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM

#warning "Grid is assuming unified virtual memory address space"
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping
/////////////////////////////////////////////////////////////////////////////////

void AllocationCache::AcceleratorViewClose(void* AccPtr){};
void *AllocationCache::AcceleratorViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; }
void AllocationCache::CpuViewClose(void* Ptr){};
void *AllocationCache::CpuViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; }
int AllocationCache::CpuViewLookup(void *CpuPtr){ return 0;}
/////////////////////////////////////
// Dummy stubs
/////////////////////////////////////
void AllocationCache::CpuDiscard(int e) { return;}
void AllocationCache::Discard(int e) { return;}
void AllocationCache::Evict(int e) { return; }
void AllocationCache::Flush(int e) { assert(0);}
void AllocationCache::Clone(int e) { assert(0);}
int AllocationCache::ViewVictim(void) { assert(0); return 0;}
void AllocationCache::ViewClose(void* AccPtr,ViewMode mode){};
void *AllocationCache::ViewOpen (void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){return CpuPtr;};

NAMESPACE_END(Grid);
#endif
@@ -11,14 +11,14 @@ NAMESPACE_BEGIN(Grid);
//////////////////////////////////////////////////////////////////////
// Data tables for recently freed pointer caches
//////////////////////////////////////////////////////////////////////
AllocationCache::AllocationCacheEntry AllocationCache::Entries[AllocationCache::NallocType][AllocationCache::NallocCacheMax];
int AllocationCache::Victim[AllocationCache::NallocType];
int AllocationCache::Ncache[AllocationCache::NallocType];
MemoryManager::AllocationCacheEntry MemoryManager::Entries[MemoryManager::NallocType][MemoryManager::NallocCacheMax];
int MemoryManager::Victim[MemoryManager::NallocType];
int MemoryManager::Ncache[MemoryManager::NallocType];

//////////////////////////////////////////////////////////////////////
// Actual allocation and deallocation utils
//////////////////////////////////////////////////////////////////////
void *AllocationCache::AcceleratorAllocate(size_t bytes)
void *MemoryManager::AcceleratorAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Acc);

@@ -29,13 +29,13 @@ void *AllocationCache::AcceleratorAllocate(size_t bytes)

return ptr;
}
void AllocationCache::AcceleratorFree (void *ptr,size_t bytes)
void MemoryManager::AcceleratorFree (void *ptr,size_t bytes)
{
void *__freeme = Insert(ptr,bytes,Acc);

if ( __freeme ) acceleratorFreeDevice(__freeme);
}
void *AllocationCache::CpuAllocate(size_t bytes)
void *MemoryManager::CpuAllocate(size_t bytes)
{
void *ptr = (void *) Lookup(bytes,Cpu);

@@ -46,23 +46,19 @@ void *AllocationCache::CpuAllocate(size_t bytes)

return ptr;
}
void AllocationCache::CpuFree (void *ptr,size_t bytes)
void MemoryManager::CpuFree (void *_ptr,size_t bytes)
{
// Look up in ViewCache
int e=CpuViewLookup(ptr);
if(e>=0){ Discard(e); }
NotifyDeletion(_ptr);

// If present remove entry and free accelerator too.
// Can we ever hit a free event with a view still in scope?
void *__freeme = Insert(ptr,bytes,Cpu);
// std::cout <<"CpuFree cached pointer "<<std::hex<<ptr<<std::endl;
// std::cout <<"CpuFree deallocating pointer "<<std::hex<<__freeme<<std::endl;
void *__freeme = Insert(_ptr,bytes,Cpu);
if ( __freeme ) acceleratorFreeShared(__freeme);
}
//////////////////////////////////////////
// call only once
//////////////////////////////////////////
void AllocationCache::Init(void)
void MemoryManager::Init(void)
{
Ncache[Cpu] = 8;
Ncache[Acc] = 8;
@@ -93,7 +89,7 @@ void AllocationCache::Init(void)
std::cout << "MemoryManager::Init() SMALL "<<Ncache[CpuSmall]<<" LARGE "<<Ncache[Cpu]<<std::endl;
}

void *AllocationCache::Insert(void *ptr,size_t bytes,int type)
void *MemoryManager::Insert(void *ptr,size_t bytes,int type)
{
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
@@ -103,7 +99,8 @@ void *AllocationCache::Insert(void *ptr,size_t bytes,int type)
return ptr;
#endif
}
void *AllocationCache::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim)

void *MemoryManager::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim)
{
assert(ncache>0);
#ifdef GRID_OMP
@@ -139,7 +136,7 @@ void *AllocationCache::Insert(void *ptr,size_t bytes,AllocationCacheEntry *entri
return ret;
}

void *AllocationCache::Lookup(size_t bytes,int type)
void *MemoryManager::Lookup(size_t bytes,int type)
{
#ifdef ALLOCATION_CACHE
bool small = (bytes < GRID_ALLOC_SMALL_LIMIT);
@@ -149,7 +146,8 @@ void *AllocationCache::Lookup(size_t bytes,int type)
return NULL;
#endif
}
void *AllocationCache::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache)

void *MemoryManager::Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache)
{
assert(ncache>0);
#ifdef GRID_OMP
@@ -164,5 +162,6 @@ void *AllocationCache::Lookup(size_t bytes,AllocationCacheEntry *entries,int nca

return NULL;
}

NAMESPACE_END(Grid);
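The Insert/Lookup pair above implements a small cache of recently freed blocks, split into small and large pools by GRID_ALLOC_SMALL_LIMIT, with a round-robin victim slot once the pool is full. A self-contained sketch of that mechanism under the same assumptions (exact-size reuse, fixed pool, round-robin eviction); the real code adds the type split, OpenMP guards and profiling:

#include <cstdlib>
#include <cstddef>
#include <cassert>

struct CacheEntry { void *address; size_t bytes; int valid; };

static const int Ncache = 8;
static CacheEntry Entries[Ncache];
static int Victim = 0;

// Park a freed block; return the pointer the caller must actually free
// (an evicted victim, or nullptr if the pool still had a free slot).
void *Insert(void *ptr, size_t bytes) {
  int v = -1;
  for (int e = 0; e < Ncache; e++) {
    if (!Entries[e].valid) { v = e; break; }   // empty slot wins
  }
  if (v == -1) {                               // otherwise evict round-robin
    v = Victim;
    Victim = (Victim + 1) % Ncache;
  }
  void *evicted = Entries[v].valid ? Entries[v].address : nullptr;
  Entries[v] = CacheEntry{ptr, bytes, 1};
  return evicted;
}

// Reuse a cached block of exactly the requested size, if one exists.
void *Lookup(size_t bytes) {
  for (int e = 0; e < Ncache; e++) {
    if (Entries[e].valid && Entries[e].bytes == bytes) {
      Entries[e].valid = 0;
      return Entries[e].address;
    }
  }
  return nullptr;
}

int main() {
  void *p = std::malloc(1024);
  void *evicted = Insert(p, 1024);   // cache it instead of freeing
  if (evicted) std::free(evicted);
  void *q = Lookup(1024);            // comes straight back from the cache
  assert(q == p);
  std::free(q);
  return 0;
}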
@@ -2,7 +2,7 @@

Grid physics library, www.github.com/paboyle/Grid

Source file: ./lib/AllocationCache.h
Source file: ./lib/MemoryManager.h

Copyright (C) 2015

@@ -27,6 +27,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <list>
#include <unordered_map>

NAMESPACE_BEGIN(Grid);

@@ -65,7 +67,7 @@ enum ViewMode {
CpuWriteDiscard = 0x10 // same for now
};

class AllocationCache {
class MemoryManager {
private:

////////////////////////////////////////////////////////////
@@ -87,36 +89,89 @@ private:
// Free pool
/////////////////////////////////////////////////
static void *Insert(void *ptr,size_t bytes,int type) ;
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
static void *Lookup(size_t bytes,int type) ;
static void *Insert(void *ptr,size_t bytes,AllocationCacheEntry *entries,int ncache,int &victim) ;
static void *Lookup(size_t bytes,AllocationCacheEntry *entries,int ncache) ;

/////////////////////////////////////////////////
// Internal device view
/////////////////////////////////////////////////
static void *AcceleratorAllocate(size_t bytes);
static void AcceleratorFree (void *ptr,size_t bytes);
static int ViewVictim(void);
static void CpuDiscard(int e);
static void Discard(int e);
static void Evict(int e);
static void Flush(int e);
static void Clone(int e);
static int CpuViewLookup(void *CpuPtr);
// static int AccViewLookup(void *AccPtr);
static void AcceleratorViewClose(void* AccPtr);
static void *AcceleratorViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void CpuViewClose(void* Ptr);
static void *CpuViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);

public:
public:
static void Init(void);
static void *CpuAllocate(size_t bytes);
static void CpuFree (void *ptr,size_t bytes);

private:

////////////////////////////////////////////////////////
// Footprint tracking
////////////////////////////////////////////////////////
static uint64_t DeviceBytes;
static uint64_t DeviceLRUBytes;
static uint64_t DeviceMaxBytes;
static uint64_t HostToDeviceBytes;
static uint64_t DeviceToHostBytes;

#ifndef GRID_UVM
//////////////////////////////////////////////////////////////////////
// Data tables for ViewCache
//////////////////////////////////////////////////////////////////////
typedef std::list<uint64_t> LRU_t;
typedef typename LRU_t::iterator LRUiterator;
typedef struct {
int LRU_valid;
LRUiterator LRU_entry;
uint64_t CpuPtr;
uint64_t AccPtr;
size_t bytes;
uint32_t transient;
uint32_t state;
uint32_t accLock;
uint32_t cpuLock;
} AcceleratorViewEntry;

typedef std::unordered_map<uint64_t,AcceleratorViewEntry> AccViewTable_t;
typedef typename AccViewTable_t::iterator AccViewTableIterator ;

static AccViewTable_t AccViewTable;
static LRU_t LRU;
static LRU_t LRU_transient;

/////////////////////////////////////////////////
// Device motion
/////////////////////////////////////////////////
static void Create(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void EvictVictims(uint64_t bytes); // Frees up <bytes>
static void Evict(AcceleratorViewEntry &AccCache);
static void Flush(AcceleratorViewEntry &AccCache);
static void Clone(AcceleratorViewEntry &AccCache);
static void AccDiscard(AcceleratorViewEntry &AccCache);
static void CpuDiscard(AcceleratorViewEntry &AccCache);

// static void LRUupdate(AcceleratorViewEntry &AccCache);
static void LRUinsert(AcceleratorViewEntry &AccCache);
static void LRUremove(AcceleratorViewEntry &AccCache);

// manage entries in the table
static int EntryPresent(uint64_t CpuPtr);
static void EntryCreate(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void EntryErase (uint64_t CpuPtr);
static AccViewTableIterator EntryLookup(uint64_t CpuPtr);
static void EntrySet (uint64_t CpuPtr,AcceleratorViewEntry &entry);

static void AcceleratorViewClose(uint64_t AccPtr);
static uint64_t AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
static void CpuViewClose(uint64_t Ptr);
static uint64_t CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
#endif
static void NotifyDeletion(void * CpuPtr);

public:
static void Print(void);
static void ViewClose(void* AccPtr,ViewMode mode);
static void *ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);

static void *CpuAllocate(size_t bytes);
static void CpuFree (void *ptr,size_t bytes);
};

NAMESPACE_END(Grid);
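The new private section above is where the O(1) reorganisation lives: views are tracked in a std::unordered_map keyed by the CPU pointer, with std::list-based LRU bookkeeping in place of a round-robin scan. The definitions of EntryPresent/EntryCreate/EntryLookup and LRUinsert/LRUremove are not part of this diff; the following is a hedged sketch of how such a table and LRU list typically fit together (field set reduced, no locking or data-transfer logic):

#include <cstdint>
#include <cstddef>
#include <cassert>
#include <list>
#include <unordered_map>

typedef std::list<uint64_t> LRU_t;

struct Entry {
  uint64_t CpuPtr;
  size_t   bytes;
  int      LRU_valid;
  LRU_t::iterator LRU_entry;   // stored position in the LRU list, O(1) removal
};

typedef std::unordered_map<uint64_t, Entry> Table_t;

static Table_t Table;   // keyed by CpuPtr: O(1) average lookup
static LRU_t   LRU;     // least recently used entries drift to the back

int EntryPresent(uint64_t CpuPtr) { return Table.count(CpuPtr); }

void EntryCreate(uint64_t CpuPtr, size_t bytes) {
  Entry e; e.CpuPtr = CpuPtr; e.bytes = bytes; e.LRU_valid = 0;
  Table[CpuPtr] = e;
}

Table_t::iterator EntryLookup(uint64_t CpuPtr) {
  Table_t::iterator it = Table.find(CpuPtr);
  assert(it != Table.end());
  return it;
}

void LRUinsert(Entry &e) {             // most recently used goes to the front
  LRU.push_front(e.CpuPtr);
  e.LRU_entry = LRU.begin();
  e.LRU_valid = 1;
}

void LRUremove(Entry &e) {             // O(1) thanks to the stored iterator
  assert(e.LRU_valid);
  LRU.erase(e.LRU_entry);
  e.LRU_valid = 0;
}

int main() {
  uint64_t p = 0x1000;
  EntryCreate(p, 4096);
  LRUinsert(EntryLookup(p)->second);
  LRUremove(EntryLookup(p)->second);
  return EntryPresent(p) ? 0 : 1;
}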
@@ -1,3 +1,4 @@
#if 0
#include <Grid/GridCore.h>
#ifndef GRID_UVM

@@ -40,7 +41,7 @@ static int NaccCache = 32;
#define AccDirty (0x4) /*ACC copy is golden */
#define EvictNext (0x8) /*Priority for eviction*/

int AllocationCache::ViewVictim(void)
int MemoryManager::ViewVictim(void)
{
int prioEmpty =-1;
int prioCpuDirty =-1;
@@ -55,7 +56,7 @@ int AllocationCache::ViewVictim(void)
// round robin priority search of unlocked entries offset from current victim
for(int ep=0;ep<NaccCache;ep++){
int e = (ep+AccCacheVictim)%NaccCache;
dprintf("AllocationCacheDeviceMem: Inspecting cache entry %d :",e);
dprintf("MemoryManagerDeviceMem: Inspecting cache entry %d :",e);

uint32_t locks = AccCache[e].cpuLock+AccCache[e].accLock;
uint32_t s = AccCache[e].state;
@@ -94,11 +95,11 @@ int AllocationCache::ViewVictim(void)
if ( prioEmpty >= 0 ) victim = prioEmpty; /*Highest prio is winner*/

assert(victim >= 0); // Must succeed/
dprintf("AllocationCacheDeviceMem: Selected victim cache entry %d\n",victim);
dprintf("MemoryManagerDeviceMem: Selected victim cache entry %d\n",victim);

// advance victim pointer
AccCacheVictim=(AccCacheVictim+1)%NaccCache;
dprintf("AllocationCacheDeviceMem: victim pointer now %d / %d\n",AccCacheVictim,NaccCache);
dprintf("MemoryManagerDeviceMem: victim pointer now %d / %d\n",AccCacheVictim,NaccCache);

return victim;
}
@@ -106,15 +107,15 @@ int AllocationCache::ViewVictim(void)
// Accelerator cache motion
/////////////////////////////////////////////////

void AllocationCache::Discard(int e) // remove from Accelerator, remove entry, without flush
void MemoryManager::Discard(int e) // remove from Accelerator, remove entry, without flush
{
if(AccCache[e].state!=Empty){
dprintf("AllocationCache: Discard(%d) %llx,%llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr);
dprintf("MemoryManager: Discard(%d) %llx,%llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr);
assert(AccCache[e].accLock==0);
assert(AccCache[e].cpuLock==0);
assert(AccCache[e].CpuPtr!=NULL);
if(AccCache[e].AccPtr) {
dprintf("AllocationCache: Free(%d) %llx\n",e,(uint64_t)AccCache[e].AccPtr);
dprintf("MemoryManager: Free(%d) %llx\n",e,(uint64_t)AccCache[e].AccPtr);
AcceleratorFree(AccCache[e].AccPtr,AccCache[e].bytes);
}
}
@@ -126,10 +127,10 @@ void AllocationCache::Discard(int e) // remove from Accelerator, remove entry, w
AccCache[e].cpuLock=0;
}

void AllocationCache::Evict(int e) // Make CPU consistent, remove from Accelerator, remove entry
void MemoryManager::Evict(int e) // Make CPU consistent, remove from Accelerator, remove entry
{
if(AccCache[e].state!=Empty){
dprintf("AllocationCache: Evict(%d) %llx,%llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr);
dprintf("MemoryManager: Evict(%d) %llx,%llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr);
assert(AccCache[e].accLock==0);
assert(AccCache[e].cpuLock==0);
if(AccCache[e].state==AccDirty) {
@@ -137,7 +138,7 @@ void AllocationCache::Evict(int e) // Make CPU consistent, remove from Accelerat
}
assert(AccCache[e].CpuPtr!=NULL);
if(AccCache[e].AccPtr) {
dprintf("AllocationCache: Free(%d) %llx\n",e,(uint64_t)AccCache[e].AccPtr);
dprintf("MemoryManager: Free(%d) %llx\n",e,(uint64_t)AccCache[e].AccPtr);
AcceleratorFree(AccCache[e].AccPtr,AccCache[e].bytes);
}
}
@@ -148,9 +149,9 @@ void AllocationCache::Evict(int e) // Make CPU consistent, remove from Accelerat
AccCache[e].accLock=0;
AccCache[e].cpuLock=0;
}
void AllocationCache::Flush(int e)// Copy back from a dirty device state and mark consistent. Do not remove
void MemoryManager::Flush(int e)// Copy back from a dirty device state and mark consistent. Do not remove
{
// printf("AllocationCache: Flush(%d) %llx -> %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
// printf("MemoryManager: Flush(%d) %llx -> %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
assert(AccCache[e].state==AccDirty);
assert(AccCache[e].cpuLock==0);
assert(AccCache[e].accLock==0);
@@ -159,7 +160,7 @@ void AllocationCache::Flush(int e)// Copy back from a dirty device state and mar
acceleratorCopyFromDevice(AccCache[e].AccPtr,AccCache[e].CpuPtr,AccCache[e].bytes);
AccCache[e].state=Consistent;
}
void AllocationCache::Clone(int e)// Copy from CPU, mark consistent. Allocate if necessary
void MemoryManager::Clone(int e)// Copy from CPU, mark consistent. Allocate if necessary
{
assert(AccCache[e].state==CpuDirty);
assert(AccCache[e].cpuLock==0);
@@ -168,12 +169,12 @@ void AllocationCache::Clone(int e)// Copy from CPU, mark consistent. Allocate if
if(AccCache[e].AccPtr==NULL){
AccCache[e].AccPtr=AcceleratorAllocate(AccCache[e].bytes);
}
// printf("AllocationCache: Clone(%d) %llx <- %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
// printf("MemoryManager: Clone(%d) %llx <- %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
acceleratorCopyToDevice(AccCache[e].CpuPtr,AccCache[e].AccPtr,AccCache[e].bytes);
AccCache[e].state=Consistent;
}

void AllocationCache::CpuDiscard(int e)// Mark accelerator dirty without copy. Allocate if necessary
void MemoryManager::CpuDiscard(int e)// Mark accelerator dirty without copy. Allocate if necessary
{
assert(AccCache[e].state!=Empty);
assert(AccCache[e].cpuLock==0);
@@ -182,7 +183,7 @@ void AllocationCache::CpuDiscard(int e)// Mark accelerator dirty without copy. A
if(AccCache[e].AccPtr==NULL){
AccCache[e].AccPtr=AcceleratorAllocate(AccCache[e].bytes);
}
// printf("AllocationCache: CpuDiscard(%d) %llx <- %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
// printf("MemoryManager: CpuDiscard(%d) %llx <- %llx\n",e,(uint64_t)AccCache[e].AccPtr,(uint64_t)AccCache[e].CpuPtr); fflush(stdout);
// acceleratorCopyToDevice(AccCache[e].CpuPtr,AccCache[e].AccPtr,AccCache[e].bytes);
AccCache[e].state=AccDirty;
}
@@ -190,7 +191,7 @@ void AllocationCache::CpuDiscard(int e)// Mark accelerator dirty without copy. A
/////////////////////////////////////////////////////////////////////////////////
// View management
/////////////////////////////////////////////////////////////////////////////////
void AllocationCache::ViewClose(void* Ptr,ViewMode mode)
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
{
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
AcceleratorViewClose(Ptr);
@@ -200,7 +201,7 @@ void AllocationCache::ViewClose(void* Ptr,ViewMode mode)
assert(0);
}
}
void *AllocationCache::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
{
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
return AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
@@ -211,7 +212,7 @@ void *AllocationCache::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvi
return nullptr;
}
}
void *AllocationCache::AcceleratorViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
void *MemoryManager::AcceleratorViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint)
{
////////////////////////////////////////////////////////////////////////////
// Find if present, otherwise get or force an empty
@@ -312,7 +313,7 @@ void *AllocationCache::AcceleratorViewOpen(void* CpuPtr,size_t bytes,ViewMode mo
////////////////////////////////////
// look up & decrement lock count
////////////////////////////////////
void AllocationCache::AcceleratorViewClose(void* AccPtr)
void MemoryManager::AcceleratorViewClose(void* AccPtr)
{
int e=CpuViewLookup(AccPtr);
// printf("AccView close %d lock %d \n",e,AccCache[e].accLock);
@@ -326,7 +327,7 @@ void AllocationCache::AcceleratorViewClose(void* AccPtr)
*/
AccCache[e].accLock--;
}
void AllocationCache::CpuViewClose(void* CpuPtr)
void MemoryManager::CpuViewClose(void* CpuPtr)
{
int e=CpuViewLookup(CpuPtr);
assert(e!=-1);
@@ -334,7 +335,7 @@ void AllocationCache::CpuViewClose(void* CpuPtr)
assert(AccCache[e].accLock==0);
AccCache[e].cpuLock--;
}
void *AllocationCache::CpuViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise transient)
void *MemoryManager::CpuViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise transient)
{
////////////////////////////////////////////////////////////////////////////
// Find if present, otherwise get or force an empty
@@ -390,7 +391,7 @@ void *AllocationCache::CpuViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewA
//////////////////////////////////////////////////////////////////////////////
//loop round robin over entries checking acc pointer
//////////////////////////////////////////////////////////////////////////////
int AllocationCache::CpuViewLookup(void *CpuPtr)
int MemoryManager::CpuViewLookup(void *CpuPtr)
{
assert(CpuPtr!=NULL);
for(int e=0;e<NaccCache;e++){
@@ -405,3 +406,4 @@ int AllocationCache::CpuViewLookup(void *CpuPtr)
NAMESPACE_END(Grid);

#endif
#endif
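The cache-motion functions above (Discard, Evict, Flush, Clone, CpuDiscard) move an entry between four states: Empty, CpuDirty (host copy is golden), AccDirty (device copy is golden) and Consistent. This whole file is now wrapped in #if 0, i.e. the array-based implementation is being retired in favour of the hash-table one, but the state machine itself carries over. A compact sketch of just those transitions, using plain memcpy/malloc placeholders instead of acceleratorCopyToDevice/acceleratorCopyFromDevice and device allocation:

#include <cassert>
#include <cstring>
#include <cstdlib>
#include <cstddef>

enum State { Empty, CpuDirty, AccDirty, Consistent };

struct View {
  void  *CpuPtr;
  void  *AccPtr;
  size_t bytes;
  State  state;
};

// Placeholders for acceleratorCopyToDevice / acceleratorCopyFromDevice.
static void copyToDevice  (void *cpu, void *acc, size_t n) { std::memcpy(acc, cpu, n); }
static void copyFromDevice(void *acc, void *cpu, size_t n) { std::memcpy(cpu, acc, n); }

// Clone: copy host -> device and mark consistent (CpuDirty -> Consistent).
void Clone(View &v) {
  assert(v.state == CpuDirty);
  if (!v.AccPtr) v.AccPtr = std::malloc(v.bytes);
  copyToDevice(v.CpuPtr, v.AccPtr, v.bytes);
  v.state = Consistent;
}

// Flush: copy device -> host and mark consistent (AccDirty -> Consistent).
void Flush(View &v) {
  assert(v.state == AccDirty);
  copyFromDevice(v.AccPtr, v.CpuPtr, v.bytes);
  v.state = Consistent;
}

// Evict: make the host copy golden, then drop the device copy entirely.
void Evict(View &v) {
  if (v.state == AccDirty) Flush(v);
  if (v.AccPtr) { std::free(v.AccPtr); v.AccPtr = nullptr; }
  v.state = Empty;
}

int main() {
  double data = 1.0;
  View v{&data, nullptr, sizeof(double), CpuDirty};
  Clone(v);            // host -> device
  v.state = AccDirty;  // pretend a device kernel wrote to it
  Evict(v);            // flush back and release the device copy
  return v.state == Empty ? 0 : 1;
}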
Grid/allocator/MemoryManagerShared.cc (new file, 16 lines)
@@ -0,0 +1,16 @@
#include <Grid/GridCore.h>
#ifdef GRID_UVM

#warning "Grid is assuming unified virtual memory address space"
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////////////////////
// View management is 1:1 address space mapping
/////////////////////////////////////////////////////////////////////////////////

void MemoryManager::ViewClose(void* AccPtr,ViewMode mode){};
void *MemoryManager::ViewOpen(void* CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint){ return CpuPtr; };
void MemoryManager::Print(void){};
void MemoryManager::NotifyDeletion(void *ptr){};

NAMESPACE_END(Grid);
#endif
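Under GRID_UVM the address space is unified, so the view machinery collapses to an identity mapping: ViewOpen hands back the CPU pointer and ViewClose does nothing. A trivial sketch of what calling code can therefore rely on in this configuration (Grid-specific types replaced by plain ones):

#include <cstddef>
#include <cassert>

// With unified virtual memory, opening a device view is a no-op:
// the same pointer is valid on host and device.
void *ViewOpen(void *CpuPtr, size_t /*bytes*/) { return CpuPtr; }
void  ViewClose(void * /*AccPtr*/) {}

int main() {
  double field[8] = {0};
  void *acc = ViewOpen(field, sizeof(field)); // the identical pointer comes back
  assert(acc == (void *)field);
  ViewClose(acc);
  return 0;
}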