mirror of
https://github.com/paboyle/Grid.git
synced 2025-04-04 19:25:56 +01:00
Merged
This commit is contained in:
commit
9ffd1ed4ce
@ -74,7 +74,7 @@ public:
|
|||||||
|
|
||||||
void operator() (const Field &src, Field &psi){
|
void operator() (const Field &src, Field &psi){
|
||||||
|
|
||||||
psi=Zero();
|
// psi=Zero();
|
||||||
RealD cp, ssq,rsq;
|
RealD cp, ssq,rsq;
|
||||||
ssq=norm2(src);
|
ssq=norm2(src);
|
||||||
rsq=Tolerance*Tolerance*ssq;
|
rsq=Tolerance*Tolerance*ssq;
|
||||||
|
@ -30,6 +30,8 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
/* END LEGAL */
|
/* END LEGAL */
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <Grid/algorithms/iterative/PrecGeneralisedConjugateResidualNonHermitian.h>
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
inline RealD AggregatePowerLaw(RealD x)
|
inline RealD AggregatePowerLaw(RealD x)
|
||||||
@ -124,6 +126,53 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
virtual void CreateSubspaceGCR(GridParallelRNG &RNG,LinearOperatorBase<FineField> &DiracOp,int nn=nbasis)
|
||||||
|
{
|
||||||
|
RealD scale;
|
||||||
|
|
||||||
|
TrivialPrecon<FineField> simple_fine;
|
||||||
|
PrecGeneralisedConjugateResidualNonHermitian<FineField> GCR(0.001,30,DiracOp,simple_fine,12,12);
|
||||||
|
FineField noise(FineGrid);
|
||||||
|
FineField src(FineGrid);
|
||||||
|
FineField guess(FineGrid);
|
||||||
|
FineField Mn(FineGrid);
|
||||||
|
|
||||||
|
for(int b=0;b<nn;b++){
|
||||||
|
|
||||||
|
subspace[b] = Zero();
|
||||||
|
gaussian(RNG,noise);
|
||||||
|
scale = std::pow(norm2(noise),-0.5);
|
||||||
|
noise=noise*scale;
|
||||||
|
|
||||||
|
DiracOp.Op(noise,Mn); std::cout<<GridLogMessage << "noise ["<<b<<"] <n|Op|n> "<<innerProduct(noise,Mn)<<std::endl;
|
||||||
|
|
||||||
|
for(int i=0;i<3;i++){
|
||||||
|
// void operator() (const Field &src, Field &psi){
|
||||||
|
#if 1
|
||||||
|
std::cout << GridLogMessage << " inverting on noise "<<std::endl;
|
||||||
|
src = noise;
|
||||||
|
guess=Zero();
|
||||||
|
GCR(src,guess);
|
||||||
|
subspace[b] = guess;
|
||||||
|
#else
|
||||||
|
std::cout << GridLogMessage << " inverting on zero "<<std::endl;
|
||||||
|
src=Zero();
|
||||||
|
guess = noise;
|
||||||
|
GCR(src,guess);
|
||||||
|
subspace[b] = guess;
|
||||||
|
#endif
|
||||||
|
noise = subspace[b];
|
||||||
|
scale = std::pow(norm2(noise),-0.5);
|
||||||
|
noise=noise*scale;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
DiracOp.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|Op|f> "<<innerProduct(noise,Mn)<<std::endl;
|
||||||
|
subspace[b] = noise;
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
// World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit)
|
// World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit)
|
||||||
// and this is the best I found
|
// and this is the best I found
|
||||||
@ -160,14 +209,21 @@ public:
|
|||||||
|
|
||||||
int b =0;
|
int b =0;
|
||||||
{
|
{
|
||||||
|
ComplexD ip;
|
||||||
// Filter
|
// Filter
|
||||||
Chebyshev<FineField> Cheb(lo,hi,orderfilter);
|
Chebyshev<FineField> Cheb(lo,hi,orderfilter);
|
||||||
Cheb(hermop,noise,Mn);
|
Cheb(hermop,noise,Mn);
|
||||||
// normalise
|
// normalise
|
||||||
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
|
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
|
||||||
subspace[b] = Mn;
|
subspace[b] = Mn;
|
||||||
hermop.Op(Mn,tmp);
|
|
||||||
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
|
hermop.Op(Mn,tmp);
|
||||||
|
ip= innerProduct(Mn,tmp);
|
||||||
|
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|Op|n> "<<norm2(tmp)<<" "<<ip<<std::endl;
|
||||||
|
|
||||||
|
hermop.AdjOp(Mn,tmp);
|
||||||
|
ip = innerProduct(Mn,tmp);
|
||||||
|
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|AdjOp|n> "<<norm2(tmp)<<" "<<ip<<std::endl;
|
||||||
b++;
|
b++;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -213,8 +269,18 @@ public:
|
|||||||
Mn=*Tnp;
|
Mn=*Tnp;
|
||||||
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
|
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
|
||||||
subspace[b] = Mn;
|
subspace[b] = Mn;
|
||||||
hermop.Op(Mn,tmp);
|
|
||||||
std::cout<<GridLogMessage << n<<" filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
|
|
||||||
|
ComplexD ip;
|
||||||
|
|
||||||
|
hermop.Op(Mn,tmp);
|
||||||
|
ip= innerProduct(Mn,tmp);
|
||||||
|
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|Op|n> "<<norm2(tmp)<<" "<<ip<<std::endl;
|
||||||
|
|
||||||
|
hermop.AdjOp(Mn,tmp);
|
||||||
|
ip = innerProduct(Mn,tmp);
|
||||||
|
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|AdjOp|n> "<<norm2(tmp)<<" "<<ip<<std::endl;
|
||||||
|
|
||||||
b++;
|
b++;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,10 +175,11 @@ template<typename _Tp> inline bool operator!=(const devAllocator<_Tp>&, const d
|
|||||||
// Template typedefs
|
// Template typedefs
|
||||||
////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////
|
||||||
template<class T> using hostVector = std::vector<T,alignedAllocator<T> >; // Needs autoview
|
template<class T> using hostVector = std::vector<T,alignedAllocator<T> >; // Needs autoview
|
||||||
template<class T> using Vector = std::vector<T,uvmAllocator<T> >; //
|
template<class T> using Vector = std::vector<T,uvmAllocator<T> >; // Really want to deprecate
|
||||||
template<class T> using uvmVector = std::vector<T,uvmAllocator<T> >; // auto migrating page
|
template<class T> using uvmVector = std::vector<T,uvmAllocator<T> >; // auto migrating page
|
||||||
template<class T> using deviceVector = std::vector<T,devAllocator<T> >; // device vector
|
template<class T> using deviceVector = std::vector<T,devAllocator<T> >; // device vector
|
||||||
|
|
||||||
|
/*
|
||||||
template<class T> class vecView
|
template<class T> class vecView
|
||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
@ -214,6 +215,7 @@ template<class T> vecView<T> VectorView(Vector<T> &vec,ViewMode _mode)
|
|||||||
#define autoVecView(v_v,v,mode) \
|
#define autoVecView(v_v,v,mode) \
|
||||||
auto v_v = VectorView(v,mode); \
|
auto v_v = VectorView(v,mode); \
|
||||||
ViewCloser<decltype(v_v)> _autoView##v_v(v_v);
|
ViewCloser<decltype(v_v)> _autoView##v_v(v_v);
|
||||||
|
*/
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
|
||||||
|
@ -9,6 +9,7 @@ static char print_buffer [ MAXLINE ];
|
|||||||
#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer << std::endl;
|
#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer << std::endl;
|
||||||
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogDebug << print_buffer << std::endl;
|
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogDebug << print_buffer << std::endl;
|
||||||
//#define dprintf(...)
|
//#define dprintf(...)
|
||||||
|
//#define mprintf(...)
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////
|
||||||
// For caching copies of data on device
|
// For caching copies of data on device
|
||||||
@ -109,7 +110,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
///////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////
|
||||||
assert(AccCache.state!=Empty);
|
assert(AccCache.state!=Empty);
|
||||||
|
|
||||||
dprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
|
dprintf("MemoryManager: Discard(%lx) %lx",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
|
||||||
assert(AccCache.accLock==0);
|
assert(AccCache.accLock==0);
|
||||||
assert(AccCache.cpuLock==0);
|
assert(AccCache.cpuLock==0);
|
||||||
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
||||||
@ -119,7 +120,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
DeviceBytes -=AccCache.bytes;
|
DeviceBytes -=AccCache.bytes;
|
||||||
LRUremove(AccCache);
|
LRUremove(AccCache);
|
||||||
AccCache.AccPtr=(uint64_t) NULL;
|
AccCache.AccPtr=(uint64_t) NULL;
|
||||||
dprintf("MemoryManager: Free(%lx) LRU %ld Total %ld\n",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
|
dprintf("MemoryManager: Free(%lx) LRU %ld Total %ld",(uint64_t)AccCache.AccPtr,DeviceLRUBytes,DeviceBytes);
|
||||||
}
|
}
|
||||||
uint64_t CpuPtr = AccCache.CpuPtr;
|
uint64_t CpuPtr = AccCache.CpuPtr;
|
||||||
EntryErase(CpuPtr);
|
EntryErase(CpuPtr);
|
||||||
@ -139,7 +140,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
|
|||||||
///////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////
|
||||||
assert(AccCache.state!=Empty);
|
assert(AccCache.state!=Empty);
|
||||||
|
|
||||||
mprintf("MemoryManager: Evict CpuPtr %lx AccPtr %lx cpuLock %ld accLock %ld\n",
|
mprintf("MemoryManager: Evict CpuPtr %lx AccPtr %lx cpuLock %ld accLock %ld",
|
||||||
(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
|
(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
|
||||||
(uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
|
(uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
|
||||||
if (AccCache.accLock!=0) return;
|
if (AccCache.accLock!=0) return;
|
||||||
@ -153,7 +154,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
|
|||||||
AccCache.AccPtr=(uint64_t)NULL;
|
AccCache.AccPtr=(uint64_t)NULL;
|
||||||
AccCache.state=CpuDirty; // CPU primary now
|
AccCache.state=CpuDirty; // CPU primary now
|
||||||
DeviceBytes -=AccCache.bytes;
|
DeviceBytes -=AccCache.bytes;
|
||||||
dprintf("MemoryManager: Free(AccPtr %lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
|
dprintf("MemoryManager: Free(AccPtr %lx) footprint now %ld ",(uint64_t)AccCache.AccPtr,DeviceBytes);
|
||||||
}
|
}
|
||||||
// uint64_t CpuPtr = AccCache.CpuPtr;
|
// uint64_t CpuPtr = AccCache.CpuPtr;
|
||||||
DeviceEvictions++;
|
DeviceEvictions++;
|
||||||
@ -167,7 +168,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
|
|||||||
assert(AccCache.AccPtr!=(uint64_t)NULL);
|
assert(AccCache.AccPtr!=(uint64_t)NULL);
|
||||||
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
assert(AccCache.CpuPtr!=(uint64_t)NULL);
|
||||||
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
|
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
|
||||||
mprintf("MemoryManager: acceleratorCopyFromDevice Flush size %ld AccPtr %lx -> CpuPtr %lx\n",(uint64_t)AccCache.bytes,(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
mprintf("MemoryManager: acceleratorCopyFromDevice Flush size %ld AccPtr %lx -> CpuPtr %lx",(uint64_t)AccCache.bytes,(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
||||||
DeviceToHostBytes+=AccCache.bytes;
|
DeviceToHostBytes+=AccCache.bytes;
|
||||||
DeviceToHostXfer++;
|
DeviceToHostXfer++;
|
||||||
AccCache.state=Consistent;
|
AccCache.state=Consistent;
|
||||||
@ -182,7 +183,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
|
|||||||
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
|
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
|
||||||
DeviceBytes+=AccCache.bytes;
|
DeviceBytes+=AccCache.bytes;
|
||||||
}
|
}
|
||||||
mprintf("MemoryManager: acceleratorCopyToDevice Clone size %ld AccPtr %lx <- CpuPtr %lx\n",
|
mprintf("MemoryManager: acceleratorCopyToDevice Clone size %ld AccPtr %lx <- CpuPtr %lx",
|
||||||
(uint64_t)AccCache.bytes,
|
(uint64_t)AccCache.bytes,
|
||||||
(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
|
||||||
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
|
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
|
||||||
@ -210,7 +211,7 @@ void MemoryManager::CpuDiscard(AcceleratorViewEntry &AccCache)
|
|||||||
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
|
void MemoryManager::ViewClose(void* Ptr,ViewMode mode)
|
||||||
{
|
{
|
||||||
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
||||||
dprintf("AcceleratorViewClose %lx\n",(uint64_t)Ptr);
|
dprintf("AcceleratorViewClose %lx",(uint64_t)Ptr);
|
||||||
AcceleratorViewClose((uint64_t)Ptr);
|
AcceleratorViewClose((uint64_t)Ptr);
|
||||||
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
||||||
CpuViewClose((uint64_t)Ptr);
|
CpuViewClose((uint64_t)Ptr);
|
||||||
@ -222,7 +223,7 @@ void *MemoryManager::ViewOpen(void* _CpuPtr,size_t bytes,ViewMode mode,ViewAdvis
|
|||||||
{
|
{
|
||||||
uint64_t CpuPtr = (uint64_t)_CpuPtr;
|
uint64_t CpuPtr = (uint64_t)_CpuPtr;
|
||||||
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
if( (mode==AcceleratorRead)||(mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard) ){
|
||||||
dprintf("AcceleratorViewOpen %lx\n",(uint64_t)CpuPtr);
|
dprintf("AcceleratorViewOpen %lx",(uint64_t)CpuPtr);
|
||||||
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
|
return (void *) AcceleratorViewOpen(CpuPtr,bytes,mode,hint);
|
||||||
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
} else if( (mode==CpuRead)||(mode==CpuWrite)){
|
||||||
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
|
return (void *)CpuViewOpen(CpuPtr,bytes,mode,hint);
|
||||||
@ -265,7 +266,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
assert(AccCache.cpuLock==0); // Programming error
|
assert(AccCache.cpuLock==0); // Programming error
|
||||||
|
|
||||||
if(AccCache.state!=Empty) {
|
if(AccCache.state!=Empty) {
|
||||||
dprintf("ViewOpen found entry %lx %lx : sizes %ld %ld accLock %ld\n",
|
dprintf("ViewOpen found entry %lx %lx : sizes %ld %ld accLock %ld",
|
||||||
(uint64_t)AccCache.CpuPtr,
|
(uint64_t)AccCache.CpuPtr,
|
||||||
(uint64_t)CpuPtr,
|
(uint64_t)CpuPtr,
|
||||||
(uint64_t)AccCache.bytes,
|
(uint64_t)AccCache.bytes,
|
||||||
@ -305,7 +306,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
AccCache.state = Consistent; // Empty + AccRead => Consistent
|
AccCache.state = Consistent; // Empty + AccRead => Consistent
|
||||||
}
|
}
|
||||||
AccCache.accLock= 1;
|
AccCache.accLock= 1;
|
||||||
dprintf("Copied Empty entry into device accLock= %d\n",AccCache.accLock);
|
dprintf("Copied Empty entry into device accLock= %d",AccCache.accLock);
|
||||||
} else if(AccCache.state==CpuDirty ){
|
} else if(AccCache.state==CpuDirty ){
|
||||||
if(mode==AcceleratorWriteDiscard) {
|
if(mode==AcceleratorWriteDiscard) {
|
||||||
CpuDiscard(AccCache);
|
CpuDiscard(AccCache);
|
||||||
@ -318,21 +319,21 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
|
AccCache.state = Consistent; // CpuDirty + AccRead => Consistent
|
||||||
}
|
}
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("CpuDirty entry into device ++accLock= %d\n",AccCache.accLock);
|
dprintf("CpuDirty entry into device ++accLock= %d",AccCache.accLock);
|
||||||
} else if(AccCache.state==Consistent) {
|
} else if(AccCache.state==Consistent) {
|
||||||
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
||||||
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
|
AccCache.state = AccDirty; // Consistent + AcceleratorWrite=> AccDirty
|
||||||
else
|
else
|
||||||
AccCache.state = Consistent; // Consistent + AccRead => Consistent
|
AccCache.state = Consistent; // Consistent + AccRead => Consistent
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("Consistent entry into device ++accLock= %d\n",AccCache.accLock);
|
dprintf("Consistent entry into device ++accLock= %d",AccCache.accLock);
|
||||||
} else if(AccCache.state==AccDirty) {
|
} else if(AccCache.state==AccDirty) {
|
||||||
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
if((mode==AcceleratorWrite)||(mode==AcceleratorWriteDiscard))
|
||||||
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
|
AccCache.state = AccDirty; // AccDirty + AcceleratorWrite=> AccDirty
|
||||||
else
|
else
|
||||||
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
|
AccCache.state = AccDirty; // AccDirty + AccRead => AccDirty
|
||||||
AccCache.accLock++;
|
AccCache.accLock++;
|
||||||
dprintf("AccDirty entry ++accLock= %d\n",AccCache.accLock);
|
dprintf("AccDirty entry ++accLock= %d",AccCache.accLock);
|
||||||
} else {
|
} else {
|
||||||
assert(0);
|
assert(0);
|
||||||
}
|
}
|
||||||
@ -341,7 +342,7 @@ uint64_t MemoryManager::AcceleratorViewOpen(uint64_t CpuPtr,size_t bytes,ViewMod
|
|||||||
// If view is opened on device must remove from LRU
|
// If view is opened on device must remove from LRU
|
||||||
if(AccCache.LRU_valid==1){
|
if(AccCache.LRU_valid==1){
|
||||||
// must possibly remove from LRU as now locked on GPU
|
// must possibly remove from LRU as now locked on GPU
|
||||||
dprintf("AccCache entry removed from LRU \n");
|
dprintf("AccCache entry removed from LRU ");
|
||||||
LRUremove(AccCache);
|
LRUremove(AccCache);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -364,10 +365,10 @@ void MemoryManager::AcceleratorViewClose(uint64_t CpuPtr)
|
|||||||
AccCache.accLock--;
|
AccCache.accLock--;
|
||||||
// Move to LRU queue if not locked and close on device
|
// Move to LRU queue if not locked and close on device
|
||||||
if(AccCache.accLock==0) {
|
if(AccCache.accLock==0) {
|
||||||
dprintf("AccleratorViewClose %lx AccLock decremented to %ld move to LRU queue\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
dprintf("AccleratorViewClose %lx AccLock decremented to %ld move to LRU queue",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
||||||
LRUinsert(AccCache);
|
LRUinsert(AccCache);
|
||||||
} else {
|
} else {
|
||||||
dprintf("AccleratorViewClose %lx AccLock decremented to %ld\n",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
dprintf("AccleratorViewClose %lx AccLock decremented to %ld",(uint64_t)CpuPtr,(uint64_t)AccCache.accLock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
|
void MemoryManager::CpuViewClose(uint64_t CpuPtr)
|
||||||
|
@ -33,6 +33,8 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
///////////////////////////////////
|
///////////////////////////////////
|
||||||
#include <Grid/communicator/SharedMemory.h>
|
#include <Grid/communicator/SharedMemory.h>
|
||||||
|
|
||||||
|
#define NVLINK_GET
|
||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
extern bool Stencil_force_mpi ;
|
extern bool Stencil_force_mpi ;
|
||||||
@ -193,6 +195,11 @@ public:
|
|||||||
void *recv,
|
void *recv,
|
||||||
int recv_from_rank,int do_recv,
|
int recv_from_rank,int do_recv,
|
||||||
int xbytes,int rbytes,int dir);
|
int xbytes,int rbytes,int dir);
|
||||||
|
|
||||||
|
// Could do a PollHtoD and have a CommsMerge dependence
|
||||||
|
void StencilSendToRecvFromPollDtoH (std::vector<CommsRequest_t> &list);
|
||||||
|
void StencilSendToRecvFromPollIRecv(std::vector<CommsRequest_t> &list);
|
||||||
|
|
||||||
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
int xmit_to_rank,int do_xmit,
|
int xmit_to_rank,int do_xmit,
|
||||||
|
@ -30,6 +30,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
|
|||||||
|
|
||||||
NAMESPACE_BEGIN(Grid);
|
NAMESPACE_BEGIN(Grid);
|
||||||
|
|
||||||
|
|
||||||
Grid_MPI_Comm CartesianCommunicator::communicator_world;
|
Grid_MPI_Comm CartesianCommunicator::communicator_world;
|
||||||
|
|
||||||
////////////////////////////////////////////
|
////////////////////////////////////////////
|
||||||
@ -362,8 +363,6 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
|
|||||||
int bytes)
|
int bytes)
|
||||||
{
|
{
|
||||||
std::vector<MpiCommsRequest_t> reqs(0);
|
std::vector<MpiCommsRequest_t> reqs(0);
|
||||||
unsigned long xcrc = crc32(0L, Z_NULL, 0);
|
|
||||||
unsigned long rcrc = crc32(0L, Z_NULL, 0);
|
|
||||||
|
|
||||||
int myrank = _processor;
|
int myrank = _processor;
|
||||||
int ierr;
|
int ierr;
|
||||||
@ -379,9 +378,6 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
|
|||||||
communicator,MPI_STATUS_IGNORE);
|
communicator,MPI_STATUS_IGNORE);
|
||||||
assert(ierr==0);
|
assert(ierr==0);
|
||||||
|
|
||||||
// xcrc = crc32(xcrc,(unsigned char *)xmit,bytes);
|
|
||||||
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
|
|
||||||
// printf("proc %d SendToRecvFrom %d bytes xcrc %lx rcrc %lx\n",_processor,bytes,xcrc,rcrc); fflush
|
|
||||||
}
|
}
|
||||||
// Basic Halo comms primitive
|
// Basic Halo comms primitive
|
||||||
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
|
double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
|
||||||
@ -399,6 +395,8 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
|
|||||||
|
|
||||||
|
|
||||||
#ifdef ACCELERATOR_AWARE_MPI
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollIRecv(std::vector<CommsRequest_t> &list) {};
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollDtoH(std::vector<CommsRequest_t> &list) {};
|
||||||
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
|
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
int dest,int dox,
|
int dest,int dox,
|
||||||
@ -561,53 +559,105 @@ double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequ
|
|||||||
|
|
||||||
if (dox) {
|
if (dox) {
|
||||||
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
|
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
|
||||||
#undef DEVICE_TO_HOST_CONCURRENT // pipeline
|
|
||||||
#ifdef DEVICE_TO_HOST_CONCURRENT
|
|
||||||
tag= dir+_processor*32;
|
tag= dir+_processor*32;
|
||||||
|
|
||||||
host_xmit = this->HostBufferMalloc(xbytes);
|
host_xmit = this->HostBufferMalloc(xbytes);
|
||||||
acceleratorCopyFromDeviceAsynch(xmit, host_xmit,xbytes); // Make this Asynch
|
CommsRequest_t srq;
|
||||||
|
|
||||||
|
srq.ev = acceleratorCopyFromDeviceAsynch(xmit, host_xmit,xbytes); // Make this Asynch
|
||||||
|
|
||||||
// ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
// ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
||||||
// assert(ierr==0);
|
// assert(ierr==0);
|
||||||
// off_node_bytes+=xbytes;
|
// off_node_bytes+=xbytes;
|
||||||
|
|
||||||
CommsRequest_t srq;
|
|
||||||
srq.PacketType = InterNodeXmit;
|
srq.PacketType = InterNodeXmit;
|
||||||
srq.bytes = xbytes;
|
srq.bytes = xbytes;
|
||||||
// srq.req = xrq;
|
// srq.req = xrq;
|
||||||
srq.host_buf = host_xmit;
|
srq.host_buf = host_xmit;
|
||||||
srq.device_buf = xmit;
|
srq.device_buf = xmit;
|
||||||
|
srq.tag = tag;
|
||||||
|
srq.dest = dest;
|
||||||
|
srq.commdir = commdir;
|
||||||
list.push_back(srq);
|
list.push_back(srq);
|
||||||
#else
|
|
||||||
tag= dir+_processor*32;
|
|
||||||
|
|
||||||
host_xmit = this->HostBufferMalloc(xbytes);
|
|
||||||
const int chunks=1;
|
|
||||||
for(int n=0;n<chunks;n++){
|
|
||||||
void * host_xmitc = (void *)( (uint64_t) host_xmit + n*xbytes/chunks);
|
|
||||||
void * xmitc = (void *)( (uint64_t) xmit + n*xbytes/chunks);
|
|
||||||
acceleratorCopyFromDeviceAsynch(xmitc, host_xmitc,xbytes/chunks); // Make this Asynch
|
|
||||||
}
|
|
||||||
acceleratorCopySynchronise(); // Complete all pending copy transfers
|
|
||||||
|
|
||||||
ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
|
||||||
assert(ierr==0);
|
|
||||||
off_node_bytes+=xbytes;
|
|
||||||
|
|
||||||
CommsRequest_t srq;
|
|
||||||
srq.PacketType = InterNodeXmit;
|
|
||||||
srq.bytes = xbytes;
|
|
||||||
srq.req = xrq;
|
|
||||||
srq.host_buf = host_xmit;
|
|
||||||
srq.device_buf = xmit;
|
|
||||||
list.push_back(srq);
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return off_node_bytes;
|
return off_node_bytes;
|
||||||
}
|
}
|
||||||
|
/*
|
||||||
|
* In the interest of better pipelining, poll for completion on each DtoH and
|
||||||
|
* start MPI_ISend in the meantime
|
||||||
|
*/
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollIRecv(std::vector<CommsRequest_t> &list)
|
||||||
|
{
|
||||||
|
int pending = 0;
|
||||||
|
do {
|
||||||
|
|
||||||
|
pending = 0;
|
||||||
|
|
||||||
|
for(int idx = 0; idx<list.size();idx++){
|
||||||
|
|
||||||
|
if ( list[idx].PacketType==InterNodeRecv ) {
|
||||||
|
|
||||||
|
int flag = 0;
|
||||||
|
MPI_Status status;
|
||||||
|
int ierr = MPI_Test(&list[idx].req,&flag,&status);
|
||||||
|
assert(ierr==0);
|
||||||
|
|
||||||
|
if ( flag ) {
|
||||||
|
// std::cout << " PollIrecv "<<idx<<" flag "<<flag<<std::endl;
|
||||||
|
acceleratorCopyToDeviceAsynch(list[idx].host_buf,list[idx].device_buf,list[idx].bytes);
|
||||||
|
list[idx].PacketType=InterNodeReceiveHtoD;
|
||||||
|
} else {
|
||||||
|
pending ++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// std::cout << " PollIrecv "<<pending<<" pending requests"<<std::endl;
|
||||||
|
} while ( pending );
|
||||||
|
|
||||||
|
}
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollDtoH(std::vector<CommsRequest_t> &list)
|
||||||
|
{
|
||||||
|
int pending = 0;
|
||||||
|
do {
|
||||||
|
|
||||||
|
pending = 0;
|
||||||
|
|
||||||
|
for(int idx = 0; idx<list.size();idx++){
|
||||||
|
|
||||||
|
if ( list[idx].PacketType==InterNodeXmit ) {
|
||||||
|
|
||||||
|
if ( acceleratorEventIsComplete(list[idx].ev) ) {
|
||||||
|
|
||||||
|
void *host_xmit = list[idx].host_buf;
|
||||||
|
uint32_t xbytes = list[idx].bytes;
|
||||||
|
int dest = list[idx].dest;
|
||||||
|
int tag = list[idx].tag;
|
||||||
|
int commdir = list[idx].commdir;
|
||||||
|
///////////////////
|
||||||
|
// Send packet
|
||||||
|
///////////////////
|
||||||
|
|
||||||
|
// std::cout << " DtoH is complete for index "<<idx<<" calling MPI_Isend "<<std::endl;
|
||||||
|
|
||||||
|
MPI_Request xrq;
|
||||||
|
int ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
||||||
|
assert(ierr==0);
|
||||||
|
|
||||||
|
list[idx].req = xrq; // Update the MPI request in the list
|
||||||
|
|
||||||
|
list[idx].PacketType=InterNodeXmitISend;
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// not done, so return to polling loop
|
||||||
|
pending++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} while (pending);
|
||||||
|
}
|
||||||
|
|
||||||
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
@ -644,69 +694,92 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
|
|||||||
* - complete all copies
|
* - complete all copies
|
||||||
* - post MPI send asynch
|
* - post MPI send asynch
|
||||||
*/
|
*/
|
||||||
|
#ifdef NVLINK_GET
|
||||||
|
if ( dor ) {
|
||||||
|
|
||||||
// static int printed;
|
if ( ! ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) ) {
|
||||||
// if((printed<8) && this->IsBoss() ) {
|
// Intranode
|
||||||
// printf("dir %d doX %d doR %d Face size %ld %ld\n",dir,dox,dor,xbytes,rbytes);
|
void *shm = (void *) this->ShmBufferTranslate(from,xmit);
|
||||||
// printed++;
|
assert(shm!=NULL);
|
||||||
// }
|
|
||||||
|
CommsRequest_t srq;
|
||||||
|
|
||||||
|
srq.ev = acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes);
|
||||||
|
|
||||||
|
srq.PacketType = IntraNodeRecv;
|
||||||
|
srq.bytes = xbytes;
|
||||||
|
// srq.req = xrq;
|
||||||
|
srq.host_buf = NULL;
|
||||||
|
srq.device_buf = xmit;
|
||||||
|
srq.tag = -1;
|
||||||
|
srq.dest = dest;
|
||||||
|
srq.commdir = dir;
|
||||||
|
list.push_back(srq);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#else
|
||||||
if (dox) {
|
if (dox) {
|
||||||
|
|
||||||
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
|
if ( !( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) ) {
|
||||||
#ifdef DEVICE_TO_HOST_CONCURRENT
|
// Intranode
|
||||||
tag= dir+_processor*32;
|
|
||||||
// Find the send in the prepared list
|
|
||||||
int list_idx=-1;
|
|
||||||
for(int idx = 0; idx<list.size();idx++){
|
|
||||||
|
|
||||||
if ( (list[idx].device_buf==xmit)
|
|
||||||
&&(list[idx].PacketType==InterNodeXmit)
|
|
||||||
&&(list[idx].bytes==xbytes) ) {
|
|
||||||
|
|
||||||
list_idx = idx;
|
|
||||||
host_xmit = list[idx].host_buf;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
assert(list_idx != -1); // found it
|
|
||||||
ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
|
|
||||||
assert(ierr==0);
|
|
||||||
list[list_idx].req = xrq; // Update the MPI request in the list
|
|
||||||
off_node_bytes+=xbytes;
|
|
||||||
#endif
|
|
||||||
} else {
|
|
||||||
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
|
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
|
||||||
assert(shm!=NULL);
|
assert(shm!=NULL);
|
||||||
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes);
|
|
||||||
|
CommsRequest_t srq;
|
||||||
|
|
||||||
|
srq.ev = acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes);
|
||||||
|
|
||||||
|
srq.PacketType = IntraNodeXmit;
|
||||||
|
srq.bytes = xbytes;
|
||||||
|
// srq.req = xrq;
|
||||||
|
srq.host_buf = NULL;
|
||||||
|
srq.device_buf = xmit;
|
||||||
|
srq.tag = -1;
|
||||||
|
srq.dest = dest;
|
||||||
|
srq.commdir = dir;
|
||||||
|
list.push_back(srq);
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
return off_node_bytes;
|
return off_node_bytes;
|
||||||
}
|
}
|
||||||
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
|
void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,int dir)
|
||||||
{
|
{
|
||||||
int nreq=list.size();
|
acceleratorCopySynchronise(); // Complete all pending copy transfers D2D
|
||||||
|
|
||||||
if (nreq==0) return;
|
std::vector<MPI_Status> status;
|
||||||
std::vector<MPI_Status> status(nreq);
|
std::vector<MPI_Request> MpiRequests;
|
||||||
std::vector<MPI_Request> MpiRequests(nreq);
|
|
||||||
|
for(int r=0;r<list.size();r++){
|
||||||
|
// Must check each Send buf is clear to reuse
|
||||||
|
if ( list[r].PacketType == InterNodeXmitISend ) MpiRequests.push_back(list[r].req);
|
||||||
|
// if ( list[r].PacketType == InterNodeRecv ) MpiRequests.push_back(list[r].req); // Already "Test" passed
|
||||||
|
}
|
||||||
|
|
||||||
for(int r=0;r<nreq;r++){
|
int nreq=MpiRequests.size();
|
||||||
MpiRequests[r] = list[r].req;
|
|
||||||
|
std::cout << GridLogMessage << " StencilSendToRevFromComplete "<<nreq<<" Mpi Requests"<<std::endl;
|
||||||
|
|
||||||
|
|
||||||
|
if (nreq>0) {
|
||||||
|
status.resize(MpiRequests.size());
|
||||||
|
int ierr = MPI_Waitall(MpiRequests.size(),&MpiRequests[0],&status[0]); // Sends are guaranteed in order. No harm in not completing.
|
||||||
|
assert(ierr==0);
|
||||||
}
|
}
|
||||||
|
|
||||||
int ierr = MPI_Waitall(nreq,&MpiRequests[0],&status[0]);
|
// for(int r=0;r<nreq;r++){
|
||||||
assert(ierr==0);
|
// if ( list[r].PacketType==InterNodeRecv ) {
|
||||||
|
// acceleratorCopyToDeviceAsynch(list[r].host_buf,list[r].device_buf,list[r].bytes);
|
||||||
for(int r=0;r<nreq;r++){
|
// }
|
||||||
if ( list[r].PacketType==InterNodeRecv ) {
|
// }
|
||||||
acceleratorCopyToDeviceAsynch(list[r].host_buf,list[r].device_buf,list[r].bytes);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
acceleratorCopySynchronise(); // Complete all pending copy transfers
|
|
||||||
list.resize(0); // Delete the list
|
list.resize(0); // Delete the list
|
||||||
this->HostBufferFreeAll(); // Clean up the buffer allocs
|
this->HostBufferFreeAll(); // Clean up the buffer allocs
|
||||||
this->StencilBarrier();
|
#ifndef NVLINK_GET
|
||||||
|
this->StencilBarrier(); // if PUT must check our nbrs have filled our receive buffers.
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
////////////////////////////////////////////
|
////////////////////////////////////////////
|
||||||
|
@ -91,7 +91,7 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
|
|||||||
{
|
{
|
||||||
assert(0);
|
assert(0);
|
||||||
}
|
}
|
||||||
void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list){ assert(0);}
|
void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list){ assert(list.size()==0);}
|
||||||
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
int dest,
|
int dest,
|
||||||
@ -132,6 +132,8 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
|
|||||||
{
|
{
|
||||||
return 2.0*bytes;
|
return 2.0*bytes;
|
||||||
}
|
}
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollIRecv(std::vector<CommsRequest_t> &list) {};
|
||||||
|
void CartesianCommunicator::StencilSendToRecvFromPollDtoH(std::vector<CommsRequest_t> &list) {};
|
||||||
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
|
double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
int xmit_to_rank,int dox,
|
int xmit_to_rank,int dox,
|
||||||
@ -139,7 +141,7 @@ double CartesianCommunicator::StencilSendToRecvFromPrepare(std::vector<CommsRequ
|
|||||||
int recv_from_rank,int dor,
|
int recv_from_rank,int dor,
|
||||||
int xbytes,int rbytes, int dir)
|
int xbytes,int rbytes, int dir)
|
||||||
{
|
{
|
||||||
return xbytes+rbytes;
|
return 0.0;
|
||||||
}
|
}
|
||||||
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
|
||||||
void *xmit,
|
void *xmit,
|
||||||
|
@ -50,12 +50,30 @@ typedef MPI_Request MpiCommsRequest_t;
|
|||||||
#ifdef ACCELERATOR_AWARE_MPI
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
typedef MPI_Request CommsRequest_t;
|
typedef MPI_Request CommsRequest_t;
|
||||||
#else
|
#else
|
||||||
enum PacketType_t { InterNodeXmit, InterNodeRecv, IntraNodeXmit, IntraNodeRecv };
|
/*
|
||||||
|
* Enable state transitions as each packet flows.
|
||||||
|
*/
|
||||||
|
enum PacketType_t {
|
||||||
|
FaceGather,
|
||||||
|
InterNodeXmit,
|
||||||
|
InterNodeRecv,
|
||||||
|
IntraNodeXmit,
|
||||||
|
IntraNodeRecv,
|
||||||
|
InterNodeXmitISend,
|
||||||
|
InterNodeReceiveHtoD
|
||||||
|
};
|
||||||
|
/*
|
||||||
|
*Package arguments needed for various actions along packet flow
|
||||||
|
*/
|
||||||
typedef struct {
|
typedef struct {
|
||||||
PacketType_t PacketType;
|
PacketType_t PacketType;
|
||||||
void *host_buf;
|
void *host_buf;
|
||||||
void *device_buf;
|
void *device_buf;
|
||||||
|
int dest;
|
||||||
|
int tag;
|
||||||
|
int commdir;
|
||||||
unsigned long bytes;
|
unsigned long bytes;
|
||||||
|
acceleratorEvent_t ev;
|
||||||
MpiCommsRequest_t req;
|
MpiCommsRequest_t req;
|
||||||
} CommsRequest_t;
|
} CommsRequest_t;
|
||||||
#endif
|
#endif
|
||||||
|
@ -68,7 +68,7 @@ template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension
|
|||||||
if(Cshift_verbose) std::cout << GridLogPerformance << "Cshift took "<< (t1-t0)/1e3 << " ms"<<std::endl;
|
if(Cshift_verbose) std::cout << GridLogPerformance << "Cshift took "<< (t1-t0)/1e3 << " ms"<<std::endl;
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
#if 1
|
|
||||||
template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &rhs,int dimension,int shift)
|
template<class vobj> void Cshift_comms(Lattice<vobj>& ret,const Lattice<vobj> &rhs,int dimension,int shift)
|
||||||
{
|
{
|
||||||
int sshift[2];
|
int sshift[2];
|
||||||
@ -125,7 +125,11 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
|
|||||||
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
|
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
|
||||||
static deviceVector<vobj> send_buf; send_buf.resize(buffer_size);
|
static deviceVector<vobj> send_buf; send_buf.resize(buffer_size);
|
||||||
static deviceVector<vobj> recv_buf; recv_buf.resize(buffer_size);
|
static deviceVector<vobj> recv_buf; recv_buf.resize(buffer_size);
|
||||||
|
#ifndef ACCELERATOR_AWARE_MPI
|
||||||
|
static hostVector<vobj> hsend_buf; hsend_buf.resize(buffer_size);
|
||||||
|
static hostVector<vobj> hrecv_buf; hrecv_buf.resize(buffer_size);
|
||||||
|
#endif
|
||||||
|
|
||||||
int cb= (cbmask==0x2)? Odd : Even;
|
int cb= (cbmask==0x2)? Odd : Even;
|
||||||
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
|
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
|
||||||
RealD tcopy=0.0;
|
RealD tcopy=0.0;
|
||||||
@ -156,16 +160,29 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
|
|||||||
// int rank = grid->_processor;
|
// int rank = grid->_processor;
|
||||||
int recv_from_rank;
|
int recv_from_rank;
|
||||||
int xmit_to_rank;
|
int xmit_to_rank;
|
||||||
|
|
||||||
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
|
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
|
||||||
|
|
||||||
tcomms-=usecond();
|
tcomms-=usecond();
|
||||||
grid->Barrier();
|
grid->Barrier();
|
||||||
|
|
||||||
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
grid->SendToRecvFrom((void *)&send_buf[0],
|
grid->SendToRecvFrom((void *)&send_buf[0],
|
||||||
xmit_to_rank,
|
xmit_to_rank,
|
||||||
(void *)&recv_buf[0],
|
(void *)&recv_buf[0],
|
||||||
recv_from_rank,
|
recv_from_rank,
|
||||||
bytes);
|
bytes);
|
||||||
|
#else
|
||||||
|
// bouncy bouncy
|
||||||
|
acceleratorCopyFromDevice(&send_buf[0],&hsend_buf[0],bytes);
|
||||||
|
grid->SendToRecvFrom((void *)&hsend_buf[0],
|
||||||
|
xmit_to_rank,
|
||||||
|
(void *)&hrecv_buf[0],
|
||||||
|
recv_from_rank,
|
||||||
|
bytes);
|
||||||
|
acceleratorCopyToDevice(&hrecv_buf[0],&recv_buf[0],bytes);
|
||||||
|
#endif
|
||||||
|
|
||||||
xbytes+=bytes;
|
xbytes+=bytes;
|
||||||
grid->Barrier();
|
grid->Barrier();
|
||||||
tcomms+=usecond();
|
tcomms+=usecond();
|
||||||
@ -226,12 +243,17 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
|
|||||||
static std::vector<deviceVector<scalar_object> > recv_buf_extract; recv_buf_extract.resize(Nsimd);
|
static std::vector<deviceVector<scalar_object> > recv_buf_extract; recv_buf_extract.resize(Nsimd);
|
||||||
scalar_object * recv_buf_extract_mpi;
|
scalar_object * recv_buf_extract_mpi;
|
||||||
scalar_object * send_buf_extract_mpi;
|
scalar_object * send_buf_extract_mpi;
|
||||||
|
|
||||||
|
|
||||||
for(int s=0;s<Nsimd;s++){
|
for(int s=0;s<Nsimd;s++){
|
||||||
send_buf_extract[s].resize(buffer_size);
|
send_buf_extract[s].resize(buffer_size);
|
||||||
recv_buf_extract[s].resize(buffer_size);
|
recv_buf_extract[s].resize(buffer_size);
|
||||||
}
|
}
|
||||||
|
#ifndef ACCELERATOR_AWARE_MPI
|
||||||
|
hostVector<scalar_object> hsend_buf; hsend_buf.resize(buffer_size);
|
||||||
|
hostVector<scalar_object> hrecv_buf; hrecv_buf.resize(buffer_size);
|
||||||
|
#endif
|
||||||
|
|
||||||
int bytes = buffer_size*sizeof(scalar_object);
|
int bytes = buffer_size*sizeof(scalar_object);
|
||||||
|
|
||||||
ExtractPointerArray<scalar_object> pointers(Nsimd); //
|
ExtractPointerArray<scalar_object> pointers(Nsimd); //
|
||||||
@ -283,11 +305,22 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
|
|||||||
|
|
||||||
send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
|
send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
|
||||||
recv_buf_extract_mpi = &recv_buf_extract[i][0];
|
recv_buf_extract_mpi = &recv_buf_extract[i][0];
|
||||||
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
|
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
|
||||||
xmit_to_rank,
|
xmit_to_rank,
|
||||||
(void *)recv_buf_extract_mpi,
|
(void *)recv_buf_extract_mpi,
|
||||||
recv_from_rank,
|
recv_from_rank,
|
||||||
bytes);
|
bytes);
|
||||||
|
#else
|
||||||
|
// bouncy bouncy
|
||||||
|
acceleratorCopyFromDevice((void *)send_buf_extract_mpi,(void *)&hsend_buf[0],bytes);
|
||||||
|
grid->SendToRecvFrom((void *)&hsend_buf[0],
|
||||||
|
xmit_to_rank,
|
||||||
|
(void *)&hrecv_buf[0],
|
||||||
|
recv_from_rank,
|
||||||
|
bytes);
|
||||||
|
acceleratorCopyToDevice((void *)&hrecv_buf[0],(void *)recv_buf_extract_mpi,bytes);
|
||||||
|
#endif
|
||||||
|
|
||||||
xbytes+=bytes;
|
xbytes+=bytes;
|
||||||
grid->Barrier();
|
grid->Barrier();
|
||||||
@ -311,234 +344,6 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
|
|||||||
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
|
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
|
||||||
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
|
|
||||||
{
|
|
||||||
typedef typename vobj::vector_type vector_type;
|
|
||||||
typedef typename vobj::scalar_type scalar_type;
|
|
||||||
|
|
||||||
GridBase *grid=rhs.Grid();
|
|
||||||
Lattice<vobj> temp(rhs.Grid());
|
|
||||||
|
|
||||||
int fd = rhs.Grid()->_fdimensions[dimension];
|
|
||||||
int rd = rhs.Grid()->_rdimensions[dimension];
|
|
||||||
int pd = rhs.Grid()->_processors[dimension];
|
|
||||||
int simd_layout = rhs.Grid()->_simd_layout[dimension];
|
|
||||||
int comm_dim = rhs.Grid()->_processors[dimension] >1 ;
|
|
||||||
assert(simd_layout==1);
|
|
||||||
assert(comm_dim==1);
|
|
||||||
assert(shift>=0);
|
|
||||||
assert(shift<fd);
|
|
||||||
RealD tcopy=0.0;
|
|
||||||
RealD tgather=0.0;
|
|
||||||
RealD tscatter=0.0;
|
|
||||||
RealD tcomms=0.0;
|
|
||||||
uint64_t xbytes=0;
|
|
||||||
|
|
||||||
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
|
|
||||||
static cshiftVector<vobj> send_buf_v; send_buf_v.resize(buffer_size);
|
|
||||||
static cshiftVector<vobj> recv_buf_v; recv_buf_v.resize(buffer_size);
|
|
||||||
vobj *send_buf;
|
|
||||||
vobj *recv_buf;
|
|
||||||
{
|
|
||||||
grid->ShmBufferFreeAll();
|
|
||||||
size_t bytes = buffer_size*sizeof(vobj);
|
|
||||||
send_buf=(vobj *)grid->ShmBufferMalloc(bytes);
|
|
||||||
recv_buf=(vobj *)grid->ShmBufferMalloc(bytes);
|
|
||||||
}
|
|
||||||
|
|
||||||
int cb= (cbmask==0x2)? Odd : Even;
|
|
||||||
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
|
|
||||||
|
|
||||||
for(int x=0;x<rd;x++){
|
|
||||||
|
|
||||||
int sx = (x+sshift)%rd;
|
|
||||||
int comm_proc = ((x+sshift)/rd)%pd;
|
|
||||||
|
|
||||||
if (comm_proc==0) {
|
|
||||||
|
|
||||||
tcopy-=usecond();
|
|
||||||
Copy_plane(ret,rhs,dimension,x,sx,cbmask);
|
|
||||||
tcopy+=usecond();
|
|
||||||
|
|
||||||
} else {
|
|
||||||
|
|
||||||
int words = buffer_size;
|
|
||||||
if (cbmask != 0x3) words=words>>1;
|
|
||||||
|
|
||||||
int bytes = words * sizeof(vobj);
|
|
||||||
|
|
||||||
tgather-=usecond();
|
|
||||||
Gather_plane_simple (rhs,send_buf_v,dimension,sx,cbmask);
|
|
||||||
tgather+=usecond();
|
|
||||||
|
|
||||||
// int rank = grid->_processor;
|
|
||||||
int recv_from_rank;
|
|
||||||
int xmit_to_rank;
|
|
||||||
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
|
|
||||||
|
|
||||||
|
|
||||||
tcomms-=usecond();
|
|
||||||
// grid->Barrier();
|
|
||||||
|
|
||||||
acceleratorCopyDeviceToDevice((void *)&send_buf_v[0],(void *)&send_buf[0],bytes);
|
|
||||||
grid->SendToRecvFrom((void *)&send_buf[0],
|
|
||||||
xmit_to_rank,
|
|
||||||
(void *)&recv_buf[0],
|
|
||||||
recv_from_rank,
|
|
||||||
bytes);
|
|
||||||
xbytes+=bytes;
|
|
||||||
acceleratorCopyDeviceToDevice((void *)&recv_buf[0],(void *)&recv_buf_v[0],bytes);
|
|
||||||
|
|
||||||
// grid->Barrier();
|
|
||||||
tcomms+=usecond();
|
|
||||||
|
|
||||||
tscatter-=usecond();
|
|
||||||
Scatter_plane_simple (ret,recv_buf_v,dimension,x,cbmask);
|
|
||||||
tscatter+=usecond();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if(Cshift_verbose){
|
|
||||||
std::cout << GridLogPerformance << " Cshift copy "<<tcopy/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift gather "<<tgather/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift scatter "<<tscatter/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift comm "<<tcomms/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
|
|
||||||
{
|
|
||||||
GridBase *grid=rhs.Grid();
|
|
||||||
const int Nsimd = grid->Nsimd();
|
|
||||||
typedef typename vobj::vector_type vector_type;
|
|
||||||
typedef typename vobj::scalar_object scalar_object;
|
|
||||||
typedef typename vobj::scalar_type scalar_type;
|
|
||||||
|
|
||||||
int fd = grid->_fdimensions[dimension];
|
|
||||||
int rd = grid->_rdimensions[dimension];
|
|
||||||
int ld = grid->_ldimensions[dimension];
|
|
||||||
int pd = grid->_processors[dimension];
|
|
||||||
int simd_layout = grid->_simd_layout[dimension];
|
|
||||||
int comm_dim = grid->_processors[dimension] >1 ;
|
|
||||||
|
|
||||||
//std::cout << "Cshift_comms_simd dim "<< dimension << " fd "<<fd<<" rd "<<rd
|
|
||||||
// << " ld "<<ld<<" pd " << pd<<" simd_layout "<<simd_layout
|
|
||||||
// << " comm_dim " << comm_dim << " cbmask " << cbmask <<std::endl;
|
|
||||||
|
|
||||||
assert(comm_dim==1);
|
|
||||||
assert(simd_layout==2);
|
|
||||||
assert(shift>=0);
|
|
||||||
assert(shift<fd);
|
|
||||||
RealD tcopy=0.0;
|
|
||||||
RealD tgather=0.0;
|
|
||||||
RealD tscatter=0.0;
|
|
||||||
RealD tcomms=0.0;
|
|
||||||
uint64_t xbytes=0;
|
|
||||||
|
|
||||||
int permute_type=grid->PermuteType(dimension);
|
|
||||||
|
|
||||||
///////////////////////////////////////////////
|
|
||||||
// Simd direction uses an extract/merge pair
|
|
||||||
///////////////////////////////////////////////
|
|
||||||
int buffer_size = grid->_slice_nblock[dimension]*grid->_slice_block[dimension];
|
|
||||||
// int words = sizeof(vobj)/sizeof(vector_type);
|
|
||||||
|
|
||||||
static std::vector<cshiftVector<scalar_object> > send_buf_extract; send_buf_extract.resize(Nsimd);
|
|
||||||
static std::vector<cshiftVector<scalar_object> > recv_buf_extract; recv_buf_extract.resize(Nsimd);
|
|
||||||
scalar_object * recv_buf_extract_mpi;
|
|
||||||
scalar_object * send_buf_extract_mpi;
|
|
||||||
{
|
|
||||||
size_t bytes = sizeof(scalar_object)*buffer_size;
|
|
||||||
grid->ShmBufferFreeAll();
|
|
||||||
send_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
|
|
||||||
recv_buf_extract_mpi = (scalar_object *)grid->ShmBufferMalloc(bytes);
|
|
||||||
}
|
|
||||||
for(int s=0;s<Nsimd;s++){
|
|
||||||
send_buf_extract[s].resize(buffer_size);
|
|
||||||
recv_buf_extract[s].resize(buffer_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
int bytes = buffer_size*sizeof(scalar_object);
|
|
||||||
|
|
||||||
ExtractPointerArray<scalar_object> pointers(Nsimd); //
|
|
||||||
ExtractPointerArray<scalar_object> rpointers(Nsimd); // received pointers
|
|
||||||
|
|
||||||
///////////////////////////////////////////
|
|
||||||
// Work out what to send where
|
|
||||||
///////////////////////////////////////////
|
|
||||||
int cb = (cbmask==0x2)? Odd : Even;
|
|
||||||
int sshift= grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
|
|
||||||
|
|
||||||
// loop over outer coord planes orthog to dim
|
|
||||||
for(int x=0;x<rd;x++){
|
|
||||||
|
|
||||||
// FIXME call local permute copy if none are offnode.
|
|
||||||
for(int i=0;i<Nsimd;i++){
|
|
||||||
pointers[i] = &send_buf_extract[i][0];
|
|
||||||
}
|
|
||||||
tgather-=usecond();
|
|
||||||
int sx = (x+sshift)%rd;
|
|
||||||
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
|
|
||||||
tgather+=usecond();
|
|
||||||
|
|
||||||
for(int i=0;i<Nsimd;i++){
|
|
||||||
|
|
||||||
int inner_bit = (Nsimd>>(permute_type+1));
|
|
||||||
int ic= (i&inner_bit)? 1:0;
|
|
||||||
|
|
||||||
int my_coor = rd*ic + x;
|
|
||||||
int nbr_coor = my_coor+sshift;
|
|
||||||
int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
|
|
||||||
|
|
||||||
int nbr_ic = (nbr_coor%ld)/rd; // inner coord of peer
|
|
||||||
int nbr_ox = (nbr_coor%rd); // outer coord of peer
|
|
||||||
int nbr_lane = (i&(~inner_bit));
|
|
||||||
|
|
||||||
int recv_from_rank;
|
|
||||||
int xmit_to_rank;
|
|
||||||
|
|
||||||
if (nbr_ic) nbr_lane|=inner_bit;
|
|
||||||
|
|
||||||
assert (sx == nbr_ox);
|
|
||||||
|
|
||||||
if(nbr_proc){
|
|
||||||
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
|
|
||||||
|
|
||||||
tcomms-=usecond();
|
|
||||||
// grid->Barrier();
|
|
||||||
|
|
||||||
acceleratorCopyDeviceToDevice((void *)&send_buf_extract[nbr_lane][0],(void *)send_buf_extract_mpi,bytes);
|
|
||||||
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
|
|
||||||
xmit_to_rank,
|
|
||||||
(void *)recv_buf_extract_mpi,
|
|
||||||
recv_from_rank,
|
|
||||||
bytes);
|
|
||||||
acceleratorCopyDeviceToDevice((void *)recv_buf_extract_mpi,(void *)&recv_buf_extract[i][0],bytes);
|
|
||||||
xbytes+=bytes;
|
|
||||||
|
|
||||||
// grid->Barrier();
|
|
||||||
tcomms+=usecond();
|
|
||||||
rpointers[i] = &recv_buf_extract[i][0];
|
|
||||||
} else {
|
|
||||||
rpointers[i] = &send_buf_extract[nbr_lane][0];
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
tscatter-=usecond();
|
|
||||||
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
|
|
||||||
tscatter+=usecond();
|
|
||||||
|
|
||||||
}
|
|
||||||
if(Cshift_verbose){
|
|
||||||
std::cout << GridLogPerformance << " Cshift (s) copy "<<tcopy/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift (s) gather "<<tgather/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift (s) scatter "<<tscatter/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift (s) comm "<<tcomms/1e3<<" ms"<<std::endl;
|
|
||||||
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s"<<std::endl;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
|
||||||
|
@ -466,6 +466,12 @@ public:
|
|||||||
static deviceVector<vobj> recv_buf;
|
static deviceVector<vobj> recv_buf;
|
||||||
send_buf.resize(buffer_size*2*depth);
|
send_buf.resize(buffer_size*2*depth);
|
||||||
recv_buf.resize(buffer_size*2*depth);
|
recv_buf.resize(buffer_size*2*depth);
|
||||||
|
#ifndef ACCELERATOR_AWARE_MPI
|
||||||
|
static hostVector<vobj> hsend_buf;
|
||||||
|
static hostVector<vobj> hrecv_buf;
|
||||||
|
hsend_buf.resize(buffer_size*2*depth);
|
||||||
|
hrecv_buf.resize(buffer_size*2*depth);
|
||||||
|
#endif
|
||||||
|
|
||||||
std::vector<MpiCommsRequest_t> fwd_req;
|
std::vector<MpiCommsRequest_t> fwd_req;
|
||||||
std::vector<MpiCommsRequest_t> bwd_req;
|
std::vector<MpiCommsRequest_t> bwd_req;
|
||||||
@ -495,9 +501,17 @@ public:
|
|||||||
t_gather+=usecond()-t;
|
t_gather+=usecond()-t;
|
||||||
|
|
||||||
t=usecond();
|
t=usecond();
|
||||||
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
grid->SendToRecvFromBegin(fwd_req,
|
grid->SendToRecvFromBegin(fwd_req,
|
||||||
(void *)&send_buf[d*buffer_size], xmit_to_rank,
|
(void *)&send_buf[d*buffer_size], xmit_to_rank,
|
||||||
(void *)&recv_buf[d*buffer_size], recv_from_rank, bytes, tag);
|
(void *)&recv_buf[d*buffer_size], recv_from_rank, bytes, tag);
|
||||||
|
#else
|
||||||
|
acceleratorCopyFromDevice(&send_buf[d*buffer_size],&hsend_buf[d*buffer_size],bytes);
|
||||||
|
grid->SendToRecvFromBegin(fwd_req,
|
||||||
|
(void *)&hsend_buf[d*buffer_size], xmit_to_rank,
|
||||||
|
(void *)&hrecv_buf[d*buffer_size], recv_from_rank, bytes, tag);
|
||||||
|
acceleratorCopyToDevice(&hrecv_buf[d*buffer_size],&recv_buf[d*buffer_size],bytes);
|
||||||
|
#endif
|
||||||
t_comms+=usecond()-t;
|
t_comms+=usecond()-t;
|
||||||
}
|
}
|
||||||
for ( int d=0;d < depth ; d ++ ) {
|
for ( int d=0;d < depth ; d ++ ) {
|
||||||
@ -508,9 +522,17 @@ public:
|
|||||||
t_gather+= usecond() - t;
|
t_gather+= usecond() - t;
|
||||||
|
|
||||||
t=usecond();
|
t=usecond();
|
||||||
|
#ifdef ACCELERATOR_AWARE_MPI
|
||||||
grid->SendToRecvFromBegin(bwd_req,
|
grid->SendToRecvFromBegin(bwd_req,
|
||||||
(void *)&send_buf[(d+depth)*buffer_size], recv_from_rank,
|
(void *)&send_buf[(d+depth)*buffer_size], recv_from_rank,
|
||||||
(void *)&recv_buf[(d+depth)*buffer_size], xmit_to_rank, bytes,tag);
|
(void *)&recv_buf[(d+depth)*buffer_size], xmit_to_rank, bytes,tag);
|
||||||
|
#else
|
||||||
|
acceleratorCopyFromDevice(&send_buf[(d+depth)*buffer_size],&hsend_buf[(d+depth)*buffer_size],bytes);
|
||||||
|
grid->SendToRecvFromBegin(bwd_req,
|
||||||
|
(void *)&hsend_buf[(d+depth)*buffer_size], recv_from_rank,
|
||||||
|
(void *)&hrecv_buf[(d+depth)*buffer_size], xmit_to_rank, bytes,tag);
|
||||||
|
acceleratorCopyToDevice(&hrecv_buf[(d+depth)*buffer_size],&recv_buf[(d+depth)*buffer_size],bytes);
|
||||||
|
#endif
|
||||||
t_comms+=usecond()-t;
|
t_comms+=usecond()-t;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -484,6 +484,12 @@ public:
|
|||||||
this->face_table_computed=1;
|
this->face_table_computed=1;
|
||||||
assert(this->u_comm_offset==this->_unified_buffer_size);
|
assert(this->u_comm_offset==this->_unified_buffer_size);
|
||||||
accelerator_barrier();
|
accelerator_barrier();
|
||||||
|
#ifdef NVLINK_GET
|
||||||
|
#warning "NVLINK_GET"
|
||||||
|
this->_grid->StencilBarrier(); // He can now get mu local gather, I can get his
|
||||||
|
// Synch shared memory on a single nodes; could use an asynchronous barrier here and defer check
|
||||||
|
// Or issue barrier AFTER the DMA is running
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
};
|
};
|
||||||
|
@ -504,7 +504,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
|
|||||||
autoView(st_v , st,AcceleratorRead);
|
autoView(st_v , st,AcceleratorRead);
|
||||||
|
|
||||||
if( interior && exterior ) {
|
if( interior && exterior ) {
|
||||||
acceleratorFenceComputeStream();
|
// acceleratorFenceComputeStream();
|
||||||
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSite); return;}
|
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSite); return;}
|
||||||
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
|
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
|
||||||
#ifndef GRID_CUDA
|
#ifndef GRID_CUDA
|
||||||
@ -517,7 +517,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
|
|||||||
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteInt); return;}
|
if (Opt == WilsonKernelsStatic::OptInlineAsm ) { ASM_CALL(AsmDhopSiteInt); return;}
|
||||||
#endif
|
#endif
|
||||||
} else if( exterior ) {
|
} else if( exterior ) {
|
||||||
// dependent on result of merge
|
// // dependent on result of merge
|
||||||
acceleratorFenceComputeStream();
|
acceleratorFenceComputeStream();
|
||||||
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL_EXT(GenericDhopSiteExt); return;}
|
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL_EXT(GenericDhopSiteExt); return;}
|
||||||
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteExt); return;}
|
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL_EXT(HandDhopSiteExt); return;}
|
||||||
|
@ -363,12 +363,16 @@ public:
|
|||||||
////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////
|
||||||
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
|
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
|
||||||
{
|
{
|
||||||
|
// std::cout << "Communicate Begin "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
FlightRecorder::StepLog("Communicate begin");
|
FlightRecorder::StepLog("Communicate begin");
|
||||||
// All GPU kernel tasks must complete
|
// All GPU kernel tasks must complete
|
||||||
// accelerator_barrier(); // All kernels should ALREADY be complete
|
// accelerator_barrier(); // All kernels should ALREADY be complete
|
||||||
// _grid->StencilBarrier(); // Everyone is here, so noone running slow and still using receive buffer
|
// _grid->StencilBarrier(); // Everyone is here, so noone running slow and still using receive buffer
|
||||||
// But the HaloGather had a barrier too.
|
// But the HaloGather had a barrier too.
|
||||||
for(int i=0;i<Packets.size();i++){
|
for(int i=0;i<Packets.size();i++){
|
||||||
|
// std::cout << "Communicate prepare "<<i<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
_grid->StencilSendToRecvFromPrepare(MpiReqs,
|
_grid->StencilSendToRecvFromPrepare(MpiReqs,
|
||||||
Packets[i].send_buf,
|
Packets[i].send_buf,
|
||||||
Packets[i].to_rank,Packets[i].do_send,
|
Packets[i].to_rank,Packets[i].do_send,
|
||||||
@ -376,8 +380,15 @@ public:
|
|||||||
Packets[i].from_rank,Packets[i].do_recv,
|
Packets[i].from_rank,Packets[i].do_recv,
|
||||||
Packets[i].xbytes,Packets[i].rbytes,i);
|
Packets[i].xbytes,Packets[i].rbytes,i);
|
||||||
}
|
}
|
||||||
|
// std::cout << "Communicate PollDtoH "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
|
_grid->StencilSendToRecvFromPollDtoH (MpiReqs); /* Starts MPI*/
|
||||||
|
// std::cout << "Communicate CopySynch "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
acceleratorCopySynchronise();
|
acceleratorCopySynchronise();
|
||||||
|
// Starts intranode
|
||||||
for(int i=0;i<Packets.size();i++){
|
for(int i=0;i<Packets.size();i++){
|
||||||
|
// std::cout << "Communicate Begin "<<i<<std::endl;
|
||||||
_grid->StencilSendToRecvFromBegin(MpiReqs,
|
_grid->StencilSendToRecvFromBegin(MpiReqs,
|
||||||
Packets[i].send_buf,
|
Packets[i].send_buf,
|
||||||
Packets[i].to_rank,Packets[i].do_send,
|
Packets[i].to_rank,Packets[i].do_send,
|
||||||
@ -395,7 +406,14 @@ public:
|
|||||||
|
|
||||||
void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
|
void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
|
||||||
{
|
{
|
||||||
|
// std::cout << "Communicate Complete "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
FlightRecorder::StepLog("Start communicate complete");
|
FlightRecorder::StepLog("Start communicate complete");
|
||||||
|
// std::cout << "Communicate Complete PollIRecv "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
|
_grid->StencilSendToRecvFromPollIRecv(MpiReqs);
|
||||||
|
// std::cout << "Communicate Complete Complete "<<std::endl;
|
||||||
|
// _grid->Barrier();
|
||||||
_grid->StencilSendToRecvFromComplete(MpiReqs,0); // MPI is done
|
_grid->StencilSendToRecvFromComplete(MpiReqs,0); // MPI is done
|
||||||
if ( this->partialDirichlet ) DslashLogPartial();
|
if ( this->partialDirichlet ) DslashLogPartial();
|
||||||
else if ( this->fullDirichlet ) DslashLogDirichlet();
|
else if ( this->fullDirichlet ) DslashLogDirichlet();
|
||||||
@ -483,6 +501,9 @@ public:
|
|||||||
void HaloGather(const Lattice<vobj> &source,compressor &compress)
|
void HaloGather(const Lattice<vobj> &source,compressor &compress)
|
||||||
{
|
{
|
||||||
// accelerator_barrier();
|
// accelerator_barrier();
|
||||||
|
//////////////////////////////////
|
||||||
|
// I will overwrite my send buffers
|
||||||
|
//////////////////////////////////
|
||||||
_grid->StencilBarrier();// Synch shared memory on a single nodes
|
_grid->StencilBarrier();// Synch shared memory on a single nodes
|
||||||
|
|
||||||
assert(source.Grid()==_grid);
|
assert(source.Grid()==_grid);
|
||||||
@ -496,7 +517,12 @@ public:
|
|||||||
HaloGatherDir(source,compress,point,face_idx);
|
HaloGatherDir(source,compress,point,face_idx);
|
||||||
}
|
}
|
||||||
accelerator_barrier(); // All my local gathers are complete
|
accelerator_barrier(); // All my local gathers are complete
|
||||||
// _grid->StencilBarrier();// Synch shared memory on a single nodes
|
#ifdef NVLINK_GET
|
||||||
|
#warning "NVLINK_GET"
|
||||||
|
_grid->StencilBarrier(); // He can now get mu local gather, I can get his
|
||||||
|
// Synch shared memory on a single nodes; could use an asynchronous barrier here and defer check
|
||||||
|
// Or issue barrier AFTER the DMA is running
|
||||||
|
#endif
|
||||||
face_table_computed=1;
|
face_table_computed=1;
|
||||||
assert(u_comm_offset==_unified_buffer_size);
|
assert(u_comm_offset==_unified_buffer_size);
|
||||||
}
|
}
|
||||||
@ -535,6 +561,7 @@ public:
|
|||||||
coalescedWrite(to[j] ,coalescedRead(from [j]));
|
coalescedWrite(to[j] ,coalescedRead(from [j]));
|
||||||
});
|
});
|
||||||
acceleratorFenceComputeStream();
|
acceleratorFenceComputeStream();
|
||||||
|
// Also fenced in WilsonKernels
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -663,7 +690,6 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
|
|
||||||
surface_list.resize(surface_list_size);
|
surface_list.resize(surface_list_size);
|
||||||
std::vector<int> surface_list_host(surface_list_size);
|
std::vector<int> surface_list_host(surface_list_size);
|
||||||
int32_t ss=0;
|
int32_t ss=0;
|
||||||
@ -683,6 +709,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
acceleratorCopyToDevice(&surface_list_host[0],&surface_list[0],surface_list_size*sizeof(int));
|
acceleratorCopyToDevice(&surface_list_host[0],&surface_list[0],surface_list_size*sizeof(int));
|
||||||
|
std::cout << GridLogMessage<<"BuildSurfaceList size is "<<surface_list_size<<std::endl;
|
||||||
}
|
}
|
||||||
/// Introduce a block structure and switch off comms on boundaries
|
/// Introduce a block structure and switch off comms on boundaries
|
||||||
void DirichletBlock(const Coordinate &dirichlet_block)
|
void DirichletBlock(const Coordinate &dirichlet_block)
|
||||||
|
@ -245,12 +245,12 @@ inline void *acceleratorAllocDevice(size_t bytes)
|
|||||||
inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
|
inline void acceleratorFreeShared(void *ptr){ cudaFree(ptr);};
|
||||||
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
|
inline void acceleratorFreeDevice(void *ptr){ cudaFree(ptr);};
|
||||||
inline void acceleratorFreeHost(void *ptr){ cudaFree(ptr);};
|
inline void acceleratorFreeHost(void *ptr){ cudaFree(ptr);};
|
||||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
|
inline void acceleratorCopyToDevice(const void *from,void *to,size_t bytes) { cudaMemcpy(to,from,bytes, cudaMemcpyHostToDevice);}
|
||||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
|
inline void acceleratorCopyFromDevice(const void *from,void *to,size_t bytes){ cudaMemcpy(to,from,bytes, cudaMemcpyDeviceToHost);}
|
||||||
inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyHostToDevice, stream);}
|
inline void acceleratorCopyToDeviceAsync(const void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyHostToDevice, stream);}
|
||||||
inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToHost, stream);}
|
inline void acceleratorCopyFromDeviceAsync(const void *from, void *to, size_t bytes, cudaStream_t stream = copyStream) { cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToHost, stream);}
|
||||||
inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
|
inline void acceleratorMemSet(void *base,int value,size_t bytes) { cudaMemset(base,value,bytes);}
|
||||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
|
inline void acceleratorCopyDeviceToDeviceAsynch(const void *from,void *to,size_t bytes) // Asynch
|
||||||
{
|
{
|
||||||
cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToDevice,copyStream);
|
cudaMemcpyAsync(to,from,bytes, cudaMemcpyDeviceToDevice,copyStream);
|
||||||
}
|
}
|
||||||
@ -343,11 +343,28 @@ inline void acceleratorFreeDevice(void *ptr){free(ptr,*theGridAccelerator);};
|
|||||||
|
|
||||||
inline void acceleratorCopySynchronise(void) { theCopyAccelerator->wait(); }
|
inline void acceleratorCopySynchronise(void) { theCopyAccelerator->wait(); }
|
||||||
|
|
||||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes);}
|
|
||||||
inline void acceleratorCopyToDeviceAsynch(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes); }
|
///////
|
||||||
inline void acceleratorCopyFromDeviceAsynch(void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); }
|
// Asynch event interface
|
||||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
|
///////
|
||||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
|
typedef sycl::event acceleratorEvent_t;
|
||||||
|
|
||||||
|
inline void acceleratorEventWait(acceleratorEvent_t ev)
|
||||||
|
{
|
||||||
|
ev.wait();
|
||||||
|
}
|
||||||
|
|
||||||
|
inline int acceleratorEventIsComplete(acceleratorEvent_t ev)
|
||||||
|
{
|
||||||
|
return (ev.get_info<sycl::info::event::command_execution_status>() == sycl::info::event_command_status::complete);
|
||||||
|
}
|
||||||
|
|
||||||
|
inline acceleratorEvent_t acceleratorCopyDeviceToDeviceAsynch(const void *from,void *to,size_t bytes) { return theCopyAccelerator->memcpy(to,from,bytes);}
|
||||||
|
inline acceleratorEvent_t acceleratorCopyToDeviceAsynch(const void *from,void *to,size_t bytes) { return theCopyAccelerator->memcpy(to,from,bytes); }
|
||||||
|
inline acceleratorEvent_t acceleratorCopyFromDeviceAsynch(const void *from,void *to,size_t bytes) { return theCopyAccelerator->memcpy(to,from,bytes); }
|
||||||
|
|
||||||
|
inline void acceleratorCopyToDevice(const void *from,void *to,size_t bytes) { theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
|
||||||
|
inline void acceleratorCopyFromDevice(const void *from,void *to,size_t bytes){ theCopyAccelerator->memcpy(to,from,bytes); theCopyAccelerator->wait();}
|
||||||
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theCopyAccelerator->memset(base,value,bytes); theCopyAccelerator->wait();}
|
inline void acceleratorMemSet(void *base,int value,size_t bytes) { theCopyAccelerator->memset(base,value,bytes); theCopyAccelerator->wait();}
|
||||||
|
|
||||||
inline int acceleratorIsCommunicable(void *ptr)
|
inline int acceleratorIsCommunicable(void *ptr)
|
||||||
@ -358,8 +375,10 @@ inline int acceleratorIsCommunicable(void *ptr)
|
|||||||
else return 0;
|
else return 0;
|
||||||
#endif
|
#endif
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
//////////////////////////////////////////////
|
//////////////////////////////////////////////
|
||||||
@ -492,19 +511,19 @@ inline void *acceleratorAllocDevice(size_t bytes)
|
|||||||
inline void acceleratorFreeHost(void *ptr){ auto discard=hipFree(ptr);};
|
inline void acceleratorFreeHost(void *ptr){ auto discard=hipFree(ptr);};
|
||||||
inline void acceleratorFreeShared(void *ptr){ auto discard=hipFree(ptr);};
|
inline void acceleratorFreeShared(void *ptr){ auto discard=hipFree(ptr);};
|
||||||
inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);};
|
inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);};
|
||||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
|
inline void acceleratorCopyToDevice(const void *from,void *to,size_t bytes) { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
|
||||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
|
inline void acceleratorCopyFromDevice(const void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
|
||||||
|
|
||||||
inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);}
|
inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);}
|
||||||
|
|
||||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
|
inline void acceleratorCopyDeviceToDeviceAsynch(const void *from,void *to,size_t bytes) // Asynch
|
||||||
{
|
{
|
||||||
auto discard=hipMemcpyDtoDAsync(to,from,bytes, copyStream);
|
auto discard=hipMemcpyDtoDAsync(to,from,bytes, copyStream);
|
||||||
}
|
}
|
||||||
inline void acceleratorCopyToDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) {
|
inline void acceleratorCopyToDeviceAsync(const void *from, void *to, size_t bytes, hipStream_t stream = copyStream) {
|
||||||
auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyHostToDevice, stream);
|
auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyHostToDevice, stream);
|
||||||
}
|
}
|
||||||
inline void acceleratorCopyFromDeviceAsync(void *from, void *to, size_t bytes, hipStream_t stream = copyStream) {
|
inline void acceleratorCopyFromDeviceAsync(const void *from, void *to, size_t bytes, hipStream_t stream = copyStream) {
|
||||||
auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToHost, stream);
|
auto r = hipMemcpyAsync(to,from,bytes, hipMemcpyDeviceToHost, stream);
|
||||||
}
|
}
|
||||||
inline void acceleratorCopySynchronise(void) { auto discard=hipStreamSynchronize(copyStream); };
|
inline void acceleratorCopySynchronise(void) { auto discard=hipStreamSynchronize(copyStream); };
|
||||||
@ -564,9 +583,9 @@ inline void acceleratorMem(void)
|
|||||||
|
|
||||||
accelerator_inline int acceleratorSIMTlane(int Nsimd) { return 0; } // CUDA specific
|
accelerator_inline int acceleratorSIMTlane(int Nsimd) { return 0; } // CUDA specific
|
||||||
|
|
||||||
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes); }
|
inline void acceleratorCopyToDevice(const void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes); }
|
||||||
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ thread_bcopy(from,to,bytes);}
|
inline void acceleratorCopyFromDevice(const void *from,void *to,size_t bytes){ thread_bcopy(from,to,bytes);}
|
||||||
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes);}
|
inline void acceleratorCopyDeviceToDeviceAsynch(const void *from,void *to,size_t bytes) { thread_bcopy(from,to,bytes);}
|
||||||
inline void acceleratorCopySynchronise(void) {};
|
inline void acceleratorCopySynchronise(void) {};
|
||||||
|
|
||||||
inline int acceleratorIsCommunicable(void *ptr){ return 1; }
|
inline int acceleratorIsCommunicable(void *ptr){ return 1; }
|
||||||
@ -649,15 +668,15 @@ accelerator_inline void acceleratorFence(void)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)
|
inline void acceleratorCopyDeviceToDevice(const void *from,void *to,size_t bytes)
|
||||||
{
|
{
|
||||||
acceleratorCopyDeviceToDeviceAsynch(from,to,bytes);
|
acceleratorCopyDeviceToDeviceAsynch(from,to,bytes);
|
||||||
acceleratorCopySynchronise();
|
acceleratorCopySynchronise();
|
||||||
}
|
}
|
||||||
|
|
||||||
template<class T> void acceleratorPut(T& dev,T&host)
|
template<class T> void acceleratorPut(T& dev,const T&host)
|
||||||
{
|
{
|
||||||
acceleratorCopyToDevice(&host,&dev,sizeof(T));
|
acceleratorCopyToDevice((void *)&host,&dev,sizeof(T));
|
||||||
}
|
}
|
||||||
template<class T> T acceleratorGet(T& dev)
|
template<class T> T acceleratorGet(T& dev)
|
||||||
{
|
{
|
||||||
|
@ -73,9 +73,9 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
|
|||||||
#define thread_critical DO_PRAGMA(omp critical)
|
#define thread_critical DO_PRAGMA(omp critical)
|
||||||
|
|
||||||
#ifdef GRID_OMP
|
#ifdef GRID_OMP
|
||||||
inline void thread_bcopy(void *from, void *to,size_t bytes)
|
inline void thread_bcopy(const void *from, void *to,size_t bytes)
|
||||||
{
|
{
|
||||||
uint64_t *ufrom = (uint64_t *)from;
|
const uint64_t *ufrom = (const uint64_t *)from;
|
||||||
uint64_t *uto = (uint64_t *)to;
|
uint64_t *uto = (uint64_t *)to;
|
||||||
assert(bytes%8==0);
|
assert(bytes%8==0);
|
||||||
uint64_t words=bytes/8;
|
uint64_t words=bytes/8;
|
||||||
@ -84,7 +84,7 @@ inline void thread_bcopy(void *from, void *to,size_t bytes)
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
inline void thread_bcopy(void *from, void *to,size_t bytes)
|
inline void thread_bcopy(const void *from, void *to,size_t bytes)
|
||||||
{
|
{
|
||||||
bcopy(from,to,bytes);
|
bcopy(from,to,bytes);
|
||||||
}
|
}
|
||||||
|
@ -509,7 +509,14 @@ void Grid_init(int *argc,char ***argv)
|
|||||||
Grid_default_latt,
|
Grid_default_latt,
|
||||||
Grid_default_mpi);
|
Grid_default_mpi);
|
||||||
|
|
||||||
|
if( GridCmdOptionExists(*argv,*argv+*argc,"--flightrecorder") ){
|
||||||
|
std::cout << GridLogMessage <<" Enabling flight recorder " <<std::endl;
|
||||||
|
FlightRecorder::SetLoggingMode(FlightRecorder::LoggingModeRecord);
|
||||||
|
FlightRecorder::PrintEntireLog = 1;
|
||||||
|
FlightRecorder::ChecksumComms = 1;
|
||||||
|
FlightRecorder::ChecksumCommsSend=1;
|
||||||
|
}
|
||||||
|
|
||||||
if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
|
if( GridCmdOptionExists(*argv,*argv+*argc,"--decomposition") ){
|
||||||
std::cout<<GridLogMessage<<"Grid Default Decomposition patterns\n";
|
std::cout<<GridLogMessage<<"Grid Default Decomposition patterns\n";
|
||||||
std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
|
std::cout<<GridLogMessage<<"\tOpenMP threads : "<<GridThread::GetThreads()<<std::endl;
|
||||||
@ -651,3 +658,4 @@ void Grid_debug_handler_init(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
NAMESPACE_END(Grid);
|
NAMESPACE_END(Grid);
|
||||||
|
|
||||||
|
@ -50,7 +50,7 @@ namespace Grid{
|
|||||||
int64_t index64;
|
int64_t index64;
|
||||||
IndexFromCoorReversed(coor,index64,dims);
|
IndexFromCoorReversed(coor,index64,dims);
|
||||||
if ( index64>=2*1024*1024*1024LL ){
|
if ( index64>=2*1024*1024*1024LL ){
|
||||||
//std::cout << " IndexFromCoorReversed " << coor<<" index " << index64<< " dims "<<dims<<std::endl;
|
// std::cout << " IndexFromCoorReversed " << coor<<" index " << index64<< " dims "<<dims<<std::endl;
|
||||||
}
|
}
|
||||||
assert(index64<2*1024*1024*1024LL);
|
assert(index64<2*1024*1024*1024LL);
|
||||||
index = (int) index64;
|
index = (int) index64;
|
||||||
|
@ -52,7 +52,7 @@ int main (int argc, char ** argv)
|
|||||||
|
|
||||||
int threads = GridThread::GetThreads();
|
int threads = GridThread::GetThreads();
|
||||||
|
|
||||||
int Ls=8;
|
int Ls=16;
|
||||||
for(int i=0;i<argc;i++) {
|
for(int i=0;i<argc;i++) {
|
||||||
if(std::string(argv[i]) == "-Ls"){
|
if(std::string(argv[i]) == "-Ls"){
|
||||||
std::stringstream ss(argv[i+1]); ss >> Ls;
|
std::stringstream ss(argv[i+1]); ss >> Ls;
|
||||||
|
@ -492,17 +492,18 @@ public:
|
|||||||
}
|
}
|
||||||
FGrid->Barrier();
|
FGrid->Barrier();
|
||||||
double t1=usecond();
|
double t1=usecond();
|
||||||
uint64_t ncall = 500;
|
uint64_t no = 50;
|
||||||
|
uint64_t ni = 100;
|
||||||
FGrid->Broadcast(0,&ncall,sizeof(ncall));
|
|
||||||
|
|
||||||
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
||||||
|
|
||||||
time_statistics timestat;
|
time_statistics timestat;
|
||||||
std::vector<double> t_time(ncall);
|
std::vector<double> t_time(no);
|
||||||
for(uint64_t i=0;i<ncall;i++){
|
for(uint64_t i=0;i<no;i++){
|
||||||
t0=usecond();
|
t0=usecond();
|
||||||
Dw.DhopEO(src_o,r_e,DaggerNo);
|
for(uint64_t j=0;j<ni;j++){
|
||||||
|
Dw.DhopEO(src_o,r_e,DaggerNo);
|
||||||
|
}
|
||||||
t1=usecond();
|
t1=usecond();
|
||||||
t_time[i] = t1-t0;
|
t_time[i] = t1-t0;
|
||||||
}
|
}
|
||||||
@ -520,11 +521,11 @@ public:
|
|||||||
double mf_hi, mf_lo, mf_err;
|
double mf_hi, mf_lo, mf_err;
|
||||||
|
|
||||||
timestat.statistics(t_time);
|
timestat.statistics(t_time);
|
||||||
mf_hi = flops/timestat.min;
|
mf_hi = flops/timestat.min*ni;
|
||||||
mf_lo = flops/timestat.max;
|
mf_lo = flops/timestat.max*ni;
|
||||||
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
||||||
|
|
||||||
mflops = flops/timestat.mean;
|
mflops = flops/timestat.mean*ni;
|
||||||
mflops_all.push_back(mflops);
|
mflops_all.push_back(mflops);
|
||||||
if ( mflops_best == 0 ) mflops_best = mflops;
|
if ( mflops_best == 0 ) mflops_best = mflops;
|
||||||
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
||||||
@ -535,6 +536,7 @@ public:
|
|||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank "<< mflops/NP<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank "<< mflops/NP<<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node "<< mflops/NN<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node "<< mflops/NN<<std::endl;
|
||||||
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo us per call "<< timestat.mean/ni<<std::endl;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -654,17 +656,19 @@ public:
|
|||||||
}
|
}
|
||||||
FGrid->Barrier();
|
FGrid->Barrier();
|
||||||
double t1=usecond();
|
double t1=usecond();
|
||||||
uint64_t ncall = 500;
|
|
||||||
|
|
||||||
FGrid->Broadcast(0,&ncall,sizeof(ncall));
|
uint64_t no = 50;
|
||||||
|
uint64_t ni = 100;
|
||||||
|
|
||||||
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
||||||
|
|
||||||
time_statistics timestat;
|
time_statistics timestat;
|
||||||
std::vector<double> t_time(ncall);
|
std::vector<double> t_time(no);
|
||||||
for(uint64_t i=0;i<ncall;i++){
|
for(uint64_t i=0;i<no;i++){
|
||||||
t0=usecond();
|
t0=usecond();
|
||||||
Ds.DhopEO(src_o,r_e,DaggerNo);
|
for(uint64_t j=0;j<ni;j++){
|
||||||
|
Ds.DhopEO(src_o,r_e,DaggerNo);
|
||||||
|
}
|
||||||
t1=usecond();
|
t1=usecond();
|
||||||
t_time[i] = t1-t0;
|
t_time[i] = t1-t0;
|
||||||
}
|
}
|
||||||
@ -675,11 +679,11 @@ public:
|
|||||||
double mf_hi, mf_lo, mf_err;
|
double mf_hi, mf_lo, mf_err;
|
||||||
|
|
||||||
timestat.statistics(t_time);
|
timestat.statistics(t_time);
|
||||||
mf_hi = flops/timestat.min;
|
mf_hi = flops/timestat.min*ni;
|
||||||
mf_lo = flops/timestat.max;
|
mf_lo = flops/timestat.max*ni;
|
||||||
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
||||||
|
|
||||||
mflops = flops/timestat.mean;
|
mflops = flops/timestat.mean*ni;
|
||||||
mflops_all.push_back(mflops);
|
mflops_all.push_back(mflops);
|
||||||
if ( mflops_best == 0 ) mflops_best = mflops;
|
if ( mflops_best == 0 ) mflops_best = mflops;
|
||||||
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
||||||
@ -689,6 +693,7 @@ public:
|
|||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank "<< mflops/NP<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per rank "<< mflops/NP<<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node "<< mflops/NN<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo mflop/s per node "<< mflops/NN<<std::endl;
|
||||||
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Deo us per call "<< timestat.mean/ni<<std::endl;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -792,19 +797,18 @@ public:
|
|||||||
Dc.M(src,r);
|
Dc.M(src,r);
|
||||||
}
|
}
|
||||||
FGrid->Barrier();
|
FGrid->Barrier();
|
||||||
double t1=usecond();
|
uint64_t ni = 100;
|
||||||
uint64_t ncall = 500;
|
uint64_t no = 50;
|
||||||
|
|
||||||
FGrid->Broadcast(0,&ncall,sizeof(ncall));
|
|
||||||
|
|
||||||
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
// std::cout << GridLogMessage << " Estimate " << ncall << " calls per second"<<std::endl;
|
||||||
|
|
||||||
time_statistics timestat;
|
time_statistics timestat;
|
||||||
std::vector<double> t_time(ncall);
|
std::vector<double> t_time(no);
|
||||||
for(uint64_t i=0;i<ncall;i++){
|
for(uint64_t i=0;i<no;i++){
|
||||||
t0=usecond();
|
double t0=usecond();
|
||||||
Dc.M(src,r);
|
for(uint64_t j=0;j<ni;j++){
|
||||||
t1=usecond();
|
Dc.M(src,r);
|
||||||
|
}
|
||||||
|
double t1=usecond();
|
||||||
t_time[i] = t1-t0;
|
t_time[i] = t1-t0;
|
||||||
}
|
}
|
||||||
FGrid->Barrier();
|
FGrid->Barrier();
|
||||||
@ -814,20 +818,21 @@ public:
|
|||||||
double mf_hi, mf_lo, mf_err;
|
double mf_hi, mf_lo, mf_err;
|
||||||
|
|
||||||
timestat.statistics(t_time);
|
timestat.statistics(t_time);
|
||||||
mf_hi = flops/timestat.min;
|
mf_hi = flops/timestat.min*ni;
|
||||||
mf_lo = flops/timestat.max;
|
mf_lo = flops/timestat.max*ni;
|
||||||
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
mf_err= flops/timestat.min * timestat.err/timestat.mean;
|
||||||
|
|
||||||
mflops = flops/timestat.mean;
|
mflops = flops/timestat.mean*ni;
|
||||||
mflops_all.push_back(mflops);
|
mflops_all.push_back(mflops);
|
||||||
if ( mflops_best == 0 ) mflops_best = mflops;
|
if ( mflops_best == 0 ) mflops_best = mflops;
|
||||||
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
if ( mflops_worst== 0 ) mflops_worst= mflops;
|
||||||
if ( mflops>mflops_best ) mflops_best = mflops;
|
if ( mflops>mflops_best ) mflops_best = mflops;
|
||||||
if ( mflops<mflops_worst) mflops_worst= mflops;
|
if ( mflops<mflops_worst) mflops_worst= mflops;
|
||||||
|
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s = "<< mflops << " ("<<mf_err<<") " << mf_lo<<"-"<<mf_hi <<" "<<timestat.mean<<" us"<<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s per rank "<< mflops/NP<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s per rank "<< mflops/NP<<std::endl;
|
||||||
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s per node "<< mflops/NN<<std::endl;
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov mflop/s per node "<< mflops/NN<<std::endl;
|
||||||
|
std::cout<<GridLogMessage << std::fixed << std::setprecision(1)<<"Dclov us per call "<< timestat.mean/ni<<std::endl;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -872,7 +877,7 @@ int main (int argc, char ** argv)
|
|||||||
int do_dslash=1;
|
int do_dslash=1;
|
||||||
|
|
||||||
int sel=4;
|
int sel=4;
|
||||||
std::vector<int> L_list({8,12,16,24});
|
std::vector<int> L_list({8,12,16,24,32});
|
||||||
int selm1=sel-1;
|
int selm1=sel-1;
|
||||||
|
|
||||||
std::vector<double> clover;
|
std::vector<double> clover;
|
||||||
|
74
systems/Aurora/benchmarks/bench16.pbs
Normal file
74
systems/Aurora/benchmarks/bench16.pbs
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
##PBS -q LatticeQCD_aesp_CNDA
|
||||||
|
#PBS -q debug-scaling
|
||||||
|
##PBS -q prod
|
||||||
|
#PBS -l select=16
|
||||||
|
#PBS -l walltime=00:20:00
|
||||||
|
#PBS -A LatticeQCD_aesp_CNDA
|
||||||
|
|
||||||
|
cd $PBS_O_WORKDIR
|
||||||
|
|
||||||
|
source ../sourceme.sh
|
||||||
|
|
||||||
|
cp $PBS_NODEFILE nodefile
|
||||||
|
|
||||||
|
export OMP_NUM_THREADS=4
|
||||||
|
export MPICH_OFI_NIC_POLICY=GPU
|
||||||
|
|
||||||
|
#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
|
||||||
|
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
|
||||||
|
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
|
||||||
|
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
|
||||||
|
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
|
||||||
|
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
|
||||||
|
|
||||||
|
#
|
||||||
|
# Local vol 16.16.16.32
|
||||||
|
#
|
||||||
|
|
||||||
|
LX=16
|
||||||
|
LY=16
|
||||||
|
LZ=16
|
||||||
|
LT=32
|
||||||
|
|
||||||
|
NX=2
|
||||||
|
NY=2
|
||||||
|
NZ=4
|
||||||
|
NT=1
|
||||||
|
|
||||||
|
GX=2
|
||||||
|
GY=2
|
||||||
|
GZ=1
|
||||||
|
GT=3
|
||||||
|
|
||||||
|
PX=$((NX * GX ))
|
||||||
|
PY=$((NY * GY ))
|
||||||
|
PZ=$((NZ * GZ ))
|
||||||
|
PT=$((NT * GT ))
|
||||||
|
|
||||||
|
VX=$((PX * LX ))
|
||||||
|
VY=$((PY * LY ))
|
||||||
|
VZ=$((PZ * LZ ))
|
||||||
|
VT=$((PT * LT ))
|
||||||
|
|
||||||
|
NP=$((PX*PY*PZ*PT))
|
||||||
|
VOL=${VX}.${VY}.${VZ}.${VT}
|
||||||
|
AT=8
|
||||||
|
MPI=${PX}.${PY}.${PZ}.${PT}
|
||||||
|
|
||||||
|
CMD="mpiexec -np $NP -ppn 12 -envall \
|
||||||
|
./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi $MPI --grid $VOL \
|
||||||
|
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads $AT --comms-overlap "
|
||||||
|
|
||||||
|
echo VOL $VOL
|
||||||
|
echo MPI $MPI
|
||||||
|
echo NPROC $NP
|
||||||
|
echo $CMD
|
||||||
|
$CMD
|
||||||
|
|
@ -19,7 +19,7 @@ export ONEAPI_DEVICE_FILTER=gpu,level_zero
|
|||||||
|
|
||||||
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
|
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
|
||||||
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
|
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
|
||||||
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:3
|
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:4
|
||||||
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
|
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
|
||||||
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
|
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
|
||||||
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1
|
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1
|
||||||
@ -30,8 +30,8 @@ echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_A
|
|||||||
|
|
||||||
if [ $PALS_RANKID = "0" ]
|
if [ $PALS_RANKID = "0" ]
|
||||||
then
|
then
|
||||||
numactl -p $NUMAP -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
|
# numactl -p $NUMAP -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
|
||||||
# numactl -p $NUMAP -N $NUMAP "$@"
|
numactl -p $NUMAP -N $NUMAP "$@"
|
||||||
else
|
else
|
||||||
numactl -p $NUMAP -N $NUMAP "$@"
|
numactl -p $NUMAP -N $NUMAP "$@"
|
||||||
fi
|
fi
|
||||||
|
@ -1,18 +1,19 @@
|
|||||||
#Ahead of time compile for PVC
|
#Ahead of time compile for PVC
|
||||||
|
|
||||||
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl -lnuma -L/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/lib"
|
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl -lnuma -L/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/lib -fPIC -fsycl-max-parallel-link-jobs=16 -fno-sycl-rdc"
|
||||||
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -I/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/include/"
|
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -I/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/include/ -fPIC"
|
||||||
|
|
||||||
#JIT compile
|
#JIT compile
|
||||||
#export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
|
#export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
|
||||||
#export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions "
|
#export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions "
|
||||||
|
|
||||||
../../configure \
|
../configure \
|
||||||
--enable-simd=GPU \
|
--enable-simd=GPU \
|
||||||
--enable-reduction=grid \
|
--enable-reduction=grid \
|
||||||
--enable-gen-simd-width=64 \
|
--enable-gen-simd-width=64 \
|
||||||
--enable-comms=mpi-auto \
|
--enable-comms=mpi-auto \
|
||||||
--enable-debug \
|
--enable-debug \
|
||||||
|
--prefix $HOME/gpt-install \
|
||||||
--disable-gparity \
|
--disable-gparity \
|
||||||
--disable-fermion-reps \
|
--disable-fermion-reps \
|
||||||
--with-lime=$CLIME \
|
--with-lime=$CLIME \
|
||||||
|
206
systems/WorkArounds.txt
Normal file
206
systems/WorkArounds.txt
Normal file
@ -0,0 +1,206 @@
|
|||||||
|
The purpose of this file is to collate all non-obvious known magic shell variables
|
||||||
|
and compiler flags required for either correctness or performance on various systems.
|
||||||
|
|
||||||
|
A repository of work-arounds.
|
||||||
|
|
||||||
|
Contents:
|
||||||
|
1. Interconnect + MPI
|
||||||
|
2. Compilation
|
||||||
|
3. Profiling
|
||||||
|
|
||||||
|
************************
|
||||||
|
* 1. INTERCONNECT + MPI
|
||||||
|
************************
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
MPI2-IO correctness: force OpenMPI to use the MPICH romio implementation for parallel I/O
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
export OMPI_MCA_io=romio321
|
||||||
|
|
||||||
|
--------------------------------------
|
||||||
|
ROMIO fail with > 2GB per node read (32 bit issue)
|
||||||
|
--------------------------------------
|
||||||
|
|
||||||
|
Use later MPICH
|
||||||
|
|
||||||
|
https://github.com/paboyle/Grid/issues/381
|
||||||
|
|
||||||
|
https://github.com/pmodels/mpich/commit/3a479ab0
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Slingshot: Frontier and Perlmutter libfabric slow down
|
||||||
|
and physical memory fragmentation
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
export FI_MR_CACHE_MONITOR=disabled
|
||||||
|
or
|
||||||
|
export FI_MR_CACHE_MONITOR=kdreg2
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Perlmutter
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
export MPICH_RDMA_ENABLED_CUDA=1
|
||||||
|
export MPICH_GPU_IPC_ENABLED=1
|
||||||
|
export MPICH_GPU_EAGER_REGISTER_HOST_MEM=0
|
||||||
|
export MPICH_GPU_NO_ASYNC_MEMCPY=0
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Frontier/LumiG
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
Hiding ROCR_VISIBLE_DEVICES triggers SDMA engines to be used for GPU-GPU
|
||||||
|
|
||||||
|
cat << EOF > select_gpu
|
||||||
|
#!/bin/bash
|
||||||
|
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||||
|
export MPICH_SMP_SINGLE_COPY_MODE=XPMEM
|
||||||
|
export GPU_MAP=(0 1 2 3 7 6 5 4)
|
||||||
|
export NUMA_MAP=(3 3 1 1 2 2 0 0)
|
||||||
|
export GPU=\${GPU_MAP[\$SLURM_LOCALID]}
|
||||||
|
export NUMA=\${NUMA_MAP[\$SLURM_LOCALID]}
|
||||||
|
export HIP_VISIBLE_DEVICES=\$GPU
|
||||||
|
unset ROCR_VISIBLE_DEVICES
|
||||||
|
echo RANK \$SLURM_LOCALID using GPU \$GPU
|
||||||
|
exec numactl -m \$NUMA -N \$NUMA \$*
|
||||||
|
EOF
|
||||||
|
chmod +x ./select_gpu
|
||||||
|
|
||||||
|
srun ./select_gpu BINARY
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Mellanox performance with A100 GPU (Tursa, Booster, Leonardo)
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
export OMPI_MCA_btl=^uct,openib
|
||||||
|
export UCX_TLS=gdr_copy,rc,rc_x,sm,cuda_copy,cuda_ipc
|
||||||
|
export UCX_RNDV_SCHEME=put_zcopy
|
||||||
|
export UCX_RNDV_THRESH=16384
|
||||||
|
export UCX_IB_GPU_DIRECT_RDMA=yes
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Mellanox + A100 correctness (Tursa, Booster, Leonardo)
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
export UCX_MEMTYPE_CACHE=n
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
MPICH/Aurora/PVC correctness and performance
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
https://github.com/pmodels/mpich/issues/7302
|
||||||
|
|
||||||
|
--enable-cuda-aware-mpi=no
|
||||||
|
--enable-unified=no
|
||||||
|
|
||||||
|
Grid's internal D-H-H-D pipeline mode, avoid device memory in MPI
|
||||||
|
Do not use SVM
|
||||||
|
|
||||||
|
Ideally use MPICH with fix to issue 7302:
|
||||||
|
|
||||||
|
https://github.com/pmodels/mpich/pull/7312
|
||||||
|
|
||||||
|
Ideally:
|
||||||
|
MPIR_CVAR_CH4_IPC_GPU_HANDLE_CACHE=generic
|
||||||
|
|
||||||
|
Alternatives:
|
||||||
|
export MPIR_CVAR_NOLOCAL=1
|
||||||
|
export MPIR_CVAR_CH4_IPC_GPU_P2P_THRESHOLD=1000000000
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
MPICH/Aurora/PVC correctness and performance
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
Broken:
|
||||||
|
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
|
||||||
|
|
||||||
|
This gives good performance without requiring
|
||||||
|
--enable-cuda-aware-mpi=no
|
||||||
|
|
||||||
|
But is an open issue reported by James Osborn
|
||||||
|
https://github.com/pmodels/mpich/issues/7139
|
||||||
|
|
||||||
|
Possibly resolved, but it is unclear whether the fix is present in the installed software yet.
|
||||||
|
|
||||||
|
************************
|
||||||
|
* 2. COMPILATION
|
||||||
|
************************
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
G++ compiler breakage / graveyard
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
9.3.0, 10.3.1,
|
||||||
|
https://github.com/paboyle/Grid/issues/290
|
||||||
|
https://github.com/paboyle/Grid/issues/264
|
||||||
|
|
||||||
|
Working (-) Broken (X):
|
||||||
|
|
||||||
|
4.9.0 -
|
||||||
|
4.9.1 -
|
||||||
|
5.1.0 X
|
||||||
|
5.2.0 X
|
||||||
|
5.3.0 X
|
||||||
|
5.4.0 X
|
||||||
|
6.1.0 X
|
||||||
|
6.2.0 X
|
||||||
|
6.3.0 -
|
||||||
|
7.1.0 -
|
||||||
|
8.0.0 (HEAD) -
|
||||||
|
|
||||||
|
https://github.com/paboyle/Grid/issues/100
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
AMD GPU nodes :
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
multiple ROCM versions broken; use 5.3.0
|
||||||
|
manifests itself as wrong results in fp32
|
||||||
|
|
||||||
|
https://github.com/paboyle/Grid/issues/464
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Aurora/PVC
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
SYCL ahead of time compilation (fixes rare runtime JIT errors and faster runtime, PB)
|
||||||
|
SYCL slow link and relocatable code issues (Christoph Lehner)
|
||||||
|
Opt large register file required for good performance in fp64
|
||||||
|
|
||||||
|
|
||||||
|
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
|
||||||
|
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl -fPIC -fsycl-max-parallel-link-jobs=16 -fno-sycl-rdc"
|
||||||
|
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -fPIC"
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Aurora/PVC useful extra options
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
Host only sanitizer:
|
||||||
|
-Xarch_host -fsanitize=leak
|
||||||
|
-Xarch_host -fsanitize=address
|
||||||
|
|
||||||
|
Deterministic MPI reduction:
|
||||||
|
export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
|
||||||
|
export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
|
||||||
|
export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
|
||||||
|
unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
|
||||||
|
unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
|
||||||
|
unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
************************
|
||||||
|
* 3. Visual profile tools
|
||||||
|
************************
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Frontier/rocprof
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Aurora/unitrace
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
--------------------------------------------------------------------
|
||||||
|
Tursa/nsight-sys
|
||||||
|
--------------------------------------------------------------------
|
32
systems/sdcc-genoa/bench.slurm
Normal file
32
systems/sdcc-genoa/bench.slurm
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#SBATCH --partition lqcd
|
||||||
|
#SBATCH --time=00:50:00
|
||||||
|
#SBATCH -A lqcdtest
|
||||||
|
#SBATCH -q lqcd
|
||||||
|
#SBATCH --exclusive
|
||||||
|
#SBATCH --nodes=1
|
||||||
|
#SBATCH -w genoahost001,genoahost003,genoahost050,genoahost054
|
||||||
|
#SBATCH --ntasks=1
|
||||||
|
#SBATCH --cpus-per-task=64
|
||||||
|
#SBATCH --qos lqcd
|
||||||
|
|
||||||
|
source sourceme.sh
|
||||||
|
|
||||||
|
export PLACES=(1:16:4 1:32:2 0:64:1);
|
||||||
|
export THR=(16 32 64)
|
||||||
|
|
||||||
|
for t in 2
|
||||||
|
do
|
||||||
|
|
||||||
|
export OMP_NUM_THREADS=${THR[$t]}
|
||||||
|
export OMP_PLACES=${PLACES[$t]}
|
||||||
|
export thr=${THR[$t]}
|
||||||
|
|
||||||
|
#for vol in 24.24.24.24 32.32.32.32 48.48.48.96
|
||||||
|
for vol in 48.48.48.96
|
||||||
|
do
|
||||||
|
srun -N1 -n1 ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid $vol --dslash-asm --shm 8192 > $vol.1node.thr$thr
|
||||||
|
done
|
||||||
|
#srun -N1 -n1 ./benchmarks/Benchmark_usqcd --mpi 1.1.1.1 --grid $vol > usqcd.1node.thr$thr
|
||||||
|
done
|
||||||
|
|
36
systems/sdcc-genoa/bench2.slurm
Normal file
36
systems/sdcc-genoa/bench2.slurm
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#SBATCH --partition lqcd
|
||||||
|
#SBATCH --time=00:50:00
|
||||||
|
#SBATCH -A lqcdtest
|
||||||
|
#SBATCH -q lqcd
|
||||||
|
#SBATCH --exclusive
|
||||||
|
#SBATCH --nodes=2
|
||||||
|
#SBATCH -w genoahost001,genoahost003,genoahost050,genoahost054
|
||||||
|
#SBATCH --ntasks=2
|
||||||
|
#SBATCH --cpus-per-task=64
|
||||||
|
#SBATCH --qos lqcd
|
||||||
|
|
||||||
|
source sourceme.sh
|
||||||
|
|
||||||
|
export PLACES=(1:16:4 1:32:2 0:64:1);
|
||||||
|
export THR=(16 32 64)
|
||||||
|
|
||||||
|
nodes=2
|
||||||
|
mpi=1.1.1.2
|
||||||
|
|
||||||
|
for t in 2
|
||||||
|
do
|
||||||
|
|
||||||
|
export OMP_NUM_THREADS=${THR[$t]}
|
||||||
|
export OMP_PLACES=${PLACES[$t]}
|
||||||
|
export thr=${THR[$t]}
|
||||||
|
|
||||||
|
#srun -N$nodes -n$nodes ./benchmarks/Benchmark_usqcd --mpi $mpi --grid 32.32.32.32 > usqcd.n$nodes.thr$thr
|
||||||
|
|
||||||
|
for vol in 64.64.64.128
|
||||||
|
do
|
||||||
|
srun -N$nodes -n$nodes ./benchmarks/Benchmark_dwf_fp32 --mpi $mpi --grid $vol --dslash-asm --comms-overlap --shm 8192 > $vol.n$nodes.overlap.thr$thr
|
||||||
|
done
|
||||||
|
|
||||||
|
done
|
||||||
|
|
16
systems/sdcc-genoa/config-command
Normal file
16
systems/sdcc-genoa/config-command
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
../../configure \
|
||||||
|
--enable-comms=mpi-auto \
|
||||||
|
--enable-unified=yes \
|
||||||
|
--enable-shm=shmopen \
|
||||||
|
--enable-shm-fast-path=shmopen \
|
||||||
|
--enable-accelerator=none \
|
||||||
|
--enable-simd=AVX512 \
|
||||||
|
--disable-accelerator-cshift \
|
||||||
|
--disable-fermion-reps \
|
||||||
|
--disable-gparity \
|
||||||
|
CXX=clang++ \
|
||||||
|
MPICXX=mpicxx \
|
||||||
|
CXXFLAGS="-std=c++17"
|
||||||
|
|
||||||
|
|
||||||
|
|
4
systems/sdcc-genoa/sourceme.sh
Normal file
4
systems/sdcc-genoa/sourceme.sh
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
source $HOME/spack/share/spack/setup-env.sh
|
||||||
|
spack load llvm@17.0.4
|
||||||
|
export LD_LIBRARY_PATH=/direct/sdcc+u/paboyle/spack/opt/spack/linux-almalinux8-icelake/gcc-8.5.0/llvm-17.0.4-laufdrcip63ivkadmtgoepwmj3dtztdu/lib:$LD_LIBRARY_PATH
|
||||||
|
module load openmpi
|
@ -154,6 +154,8 @@ public:
|
|||||||
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
|
// std::cout<<GridLogMessage << "Calling PreSmoother input residual "<<norm2(in) <<std::endl;
|
||||||
double t;
|
double t;
|
||||||
// Fine Smoother
|
// Fine Smoother
|
||||||
|
// out = in;
|
||||||
|
out = Zero();
|
||||||
t=-usecond();
|
t=-usecond();
|
||||||
_PreSmoother(in,out);
|
_PreSmoother(in,out);
|
||||||
t+=usecond();
|
t+=usecond();
|
||||||
@ -172,6 +174,7 @@ public:
|
|||||||
|
|
||||||
// Coarse correction
|
// Coarse correction
|
||||||
t=-usecond();
|
t=-usecond();
|
||||||
|
Csol = Zero();
|
||||||
_CoarseSolve(Csrc,Csol);
|
_CoarseSolve(Csrc,Csol);
|
||||||
//Csol=Zero();
|
//Csol=Zero();
|
||||||
t+=usecond();
|
t+=usecond();
|
||||||
@ -191,6 +194,8 @@ public:
|
|||||||
|
|
||||||
// Fine Smoother
|
// Fine Smoother
|
||||||
t=-usecond();
|
t=-usecond();
|
||||||
|
// vec2=vec1;
|
||||||
|
vec2=Zero();
|
||||||
_PostSmoother(vec1,vec2);
|
_PostSmoother(vec1,vec2);
|
||||||
t+=usecond();
|
t+=usecond();
|
||||||
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
|
std::cout<<GridLogMessage << "PostSmoother took "<< t/1000.0<< "ms" <<std::endl;
|
||||||
@ -215,7 +220,8 @@ int main (int argc, char ** argv)
|
|||||||
// Construct a coarsened grid
|
// Construct a coarsened grid
|
||||||
Coordinate clatt = GridDefaultLatt();
|
Coordinate clatt = GridDefaultLatt();
|
||||||
for(int d=0;d<clatt.size();d++){
|
for(int d=0;d<clatt.size();d++){
|
||||||
clatt[d] = clatt[d]/4;
|
clatt[d] = clatt[d]/2;
|
||||||
|
// clatt[d] = clatt[d]/4;
|
||||||
}
|
}
|
||||||
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
|
GridCartesian *Coarse4d = SpaceTimeGrid::makeFourDimGrid(clatt, GridDefaultSimd(Nd,vComplex::Nsimd()),GridDefaultMpi());;
|
||||||
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
|
GridCartesian *Coarse5d = SpaceTimeGrid::makeFiveDimGrid(1,Coarse4d);
|
||||||
@ -244,7 +250,7 @@ int main (int argc, char ** argv)
|
|||||||
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
DomainWallFermionD Ddwf(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
|
||||||
DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);
|
DomainWallFermionD Dpv(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,1.0,M5);
|
||||||
|
|
||||||
const int nbasis = 8;
|
const int nbasis = 20;
|
||||||
const int cb = 0 ;
|
const int cb = 0 ;
|
||||||
LatticeFermion prom(FGrid);
|
LatticeFermion prom(FGrid);
|
||||||
|
|
||||||
@ -260,7 +266,25 @@ int main (int argc, char ** argv)
|
|||||||
typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
|
typedef PVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> PVdagM_t;
|
||||||
typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
|
typedef ShiftedPVdagMLinearOperator<DomainWallFermionD,LatticeFermionD> ShiftedPVdagM_t;
|
||||||
PVdagM_t PVdagM(Ddwf,Dpv);
|
PVdagM_t PVdagM(Ddwf,Dpv);
|
||||||
ShiftedPVdagM_t ShiftedPVdagM(2.0,Ddwf,Dpv);
|
// ShiftedPVdagM_t ShiftedPVdagM(2.0,Ddwf,Dpv); // 355
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(1.0,Ddwf,Dpv); // 246
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.5,Ddwf,Dpv); // 183
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 145
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 134
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 127 -- NULL space via inverse iteration
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 57 -- NULL space via inverse iteration; 3 iterations
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 57 , tighter inversion
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 49 iters
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // nbasis 20 -- 70 iters; asymmetric
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.25,Ddwf,Dpv); // 58; Loosen coarse, tighten fine
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 56 ...
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 51 ... with 24 vecs
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 31 ... with 24 vecs and 2^4 blocking
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 43 ... with 16 vecs and 2^4 blocking, sloppier
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 35 ... with 20 vecs and 2^4 blocking, looser coarse
|
||||||
|
// ShiftedPVdagM_t ShiftedPVdagM(0.1,Ddwf,Dpv); // 64 ... with 20 vecs, Christoph setup, and 2^4 blocking, looser coarse
|
||||||
|
ShiftedPVdagM_t ShiftedPVdagM(0.01,Ddwf,Dpv); //
|
||||||
|
|
||||||
|
|
||||||
// Run power method on HOA??
|
// Run power method on HOA??
|
||||||
@ -269,6 +293,7 @@ int main (int argc, char ** argv)
|
|||||||
// Warning: This routine calls PVdagM.Op, not PVdagM.HermOp
|
// Warning: This routine calls PVdagM.Op, not PVdagM.HermOp
|
||||||
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
|
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
|
||||||
Subspace AggregatesPD(Coarse5d,FGrid,cb);
|
Subspace AggregatesPD(Coarse5d,FGrid,cb);
|
||||||
|
/*
|
||||||
AggregatesPD.CreateSubspaceChebyshev(RNG5,
|
AggregatesPD.CreateSubspaceChebyshev(RNG5,
|
||||||
PVdagM,
|
PVdagM,
|
||||||
nbasis,
|
nbasis,
|
||||||
@ -278,6 +303,10 @@ int main (int argc, char ** argv)
|
|||||||
200,
|
200,
|
||||||
200,
|
200,
|
||||||
0.0);
|
0.0);
|
||||||
|
*/
|
||||||
|
AggregatesPD.CreateSubspaceGCR(RNG5,
|
||||||
|
PVdagM,
|
||||||
|
nbasis);
|
||||||
|
|
||||||
LittleDiracOperator LittleDiracOpPV(geom,FGrid,Coarse5d);
|
LittleDiracOperator LittleDiracOpPV(geom,FGrid,Coarse5d);
|
||||||
LittleDiracOpPV.CoarsenOperator(PVdagM,AggregatesPD);
|
LittleDiracOpPV.CoarsenOperator(PVdagM,AggregatesPD);
|
||||||
@ -334,12 +363,13 @@ int main (int argc, char ** argv)
|
|||||||
///////////////////////////////////////
|
///////////////////////////////////////
|
||||||
|
|
||||||
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
||||||
std::cout<<GridLogMessage<<" Coarse Grid Solve "<<std::endl;
|
std::cout<<GridLogMessage<<" Coarse Grid Solve -- Level 3 "<<std::endl;
|
||||||
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
||||||
TrivialPrecon<CoarseVector> simple;
|
TrivialPrecon<CoarseVector> simple;
|
||||||
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
|
NonHermitianLinearOperator<LittleDiracOperator,CoarseVector> LinOpCoarse(LittleDiracOpPV);
|
||||||
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-8, 100, LinOpCoarse,simple,10,10);
|
// PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(1.0e-4, 100, LinOpCoarse,simple,10,10);
|
||||||
L2PGCR.Level(2);
|
PrecGeneralisedConjugateResidualNonHermitian<CoarseVector> L2PGCR(3.0e-2, 100, LinOpCoarse,simple,10,10);
|
||||||
|
L2PGCR.Level(3);
|
||||||
c_res=Zero();
|
c_res=Zero();
|
||||||
L2PGCR(c_src,c_res);
|
L2PGCR(c_src,c_res);
|
||||||
|
|
||||||
@ -347,11 +377,12 @@ int main (int argc, char ** argv)
|
|||||||
// Fine grid smoother
|
// Fine grid smoother
|
||||||
////////////////////////////////////////
|
////////////////////////////////////////
|
||||||
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
||||||
std::cout<<GridLogMessage<<" Fine Grid Smoother "<<std::endl;
|
std::cout<<GridLogMessage<<" Fine Grid Smoother -- Level 2 "<<std::endl;
|
||||||
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
std::cout<<GridLogMessage<<"******************* "<<std::endl;
|
||||||
TrivialPrecon<LatticeFermionD> simple_fine;
|
TrivialPrecon<LatticeFermionD> simple_fine;
|
||||||
// NonHermitianLinearOperator<PVdagM_t,LatticeFermionD> LinOpSmooth(PVdagM);
|
// NonHermitianLinearOperator<PVdagM_t,LatticeFermionD> LinOpSmooth(PVdagM);
|
||||||
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,10,ShiftedPVdagM,simple_fine,4,4);
|
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermionD> SmootherGCR(0.01,1,ShiftedPVdagM,simple_fine,16,16);
|
||||||
|
SmootherGCR.Level(2);
|
||||||
|
|
||||||
LatticeFermionD f_src(FGrid);
|
LatticeFermionD f_src(FGrid);
|
||||||
LatticeFermionD f_res(FGrid);
|
LatticeFermionD f_res(FGrid);
|
||||||
@ -364,12 +395,12 @@ int main (int argc, char ** argv)
|
|||||||
|
|
||||||
TwoLevelMG TwoLevelPrecon(AggregatesPD,
|
TwoLevelMG TwoLevelPrecon(AggregatesPD,
|
||||||
PVdagM,
|
PVdagM,
|
||||||
SmootherGCR,
|
simple_fine,
|
||||||
SmootherGCR,
|
SmootherGCR,
|
||||||
LinOpCoarse,
|
LinOpCoarse,
|
||||||
L2PGCR);
|
L2PGCR);
|
||||||
|
|
||||||
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,8,8);
|
PrecGeneralisedConjugateResidualNonHermitian<LatticeFermion> L1PGCR(1.0e-8,1000,PVdagM,TwoLevelPrecon,16,16);
|
||||||
L1PGCR.Level(1);
|
L1PGCR.Level(1);
|
||||||
|
|
||||||
f_res=Zero();
|
f_res=Zero();
|
||||||
|
Loading…
x
Reference in New Issue
Block a user