mirror of https://github.com/paboyle/Grid.git synced 2025-06-23 18:22:02 +01:00

Compare commits


9 Commits

SHA1        Message                                                                                        Date
0f1c5b08a1  Dirichlet filters running on AMD and now integrated in Fermion op                              2022-02-23 19:29:28 -05:00
70988e43d2  Passes multinode Dirichlet test with boundaries at node boundary or at the single-rank boundary  2022-02-23 01:42:14 -05:00
aab3bcb46f  Dirichlet first cut - wrong answers on dagger multiply. Struggling to get a compute node so changing systems  2022-02-22 19:58:33 +00:00
da06d15f73  Merge branch 'feature/feature/staggered-comms' into develop                                    2022-02-17 04:58:50 +00:00
e8b1251b8c  Staggered fix finished                                                                         2022-02-17 04:51:13 +00:00
fad5a74a4b  Bug fix to detection case                                                                      2022-02-15 10:27:39 -05:00
e83f6a6ae9  Merge branch 'develop' into feature/feature/staggered-comms                                   2022-02-15 08:52:39 -05:00
6283d11d50  Add the comment line to tell the existence of copied data/buffer                              2022-02-08 15:22:06 +00:00
6616d5d090  Commit                                                                                         2022-02-02 16:38:24 -05:00
23 changed files with 673 additions and 546 deletions

View File

@@ -53,10 +53,11 @@ public:
   // Communicator should know nothing of the physics grid, only processor grid.
   ////////////////////////////////////////////
   int _Nprocessors;            // How many in all
-  Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
   int _processor;              // linear processor rank
-  Coordinate _processor_coor;  // linear processor coordinate
   unsigned long _ndimension;
+  Coordinate _shm_processors;  // Which dimensions get relayed out over processors lanes.
+  Coordinate _processors;      // Which dimensions get relayed out over processors lanes.
+  Coordinate _processor_coor;  // linear processor coordinate
   static Grid_MPI_Comm communicator_world;
   Grid_MPI_Comm communicator;
   std::vector<Grid_MPI_Comm> communicator_halo;
@@ -97,6 +98,7 @@ public:
   int BossRank(void) ;
   int ThisRank(void) ;
   const Coordinate & ThisProcessorCoor(void) ;
+  const Coordinate & ShmGrid(void) { return _shm_processors; } ;
   const Coordinate & ProcessorGrid(void) ;
   int ProcessorCount(void) ;
@@ -142,16 +144,16 @@ public:
                int bytes);
   double StencilSendToRecvFrom(void *xmit,
-                               int xmit_to_rank,
+                               int xmit_to_rank,int do_xmit,
                               void *recv,
-                               int recv_from_rank,
+                               int recv_from_rank,int do_recv,
                               int bytes,int dir);
   double StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                    void *xmit,
-                                    int xmit_to_rank,
+                                    int xmit_to_rank,int do_xmit,
                                    void *recv,
-                                    int recv_from_rank,
+                                    int recv_from_rank,int do_recv,
                                    int bytes,int dir);

View File

@ -106,7 +106,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
// Remap using the shared memory optimising routine // Remap using the shared memory optimising routine
// The remap creates a comm which must be freed // The remap creates a comm which must be freed
//////////////////////////////////////////////////// ////////////////////////////////////////////////////
GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm); GlobalSharedMemory::OptimalCommunicator (processors,optimal_comm,_shm_processors);
InitFromMPICommunicator(processors,optimal_comm); InitFromMPICommunicator(processors,optimal_comm);
SetCommunicator(optimal_comm); SetCommunicator(optimal_comm);
/////////////////////////////////////////////////// ///////////////////////////////////////////////////
@ -124,12 +124,13 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension); int parent_ndimension = parent._ndimension; assert(_ndimension >= parent._ndimension);
Coordinate parent_processor_coor(_ndimension,0); Coordinate parent_processor_coor(_ndimension,0);
Coordinate parent_processors (_ndimension,1); Coordinate parent_processors (_ndimension,1);
Coordinate shm_processors (_ndimension,1);
// Can make 5d grid from 4d etc... // Can make 5d grid from 4d etc...
int pad = _ndimension-parent_ndimension; int pad = _ndimension-parent_ndimension;
for(int d=0;d<parent_ndimension;d++){ for(int d=0;d<parent_ndimension;d++){
parent_processor_coor[pad+d]=parent._processor_coor[d]; parent_processor_coor[pad+d]=parent._processor_coor[d];
parent_processors [pad+d]=parent._processors[d]; parent_processors [pad+d]=parent._processors[d];
shm_processors [pad+d]=parent._shm_processors[d];
} }
////////////////////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////////////////////
@ -154,6 +155,7 @@ CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const
ccoor[d] = parent_processor_coor[d] % processors[d]; ccoor[d] = parent_processor_coor[d] % processors[d];
scoor[d] = parent_processor_coor[d] / processors[d]; scoor[d] = parent_processor_coor[d] / processors[d];
ssize[d] = parent_processors[d] / processors[d]; ssize[d] = parent_processors[d] / processors[d];
if ( processors[d] < shm_processors[d] ) shm_processors[d] = processors[d]; // subnode splitting.
} }
// rank within subcomm ; srank is rank of subcomm within blocks of subcomms // rank within subcomm ; srank is rank of subcomm within blocks of subcomms
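The "subnode splitting" line above clamps the child communicator's shared-memory extent so it never exceeds the child grid itself. A stand-alone model of the split arithmetic (plain C++; the sizes are assumed example values, not taken from Grid):

#include <iostream>

int main() {
  // Assumed example: 8 ranks along one dimension, 4 of them sharing a node,
  // split into child communicators spanning 2 ranks each.
  int parent_processors = 8, shm_processors = 4, processors = 2;
  int parent_coor = 5;                          // this rank's coordinate in the parent grid

  int ccoor = parent_coor % processors;         // coordinate inside the subcomm
  int scoor = parent_coor / processors;         // which subcomm block we sit in
  int ssize = parent_processors / processors;   // number of subcomm blocks
  if (processors < shm_processors) shm_processors = processors; // subnode splitting

  std::cout << ccoor << " " << scoor << " " << ssize << " "
            << shm_processors << std::endl;     // prints: 1 2 4 2
  return 0;
}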
@@ -335,22 +337,22 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
 }
 // Basic Halo comms primitive
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int dest,
+                                                     int dest, int dox,
                                                     void *recv,
-                                                     int from,
+                                                     int from, int dor,
                                                     int bytes,int dir)
 {
   std::vector<CommsRequest_t> list;
-  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,recv,from,bytes,dir);
+  double offbytes = StencilSendToRecvFromBegin(list,xmit,dest,dox,recv,from,dor,bytes,dir);
   StencilSendToRecvFromComplete(list,dir);
   return offbytes;
 }
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                          void *xmit,
-                                                          int dest,
+                                                          int dest,int dox,
                                                          void *recv,
-                                                          int from,
+                                                          int from,int dor,
                                                          int bytes,int dir)
 {
   int ncomm =communicator_halo.size();
@@ -370,6 +372,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
   double off_node_bytes=0.0;
   int tag;
+  if ( dox ) {
     if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
       tag= dir+from*32;
       ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
@@ -377,7 +380,9 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
       list.push_back(rrq);
       off_node_bytes+=bytes;
     }
+  }
+  if (dor) {
     if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
       tag= dir+_processor*32;
       ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
@@ -391,6 +396,7 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
       // std::cout <<"acceleratorCopyDeviceToDeviceAsynch"<< std::endl;
       acceleratorCopyDeviceToDeviceAsynch(xmit,shm,bytes);
     }
+  }
   if ( CommunicatorPolicy == CommunicatorPolicySequential ) {
     this->StencilSendToRecvFromComplete(list,dir);
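The new do_xmit/do_recv flags let the send and receive halves of a halo exchange be suppressed independently, which is what the Dirichlet boundaries need. As rendered above, this first cut wraps MPI_Irecv in `if (dox)` and MPI_Isend in `if (dor)`, consistent with the "wrong answers on dagger multiply" caveat in the commit list. A minimal sketch of the intended gating, assuming the natural flag-to-call pairing (illustrative helper, not Grid code; call between MPI_Init and MPI_Finalize):

#include <mpi.h>
#include <vector>

// Post at most one receive and one send, each gated by its own flag.
void gated_halo(void *xmit, int dest, int do_xmit,
                void *recv, int from, int do_recv,
                int bytes, MPI_Comm comm,
                std::vector<MPI_Request> &list) {
  MPI_Request rq;
  if (do_recv) {  // Dirichlet face: no data expected, skip the receive
    MPI_Irecv(recv, bytes, MPI_CHAR, from, 0, comm, &rq);
    list.push_back(rq);
  }
  if (do_xmit) {  // Dirichlet face: neighbour ignores us, skip the send
    MPI_Isend(xmit, bytes, MPI_CHAR, dest, 0, comm, &rq);
    list.push_back(rq);
  }
}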

View File

@@ -45,12 +45,14 @@ void CartesianCommunicator::Init(int *argc, char *** arv)
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors,const CartesianCommunicator &parent,int &srank)
   : CartesianCommunicator(processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   srank=0;
   SetCommunicator(communicator_world);
 }
 CartesianCommunicator::CartesianCommunicator(const Coordinate &processors)
 {
+  _shm_processors = Coordinate(processors.size(),1);
   _processors = processors;
   _ndimension = processors.size();  assert(_ndimension>=1);
   _processor_coor.resize(_ndimension);
@@ -111,18 +113,18 @@ void CartesianCommunicator::ShiftedRanks(int dim,int shift,int &source,int &dest)
 }
 double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
-                                                     int xmit_to_rank,
+                                                     int xmit_to_rank,int dox,
                                                     void *recv,
-                                                     int recv_from_rank,
+                                                     int recv_from_rank,int dor,
                                                     int bytes, int dir)
 {
   return 2.0*bytes;
 }
 double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                          void *xmit,
-                                                          int xmit_to_rank,
+                                                          int xmit_to_rank,int dox,
                                                          void *recv,
-                                                          int recv_from_rank,
+                                                          int recv_from_rank,int dor,
                                                          int bytes, int dir)
 {
   return 2.0*bytes;

View File

@@ -93,9 +93,10 @@ public:
   // Create an optimal reordered communicator that makes MPI_Cart_create get it right
   //////////////////////////////////////////////////////////////////////////////////////
   static void Init(Grid_MPI_Comm comm); // Typically MPI_COMM_WORLD
-  static void OptimalCommunicator           (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorHypercube  (const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
-  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm); // Turns MPI_COMM_WORLD into right layout for Cartesian
+  // Turns MPI_COMM_WORLD into right layout for Cartesian
+  static void OptimalCommunicator           (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorHypercube  (const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
+  static void OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &ShmDims);
   static void GetShmDims(const Coordinate &WorldDims,Coordinate &ShmDims);
   ///////////////////////////////////////////////////
   // Provide shared memory facilities off comm world

View File

@@ -152,7 +152,7 @@ int Log2Size(int TwoToPower,int MAXLOG2)
   }
   return log2size;
 }
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   //////////////////////////////////////////////////////////////////////////////
   // Look and see if it looks like an HPE 8600 based on hostname conventions
@@ -165,8 +165,8 @@ void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_M
   gethostname(name,namelen);
   int nscan = sscanf(name,"r%di%dn%d",&R,&I,&N) ;
-  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm);
-  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm);
+  if(nscan==3 && HPEhypercube ) OptimalCommunicatorHypercube(processors,optimal_comm,SHM);
+  else                          OptimalCommunicatorSharedMemory(processors,optimal_comm,SHM);
 }
 static inline int divides(int a,int b)
 {
@@ -221,7 +221,7 @@ void GlobalSharedMemory::GetShmDims(const Coordinate &WorldDims,Coordinate &ShmD
     dim=(dim+1) %ndimension;
   }
 }
-void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Assert power of two shm_size.
@@ -294,6 +294,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
   Coordinate HyperCoor(ndimension);
   GetShmDims(WorldDims,ShmDims);
+  SHM = ShmDims;
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings
@@ -341,7 +342,7 @@ void GlobalSharedMemory::OptimalCommunicatorHypercube(const Coordinate &processo
   int ierr= MPI_Comm_split(WorldComm,0,rank,&optimal_comm);
   assert(ierr==0);
 }
-void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   ////////////////////////////////////////////////////////////////
   // Identify subblock of ranks on node spreading across dims
@@ -353,6 +354,8 @@ void GlobalSharedMemory::OptimalCommunicatorSharedMemory(const Coordinate &proce
   Coordinate ShmCoor(ndimension); Coordinate NodeCoor(ndimension); Coordinate WorldCoor(ndimension);
   GetShmDims(WorldDims,ShmDims);
+  SHM=ShmDims;
+
   ////////////////////////////////////////////////////////////////
   // Establish torus of processes and nodes with sub-blockings
   ////////////////////////////////////////////////////////////////

View File

@@ -48,9 +48,10 @@ void GlobalSharedMemory::Init(Grid_MPI_Comm comm)
   _ShmSetup=1;
 }
-void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm)
+void GlobalSharedMemory::OptimalCommunicator(const Coordinate &processors,Grid_MPI_Comm & optimal_comm,Coordinate &SHM)
 {
   optimal_comm = WorldComm;
+  SHM = Coordinate(processors.size(),1);
 }
 ////////////////////////////////////////////////////////////////////////////////////////////

View File

@@ -142,15 +142,6 @@ inline typename vobj::scalar_objectD sumD(const vobj *arg, Integer osites)
   return sumD_cpu(arg,osites);
 #endif
 }
-template<class vobj>
-inline typename vobj::scalar_objectD sumD_large(const vobj *arg, Integer osites)
-{
-#if defined(GRID_CUDA)||defined(GRID_HIP)
-  return sumD_gpu_large(arg,osites);
-#else
-  return sumD_cpu(arg,osites);
-#endif
-}
 template<class vobj>
 inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
@@ -168,22 +159,6 @@ inline typename vobj::scalar_object sum(const Lattice<vobj> &arg)
   return ssum;
 }
-template<class vobj>
-inline typename vobj::scalar_object sum_large(const Lattice<vobj> &arg)
-{
-#if defined(GRID_CUDA)||defined(GRID_HIP)
-  autoView( arg_v, arg, AcceleratorRead);
-  Integer osites = arg.Grid()->oSites();
-  auto ssum= sum_gpu_large(&arg_v[0],osites);
-#else
-  autoView(arg_v, arg, CpuRead);
-  Integer osites = arg.Grid()->oSites();
-  auto ssum= sum_cpu(&arg_v[0],osites);
-#endif
-  arg.Grid()->GlobalSum(ssum);
-  return ssum;
-}
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // Deterministic Reduction operations
 ////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@@ -23,7 +23,7 @@ unsigned int nextPow2(Iterator x) {
 }
 template <class Iterator>
-int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &threads, Iterator &blocks) {
+void getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &threads, Iterator &blocks) {
   int device;
 #ifdef GRID_CUDA
@@ -37,13 +37,13 @@ int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &
   Iterator sharedMemPerBlock   = gpu_props[device].sharedMemPerBlock;
   Iterator maxThreadsPerBlock  = gpu_props[device].maxThreadsPerBlock;
   Iterator multiProcessorCount = gpu_props[device].multiProcessorCount;
-
+  /*
   std::cout << GridLogDebug << "GPU has:" << std::endl;
   std::cout << GridLogDebug << "\twarpSize            = " << warpSize << std::endl;
   std::cout << GridLogDebug << "\tsharedMemPerBlock   = " << sharedMemPerBlock << std::endl;
   std::cout << GridLogDebug << "\tmaxThreadsPerBlock  = " << maxThreadsPerBlock << std::endl;
   std::cout << GridLogDebug << "\tmultiProcessorCount = " << multiProcessorCount << std::endl;
-
+  */
   if (warpSize != WARP_SIZE) {
     std::cout << GridLogError << "The warp size of the GPU in use does not match the warp size set when compiling Grid." << std::endl;
     exit(EXIT_FAILURE);
@@ -53,12 +53,12 @@ int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &
   threads = warpSize;
   if ( threads*sizeofsobj > sharedMemPerBlock ) {
     std::cout << GridLogError << "The object is too large for the shared memory." << std::endl;
-    return 0;
+    exit(EXIT_FAILURE);
   }
   while( 2*threads*sizeofsobj < sharedMemPerBlock && 2*threads <= maxThreadsPerBlock ) threads *= 2;
   // keep all the streaming multiprocessors busy
   blocks = nextPow2(multiProcessorCount);
-  return 1;
 }
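getNumBlocksAndThreads now aborts instead of reporting failure, since the sumD_gpu_small/sumD_gpu_large split that consumed its return code is removed below. The sizing loop doubles the thread count while the per-block reduction scratch still fits in shared memory; a stand-alone model of that loop (plain C++; the device limits are assumed example values, not queried from a GPU):

#include <cstdio>

int main() {
  long warpSize = 32, sharedMemPerBlock = 48 * 1024, maxThreadsPerBlock = 1024;
  long sizeofsobj = 72; // bytes per reduced object (example value)

  long threads = warpSize;
  // Grow the block while the per-block scratch still fits in shared memory.
  while (2 * threads * sizeofsobj < sharedMemPerBlock &&
         2 * threads <= maxThreadsPerBlock)
    threads *= 2;
  std::printf("threads per block = %ld\n", threads); // 512 with these numbers
  return 0;
}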
 template <class sobj, class Iterator>
@@ -198,7 +198,7 @@ __global__ void reduceKernel(const vobj *lat, sobj *buffer, Iterator n) {
 // Possibly promote to double and sum
 /////////////////////////////////////////////////////////////////////////////////////////////////////////
 template <class vobj>
-inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osites)
+inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
 {
   typedef typename vobj::scalar_objectD sobj;
   typedef decltype(lat) Iterator;
@@ -207,9 +207,7 @@ inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osites)
   Integer size = osites*nsimd;
   Integer numThreads, numBlocks;
-  int ok = getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
-  assert(ok);
+  getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
   Integer smemSize = numThreads * sizeof(sobj);
   Vector<sobj> buffer(numBlocks);
@@ -220,54 +218,6 @@ inline typename vobj::scalar_objectD sumD_gpu_small(const vobj *lat, Integer osites)
   auto result = buffer_v[0];
   return result;
 }
-template <class vobj>
-inline typename vobj::scalar_objectD sumD_gpu_large(const vobj *lat, Integer osites)
-{
-  typedef typename vobj::vector_type  vector;
-  typedef typename vobj::scalar_typeD scalarD;
-  typedef typename vobj::scalar_objectD sobj;
-  sobj ret;
-  scalarD *ret_p = (scalarD *)&ret;
-  const int words = sizeof(vobj)/sizeof(vector);
-  Vector<vector> buffer(osites);
-  vector *dat = (vector *)lat;
-  vector *buf = &buffer[0];
-  iScalar<vector> *tbuf =(iScalar<vector> *) &buffer[0];
-  for(int w=0;w<words;w++) {
-    accelerator_for(ss,osites,1,{
-        buf[ss] = dat[ss*words+w];
-      });
-    ret_p[w] = sumD_gpu_small(tbuf,osites);
-  }
-  return ret;
-}
-template <class vobj>
-inline typename vobj::scalar_objectD sumD_gpu(const vobj *lat, Integer osites)
-{
-  typedef typename vobj::vector_type  vector;
-  typedef typename vobj::scalar_typeD scalarD;
-  typedef typename vobj::scalar_objectD sobj;
-  sobj ret;
-  Integer nsimd= vobj::Nsimd();
-  Integer size = osites*nsimd;
-  Integer numThreads, numBlocks;
-  int ok = getNumBlocksAndThreads(size, sizeof(sobj), numThreads, numBlocks);
-  if ( ok ) {
-    ret = sumD_gpu_small(lat,osites);
-  } else {
-    ret = sumD_gpu_large(lat,osites);
-  }
-  return ret;
-}
 /////////////////////////////////////////////////////////////////////////////////////////////////////////
 // Return as same precision as input performing reduction in double precision though
 /////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -280,13 +230,6 @@ inline typename vobj::scalar_object sum_gpu(const vobj *lat, Integer osites)
   return result;
 }
-template <class vobj>
-inline typename vobj::scalar_object sum_gpu_large(const vobj *lat, Integer osites)
-{
-  typedef typename vobj::scalar_object sobj;
-  sobj result;
-  result = sumD_gpu_large(lat,osites);
-  return result;
-}
 NAMESPACE_END(Grid);

View File

@@ -37,6 +37,9 @@ NAMESPACE_CHECK(ActionSet);
 #include <Grid/qcd/action/ActionParams.h>
 NAMESPACE_CHECK(ActionParams);
+#include <Grid/qcd/action/filters/MomentumFilter.h>
+#include <Grid/qcd/action/filters/DirichletFilter.h>
 ////////////////////////////////////////////
 // Gauge Actions
 ////////////////////////////////////////////

View File

@@ -49,6 +49,8 @@ public:
   virtual FermionField &tmp(void) = 0;
+  virtual void DirichletBlock(Coordinate & _Block) { assert(0); };
   GridBase * Grid(void)         { return FermionGrid(); };   // this is all the linalg routines need to know
   GridBase * RedBlackGrid(void) { return FermionRedBlackGrid(); };

View File

@@ -726,8 +726,8 @@ public:
   static strong_inline void ApplyBoundaryMask(Field& f, const Mask& m) {
     conformable(f, m);
     auto grid  = f.Grid();
-    const uint32_t Nsite = grid->oSites();
-    const uint32_t Nsimd = grid->Nsimd();
+    const int Nsite = grid->oSites();
+    const int Nsimd = grid->Nsimd();
     autoView(f_v, f, AcceleratorWrite);
     autoView(m_v, m, AcceleratorRead);
     // NOTE: this function cannot be 'private' since nvcc forbids this for kernels

View File

@@ -75,6 +75,10 @@ public:
   FermionField _tmp;
   FermionField &tmp(void) { return _tmp; }
+  int Dirichlet;
+  Coordinate Block;
+
+  /********** Deprecate timers **********/
   void Report(void);
   void ZeroCounters(void);
   double DhopCalls;
@@ -174,6 +178,17 @@ public:
                  GridRedBlackCartesian &FourDimRedBlackGrid,
                  double _M5,const ImplParams &p= ImplParams());
+  virtual void DirichletBlock(Coordinate & block)
+  {
+    assert(block.size()==Nd+1);
+    if ( block[0] || block[1] || block[2] || block[3] || block[4] ){
+      Dirichlet = 1;
+      Block = block;
+      Stencil.DirichletBlock(block);
+      StencilEven.DirichletBlock(block);
+      StencilOdd.DirichletBlock(block);
+    }
+  }
   // Constructors
 /*
   WilsonFermion5D(int simd,
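A hypothetical call site for the new hook (not part of this diff; Ddwf stands for an already-constructed WilsonFermion5D object). Since ImportGauge below maps Block[d+1] onto the 4d gauge block, block[0] is the fifth (s) direction and a zero entry leaves that direction open:

Coordinate block({0,8,8,8,8});  // {s,x,y,z,t}: 8^4 blocking, s left open
Ddwf.DirichletBlock(block);     // installs masks in Stencil/StencilEven/StencilOdd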

View File

@@ -60,7 +60,8 @@ WilsonFermion5D<Impl>::WilsonFermion5D(GaugeField &_Umu,
   UmuOdd (_FourDimRedBlackGrid),
   Lebesgue(_FourDimGrid),
   LebesgueEvenOdd(_FourDimRedBlackGrid),
-  _tmp(&FiveDimRedBlackGrid)
+  _tmp(&FiveDimRedBlackGrid),
+  Dirichlet(0)
 {
   // some assertions
   assert(FiveDimGrid._ndimension==5);
@@ -218,6 +219,14 @@ void WilsonFermion5D<Impl>::ImportGauge(const GaugeField &_Umu)
 {
   GaugeField HUmu(_Umu.Grid());
   HUmu = _Umu*(-0.5);
+  if ( Dirichlet ) {
+    std::cout << GridLogMessage << " Dirichlet BCs 5d " <<Block<<std::endl;
+    Coordinate GaugeBlock(Nd);
+    for(int d=0;d<Nd;d++) GaugeBlock[d] = Block[d+1];
+    std::cout << GridLogMessage << " Dirichlet BCs 4d " <<GaugeBlock<<std::endl;
+    DirichletFilter<GaugeField> Filter(GaugeBlock);
+    Filter.applyFilter(HUmu);
+  }
   Impl::DoubleStore(GaugeGrid(),Umu,HUmu);
   pickCheckerboard(Even,UmuEven,Umu);
   pickCheckerboard(Odd ,UmuOdd,Umu);

View File

@ -0,0 +1,71 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/hmc/integrators/DirichletFilter.h
Copyright (C) 2015
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
//--------------------------------------------------------------------
#pragma once
NAMESPACE_BEGIN(Grid);
template<typename MomentaField>
struct DirichletFilter: public MomentumFilterBase<MomentaField>
{
typedef typename MomentaField::vector_type vector_type; //SIMD-vectorized complex type
typedef typename MomentaField::scalar_type scalar_type; //scalar complex type
typedef iScalar<iScalar<iScalar<vector_type> > > ScalarType; //complex phase for each site
Coordinate Block;
DirichletFilter(const Coordinate &_Block): Block(_Block){}
void applyFilter(MomentaField &P) const override
{
GridBase *grid = P.Grid();
typedef decltype(PeekIndex<LorentzIndex>(P, 0)) LatCM;
////////////////////////////////////////////////////
// Zero strictly links crossing between domains
////////////////////////////////////////////////////
LatticeInteger coor(grid);
LatCM zz(grid); zz = Zero();
for(int mu=0;mu<Nd;mu++) {
if ( (Block[mu]) && (Block[mu] < grid->GlobalDimensions()[mu] ) ) {
// If costly could provide Grid earlier and precompute masks
std::cout << " Dirichlet in mu="<<mu<<std::endl;
LatticeCoordinate(coor,mu);
auto P_mu = PeekIndex<LorentzIndex>(P, mu);
P_mu = where(mod(coor,Block[mu])==Integer(Block[mu]-1),zz,P_mu);
PokeIndex<LorentzIndex>(P, P_mu, mu);
}
}
}
};
NAMESPACE_END(Grid);
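Applying the filter mirrors the ImportGauge hunk earlier in this compare; a minimal usage sketch (assumes an initialised Grid environment; Umu is a placeholder for an existing gauge field, not taken from this diff):

Coordinate Block({8,8,8,8});                       // zero links crossing 8^4 domain walls
DirichletFilter<LatticeGaugeField> Filter(Block);
Filter.applyFilter(Umu);                           // links interior to a block are untouched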

View File

@@ -33,7 +33,6 @@ directory
 #define INTEGRATOR_INCLUDED
 #include <memory>
-#include "MomentumFilter.h"
 NAMESPACE_BEGIN(Grid);

View File

@@ -131,8 +131,11 @@ class CartesianStencilAccelerator {
   int _checkerboard;
   int _npoints; // Move to template param?
   int _osites;
+  int _dirichlet;
   StencilVector _directions;
   StencilVector _distances;
+  StencilVector _comms_send;
+  StencilVector _comms_recv;
   StencilVector _comm_buf_size;
   StencilVector _permute_type;
   StencilVector same_node;
@@ -226,6 +229,8 @@ public:
     void * recv_buf;
     Integer to_rank;
     Integer from_rank;
+    Integer do_send;
+    Integer do_recv;
     Integer bytes;
   };
   struct Merge {
@@ -240,7 +245,20 @@ public:
     cobj * mpi_p;
     Integer buffer_size;
   };
+  struct CopyReceiveBuffer {
+    void * from_p;
+    void * to_p;
+    Integer bytes;
+  };
+  struct CachedTransfer {
+    Integer direction;
+    Integer OrthogPlane;
+    Integer DestProc;
+    Integer bytes;
+    Integer lane;
+    Integer cb;
+    void *recv_buf;
+  };
 protected:
   GridBase * _grid;
@@ -271,7 +289,8 @@ public:
   std::vector<Merge> MergersSHM;
   std::vector<Decompress> Decompressions;
   std::vector<Decompress> DecompressionsSHM;
+  std::vector<CopyReceiveBuffer> CopyReceiveBuffers ;
+  std::vector<CachedTransfer>    CachedTransfers;
   ///////////////////////////////////////////////////////////
   // Unified Comms buffers for all directions
   ///////////////////////////////////////////////////////////
@@ -284,29 +303,6 @@ public:
   int u_comm_offset;
   int _unified_buffer_size;
-  /////////////////////////////////////////
-  // Timing info; ugly; possibly temporary
-  /////////////////////////////////////////
-  double commtime;
-  double mpi3synctime;
-  double mpi3synctime_g;
-  double shmmergetime;
-  double gathertime;
-  double gathermtime;
-  double halogtime;
-  double mergetime;
-  double decompresstime;
-  double comms_bytes;
-  double shm_bytes;
-  double splicetime;
-  double nosplicetime;
-  double calls;
-  std::vector<double> comm_bytes_thr;
-  std::vector<double> shm_bytes_thr;
-  std::vector<double> comm_time_thr;
-  std::vector<double> comm_enter_thr;
-  std::vector<double> comm_leave_thr;
   ////////////////////////////////////////
   // Stencil query
   ////////////////////////////////////////
@@ -333,11 +329,12 @@ public:
   //////////////////////////////////////////
   // Comms packet queue for asynch thread
   // Use OpenMP Tasks for cleaner ???
+  // must be called *inside* parallel region
   //////////////////////////////////////////
+  /*
   void CommunicateThreaded()
   {
 #ifdef GRID_OMP
-    // must be called in parallel region
     int mythread = omp_get_thread_num();
     int nthreads = CartesianCommunicator::nCommThreads;
 #else
@@ -346,65 +343,29 @@ public:
 #endif
     if (nthreads == -1) nthreads = 1;
     if (mythread < nthreads) {
-      comm_enter_thr[mythread] = usecond();
       for (int i = mythread; i < Packets.size(); i += nthreads) {
        uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                      Packets[i].to_rank,
                                                      Packets[i].recv_buf,
                                                      Packets[i].from_rank,
                                                      Packets[i].bytes,i);
-       comm_bytes_thr[mythread] += bytes;
-       shm_bytes_thr[mythread]  += 2*Packets[i].bytes-bytes; // Send + Recv.
-      }
-      comm_leave_thr[mythread]= usecond();
-      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
+      }
     }
   }
-  void CollateThreads(void)
-  {
-    int nthreads = CartesianCommunicator::nCommThreads;
-    double first=0.0;
-    double last =0.0;
-    for(int t=0;t<nthreads;t++) {
-      double t0 = comm_enter_thr[t];
-      double t1 = comm_leave_thr[t];
-      comms_bytes+=comm_bytes_thr[t];
-      shm_bytes  +=shm_bytes_thr[t];
-      comm_enter_thr[t] = 0.0;
-      comm_leave_thr[t] = 0.0;
-      comm_time_thr[t]  = 0.0;
-      comm_bytes_thr[t]=0;
-      shm_bytes_thr[t]=0;
-      if ( first == 0.0 ) first = t0;                 // first is t0
-      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen
-      if ( t1 > last  ) last = t1;                    // max time seen
-    }
-    commtime+= last-first;
-  }
+  */
 ////////////////////////////////////////////////////////////////////////
 // Non blocking send and receive. Necessarily parallel.
 ////////////////////////////////////////////////////////////////////////
 void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
 {
   reqs.resize(Packets.size());
-  commtime-=usecond();
   for(int i=0;i<Packets.size();i++){
-    uint64_t bytes=_grid->StencilSendToRecvFromBegin(reqs[i],
-                                                     Packets[i].send_buf,
-                                                     Packets[i].to_rank,
-                                                     Packets[i].recv_buf,
-                                                     Packets[i].from_rank,
-                                                     Packets[i].bytes,i);
-    comms_bytes+=bytes;
-    shm_bytes  +=2*Packets[i].bytes-bytes;
+    _grid->StencilSendToRecvFromBegin(reqs[i],
                                      Packets[i].send_buf,
+                                      Packets[i].to_rank,Packets[i].do_send,
                                      Packets[i].recv_buf,
+                                      Packets[i].from_rank,Packets[i].do_recv,
                                      Packets[i].bytes,i);
   }
 }
@@ -413,7 +374,6 @@ public:
   for(int i=0;i<Packets.size();i++){
     _grid->StencilSendToRecvFromComplete(reqs[i],i);
   }
-  commtime+=usecond();
 }
 ////////////////////////////////////////////////////////////////////////
 // Blocking send and receive. Either sequential or parallel.
@@ -421,28 +381,27 @@ public:
 void Communicate(void)
 {
   if ( CartesianCommunicator::CommunicatorPolicy == CartesianCommunicator::CommunicatorPolicySequential ){
-    thread_region {
-      // must be called in parallel region
-      int mythread  = thread_num();
-      int maxthreads= thread_max();
-      int nthreads = CartesianCommunicator::nCommThreads;
-      assert(nthreads <= maxthreads);
-      if (nthreads == -1) nthreads = 1;
-      if (mythread < nthreads) {
-        for (int i = mythread; i < Packets.size(); i += nthreads) {
-          double start = usecond();
-          uint64_t bytes= _grid->StencilSendToRecvFrom(Packets[i].send_buf,
-                                                       Packets[i].to_rank,
-                                                       Packets[i].recv_buf,
-                                                       Packets[i].from_rank,
-                                                       Packets[i].bytes,i);
-          comm_bytes_thr[mythread] += bytes;
-          shm_bytes_thr[mythread]  += Packets[i].bytes - bytes;
-          comm_time_thr[mythread]  += usecond() - start;
-        }
-      }
-    }
-  } else { // Concurrent and non-threaded asynch calls to MPI
+    /////////////////////////////////////////////////////////
+    // several way threaded on different communicators.
+    // Cannot combine with Dirichlet operators
+    // This scheme is needed on Intel Omnipath for best performance
+    // Deprecate once there are very few omnipath clusters
+    /////////////////////////////////////////////////////////
+    int nthreads = CartesianCommunicator::nCommThreads;
+    int old = GridThread::GetThreads();
+    GridThread::SetThreads(nthreads);
+    thread_for(i,Packets.size(),{
+        _grid->StencilSendToRecvFrom(Packets[i].send_buf,
+                                     Packets[i].to_rank,Packets[i].do_send,
+                                     Packets[i].recv_buf,
+                                     Packets[i].from_rank,Packets[i].do_recv,
+                                     Packets[i].bytes,i);
+    });
+    GridThread::SetThreads(old);
+  } else {
+    /////////////////////////////////////////////////////////
+    // Concurrent and non-threaded asynch calls to MPI
+    /////////////////////////////////////////////////////////
     std::vector<std::vector<CommsRequest_t> > reqs;
     this->CommunicateBegin(reqs);
     this->CommunicateComplete(reqs);
@@ -484,31 +443,23 @@ public:
   sshift[1] = _grid->CheckerBoardShiftForCB(this->_checkerboard,dimension,shift,Odd);
   if ( sshift[0] == sshift[1] ) {
     if (splice_dim) {
-      splicetime-=usecond();
-      auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
+      auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx,point);
       is_same_node = is_same_node && tmp;
-      splicetime+=usecond();
     } else {
-      nosplicetime-=usecond();
-      auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
+      auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx,point);
       is_same_node = is_same_node && tmp;
-      nosplicetime+=usecond();
     }
   } else {
     if(splice_dim){
-      splicetime-=usecond();
       // if checkerboard is unfavourable take two passes
       // both with block stride loop iteration
-      auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
-      auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
+      auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx,point);
+      auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx,point);
       is_same_node = is_same_node && tmp1 && tmp2;
-      splicetime+=usecond();
     } else {
-      nosplicetime-=usecond();
-      auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
-      auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
+      auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx,point);
+      auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx,point);
       is_same_node = is_same_node && tmp1 && tmp2;
-      nosplicetime+=usecond();
     }
   }
 }
@@ -518,13 +469,10 @@ public:
 template<class compressor>
 void HaloGather(const Lattice<vobj> &source,compressor &compress)
 {
-  mpi3synctime_g-=usecond();
   _grid->StencilBarrier();// Synch shared memory on a single nodes
-  mpi3synctime_g+=usecond();
   // conformable(source.Grid(),_grid);
   assert(source.Grid()==_grid);
-  halogtime-=usecond();
   u_comm_offset=0;
@@ -538,7 +486,6 @@ public:
   assert(u_comm_offset==_unified_buffer_size);
   accelerator_barrier();
-  halogtime+=usecond();
 }
 /////////////////////////
@@ -551,14 +498,72 @@ public:
   Mergers.resize(0);
   MergersSHM.resize(0);
   Packets.resize(0);
-  calls++;
+  CopyReceiveBuffers.resize(0);
+  CachedTransfers.resize(0);
 }
-void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
+void AddCopy(void *from,void * to, Integer bytes)
+{
+  //    std::cout << "Adding CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<bytes<<std::endl;
+  CopyReceiveBuffer obj;
+  obj.from_p = from;
+  obj.to_p   = to;
+  obj.bytes  = bytes;
+  CopyReceiveBuffers.push_back(obj);
+}
+void CommsCopy()
+{
+  // These are device resident MPI buffers.
+  for(int i=0;i<CopyReceiveBuffers.size();i++){
+    cobj *from=(cobj *)CopyReceiveBuffers[i].from_p;
+    cobj *to  =(cobj *)CopyReceiveBuffers[i].to_p;
+    Integer words = CopyReceiveBuffers[i].bytes/sizeof(cobj);
+    //      std::cout << "CopyReceiveBuffer "<<std::hex<<from<<" "<<to<<std::dec<<" "<<words*sizeof(cobj)<<std::endl;
+    accelerator_forNB(j, words, cobj::Nsimd(), {
+        coalescedWrite(to[j] ,coalescedRead(from [j]));
+    });
+  }
+}
+Integer CheckForDuplicate(Integer direction, Integer OrthogPlane, Integer DestProc, void *recv_buf,Integer lane,Integer bytes,Integer cb)
+{
+  CachedTransfer obj;
+  obj.direction   = direction;
+  obj.OrthogPlane = OrthogPlane;
+  obj.DestProc    = DestProc;
+  obj.recv_buf    = recv_buf;
+  obj.lane        = lane;
+  obj.bytes       = bytes;
+  obj.cb          = cb;
+  for(int i=0;i<CachedTransfers.size();i++){
+    if ( (CachedTransfers[i].direction  ==direction)
+         &&(CachedTransfers[i].OrthogPlane==OrthogPlane)
+         &&(CachedTransfers[i].DestProc   ==DestProc)
+         &&(CachedTransfers[i].bytes      ==bytes)
+         &&(CachedTransfers[i].lane       ==lane)
+         &&(CachedTransfers[i].cb         ==cb)
+         ){
+      //	std::cout << "Found duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<< " bytes "<<bytes <<std::endl;
+      AddCopy(CachedTransfers[i].recv_buf,recv_buf,bytes);
+      return 1;
+    }
+  }
+  //    std::cout << "No duplicate plane dir "<<direction<<" plane "<< OrthogPlane<< " simd "<<lane << " relproc "<<DestProc<<" bytes "<<bytes<<std::endl;
+  CachedTransfers.push_back(obj);
+  return 0;
+}
+void AddPacket(void *xmit,void * rcv,
+               Integer to, Integer do_send,
+               Integer from, Integer do_recv,
+               Integer bytes){
   Packet p;
   p.send_buf = xmit;
   p.recv_buf = rcv;
   p.to_rank  = to;
   p.from_rank= from;
+  p.do_send  = do_send;
+  p.do_recv  = do_recv;
   p.bytes    = bytes;
   Packets.push_back(p);
@@ -578,22 +583,17 @@ public:
   mv.push_back(m);
 }
 template<class decompressor> void CommsMerge(decompressor decompress) {
+  CommsCopy();
   CommsMerge(decompress,Mergers,Decompressions);
 }
 template<class decompressor> void CommsMergeSHM(decompressor decompress) {
-  mpi3synctime-=usecond();
   _grid->StencilBarrier();// Synch shared memory on a single nodes
-  mpi3synctime+=usecond();
-  shmmergetime-=usecond();
   CommsMerge(decompress,MergersSHM,DecompressionsSHM);
-  shmmergetime+=usecond();
 }
 template<class decompressor>
-void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {
-  mergetime-=usecond();
+void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd)
+{
   for(int i=0;i<mm.size();i++){
     auto mp = &mm[i].mpointer[0];
     auto vp0= &mm[i].vpointers[0][0];
@@ -603,9 +603,7 @@ public:
       decompress.Exchange(mp,vp0,vp1,type,o);
     });
   }
-  mergetime+=usecond();
-  decompresstime-=usecond();
   for(int i=0;i<dd.size();i++){
     auto kp = dd[i].kernel_p;
     auto mp = dd[i].mpi_p;
@@ -613,7 +611,6 @@ public:
       decompress.Decompress(kp,mp,o);
     });
   }
-  decompresstime+=usecond();
 }
 ////////////////////////////////////////
 // Set up routines
@@ -650,19 +647,58 @@ public:
     }
   }
 }
+/// Introduce a block structure and switch off comms on boundaries
+void DirichletBlock(const Coordinate &dirichlet_block)
+{
+  this->_dirichlet = 1;
+  for(int ii=0;ii<this->_npoints;ii++){
+    int dimension    = this->_directions[ii];
+    int displacement = this->_distances[ii];
+    int shift = displacement;
+    int gd = _grid->_gdimensions[dimension];
+    int fd = _grid->_fdimensions[dimension];
+    int pd = _grid->_processors [dimension];
+    int ld = gd/pd;
+    int pc = _grid->_processor_coor[dimension];
+    ///////////////////////////////////////////
+    // Figure out dirichlet send and receive
+    // on this leg of stencil.
+    ///////////////////////////////////////////
+    int comm_dim = _grid->_processors[dimension] >1 ;
+    int block    = dirichlet_block[dimension];
+    this->_comms_send[ii] = comm_dim;
+    this->_comms_recv[ii] = comm_dim;
+    if ( block ) {
+      assert(abs(displacement) < ld );
+      if( displacement > 0 ) {
+        // High side, low side
+        // | <--B--->|
+        // | | |
+        //           noR
+        //           noS
+        if ( (ld*(pc+1) ) % block == 0 ) this->_comms_recv[ii] = 0;
+        if ( ( ld*pc    ) % block == 0 ) this->_comms_send[ii] = 0;
+      } else {
+        // High side, low side
+        // | <--B--->|
+        // | | |
+        // noS
+        // noR
+        if ( (ld*(pc+1) ) % block == 0 ) this->_comms_send[ii] = 0;
+        if ( ( ld*pc    ) % block == 0 ) this->_comms_recv[ii] = 0;
+      }
+    }
+  }
+}
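The modular tests above disable a leg's send or receive exactly when the face it crosses lands on a domain wall of width block. A stand-alone check of the arithmetic (plain C++; ld and block are assumed example values):

#include <iostream>

int main() {
  // With local extent ld=8 and block=8, every rank owns exactly one domain,
  // so both send and receive switch off for the displacement>0 leg.
  int ld = 8, block = 8;
  for (int pc = 0; pc < 4; pc++) { // processor coordinate along the dimension
    int comms_recv = ((ld * (pc + 1)) % block != 0);
    int comms_send = ((ld * pc) % block != 0);
    std::cout << "pc=" << pc << " send=" << comms_send
              << " recv=" << comms_recv << "\n"; // all zero here
  }
  return 0;
}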
 CartesianStencil(GridBase *grid,
                  int npoints,
                  int checkerboard,
                  const std::vector<int> &directions,
                  const std::vector<int> &distances,
                  Parameters p)
-  : shm_bytes_thr(npoints),
-    comm_bytes_thr(npoints),
-    comm_enter_thr(npoints),
-    comm_leave_thr(npoints),
-    comm_time_thr(npoints)
 {
+  this->_dirichlet = 0;
   face_table_computed=0;
   _grid    = grid;
   this->parameters=p;
@@ -675,6 +711,8 @@ public:
   this->_simd_layout = _grid->_simd_layout; // copy simd_layout to give access to Accelerator Kernels
   this->_directions  = StencilVector(directions);
   this->_distances   = StencilVector(distances);
+  this->_comms_send.resize(npoints);
+  this->_comms_recv.resize(npoints);
   this->same_node.resize(npoints);
   _unified_buffer_size=0;
@@ -693,24 +731,27 @@ public:
   int displacement = distances[i];
   int shift = displacement;
+  int gd = _grid->_gdimensions[dimension];
   int fd = _grid->_fdimensions[dimension];
+  int pd = _grid->_processors [dimension];
+  int ld = gd/pd;
   int rd = _grid->_rdimensions[dimension];
+  int pc = _grid->_processor_coor[dimension];
   this->_permute_type[point]=_grid->PermuteType(dimension);
   this->_checkerboard = checkerboard;
-  //////////////////////////
-  // the permute type
-  //////////////////////////
   int simd_layout = _grid->_simd_layout[dimension];
   int comm_dim    = _grid->_processors[dimension] >1 ;
   int splice_dim  = _grid->_simd_layout[dimension]>1 && (comm_dim);
   int rotate_dim  = _grid->_simd_layout[dimension]>2;
+  this->_comms_send[ii] = comm_dim;
+  this->_comms_recv[ii] = comm_dim;
   assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported
   int sshift[2];
   //////////////////////////
   // Underlying approach. For each local site build
   // up a table containing the npoint "neighbours" and whether they
@@ -811,6 +852,7 @@ public:
   GridBase *grid=_grid;
   const int Nsimd = grid->Nsimd();
+  int comms_recv = this->_comms_recv[point];
   int fd = _grid->_fdimensions[dimension];
   int ld = _grid->_ldimensions[dimension];
   int rd = _grid->_rdimensions[dimension];
@ -867,7 +909,9 @@ public:
if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) { if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
wraparound = 1; wraparound = 1;
} }
if (!offnode) {
// Wrap locally dirichlet support case OR node local
if ( (offnode==0) || (comms_recv==0) ) {
int permute_slice=0; int permute_slice=0;
CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound); CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);
@@ -984,11 +1028,14 @@ public:
 }
 template<class compressor>
-int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
+int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx, int point)
 {
   typedef typename cobj::vector_type vector_type;
   typedef typename cobj::scalar_type scalar_type;
+  int comms_send = this->_comms_send[point] ;
+  int comms_recv = this->_comms_recv[point] ;
   assert(rhs.Grid()==_grid);
   //	  conformable(_grid,rhs.Grid());
@@ -1014,6 +1061,8 @@ public:
   if (comm_proc) {
     int words = buffer_size;
     if (cbmask != 0x3) words=words>>1;
@@ -1045,16 +1094,20 @@ public:
     recv_buf=this->u_recv_buf_p;
   }
   cobj *send_buf;
   send_buf = this->u_send_buf_p; // Gather locally, must send
   ////////////////////////////////////////////////////////
   // Gather locally
   ////////////////////////////////////////////////////////
-  gathertime-=usecond();
   assert(send_buf!=NULL);
-  Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
-  gathertime+=usecond();
+  if ( comms_send )
+    Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so);
+  face_idx++;
+  int duplicate = CheckForDuplicate(dimension,sx,comm_proc,(void *)&recv_buf[u_comm_offset],0,bytes,cbmask);
+  if ( (!duplicate) ) { // Force comms for now
   ///////////////////////////////////////////////////////////
   // Build a list of things to do after we synchronise GPUs
@@ -1062,11 +1115,12 @@ public:
   ///////////////////////////////////////////////////////////
   AddPacket((void *)&send_buf[u_comm_offset],
             (void *)&recv_buf[u_comm_offset],
-            xmit_to_rank,
-            recv_from_rank,
+            xmit_to_rank, comms_send,
+            recv_from_rank, comms_recv,
            bytes);
+  }
-  if ( compress.DecompressionStep() ) {
+  if ( compress.DecompressionStep() && comms_recv ) {
    AddDecompress(&this->u_recv_buf_p[u_comm_offset],
                  &recv_buf[u_comm_offset],
                  words,Decompressions);
 }
 template<class compressor>
-int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
+int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx,int point)
 {
   const int Nsimd = _grid->Nsimd();
   const int maxl =2;// max layout in a direction
+  int comms_send = this->_comms_send[point] ;
+  int comms_recv = this->_comms_recv[point] ;
   int fd = _grid->_fdimensions[dimension];
   int rd = _grid->_rdimensions[dimension];
   int ld = _grid->_ldimensions[dimension];
@@ -1147,12 +1205,11 @@ public:
                 &face_table[face_idx][0],
                 face_table[face_idx].size()*sizeof(face_table_host[0]));
   }
-  gathermtime-=usecond();
+  if ( comms_send )
    Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type);
   face_idx++;
-  gathermtime+=usecond();
   //spointers[0] -- low
   //spointers[1] -- high
@ -1181,8 +1238,13 @@ public:
rpointers[i] = rp; rpointers[i] = rp;
AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes); int duplicate = CheckForDuplicate(dimension,sx,nbr_proc,(void *)rp,i,bytes,cbmask);
if ( (!duplicate) ) { // Force comms for now
AddPacket((void *)sp,(void *)rp,
xmit_to_rank,comms_send,
recv_from_rank,comms_recv,
bytes);
}
} else { } else {
@ -1191,7 +1253,9 @@ public:
} }
} }
if ( comms_recv ) {
AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers); AddMerge(&this->u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
}
u_comm_offset +=buffer_size; u_comm_offset +=buffer_size;
} }
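Editor's note: the gating pattern above turns halo exchange on and off per stencil point. A minimal, self-contained sketch of how such masks can be derived from a Dirichlet block vector (illustrative only: the real code indexes `_comms_send`/`_comms_recv` by stencil point, as visible in `this->_comms_send[point]` above, and may apply further conditions; none of the helper names below are from the repository):

  #include <cstdio>
  #include <vector>

  int main() {
    // Dirichlet wall extent per dimension; 0 means "no wall cuts this dimension".
    std::vector<int> block = {0, 32, 32, 0};
    std::vector<int> comms_send(block.size()), comms_recv(block.size());
    for (size_t d = 0; d < block.size(); d++) {
      // A wall decouples the two sides, so halo exchange in that
      // direction can be suppressed; otherwise comms stay enabled.
      comms_send[d] = (block[d] == 0) ? 1 : 0;
      comms_recv[d] = comms_send[d];
    }
    for (size_t d = 0; d < block.size(); d++)
      std::printf("dim %zu: send=%d recv=%d\n", d, comms_send[d], comms_recv[d]);
    return 0;
  }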

View File

@@ -534,7 +534,6 @@ void Grid_init(int *argc,char ***argv)
 void Grid_finalize(void)
 {
 #if defined (GRID_COMMS_MPI) || defined (GRID_COMMS_MPI3) || defined (GRID_COMMS_MPIT)
-  MPI_Barrier(MPI_COMM_WORLD);
   MPI_Finalize();
   Grid_unquiesce_nodes();
 #endif

View File

@@ -217,9 +217,9 @@ int main (int argc, char ** argv)
 	  dbytes+=
 	    Grid.StencilSendToRecvFromBegin(requests,
 					    (void *)&xbuf[mu][0],
-					    xmit_to_rank,
+					    xmit_to_rank,1,
 					    (void *)&rbuf[mu][0],
-					    recv_from_rank,
+					    recv_from_rank,1,
 					    bytes,mu);
 	  comm_proc = mpi_layout[mu]-1;
@@ -228,9 +228,9 @@ int main (int argc, char ** argv)
 	  dbytes+=
 	    Grid.StencilSendToRecvFromBegin(requests,
 					    (void *)&xbuf[mu+4][0],
-					    xmit_to_rank,
+					    xmit_to_rank,1,
 					    (void *)&rbuf[mu+4][0],
-					    recv_from_rank,
+					    recv_from_rank,1,
 					    bytes,mu+4);
 	}
@@ -309,9 +309,9 @@ int main (int argc, char ** argv)
 	  dbytes+=
 	    Grid.StencilSendToRecvFromBegin(requests,
 					    (void *)&xbuf[mu][0],
-					    xmit_to_rank,
+					    xmit_to_rank,1,
 					    (void *)&rbuf[mu][0],
-					    recv_from_rank,
+					    recv_from_rank,1,
 					    bytes,mu);
 	  Grid.StencilSendToRecvFromComplete(requests,mu);
 	  requests.resize(0);
@@ -322,9 +322,9 @@ int main (int argc, char ** argv)
 	  dbytes+=
 	    Grid.StencilSendToRecvFromBegin(requests,
 					    (void *)&xbuf[mu+4][0],
-					    xmit_to_rank,
+					    xmit_to_rank,1,
 					    (void *)&rbuf[mu+4][0],
-					    recv_from_rank,
+					    recv_from_rank,1,
 					    bytes,mu+4);
 	  Grid.StencilSendToRecvFromComplete(requests,mu+4);
 	  requests.resize(0);
@@ -411,8 +411,8 @@ int main (int argc, char ** argv)
 	  Grid.ShiftedRanks(mu,comm_proc,xmit_to_rank,recv_from_rank);
 	}
 	int tid = omp_get_thread_num();
-	tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,
-					   (void *)&rbuf[dir][0], recv_from_rank, bytes,tid);
+	tbytes= Grid.StencilSendToRecvFrom((void *)&xbuf[dir][0], xmit_to_rank,1,
+					   (void *)&rbuf[dir][0], recv_from_rank,1, bytes,tid);
 	thread_critical { dbytes+=tbytes; }
       }
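Editor's note: the extra `1` passed after each rank is the new send/receive enable flag on the stencil comms interface; the benchmark forces both legs on unconditionally. A sketch of how a caller holding per-direction masks might drive the same call (a fragment for illustration only; `comms_send`/`comms_recv` are assumed to be in scope, as in the stencil code above):

  // Illustrative fragment, not from the repository:
  int do_send = comms_send ? 1 : 0;  // 0 would suppress the send leg
  int do_recv = comms_recv ? 1 : 0;  // 0 would suppress the matching receive
  Grid.StencilSendToRecvFromBegin(requests,
                                  (void *)&xbuf[mu][0],  xmit_to_rank,   do_send,
                                  (void *)&rbuf[mu][0],  recv_from_rank, do_recv,
                                  bytes, mu);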

View File

@@ -32,10 +32,9 @@
 using namespace std;
 using namespace Grid;
-template<class d>
-struct scal {
-  d internal;
-};
+////////////////////////
+/// Move to domains ////
+////////////////////////
 Gamma::Algebra Gmu [] = {
   Gamma::Algebra::GammaX,
@@ -44,6 +43,7 @@ struct scal {
   Gamma::Algebra::GammaT
 };
+void Benchmark(int Ls, Coordinate Dirichlet);
 int main (int argc, char ** argv)
 {
@@ -52,24 +52,82 @@ int main (int argc, char ** argv)
   int threads = GridThread::GetThreads();
-  Coordinate latt4 = GridDefaultLatt();
   int Ls=16;
-  for(int i=0;i<argc;i++)
+  for(int i=0;i<argc;i++) {
     if(std::string(argv[i]) == "-Ls"){
       std::stringstream ss(argv[i+1]); ss >> Ls;
     }
+  }
+
+  //////////////////
+  // With comms
+  //////////////////
+  Coordinate Dirichlet(Nd+1,0);
+
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing with full communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  Benchmark(Ls,Dirichlet);
+
+  //////////////////
+  // Domain decomposed
+  //////////////////
+  Coordinate latt4 = GridDefaultLatt();
+  Coordinate mpi   = GridDefaultMpi();
+  Coordinate CommDim(Nd);
+  Coordinate shm;
+  GlobalSharedMemory::GetShmDims(mpi,shm);
+
+  //////////////////////
+  // Node level
+  //////////////////////
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing without internode communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  for(int d=0;d<Nd;d++) CommDim[d]= (mpi[d]/shm[d])>1 ? 1 : 0;
+  Dirichlet[0] = 0;
+  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0] * shm[0];
+  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1] * shm[1];
+  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2] * shm[2];
+  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3] * shm[3];
+  Benchmark(Ls,Dirichlet);
+
+  std::cout << "\n\n\n\n\n\n" <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  std::cout << GridLogMessage<< " Testing without intranode communication " <<std::endl;
+  std::cout << GridLogMessage<< "++++++++++++++++++++++++++++++++++++++++++++++++" <<std::endl;
+  for(int d=0;d<Nd;d++) CommDim[d]= mpi[d]>1 ? 1 : 0;
+  Dirichlet[0] = 0;
+  Dirichlet[1] = CommDim[0]*latt4[0]/mpi[0];
+  Dirichlet[2] = CommDim[1]*latt4[1]/mpi[1];
+  Dirichlet[3] = CommDim[2]*latt4[2]/mpi[2];
+  Dirichlet[4] = CommDim[3]*latt4[3]/mpi[3];
+  Benchmark(Ls,Dirichlet);
+
+  Grid_finalize();
+  exit(0);
+}
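Editor's note: a worked reading of the wall placement above, evaluating only the formulas shown (illustrative numbers): take latt4[0]=64 and mpi[0]=2, so the local extent in x is 32.

  // Node-level pass:
  //   shm[0]=2 => mpi[0]/shm[0]=1 => CommDim[0]=0 => Dirichlet[1]=0
  //     (x never crosses a node boundary, so no wall is needed);
  //   shm[0]=1 => mpi[0]/shm[0]=2 => CommDim[0]=1
  //     => Dirichlet[1] = 1 * 64/2 * 1 = 32, a wall on every node boundary.
  // Intranode pass:
  //   mpi[0]=2 => CommDim[0]=1 => Dirichlet[1] = 64/2 = 32,
  //     a wall on every rank boundary, suppressing intra-node comms as well.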
+void Benchmark(int Ls, Coordinate Dirichlet)
+{
+  Coordinate latt4 = GridDefaultLatt();
   GridLogLayout();
   long unsigned int single_site_flops = 8*Nc*(7+16*Nc);
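Editor's note: for reference, at Nc=3 this evaluates to the familiar Wilson-dslash figure:

  // single_site_flops = 8*Nc*(7+16*Nc) = 8*3*(7+48) = 24*55 = 1320 flops per 4d site.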
   GridCartesian         * UGrid   = SpaceTimeGrid::makeFourDimGrid(GridDefaultLatt(), GridDefaultSimd(Nd,vComplexF::Nsimd()),GridDefaultMpi());
   GridRedBlackCartesian * UrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(UGrid);
   GridCartesian         * FGrid   = SpaceTimeGrid::makeFiveDimGrid(Ls,UGrid);
   GridRedBlackCartesian * FrbGrid = SpaceTimeGrid::makeFiveDimRedBlackGrid(Ls,UGrid);
-  std::cout << GridLogMessage << "Making s innermost grids"<<std::endl;
   GridCartesian         * sUGrid   = SpaceTimeGrid::makeFourDimDWFGrid(GridDefaultLatt(),GridDefaultMpi());
   GridRedBlackCartesian * sUrbGrid = SpaceTimeGrid::makeFourDimRedBlackGrid(sUGrid);
   GridCartesian         * sFGrid   = SpaceTimeGrid::makeFiveDimDWFGrid(Ls,UGrid);
@@ -80,12 +138,12 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage << "Initialising 4d RNG" << std::endl;
   GridParallelRNG          RNG4(UGrid);  RNG4.SeedUniqueString(std::string("The 4D RNG"));
   std::cout << GridLogMessage << "Initialising 5d RNG" << std::endl;
   GridParallelRNG          RNG5(FGrid);  RNG5.SeedUniqueString(std::string("The 5D RNG"));
-  std::cout << GridLogMessage << "Initialised RNGs" << std::endl;
   LatticeFermionF src   (FGrid); random(RNG5,src);
-#if 0
+#if 1
   src = Zero();
   {
     Coordinate origin({0,0,0,latt4[2]-1,0});
@@ -100,7 +158,6 @@ int main (int argc, char ** argv)
   src = src*N2;
 #endif
 
   LatticeFermionF result(FGrid); result=Zero();
   LatticeFermionF    ref(FGrid);    ref=Zero();
   LatticeFermionF    tmp(FGrid);
@@ -108,29 +165,31 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage << "Drawing gauge field" << std::endl;
   LatticeGaugeFieldF Umu(UGrid);
+  LatticeGaugeFieldF UmuCopy(UGrid);
   SU<Nc>::HotConfiguration(RNG4,Umu);
+  UmuCopy=Umu;
   std::cout << GridLogMessage << "Random gauge initialised " << std::endl;
-#if 0
-  Umu=1.0;
-  for(int mu=0;mu<Nd;mu++){
-    LatticeColourMatrixF ttmp(UGrid);
-    ttmp = PeekIndex<LorentzIndex>(Umu,mu);
-    //    if (mu !=2 ) ttmp = 0;
-    //    ttmp = ttmp* pow(10.0,mu);
-    PokeIndex<LorentzIndex>(Umu,ttmp,mu);
-  }
-  std::cout << GridLogMessage << "Forced to diagonal " << std::endl;
-#endif
+
+  ////////////////////////////////////
+  // Apply BCs
+  ////////////////////////////////////
+  Coordinate Block(4);
+  for(int d=0;d<4;d++) Block[d]= Dirichlet[d+1];
+
+  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block5 " << Dirichlet << std::endl;
+  std::cout << GridLogMessage << "Applying BCs for Dirichlet Block4 " << Block << std::endl;
+
+  DirichletFilter<LatticeGaugeFieldF> Filter(Block);
+  Filter.applyFilter(Umu);
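Editor's note: the filter makes the Dirichlet boundary concrete by zeroing every gauge link that crosses a block wall, so hops between domains vanish identically. A self-contained one-dimensional sketch of the idea (illustrative only, not Grid's DirichletFilter):

  #include <cstdio>
  #include <vector>

  // Zero the link U(x) whenever the hop x -> x+1 crosses a wall, i.e. the
  // link leaving the last slice of each block of extent `block`.
  void applyDirichletFilter(std::vector<double> &U, int block) {
    if (block == 0) return;                        // 0 => no wall in this dimension
    for (size_t x = 0; x < U.size(); x++)
      if ((x % block) == (size_t)(block - 1)) U[x] = 0.0;
  }

  int main() {
    std::vector<double> U(8, 1.0);
    applyDirichletFilter(U, 4);                    // walls after x=3 and x=7
    for (double u : U) std::printf("%g ", u);      // prints: 1 1 1 0 1 1 1 0
    std::printf("\n");
    return 0;
  }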
   ////////////////////////////////////
   // Naive wilson implementation
   ////////////////////////////////////
-  // replicate across fifth dimension
-  //  LatticeGaugeFieldF Umu5d(FGrid);
   std::vector<LatticeColourMatrixF> U(4,UGrid);
   for(int mu=0;mu<Nd;mu++){
     U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
   }
   std::cout << GridLogMessage << "Setting up Cshift based reference " << std::endl;
   if (1)
@@ -191,11 +250,13 @@ int main (int argc, char ** argv)
   std::cout << GridLogMessage<< "*****************************************************************" <<std::endl;
   DomainWallFermionF Dw(Umu,*FGrid,*FrbGrid,*UGrid,*UrbGrid,mass,M5);
+  Dw.DirichletBlock(Dirichlet);
+  Dw.ImportGauge(Umu);
   int ncall =300;
   if (1) {
     FGrid->Barrier();
-    Dw.ZeroCounters();
     Dw.Dhop(src,result,0);
     std::cout<<GridLogMessage<<"Called warmup"<<std::endl;
     double t0=usecond();
@@ -220,29 +281,20 @@ int main (int argc, char ** argv)
     double data_mem = (volume * (2*Nd+1)*Nd*Nc + (volume/Ls) *2*Nd*Nc*Nc) * simdwidth / nsimd * ncall / (1024.*1024.*1024.);
     std::cout<<GridLogMessage << "Called Dw "<<ncall<<" times in "<<t1-t0<<" us"<<std::endl;
-    //    std::cout<<GridLogMessage << "norm result "<< norm2(result)<<std::endl;
-    //    std::cout<<GridLogMessage << "norm ref    "<< norm2(ref)<<std::endl;
     std::cout<<GridLogMessage << "mflop/s =   "<< flops/(t1-t0)<<std::endl;
     std::cout<<GridLogMessage << "mflop/s per rank =  "<< flops/(t1-t0)/NP<<std::endl;
     std::cout<<GridLogMessage << "mflop/s per node =  "<< flops/(t1-t0)/NN<<std::endl;
-    std::cout<<GridLogMessage << "RF  GiB/s (base 2) =   "<< 1000000. * data_rf/((t1-t0))<<std::endl;
-    std::cout<<GridLogMessage << "mem GiB/s (base 2) =   "<< 1000000. * data_mem/((t1-t0))<<std::endl;
+    //    std::cout<<GridLogMessage << "RF  GiB/s (base 2) =   "<< 1000000. * data_rf/((t1-t0))<<std::endl;
+    //    std::cout<<GridLogMessage << "mem GiB/s (base 2) =   "<< 1000000. * data_mem/((t1-t0))<<std::endl;
     err = ref-result;
     std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
-    //exit(0);
     if(( norm2(err)>1.0e-4) ) {
-      /*
-      std::cout << "RESULT\n " << result<<std::endl;
-      std::cout << "REF   \n " << ref <<std::endl;
-      std::cout << "ERR   \n " << err <<std::endl;
-      */
       std::cout<<GridLogMessage << "WRONG RESULT" << std::endl;
       FGrid->Barrier();
      exit(-1);
     }
     assert (norm2(err)< 1.0e-4 );
-    Dw.Report();
   }
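Editor's note: the headline rate can be cross-checked against the 64^4 log later in this diff, assuming the usual flops = ncall * Ls * V * single_site_flops normalisation (the formula itself is outside this hunk, so treat this as a consistency check rather than quoted code):

  // 1320 * 64^4 * 16            = 3.5433e+11 flops per Dhop call
  // 3.5433e+11 * 300 calls      = 1.0630e+14 flops
  // 1.0630e+14 / 2.82203e+06 us = 3.767e+07 Mflop/s
  // which reproduces the logged "mflop/s = 3.76681e+07".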
   if (1)
@@ -286,21 +338,20 @@ int main (int argc, char ** argv)
     }
     ref = -0.5*ref;
   }
-  //  dump=1;
-  Dw.Dhop(src,result,1);
+  Dw.Dhop(src,result,DaggerYes);
+
+  std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
   std::cout << GridLogMessage << "Compare to naive wilson implementation Dag to verify correctness" << std::endl;
+  std::cout << GridLogMessage << "----------------------------------------------------------------" << std::endl;
   std::cout<<GridLogMessage << "Called DwDag"<<std::endl;
   std::cout<<GridLogMessage << "norm dag result "<< norm2(result)<<std::endl;
   std::cout<<GridLogMessage << "norm dag ref    "<< norm2(ref)<<std::endl;
   err = ref-result;
   std::cout<<GridLogMessage << "norm dag diff   "<< norm2(err)<<std::endl;
-  if((norm2(err)>1.0e-4)){
-    /*
-    std::cout<< "DAG RESULT\n " <<ref << std::endl;
-    std::cout<< "DAG sRESULT\n " <<result << std::endl;
-    std::cout<< "DAG ERR   \n " << err <<std::endl;
-    */
-  }
+  assert((norm2(err)<1.0e-4));
 
   LatticeFermionF src_e (FrbGrid);
   LatticeFermionF src_o (FrbGrid);
   LatticeFermionF r_e   (FrbGrid);
@@ -330,7 +381,6 @@ int main (int argc, char ** argv)
   if ( WilsonKernelsStatic::Opt == WilsonKernelsStatic::OptInlineAsm ) std::cout << GridLogMessage<< "* Using Asm Nc=3   WilsonKernels" <<std::endl;
   std::cout << GridLogMessage<< "*********************************************************" <<std::endl;
   {
-    Dw.ZeroCounters();
     FGrid->Barrier();
     Dw.DhopEO(src_o,r_e,DaggerNo);
     double t0=usecond();
@@ -352,7 +402,6 @@ int main (int argc, char ** argv)
     std::cout<<GridLogMessage << "Deo mflop/s =   "<< flops/(t1-t0)<<std::endl;
     std::cout<<GridLogMessage << "Deo mflop/s per rank   "<< flops/(t1-t0)/NP<<std::endl;
     std::cout<<GridLogMessage << "Deo mflop/s per node   "<< flops/(t1-t0)/NN<<std::endl;
-    Dw.Report();
   }
   Dw.DhopEO(src_o,r_e,DaggerNo);
   Dw.DhopOE(src_e,r_o,DaggerNo);
@@ -367,13 +416,7 @@ int main (int argc, char ** argv)
   err = r_eo-result;
   std::cout<<GridLogMessage << "norm diff   "<< norm2(err)<<std::endl;
-  if((norm2(err)>1.0e-4)){
-    /*
-    std::cout<< "Deo RESULT\n " <<r_eo << std::endl;
-    std::cout<< "Deo REF\n " <<result << std::endl;
-    std::cout<< "Deo ERR   \n " << err <<std::endl;
-    */
-  }
+  assert(norm2(err)<1.0e-4);
 
   pickCheckerboard(Even,src_e,err);
   pickCheckerboard(Odd,src_o,err);
@@ -382,6 +425,4 @@ int main (int argc, char ** argv)
   assert(norm2(src_e)<1.0e-4);
   assert(norm2(src_o)<1.0e-4);
-
-  Grid_finalize();
-  exit(0);
 }
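Editor's note: the closing checks rest on the checkerboard decomposition of the hopping term. Since Dhop connects only nearest neighbours, it maps even sites to odd and vice versa; a sketch of the identity being asserted (in LaTeX, conventions assumed from context):

  (D_{\rm hop}\,\psi)_e \;=\; D_{eo}\,\psi_o , \qquad (D_{\rm hop}\,\psi)_o \;=\; D_{oe}\,\psi_e .

Reassembling r_e and r_o into r_eo must therefore reproduce the unpreconditioned result up to rounding, which is what the final asserts enforce.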

View File

@ -1,5 +1,5 @@
module load PrgEnv-gnu module load PrgEnv-gnu
module load rocm/4.3.0 module load rocm/4.5.0
module load gmp module load gmp
module load cray-fftw module load cray-fftw
module load craype-accel-amd-gfx908 module load craype-accel-amd-gfx908

View File

@@ -1,25 +1,25 @@
-tu-c0r0n00 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n00 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n09 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n00 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n06 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n06 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n09 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n09 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n03 - 1 device=1 binding=--interleave=2,3
-tu-c0r0n06 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n09 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n00 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n03 - 0 device=0 binding=--interleave=0,1
-tu-c0r0n03 - 2 device=2 binding=--interleave=4,5
-tu-c0r0n06 - 3 device=3 binding=--interleave=6,7
-tu-c0r0n03 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n00 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n00 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n00 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n00 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n06 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n06 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n06 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n06 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n03 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n03 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n03 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n03 - 3 device=3 binding=--interleave=6,7
+tu-c0r3n09 - 0 device=0 binding=--interleave=0,1
+tu-c0r3n09 - 1 device=1 binding=--interleave=2,3
+tu-c0r3n09 - 2 device=2 binding=--interleave=4,5
+tu-c0r3n09 - 3 device=3 binding=--interleave=6,7
 OPENMPI detected
 AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
 AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
-AcceleratorCudaInit: ================================================
+AcceleratorCudaInit: Configure options --enable-setdevice=no
 OPENMPI detected
 AcceleratorCudaInit[0]: ========================
 AcceleratorCudaInit[0]: Device Number    : 0
@@ -33,11 +33,41 @@ AcceleratorCudaInit[0]: pciBusID: 3
 AcceleratorCudaInit[0]: pciDeviceID: 0
 AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
 AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
 AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
-AcceleratorCudaInit: ================================================
+AcceleratorCudaInit: Configure options --enable-setdevice=no
 OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+OPENMPI detected
+AcceleratorCudaInit: using default device
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
+AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
+AcceleratorCudaInit: Configure options --enable-setdevice=no
 AcceleratorCudaInit[0]: ========================
 AcceleratorCudaInit[0]: Device Number    : 0
 AcceleratorCudaInit[0]: ========================
@@ -50,43 +80,25 @@ AcceleratorCudaInit[0]: pciBusID: 3
 AcceleratorCudaInit[0]: pciDeviceID: 0
 AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
 AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
+AcceleratorCudaInit: assume user either uses
+AcceleratorCudaInit: a) IBM jsrun, or
 AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
+AcceleratorCudaInit: Configure options --enable-setdevice=no
+local rank 1 device 0 bus id: 0000:44:00.0
 AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
+local rank 0 device 0 bus id: 0000:03:00.0
 AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
 AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
 AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
 AcceleratorCudaInit: ================================================
-OPENMPI detected
-AcceleratorCudaInit: using default device
-AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
-AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
-AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
 AcceleratorCudaInit: ================================================
-local rank 0 device 0 bus id: 0000:03:00.0
-AcceleratorCudaInit: ================================================
-AcceleratorCudaInit: ================================================
+local rank 2 device 0 bus id: 0000:84:00.0
 SharedMemoryMpi:  World communicator of size 16
 SharedMemoryMpi:  Node  communicator of size 4
-0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x7fcd80000000 for comms buffers
+0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x153960000000 for comms buffers
 Setting up IPC

 __|__|__|__|__|__|__|__|__|__|__|__|__|__|__
@@ -116,7 +128,7 @@ This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 GNU General Public License for more details.
-Current Grid git commit hash=9d2238148c56e3fbadfa95dcabf2b83d4bde14cd: (HEAD -> develop) uncommited changes
+Current Grid git commit hash=da06d15f73184ceb15d66d4e7e702b02fed7b940: (HEAD -> feature/dirichlet, develop) uncommited changes

 Grid : Message : ================================================
 Grid : Message : MPI is initialised and logging filters activated
@@ -124,122 +136,102 @@ Grid : Message : ================================================
 Grid : Message : Requested 2147483648 byte stencil comms buffers
 Grid : Message : MemoryManager Cache 34004218675 bytes
 Grid : Message : MemoryManager::Init() setting up
-Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
+Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 8 LARGE 2
 Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
 Grid : Message : MemoryManager::Init() Using cudaMalloc
-Grid : Message : 1.198523 s : Grid Layout
-Grid : Message : 1.198530 s : Global lattice size  : 64 64 64 64
-Grid : Message : 1.198534 s : OpenMP threads       : 4
-Grid : Message : 1.198535 s : MPI tasks            : 2 2 2 2
-Grid : Message : 1.397615 s : Making s innermost grids
-Grid : Message : 1.441828 s : Initialising 4d RNG
-Grid : Message : 1.547973 s : Intialising parallel RNG with unique string 'The 4D RNG'
-Grid : Message : 1.547998 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
-Grid : Message : 1.954777 s : Initialising 5d RNG
-Grid : Message : 3.633825 s : Intialising parallel RNG with unique string 'The 5D RNG'
-Grid : Message : 3.633869 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
-Grid : Message : 12.162710 s : Initialised RNGs
-Grid : Message : 15.882520 s : Drawing gauge field
-Grid : Message : 15.816362 s : Random gauge initialised
-Grid : Message : 17.279671 s : Setting up Cshift based reference
-Grid : Message : 26.331426 s : *****************************************************************
-Grid : Message : 26.331452 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
-Grid : Message : 26.331454 s : *****************************************************************
-Grid : Message : 26.331456 s : *****************************************************************
-Grid : Message : 26.331458 s : * Benchmarking DomainWallFermionR::Dhop
-Grid : Message : 26.331459 s : * Vectorising space-time by 8
-Grid : Message : 26.331463 s : * VComplexF size is 64 B
-Grid : Message : 26.331465 s : * SINGLE precision
-Grid : Message : 26.331467 s : * Using Overlapped Comms/Compute
-Grid : Message : 26.331468 s : * Using GENERIC Nc WilsonKernels
-Grid : Message : 26.331469 s : *****************************************************************
-Grid : Message : 28.413717 s : Called warmup
-Grid : Message : 56.418423 s : Called Dw 3000 times in 2.80047e+07 us
-Grid : Message : 56.418476 s : mflop/s = 3.79581e+07
-Grid : Message : 56.418479 s : mflop/s per rank = 2.37238e+06
-Grid : Message : 56.418481 s : mflop/s per node = 9.48953e+06
-Grid : Message : 56.418483 s : RF GiB/s (base 2) = 77130
-Grid : Message : 56.418485 s : mem GiB/s (base 2) = 48206.3
-Grid : Message : 56.422076 s : norm diff 1.03481e-13
-Grid : Message : 56.456894 s : #### Dhop calls report
-Grid : Message : 56.456899 s : WilsonFermion5D Number of DhopEO Calls : 6002
-Grid : Message : 56.456903 s : WilsonFermion5D TotalTime /Calls : 4710.93 us
-Grid : Message : 56.456905 s : WilsonFermion5D CommTime /Calls : 3196.15 us
-Grid : Message : 56.456908 s : WilsonFermion5D FaceTime /Calls : 494.392 us
-Grid : Message : 56.456910 s : WilsonFermion5D ComputeTime1/Calls : 44.4107 us
-Grid : Message : 56.456912 s : WilsonFermion5D ComputeTime2/Calls : 1037.75 us
-Grid : Message : 56.456921 s : Average mflops/s per call : 3.55691e+09
-Grid : Message : 56.456925 s : Average mflops/s per call per rank : 2.22307e+08
-Grid : Message : 56.456928 s : Average mflops/s per call per node : 8.89228e+08
-Grid : Message : 56.456930 s : Average mflops/s per call (full) : 3.82915e+07
-Grid : Message : 56.456933 s : Average mflops/s per call per rank (full): 2.39322e+06
-Grid : Message : 56.456952 s : Average mflops/s per call per node (full): 9.57287e+06
-Grid : Message : 56.456954 s : WilsonFermion5D Stencil
-Grid : Message : 56.457016 s : Stencil calls 3001
-Grid : Message : 56.457022 s : Stencil halogtime 0
-Grid : Message : 56.457024 s : Stencil gathertime 55.9154
-Grid : Message : 56.457026 s : Stencil gathermtime 20.1073
-Grid : Message : 56.457028 s : Stencil mergetime 18.5585
-Grid : Message : 56.457030 s : Stencil decompresstime 0.0639787
-Grid : Message : 56.457032 s : Stencil comms_bytes 4.02653e+08
-Grid : Message : 56.457034 s : Stencil commtime 6379.93
-Grid : Message : 56.457036 s : Stencil 63.1124 GB/s per rank
-Grid : Message : 56.457038 s : Stencil 252.45 GB/s per node
-Grid : Message : 56.457040 s : WilsonFermion5D StencilEven
-Grid : Message : 56.457048 s : WilsonFermion5D StencilOdd
-Grid : Message : 56.457062 s : WilsonFermion5D Stencil     Reporti()
-Grid : Message : 56.457065 s : WilsonFermion5D StencilEven Reporti()
-Grid : Message : 56.457066 s : WilsonFermion5D StencilOdd  Reporti()
-Grid : Message : 79.259261 s : Compare to naive wilson implementation Dag to verify correctness
-Grid : Message : 79.259287 s : Called DwDag
-Grid : Message : 79.259288 s : norm dag result 12.0421
-Grid : Message : 79.271740 s : norm dag ref    12.0421
-Grid : Message : 79.287759 s : norm dag diff   7.63236e-14
-Grid : Message : 79.328100 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
-Grid : Message : 79.955951 s : src_e0.499997
-Grid : Message : 80.633620 s : src_o0.500003
-Grid : Message : 80.164163 s : *********************************************************
-Grid : Message : 80.164168 s : * Benchmarking DomainWallFermionF::DhopEO
-Grid : Message : 80.164170 s : * Vectorising space-time by 8
-Grid : Message : 80.164172 s : * SINGLE precision
-Grid : Message : 80.164174 s : * Using Overlapped Comms/Compute
-Grid : Message : 80.164177 s : * Using GENERIC Nc WilsonKernels
-Grid : Message : 80.164178 s : *********************************************************
-Grid : Message : 93.797635 s : Deo mflop/s = 3.93231e+07
-Grid : Message : 93.797670 s : Deo mflop/s per rank 2.45769e+06
-Grid : Message : 93.797672 s : Deo mflop/s per node 9.83077e+06
-Grid : Message : 93.797674 s : #### Dhop calls report
-Grid : Message : 93.797675 s : WilsonFermion5D Number of DhopEO Calls : 3001
-Grid : Message : 93.797677 s : WilsonFermion5D TotalTime /Calls : 4542.83 us
-Grid : Message : 93.797679 s : WilsonFermion5D CommTime /Calls : 2978.97 us
-Grid : Message : 93.797681 s : WilsonFermion5D FaceTime /Calls : 602.287 us
-Grid : Message : 93.797683 s : WilsonFermion5D ComputeTime1/Calls : 67.1416 us
-Grid : Message : 93.797685 s : WilsonFermion5D ComputeTime2/Calls : 1004.07 us
-Grid : Message : 93.797713 s : Average mflops/s per call : 3.30731e+09
-Grid : Message : 93.797717 s : Average mflops/s per call per rank : 2.06707e+08
-Grid : Message : 93.797719 s : Average mflops/s per call per node : 8.26827e+08
-Grid : Message : 93.797721 s : Average mflops/s per call (full) : 3.97084e+07
-Grid : Message : 93.797727 s : Average mflops/s per call per rank (full): 2.48178e+06
-Grid : Message : 93.797732 s : Average mflops/s per call per node (full): 9.92711e+06
-Grid : Message : 93.797735 s : WilsonFermion5D Stencil
-Grid : Message : 93.797746 s : WilsonFermion5D StencilEven
-Grid : Message : 93.797758 s : WilsonFermion5D StencilOdd
-Grid : Message : 93.797769 s : Stencil calls 3001
-Grid : Message : 93.797773 s : Stencil halogtime 0
-Grid : Message : 93.797776 s : Stencil gathertime 56.7458
-Grid : Message : 93.797780 s : Stencil gathermtime 22.6504
-Grid : Message : 93.797782 s : Stencil mergetime 21.1913
-Grid : Message : 93.797786 s : Stencil decompresstime 0.0556481
-Grid : Message : 93.797788 s : Stencil comms_bytes 2.01327e+08
-Grid : Message : 93.797791 s : Stencil commtime 2989.33
-Grid : Message : 93.797795 s : Stencil 67.3484 GB/s per rank
-Grid : Message : 93.797798 s : Stencil 269.394 GB/s per node
-Grid : Message : 93.797801 s : WilsonFermion5D Stencil     Reporti()
-Grid : Message : 93.797803 s : WilsonFermion5D StencilEven Reporti()
-Grid : Message : 93.797805 s : WilsonFermion5D StencilOdd  Reporti()
-Grid : Message : 93.873429 s : r_e6.02111
-Grid : Message : 93.879931 s : r_o6.02102
-Grid : Message : 93.885912 s : res12.0421
-Grid : Message : 94.876555 s : norm diff 0
-Grid : Message : 95.485643 s : norm diff even 0
-Grid : Message : 95.581236 s : norm diff odd 0
+Grid : Message : 1.875883 s : Grid Layout
+Grid : Message : 1.875893 s : Global lattice size  : 64 64 64 64
+Grid : Message : 1.875897 s : OpenMP threads       : 4
+Grid : Message : 1.875898 s : MPI tasks            : 2 2 2 2
+Grid : Message : 1.993571 s : Initialising 4d RNG
+Grid : Message : 2.881990 s : Intialising parallel RNG with unique string 'The 4D RNG'
+Grid : Message : 2.882370 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
+Grid : Message : 2.495044 s : Initialising 5d RNG
+Grid : Message : 4.120900 s : Intialising parallel RNG with unique string 'The 5D RNG'
+Grid : Message : 4.121350 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
+Grid : Message : 15.268010 s : Drawing gauge field
+Grid : Message : 16.234025 s : Random gauge initialised
+Grid : Message : 16.234057 s : Applying BCs
+Grid : Message : 16.365565 s : Setting up Cshift based reference
+Grid : Message : 44.512418 s : *****************************************************************
+Grid : Message : 44.512448 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
+Grid : Message : 44.512450 s : *****************************************************************
+Grid : Message : 44.512451 s : *****************************************************************
+Grid : Message : 44.512452 s : * Benchmarking DomainWallFermionR::Dhop
+Grid : Message : 44.512453 s : * Vectorising space-time by 8
+Grid : Message : 44.512454 s : * VComplexF size is 64 B
+Grid : Message : 44.512456 s : * SINGLE precision
+Grid : Message : 44.512459 s : * Using Overlapped Comms/Compute
+Grid : Message : 44.512460 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 44.512461 s : *****************************************************************
+Grid : Message : 46.389070 s : Called warmup
+Grid : Message : 49.211265 s : Called Dw 300 times in 2.82203e+06 us
+Grid : Message : 49.211295 s : mflop/s = 3.76681e+07
+Grid : Message : 49.211297 s : mflop/s per rank = 2.35425e+06
+Grid : Message : 49.211299 s : mflop/s per node = 9.41702e+06
+Grid : Message : 49.211301 s : RF GiB/s (base 2) = 76540.6
+Grid : Message : 49.211308 s : mem GiB/s (base 2) = 47837.9
+Grid : Message : 49.214868 s : norm diff 1.06409e-13
+Grid : Message : 92.647781 s : Compare to naive wilson implementation Dag to verify correctness
+Grid : Message : 92.647816 s : Called DwDag
+Grid : Message : 92.647817 s : norm dag result 12.0421
+Grid : Message : 92.801806 s : norm dag ref    12.0421
+Grid : Message : 92.817724 s : norm dag diff   7.21921e-14
+Grid : Message : 92.858973 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
+Grid : Message : 93.210378 s : src_e0.499997
+Grid : Message : 93.583286 s : src_o0.500003
+Grid : Message : 93.682468 s : *********************************************************
+Grid : Message : 93.682471 s : * Benchmarking DomainWallFermionF::DhopEO
+Grid : Message : 93.682472 s : * Vectorising space-time by 8
+Grid : Message : 93.682473 s : * SINGLE precision
+Grid : Message : 93.682475 s : * Using Overlapped Comms/Compute
+Grid : Message : 93.682476 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 93.682477 s : *********************************************************
+Grid : Message : 95.162342 s : Deo mflop/s = 3.92487e+07
+Grid : Message : 95.162387 s : Deo mflop/s per rank 2.45305e+06
+Grid : Message : 95.162389 s : Deo mflop/s per node 9.81219e+06
+Grid : Message : 95.232801 s : r_e6.02111
+Grid : Message : 95.240061 s : r_o6.02102
+Grid : Message : 95.245975 s : res12.0421
+Grid : Message : 95.833402 s : norm diff 0
+Grid : Message : 96.573829 s : norm diff even 0
+Grid : Message : 96.868272 s : norm diff odd 0
+Dirichlet block [0 64 64 32 32]
+Grid : Message : 97.756909 s : Grid Layout
+Grid : Message : 97.756911 s : Global lattice size  : 64 64 64 64
+Grid : Message : 97.756921 s : OpenMP threads       : 4
+Grid : Message : 97.756922 s : MPI tasks            : 2 2 2 2
+Grid : Message : 97.897085 s : Initialising 4d RNG
+Grid : Message : 97.965061 s : Intialising parallel RNG with unique string 'The 4D RNG'
+Grid : Message : 97.965097 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
+Grid : Message : 98.367431 s : Initialising 5d RNG
+Grid : Message : 99.752745 s : Intialising parallel RNG with unique string 'The 5D RNG'
+Grid : Message : 99.752790 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
+Grid : Message : 111.290148 s : Drawing gauge field
+Grid : Message : 112.349289 s : Random gauge initialised
+Grid : Message : 112.349320 s : Applying BCs
+Grid : Message : 113.948740 s : Setting up Cshift based reference
+Grid : Message : 140.320415 s : *****************************************************************
+Grid : Message : 140.320443 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
+Grid : Message : 140.320444 s : *****************************************************************
+Grid : Message : 140.320445 s : *****************************************************************
+Grid : Message : 140.320446 s : * Benchmarking DomainWallFermionR::Dhop
+Grid : Message : 140.320447 s : * Vectorising space-time by 8
+Grid : Message : 140.320448 s : * VComplexF size is 64 B
+Grid : Message : 140.320450 s : * SINGLE precision
+Grid : Message : 140.320451 s : * Using Overlapped Comms/Compute
+Grid : Message : 140.320452 s : * Using GENERIC Nc WilsonKernels
+Grid : Message : 140.320453 s : *****************************************************************
+Grid : Message : 142.296150 s : Called warmup
+Grid : Message : 144.397678 s : Called Dw 300 times in 2.36719e+06 us
+Grid : Message : 144.397700 s : mflop/s = 4.49058e+07
+Grid : Message : 144.397702 s : mflop/s per rank = 2.80661e+06
+Grid : Message : 144.397704 s : mflop/s per node = 1.12265e+07
+Grid : Message : 144.397706 s : RF GiB/s (base 2) = 91247.6
+Grid : Message : 144.397708 s : mem GiB/s (base 2) = 57029.7
+Grid : Message : 144.401269 s : norm diff 9.78944e-14
+Grid : Message : 186.885460 s : Compare to naive wilson implementation Dag to verify correctness
+Grid : Message : 186.885492 s : Called DwDag
+Grid : Message : 186.885493 s : norm dag result 10.4157
+Grid : Message : 186.897154 s : norm dag ref    11.2266
+Grid : Message : 186.912538 s : norm dag diff   0.484633

View File

@@ -1,14 +1,13 @@
 #!/bin/bash
 #SBATCH -J dslash
-#SBATCH -A tc002
-#SBATCH -t 2:20:00
-#SBATCH --nodelist=tu-c0r0n[00,03,06,09]
+#SBATCH -A dp207
 #SBATCH --exclusive
 #SBATCH --nodes=4
 #SBATCH --ntasks=16
+#SBATCH --qos=standard
 #SBATCH --ntasks-per-node=4
 #SBATCH --cpus-per-task=8
-#SBATCH --time=12:00:00
+#SBATCH --time=0:05:00
 #SBATCH --partition=gpu
 #SBATCH --gres=gpu:4
 #SBATCH --output=%x.%j.out