/*************************************************************************************

    Grid physics library, www.github.com/paboyle/Grid

    Source file: ./lib/Stencil.h

    Copyright (C) 2015

Author: Peter Boyle <paboyle@ph.ed.ac.uk>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

    See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/*  END LEGAL */

#ifndef GRID_STENCIL_H
#define GRID_STENCIL_H

#include <Grid/stencil/SimpleCompressor.h> // subdir aggregate
#include <Grid/stencil/Lebesgue.h>         // subdir aggregate

//////////////////////////////////////////////////////////////////////////////////////////
// Must not lose sight that the goal is to be able to construct really efficient
// gather-to-a-point stencil code. CSHIFT is not the best way, so we need
// additional stencil support.
//
// Stencil based code will exchange haloes and use a table lookup for neighbours.
// This will be done with generality to allow easier efficient implementations.
// Overlap of comms and compute is enabled by tabulating off-node connected sites.
//
// Generic services
// 0) Prebuild neighbour tables
// 1) Compute sizes of all haloes/comms buffers; allocate them.
// 2) Gather all faces, and communicate.
// 3) Loop over result sites, giving nbr index/offnode info for each
//
//////////////////////////////////////////////////////////////////////////////////////////
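//
// A minimal usage sketch of the four services (illustration only; "grid",
// "src", "compress" and the 8-point nearest-neighbour geometry are
// hypothetical placeholders, not fixed by this header):
//
//   std::vector<int> dirs ({0,1,2,3, 0, 1, 2, 3});   // dimension each point moves in
//   std::vector<int> disps({1,1,1,1,-1,-1,-1,-1});   // +/- unit displacement per point
//   CartesianStencil<vobj,cobj> St(grid,8,Even,dirs,disps); // services 0) and 1)
//   St.HaloExchange(src,compress);                          // service 2)
//   // service 3): loop over sites; St.GetEntry(ptype,point,osite) gives each
//   // neighbour's offset and whether it is local or sits in St.CommBuf().
//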

NAMESPACE_BEGIN(Grid);

///////////////////////////////////////////////////////////////////
// Gather for when there is *no* need to SIMD split with compression
///////////////////////////////////////////////////////////////////
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
                                 int off,std::vector<std::pair<int,int> > & table);

template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (std::vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline));

template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (std::vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
{
  int num=table.size();
  thread_loop( (int i=0;i<num;i++), {
    compress.Compress(&buffer[off],table[i].first,rhs[so+table[i].second]);
  });
}
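
// A sketch of the intended split (illustration only; "dim", "plane", "mask",
// "sendbuf" and "so" are hypothetical placeholders): the table of
// (buffer index, lattice offset) pairs is computed once and reused on every
// subsequent halo exchange, keeping the hot loop free of coordinate arithmetic.
//
//   std::vector<std::pair<int,int> > table;
//   Gather_plane_table_compute(grid,dim,plane,mask,0,table);      // once
//   Gather_plane_simple_table (table,rhs,sendbuf,compress,0,so);  // per exchange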

///////////////////////////////////////////////////////////////////
// Gather for when there *is* need to SIMD split with compression
///////////////////////////////////////////////////////////////////
template<class cobj,class vobj,class compressor>
void Gather_plane_exchange_table(std::vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,
                                 std::vector<cobj *> pointers,int dimension,int plane,int cbmask,
                                 compressor &compress,int type) __attribute__((noinline));

template<class cobj,class vobj,class compressor>
void Gather_plane_exchange_table(std::vector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,
                                 std::vector<cobj *> pointers,int dimension,int plane,int cbmask,
                                 compressor &compress,int type)
{
  assert( (table.size()&0x1)==0);
  int num=table.size()/2;
  int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
  thread_loop( (int j=0;j<num;j++), {
    compress.CompressExchange(&pointers[0][0],&pointers[1][0],&rhs[0],
                              j,so+table[2*j].second,so+table[2*j+1].second,type);
  });
}

struct StencilEntry {
  uint64_t _offset;
  uint64_t _byte_offset;
  uint16_t _is_local;
  uint16_t _permute;
  uint16_t _around_the_world;
  uint16_t _pad;              // 192 bits, 24 bytes: under half a cache line
};
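
// Kernel-side lookup sketch (illustration only; "St", "in" and "chi" are
// hypothetical names, and the permute/extract details are elided): each site
// consults one StencilEntry per stencil point to find its neighbour either in
// the local lattice or in the unified comms buffer.
//
//   int ptype;
//   StencilEntry *SE = St.GetEntry(ptype,point,osite);
//   if ( SE->_is_local ) {
//     chi = in[SE->_offset];            // permute here if SE->_permute, using ptype
//   } else {
//     chi = St.CommBuf()[SE->_offset];  // halo data gathered from a neighbour rank
//   }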

////////////////////////////////////////
// The Stencil Class itself
////////////////////////////////////////
template<class vobj,class cobj>
class CartesianStencil { // Stencil runs along coordinate axes only; NO diagonal fill in.
public:

  typedef typename cobj::vector_type   vector_type;
  typedef typename cobj::scalar_type   scalar_type;
  typedef typename cobj::scalar_object scalar_object;

  ///////////////////////////////////////////
  // Helper structs
  ///////////////////////////////////////////
  struct Packet {
    void * send_buf;
    void * recv_buf;
    Integer to_rank;
    Integer from_rank;
    Integer bytes;
  };

  struct Merge {
    cobj * mpointer;
    std::vector<scalar_object *> rpointers;
    std::vector<cobj *> vpointers;
    Integer buffer_size;
    Integer type;
  };

  struct Decompress {
    cobj * kernel_p;
    cobj * mpi_p;
    Integer buffer_size;
  };

  ////////////////////////////////////////
  // Basic Grid and stencil info
  ////////////////////////////////////////
  int face_table_computed;
  std::vector<std::vector<std::pair<int,int> > > face_table;

  int _checkerboard;
  int _npoints; // Move to template param?

protected:
  GridBase * _grid;

public:
  GridBase *Grid(void) const { return _grid; }

  // npoints of these
  std::vector<int> _directions;
  std::vector<int> _distances;
  std::vector<int> _comm_buf_size;
  std::vector<int> _permute_type;

  Vector<StencilEntry> _entries;
  std::vector<Packet> Packets;
  std::vector<Merge> Mergers;
  std::vector<Merge> MergersSHM;
  std::vector<Decompress> Decompressions;
  std::vector<Decompress> DecompressionsSHM;

  ///////////////////////////////////////////////////////////
  // Unified Comms buffers for all directions
  ///////////////////////////////////////////////////////////
  // Vectors that live on the symmetric heap in case of SHMEM.
  // These are what is actually used: either SHM objects or refs to the
  // above symmetric-heap vectors, depending on the comms target.
  cobj* u_recv_buf_p;
  cobj* u_send_buf_p;
  std::vector<cobj *> u_simd_send_buf;
  std::vector<cobj *> u_simd_recv_buf;

  int u_comm_offset;
  int _unified_buffer_size;
  cobj *CommBuf(void) { return u_recv_buf_p; }

  /////////////////////////////////////////
  // Timing info; ugly; possibly temporary
  /////////////////////////////////////////
  double commtime;
  double mpi3synctime;
  double mpi3synctime_g;
  double shmmergetime;
  double gathertime;
  double gathermtime;
  double halogtime;
  double mergetime;
  double decompresstime;
  double comms_bytes;
  double splicetime;
  double nosplicetime;
  double calls;
  std::vector<double> comm_bytes_thr;
  std::vector<double> comm_time_thr;
  std::vector<double> comm_enter_thr;
  std::vector<double> comm_leave_thr;

  ////////////////////////////////////////
  // Stencil query
  ////////////////////////////////////////
  inline int SameNode(int point) {

    int dimension    = _directions[point];
    int displacement = _distances[point];
    assert( (displacement==1) || (displacement==-1));

    int pd          = _grid->_processors[dimension];
    int fd          = _grid->_fdimensions[dimension];
    int ld          = _grid->_ldimensions[dimension];
    int rd          = _grid->_rdimensions[dimension];
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] > 1;

    int recv_from_rank;
    int xmit_to_rank;

    if ( ! comm_dim ) return 1;

    int nbr_proc;
    if (displacement==1) nbr_proc = 1;
    else                 nbr_proc = pd-1;

    _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

    void *shm = (void *) _grid->ShmBufferTranslate(recv_from_rank,u_recv_buf_p);

    if ( shm==NULL ) return 0;

    return 1;
  }
  inline int GetNodeLocal(int osite,int point) {
    return _entries[point+_npoints*osite]._is_local;
  }
  inline StencilEntry * GetEntry(int &ptype,int point,int osite) {
    ptype = _permute_type[point]; return & _entries[point+_npoints*osite];
  }

  inline uint64_t GetInfo(int &ptype,int &local,int &perm,int point,int ent,uint64_t base) {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    local = _entries[ent]._is_local;
    perm  = _entries[ent]._permute;
    if (perm)  ptype = _permute_type[point];
    if (local) {
      return base  + _entries[ent]._byte_offset;
    } else {
      return cbase + _entries[ent]._byte_offset;
    }
  }

  inline uint64_t GetPFInfo(int ent,uint64_t base) {
    uint64_t cbase = (uint64_t)&u_recv_buf_p[0];
    int local = _entries[ent]._is_local;
    if (local) return base  + _entries[ent]._byte_offset;
    else       return cbase + _entries[ent]._byte_offset;
  }

  //////////////////////////////////////////
  // Comms packet queue for asynch thread
  //////////////////////////////////////////
  void CommunicateThreaded()
  {
#ifdef GRID_OMP
    // must be called in parallel region
    int mythread = omp_get_thread_num();
    int nthreads = CartesianCommunicator::nCommThreads;
#else
    int mythread = 0;
    int nthreads = 1;
#endif
    if (nthreads == -1) nthreads = 1;
    if (mythread < nthreads) {
      comm_enter_thr[mythread] = usecond();
      for (int i = mythread; i < Packets.size(); i += nthreads) {
        uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                      Packets[i].to_rank,
                                                      Packets[i].recv_buf,
                                                      Packets[i].from_rank,
                                                      Packets[i].bytes,i);
        comm_bytes_thr[mythread] += bytes;
      }
      comm_leave_thr[mythread] = usecond();
      comm_time_thr[mythread] += comm_leave_thr[mythread] - comm_enter_thr[mythread];
    }
  }

  void CollateThreads(void)
  {
    int nthreads = CartesianCommunicator::nCommThreads;
    double first = 0.0;
    double last  = 0.0;

    for(int t=0;t<nthreads;t++) {

      double t0 = comm_enter_thr[t];
      double t1 = comm_leave_thr[t];
      comms_bytes += comm_bytes_thr[t];

      comm_enter_thr[t] = 0.0;
      comm_leave_thr[t] = 0.0;
      comm_time_thr[t]  = 0.0;
      comm_bytes_thr[t] = 0;

      if ( first == 0.0 )                 first = t0; // first unset; take t0
      if ( (t0 > 0.0) && ( t0 < first ) ) first = t0; // min time seen

      if ( t1 > last ) last = t1;                     // max time seen

    }
    commtime += last-first;
  }

  void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
  {
    reqs.resize(Packets.size());
    commtime -= usecond();
    for(int i=0;i<Packets.size();i++){
      comms_bytes += _grid->StencilSendToRecvFromBegin(reqs[i],
                                                       Packets[i].send_buf,
                                                       Packets[i].to_rank,
                                                       Packets[i].recv_buf,
                                                       Packets[i].from_rank,
                                                       Packets[i].bytes,i);
    }
  }

  void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
  {
    for(int i=0;i<Packets.size();i++){
      _grid->StencilSendToRecvFromComplete(reqs[i],i);
    }
    commtime += usecond();
  }

  void Communicate(void)
  {
    thread_region
    {
      // must be called in parallel region
      int mythread   = thread_num();
      int maxthreads = thread_max();
      int nthreads   = CartesianCommunicator::nCommThreads;
      assert(nthreads <= maxthreads);
      if (nthreads == -1) nthreads = 1;
      if (mythread < nthreads) {
        for (int i = mythread; i < Packets.size(); i += nthreads) {
          double start = usecond();
          comm_bytes_thr[mythread] += _grid->StencilSendToRecvFrom(Packets[i].send_buf,
                                                                   Packets[i].to_rank,
                                                                   Packets[i].recv_buf,
                                                                   Packets[i].from_rank,
                                                                   Packets[i].bytes,i);
          comm_time_thr[mythread] += usecond() - start;
        }
      }
    }
  }

  template<class compressor> void HaloExchange(const Lattice<vobj> &source,compressor &compress)
  {
    std::vector<std::vector<CommsRequest_t> > reqs;
    Prepare();
    HaloGather(source,compress);
    // Concurrent
    //CommunicateBegin(reqs);
    //CommunicateComplete(reqs);
    // Sequential, possibly threaded
    Communicate();
    CommsMergeSHM(compress);
    CommsMerge(compress);
  }
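
  // Overlap sketch (illustration only; the interior/exterior compute phases
  // are hypothetical placeholders): the commented Concurrent path above would
  // let a driver hide communication behind halo-independent work, e.g.
  //
  //   std::vector<std::vector<CommsRequest_t> > reqs;
  //   Prepare();
  //   HaloGather(source,compress);
  //   CommunicateBegin(reqs);      // post sends/receives
  //   /* ... compute interior (no halo dependence) here ... */
  //   CommunicateComplete(reqs);   // wait for halo data
  //   CommsMergeSHM(compress);
  //   CommsMerge(compress);
  //   /* ... compute exterior (halo-dependent) sites ... */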

  template<class compressor> int HaloGatherDir(const Lattice<vobj> &source,compressor &compress,int point,int & face_idx)
  {
    int dimension    = _directions[point];
    int displacement = _distances[point];

    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];

    // Map to always positive shift modulo global full dimension.
    int shift = (displacement+fd)%fd;

    assert (source.Checkerboard()== _checkerboard);

    // the permute type
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] > 1;
    int splice_dim  = _grid->_simd_layout[dimension]>1 && (comm_dim);

    int same_node = 1;
    // Gather phase
    int sshift[2];
    if ( comm_dim ) {
      sshift[0] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Even);
      sshift[1] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Odd);
      if ( sshift[0] == sshift[1] ) {
        if (splice_dim) {
          splicetime-=usecond();
          auto tmp = GatherSimd(source,dimension,shift,0x3,compress,face_idx);
          same_node = same_node && tmp;
          splicetime+=usecond();
        } else {
          nosplicetime-=usecond();
          auto tmp = Gather(source,dimension,shift,0x3,compress,face_idx);
          same_node = same_node && tmp;
          nosplicetime+=usecond();
        }
      } else {
        if(splice_dim){
          splicetime-=usecond();
          // if checkerboard is unfavourable take two passes
          // both with block stride loop iteration
          auto tmp1 = GatherSimd(source,dimension,shift,0x1,compress,face_idx);
          auto tmp2 = GatherSimd(source,dimension,shift,0x2,compress,face_idx);
          same_node = same_node && tmp1 && tmp2;
          splicetime+=usecond();
        } else {
          nosplicetime-=usecond();
          auto tmp1 = Gather(source,dimension,shift,0x1,compress,face_idx);
          auto tmp2 = Gather(source,dimension,shift,0x2,compress,face_idx);
          same_node = same_node && tmp1 && tmp2;
          nosplicetime+=usecond();
        }
      }
    }
    return same_node;
  }

  template<class compressor>
  void HaloGather(const Lattice<vobj> &source,compressor &compress)
  {
    mpi3synctime_g -= usecond();
    _grid->StencilBarrier();// Synchronise shared memory within a single node
    mpi3synctime_g += usecond();

    // conformable(source.Grid(),_grid);
    assert(source.Grid()==_grid);
    halogtime -= usecond();

    u_comm_offset = 0;

    // Gather all comms buffers
    int face_idx = 0;
    for(int point = 0 ; point < _npoints; point++) {
      compress.Point(point);
      HaloGatherDir(source,compress,point,face_idx);
    }
    face_table_computed = 1;

    assert(u_comm_offset==_unified_buffer_size);
    halogtime += usecond();
  }

  /////////////////////////
  // Implementation
  /////////////////////////
  void Prepare(void)
  {
    Decompressions.resize(0);
    DecompressionsSHM.resize(0);
    Mergers.resize(0);
    MergersSHM.resize(0);
    Packets.resize(0);
    calls++;
  }
  void AddPacket(void *xmit,void * rcv, Integer to,Integer from,Integer bytes){
    Packet p;
    p.send_buf = xmit;
    p.recv_buf = rcv;
    p.to_rank  = to;
    p.from_rank= from;
    p.bytes    = bytes;
    Packets.push_back(p);
  }
  void AddDecompress(cobj *k_p,cobj *m_p,Integer buffer_size,std::vector<Decompress> &dv) {
    Decompress d;
    d.kernel_p    = k_p;
    d.mpi_p       = m_p;
    d.buffer_size = buffer_size;
    dv.push_back(d);
  }
  void AddMerge(cobj *merge_p,std::vector<cobj *> &rpointers,Integer buffer_size,Integer type,std::vector<Merge> &mv) {
    Merge m;
    m.type        = type;
    m.mpointer    = merge_p;
    m.vpointers   = rpointers;
    m.buffer_size = buffer_size;
    mv.push_back(m);
  }
  template<class decompressor> void CommsMerge(decompressor decompress) {
    CommsMerge(decompress,Mergers,Decompressions);
  }
  template<class decompressor> void CommsMergeSHM(decompressor decompress) {
    mpi3synctime -= usecond();
    _grid->StencilBarrier();// Synchronise shared memory within a single node
    mpi3synctime += usecond();
    shmmergetime -= usecond();
    CommsMerge(decompress,MergersSHM,DecompressionsSHM);
    shmmergetime += usecond();
  }

  template<class decompressor>
  void CommsMerge(decompressor decompress,std::vector<Merge> &mm,std::vector<Decompress> &dd) {

    for(int i=0;i<mm.size();i++){
      mergetime -= usecond();
      thread_loop( (int o=0;o<mm[i].buffer_size/2;o++), {
        decompress.Exchange(mm[i].mpointer,
                            mm[i].vpointers[0],
                            mm[i].vpointers[1],
                            mm[i].type,o);
      });
      mergetime += usecond();
    }

    for(int i=0;i<dd.size();i++){
      decompresstime -= usecond();
      thread_loop( (int o=0;o<dd[i].buffer_size;o++),{
        decompress.Decompress(dd[i].kernel_p,dd[i].mpi_p,o);
      });
      decompresstime += usecond();
    }
  }
  ////////////////////////////////////////
  // Set up routines
  ////////////////////////////////////////
  void PrecomputeByteOffsets(void){
    for(int i=0;i<_entries.size();i++){
      if( _entries[i]._is_local ) {
        _entries[i]._byte_offset = _entries[i]._offset*sizeof(vobj);
      } else {
        _entries[i]._byte_offset = _entries[i]._offset*sizeof(cobj);
      }
    }
  }
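
  // Why byte offsets: local neighbours index a Lattice<vobj> while off-node
  // neighbours index the cobj comms buffer, so the stride differs per entry.
  // Pre-multiplying once here lets a kernel form an address with a single add
  // (a sketch of the arithmetic; "base" is whichever array the entry targets):
  //
  //   uint64_t addr = base + _entries[ent]._byte_offset;  // cf. GetInfo/GetPFInfo
  //
  // rather than multiplying _offset by sizeof(vobj) or sizeof(cobj) per site.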

  CartesianStencil(GridBase *grid,
                   int npoints,
                   int checkerboard,
                   const std::vector<int> &directions,
                   const std::vector<int> &distances)
    : _permute_type(npoints),
      _comm_buf_size(npoints),
      comm_bytes_thr(npoints),
      comm_enter_thr(npoints),
      comm_leave_thr(npoints),
      comm_time_thr(npoints)
  {
    face_table_computed=0;
    _npoints    = npoints;
    _grid       = grid;
    _directions = directions;
    _distances  = distances;
    _unified_buffer_size=0;

    int osites = _grid->oSites();

    _entries.resize(_npoints* osites);
    for(int ii=0;ii<npoints;ii++){

      int i = ii; // identity map for now; reversing here would schedule the SIMD comms first
      int point = i;

      int dimension    = directions[i];
      int displacement = distances[i];
      int shift = displacement;

      int fd = _grid->_fdimensions[dimension];
      int rd = _grid->_rdimensions[dimension];
      _permute_type[point]=_grid->PermuteType(dimension);

      _checkerboard = checkerboard;

      //////////////////////////
      // the permute type
      //////////////////////////
      int simd_layout = _grid->_simd_layout[dimension];
      int comm_dim    = _grid->_processors[dimension] > 1;
      int splice_dim  = _grid->_simd_layout[dimension]>1 && (comm_dim);
      int rotate_dim  = _grid->_simd_layout[dimension]>2;

      assert ( (rotate_dim && comm_dim) == false) ; // Do not think spread out is supported

      int sshift[2];

      //////////////////////////
      // Underlying approach. For each local site build
      // up a table containing the npoint "neighbours" and whether they
      // live in lattice or a comms buffer.
      //////////////////////////
      if ( !comm_dim ) {
        sshift[0] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Even);
        sshift[1] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Odd);

        if ( sshift[0] == sshift[1] ) {
          Local(point,dimension,shift,0x3);
        } else {
          Local(point,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
          Local(point,dimension,shift,0x2);// both with block stride loop iteration
        }
      } else {
        // All permute extract done in comms phase prior to Stencil application
        // So tables are the same whether comm_dim or splice_dim
        sshift[0] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Even);
        sshift[1] = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,Odd);
        if ( sshift[0] == sshift[1] ) {
          Comms(point,dimension,shift,0x3);
        } else {
          Comms(point,dimension,shift,0x1);// if checkerboard is unfavourable take two passes
          Comms(point,dimension,shift,0x2);// both with block stride loop iteration
        }
      }
    }

    /////////////////////////////////////////////////////////////////////////////////
    // Try to allocate for receiving in a shared memory region, fall back to buffer
    /////////////////////////////////////////////////////////////////////////////////
    const int Nsimd = grid->Nsimd();

    _grid->ShmBufferFreeAll();

    u_simd_send_buf.resize(Nsimd);
    u_simd_recv_buf.resize(Nsimd);
    u_send_buf_p=(cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj));
    u_recv_buf_p=(cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj));

    for(int l=0;l<2;l++){
      u_simd_recv_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj));
      u_simd_send_buf[l] = (cobj *)_grid->ShmBufferMalloc(_unified_buffer_size*sizeof(cobj));
    }

    PrecomputeByteOffsets();
  }

  void Local(int point, int dimension,int shiftpm,int cbmask)
  {
    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];
    int ld = _grid->_ldimensions[dimension];
    int gd = _grid->_gdimensions[dimension];
    int ly = _grid->_simd_layout[dimension];

    // Map to always positive shift modulo global full dimension.
    int shift = (shiftpm+fd)%fd;

    // the permute type
    int permute_dim =_grid->PermuteDim(dimension);

    for(int x=0;x<rd;x++){

      int bo = x * _grid->_ostride[dimension];

      int cb= (cbmask==0x2)? Odd : Even;

      int sshift = _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,cb);
      int sx     = (x+sshift)%rd;

      int wraparound=0;
      if ( (shiftpm==-1) && (sx>x)  ) {
        wraparound = 1;
      }
      if ( (shiftpm== 1) && (sx<x)  ) {
        wraparound = 1;
      }

      int permute_slice=0;
      if(permute_dim){
        int wrap = sshift/rd;
        int  num = sshift%rd;
        if ( x< rd-num ) permute_slice=wrap;
        else             permute_slice = (wrap+1)%ly;
      }

      CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);

    }
  }

  void Comms(int point,int dimension,int shiftpm,int cbmask)
  {
    GridBase *grid=_grid;
    const int Nsimd = grid->Nsimd();

    int fd          = _grid->_fdimensions[dimension];
    int ld          = _grid->_ldimensions[dimension];
    int rd          = _grid->_rdimensions[dimension];
    int pd          = _grid->_processors[dimension];
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] > 1;

    assert(comm_dim==1);
    int shift = (shiftpm + fd) %fd;
    assert(shift>=0);
    assert(shift<fd);

    // done in reduced dims, so SIMD factored
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];

    _comm_buf_size[point] = buffer_size; // Size of _one_ plane. Multiple planes may be gathered and
                                         // sent to one or more remote nodes.

    int cb    = (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(_checkerboard,dimension,shift,cb);

    for(int x=0;x<rd;x++){

      int permute_type=grid->PermuteType(dimension);

      int sx = (x+sshift)%rd;

      int offnode = 0;
      if ( simd_layout > 1 ) {

        for(int i=0;i<Nsimd;i++){

          int inner_bit = (Nsimd>>(permute_type+1));
          int ic= (i&inner_bit)? 1:0;
          int my_coor  = rd*ic + x;
          int nbr_coor = my_coor+sshift;
          int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors

          if ( nbr_proc ) {
            offnode =1;
          }
        }

      } else {
        int comm_proc = ((x+sshift)/rd)%pd;
        offnode = (comm_proc!= 0);
      }

      int wraparound=0;
      if ( (shiftpm==-1) && (sx>x) && (grid->_processor_coor[dimension]==0) ) {
        wraparound = 1;
      }
      if ( (shiftpm== 1) && (sx<x) && (grid->_processor_coor[dimension]==grid->_processors[dimension]-1) ) {
        wraparound = 1;
      }
      if (!offnode) {

        int permute_slice=0;
        CopyPlane(point,dimension,x,sx,cbmask,permute_slice,wraparound);

      } else {

        int words = buffer_size;
        if (cbmask != 0x3) words=words>>1;

        int unified_buffer_offset = _unified_buffer_size;
        _unified_buffer_size    += words;

        ScatterPlane(point,dimension,x,cbmask,unified_buffer_offset,wraparound); // permute/extract/merge is done in comms phase

      }
    }
  }
  // Routine builds up integer table for each site in _offsets, _is_local, _permute
  void CopyPlane(int point, int dimension,int lplane,int rplane,int cbmask,int permute,int wrap)
  {
    int rd = _grid->_rdimensions[dimension];

    if ( !_grid->CheckerBoarded(dimension) ) {

      int o  = 0;                                 // relative offset to base within plane
      int ro = rplane*_grid->_ostride[dimension]; // base offset for start of plane
      int lo = lplane*_grid->_ostride[dimension]; // offset in buffer

      // Simple block stride gather of SIMD objects
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){
          int idx=point+(lo+o+b)*_npoints;
          _entries[idx]._offset  =ro+o+b;
          _entries[idx]._permute =permute;
          _entries[idx]._is_local=1;
          _entries[idx]._around_the_world=wrap;
        }
        o +=_grid->_slice_stride[dimension];
      }

    } else {

      int ro = rplane*_grid->_ostride[dimension]; // base offset for start of plane
      int lo = lplane*_grid->_ostride[dimension]; // base offset for start of plane
      int o  = 0;                                 // relative offset to base within plane

      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){

          int ocb=1<<_grid->CheckerBoardFromOindex(o+b);

          if ( ocb&cbmask ) {
            int idx = point+(lo+o+b)*_npoints;
            _entries[idx]._offset  =ro+o+b;
            _entries[idx]._is_local=1;
            _entries[idx]._permute =permute;
            _entries[idx]._around_the_world=wrap;
          }

        }
        o +=_grid->_slice_stride[dimension];
      }

    }
  }
  // Routine builds up integer table for each site in _offsets, _is_local, _permute
  void ScatterPlane (int point,int dimension,int plane,int cbmask,int offset, int wrap)
  {
    int rd = _grid->_rdimensions[dimension];

    if ( !_grid->CheckerBoarded(dimension) ) {

      int so = plane*_grid->_ostride[dimension]; // base offset for start of plane
      int o  = 0;                                // relative offset to base within plane
      int bo = 0;                                // offset in buffer

      // Simple block stride gather of SIMD objects
      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){
          int idx=point+(so+o+b)*_npoints;
          _entries[idx]._offset  =offset+(bo++);
          _entries[idx]._is_local=0;
          _entries[idx]._permute =0;
          _entries[idx]._around_the_world=wrap;
        }
        o +=_grid->_slice_stride[dimension];
      }

    } else {

      int so = plane*_grid->_ostride[dimension]; // base offset for start of plane
      int o  = 0;                                // relative offset to base within plane
      int bo = 0;                                // offset in buffer

      for(int n=0;n<_grid->_slice_nblock[dimension];n++){
        for(int b=0;b<_grid->_slice_block[dimension];b++){

          int ocb=1<<_grid->CheckerBoardFromOindex(o+b);// Could easily be a table lookup
          if ( ocb & cbmask ) {
            int idx = point+(so+o+b)*_npoints;
            _entries[idx]._offset  =offset+(bo++);
            _entries[idx]._is_local=0;
            _entries[idx]._permute =0;
            _entries[idx]._around_the_world=wrap;
          }
        }
        o +=_grid->_slice_stride[dimension];
      }
    }
  }

  template<class compressor>
  int Gather(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor & compress,int &face_idx)
  {
    typedef typename cobj::vector_type vector_type;
    typedef typename cobj::scalar_type scalar_type;

    assert(rhs.Grid()==_grid);
    // conformable(_grid,rhs.Grid());

    int fd          = _grid->_fdimensions[dimension];
    int rd          = _grid->_rdimensions[dimension];
    int pd          = _grid->_processors[dimension];
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] > 1;
    assert(simd_layout==1);
    assert(comm_dim==1);
    assert(shift>=0);
    assert(shift<fd);

    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];

    int cb    = (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

    int shm_receive_only = 1;
    for(int x=0;x<rd;x++){

      int sx        = (x+sshift)%rd;
      int comm_proc = ((x+sshift)/rd)%pd;

      if (comm_proc) {

        int words = buffer_size;
        if (cbmask != 0x3) words=words>>1;

        int bytes = words * compress.CommDatumSize();

        int so = sx*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
        if ( !face_table_computed ) {
          face_table.resize(face_idx+1);
          Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
        }

        int recv_from_rank;
        int xmit_to_rank;
        _grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);

        assert (xmit_to_rank   != _grid->ThisRank());
        assert (recv_from_rank != _grid->ThisRank());

        /////////////////////////////////////////////////////////
        // try the direct copy if possible
        /////////////////////////////////////////////////////////
        cobj *send_buf;
        cobj *recv_buf;
        if ( compress.DecompressionStep() ) {
          recv_buf=u_simd_recv_buf[0];
        } else {
          recv_buf=u_recv_buf_p;
        }

        send_buf = (cobj *)_grid->ShmBufferTranslate(xmit_to_rank,recv_buf);
        if ( send_buf==NULL ) {
          send_buf = u_send_buf_p;
        }

        // Find out if we get the direct copy.
        void *success = (void *) _grid->ShmBufferTranslate(recv_from_rank,u_send_buf_p);
        if (success==NULL) {
          // we found a packet that comes from MPI and contributes to this leg of stencil
          shm_receive_only = 0;
        }

        gathertime-=usecond();
        assert(send_buf!=NULL);
        Gather_plane_simple_table(face_table[face_idx],rhs,send_buf,compress,u_comm_offset,so); face_idx++;
        gathertime+=usecond();

        if ( compress.DecompressionStep() ) {

          if ( shm_receive_only ) { // Early decompress before MPI is finished is possible
            AddDecompress(&u_recv_buf_p[u_comm_offset],
                          &recv_buf[u_comm_offset],
                          words,DecompressionsSHM);
          } else { // Decompress after MPI is finished
            AddDecompress(&u_recv_buf_p[u_comm_offset],
                          &recv_buf[u_comm_offset],
                          words,Decompressions);
          }

          AddPacket((void *)&send_buf[u_comm_offset],
                    (void *)&recv_buf[u_comm_offset],
                    xmit_to_rank,
                    recv_from_rank,
                    bytes);

        } else {
          AddPacket((void *)&send_buf[u_comm_offset],
                    (void *)&u_recv_buf_p[u_comm_offset],
                    xmit_to_rank,
                    recv_from_rank,
                    bytes);
        }
        u_comm_offset+=words;
      }
    }
    return shm_receive_only;
  }

  template<class compressor>
  int GatherSimd(const Lattice<vobj> &rhs,int dimension,int shift,int cbmask,compressor &compress,int & face_idx)
  {
    const int Nsimd = _grid->Nsimd();

    const int maxl =2;// max layout in a direction
    int fd = _grid->_fdimensions[dimension];
    int rd = _grid->_rdimensions[dimension];
    int ld = _grid->_ldimensions[dimension];
    int pd = _grid->_processors[dimension];
    int simd_layout = _grid->_simd_layout[dimension];
    int comm_dim    = _grid->_processors[dimension] > 1;
    assert(comm_dim==1);
    // This will not work with a rotate dim
    assert(simd_layout==maxl);
    assert(shift>=0);
    assert(shift<fd);

    int permute_type=_grid->PermuteType(dimension);

    ///////////////////////////////////////////////
    // Simd direction uses an extract/merge pair
    ///////////////////////////////////////////////
    int buffer_size = _grid->_slice_nblock[dimension]*_grid->_slice_block[dimension];

    assert(cbmask==0x3); // Fixme think there is a latent bug if not true

    int reduced_buffer_size = buffer_size;
    if (cbmask != 0x3) reduced_buffer_size=buffer_size>>1;

    int datum_bytes = compress.CommDatumSize();
    int bytes = (reduced_buffer_size*datum_bytes)/simd_layout;
    assert(bytes*simd_layout == reduced_buffer_size*datum_bytes);

    std::vector<cobj *> rpointers(maxl);
    std::vector<cobj *> spointers(maxl);

    ///////////////////////////////////////////
    // Work out what to send where
    ///////////////////////////////////////////
    int cb    = (cbmask==0x2)? Odd : Even;
    int sshift= _grid->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);

    // loop over outer coord planes orthog to dim
    int shm_receive_only = 1;
    for(int x=0;x<rd;x++){

      int any_offnode = ( ((x+sshift)%fd) >= rd );

      if ( any_offnode ) {

        for(int i=0;i<maxl;i++){
          spointers[i] = (cobj *) &u_simd_send_buf[i][u_comm_offset];
        }

        int sx = (x+sshift)%rd;

        if ( !face_table_computed ) {
          face_table.resize(face_idx+1);
          Gather_plane_table_compute ((GridBase *)_grid,dimension,sx,cbmask,u_comm_offset,face_table[face_idx]);
        }
        gathermtime-=usecond();
        Gather_plane_exchange_table(face_table[face_idx],rhs,spointers,dimension,sx,cbmask,compress,permute_type); face_idx++;
        gathermtime+=usecond();
        //spointers[0] -- low
        //spointers[1] -- high

        for(int i=0;i<maxl;i++){

          int my_coor  = rd*i + x;       // self explanatory
          int nbr_coor = my_coor+sshift; // self explanatory

          int nbr_proc = ((nbr_coor)/ld) % pd;// relative shift in processors
          int nbr_lcoor= (nbr_coor%ld);       // local plane coor on neighbour node
          int nbr_ic   = (nbr_lcoor)/rd;      // inner coord of peer simd lane "i"
          int nbr_ox   = (nbr_lcoor%rd);      // outer coord of peer "x"

          int nbr_plane = nbr_ic;
          assert (sx == nbr_ox);

          auto rp = &u_simd_recv_buf[i        ][u_comm_offset];
          auto sp = &u_simd_send_buf[nbr_plane][u_comm_offset];

          if(nbr_proc){

            int recv_from_rank;
            int xmit_to_rank;

            _grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);

            // shm == receive pointer         if offnode
            // shm == Translate[send pointer] if on node -- my view of his send pointer
            cobj *shm = (cobj *) _grid->ShmBufferTranslate(recv_from_rank,sp);
            if (shm==NULL) {
              shm = rp;
              // we found a packet that comes from MPI and contributes to this shift.
              // same_node is only used in the WilsonStencil, and gets set for this point in the stencil.
              // Kernel will add the exterior_terms except if same_node.
              shm_receive_only = 0;
              // leg of stencil
            }
            // if Direct, StencilSendToRecvFrom will suppress copy to a peer on node
            // assuming above pointer flip
            rpointers[i] = shm;

            AddPacket((void *)sp,(void *)rp,xmit_to_rank,recv_from_rank,bytes);

          } else {

            rpointers[i] = sp;

          }
        }

        if ( shm_receive_only ) {
          AddMerge(&u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,MergersSHM);
        } else {
          AddMerge(&u_recv_buf_p[u_comm_offset],rpointers,reduced_buffer_size,permute_type,Mergers);
        }

        u_comm_offset +=buffer_size;
      }
    }
    return shm_receive_only;
  }

  void ZeroCounters(void) {
    gathertime     = 0.;
    commtime       = 0.;
    mpi3synctime   = 0.;
    mpi3synctime_g = 0.;
    shmmergetime   = 0.;
    for(int i=0;i<_npoints;i++){
      comm_time_thr[i] =0;
      comm_bytes_thr[i]=0;
      comm_enter_thr[i]=0;
      comm_leave_thr[i]=0;
    }
    halogtime      = 0.;
    mergetime      = 0.;
    decompresstime = 0.;
    gathermtime    = 0.;
    splicetime     = 0.;
    nosplicetime   = 0.;
    comms_bytes    = 0.;
    calls          = 0.;
  }

  void Report(void) {
#define AVERAGE(A) _grid->GlobalSum(A);A/=NP;
#define PRINTIT(A) AVERAGE(A); std::cout << GridLogMessage << " Stencil " << #A << " "<< A/calls<<std::endl;
    RealD NP = _grid->_Nprocessors;
    RealD NN = _grid->NodeCount();
    double t = 0;
    // if comm_time_thr is set they were all done in parallel so take the max
    // but add up the bytes
    int threaded = 0 ;
    for (int i = 0; i < (int)comm_time_thr.size(); ++i) {
      if ( comm_time_thr[i]>0.0 ) {
        threaded = 1;
        comms_bytes += comm_bytes_thr[i];
        if (t < comm_time_thr[i]) t = comm_time_thr[i];
      }
    }
    if (threaded) commtime += t;

    _grid->GlobalSum(commtime);    commtime/=NP;
    if ( calls > 0. ) {
      std::cout << GridLogMessage << " Stencil calls "<<calls<<std::endl;
      PRINTIT(halogtime);
      PRINTIT(gathertime);
      PRINTIT(gathermtime);
      PRINTIT(mergetime);
      PRINTIT(decompresstime);
      if(comms_bytes>1.0){
        PRINTIT(comms_bytes);
        PRINTIT(commtime);
        std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000. << " GB/s per rank"<<std::endl;
        std::cout << GridLogMessage << " Stencil " << comms_bytes/commtime/1000.*NP/NN << " GB/s per node"<<std::endl;
      }
      PRINTIT(mpi3synctime);
      PRINTIT(mpi3synctime_g);
      PRINTIT(shmmergetime);
      PRINTIT(splicetime);
      PRINTIT(nosplicetime);
    }
#undef PRINTIT
#undef AVERAGE
  }

};

NAMESPACE_END(Grid);

#endif