commit d6b2727f86 (parent 74a4f43946)

    Pipeline mode getting better -- 2 nodes @ 10TF/s per node on Aurora
@@ -136,7 +136,7 @@ public:
     for(int d=0;d<_ndimension;d++){
       column.resize(_processors[d]);
       column[0] = accum;
-      std::vector<CommsRequest_t> list;
+      std::vector<MpiCommsRequest_t> list;
       for(int p=1;p<_processors[d];p++){
         ShiftedRanks(d,p,source,dest);
         SendToRecvFromBegin(list,
@@ -166,8 +166,8 @@ public:
   ////////////////////////////////////////////////////////////
   // Face exchange, buffer swap in translational invariant way
   ////////////////////////////////////////////////////////////
-  void CommsComplete(std::vector<CommsRequest_t> &list);
-  void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+  void CommsComplete(std::vector<MpiCommsRequest_t> &list);
+  void SendToRecvFromBegin(std::vector<MpiCommsRequest_t> &list,
                            void *xmit,
                            int dest,
                            void *recv,
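Under GRID_COMMS_MPI3 (see the typedef hunk further down) MpiCommsRequest_t is a plain MPI_Request, so these two entry points now traffic only in raw MPI requests. A standalone sketch of the begin/complete pattern the signatures imply, written against bare MPI rather than Grid's classes; the _sketch names, tag choice and MPI_CHAR payloads are illustrative only:

#include <mpi.h>
#include <cassert>
#include <vector>

typedef MPI_Request MpiCommsRequest_t;   // as in the GRID_COMMS_MPI3 branch

// Post one non-blocking send/receive pair and append the raw requests.
void SendToRecvFromBegin_sketch(std::vector<MpiCommsRequest_t> &list,
                                void *xmit, int dest,
                                void *recv, int from,
                                int bytes, MPI_Comm comm)
{
  MpiCommsRequest_t xrq, rrq;
  int tag = 0;                                                     // illustrative tag
  int ierr;
  ierr = MPI_Irecv(recv, bytes, MPI_CHAR, from, tag, comm, &rrq);  assert(ierr==0);
  ierr = MPI_Isend(xmit, bytes, MPI_CHAR, dest, tag, comm, &xrq);  assert(ierr==0);
  list.push_back(rrq);
  list.push_back(xrq);
}

// Complete everything begun so far with a single MPI_Waitall.
void CommsComplete_sketch(std::vector<MpiCommsRequest_t> &list)
{
  if (list.empty()) return;
  std::vector<MPI_Status> status(list.size());
  int ierr = MPI_Waitall((int)list.size(), &list[0], &status[0]);
  assert(ierr==0);
  list.resize(0);
}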
@@ -317,7 +317,7 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
   assert(ierr==0);
 }

-void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
+void CartesianCommunicator::SendToRecvFromBegin(std::vector<MpiCommsRequest_t> &list,
                                                 void *xmit,
                                                 int dest,
                                                 void *recv,
@@ -342,7 +342,7 @@ void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
   assert(ierr==0);
   list.push_back(xrq);
 }
-void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list)
+void CartesianCommunicator::CommsComplete(std::vector<MpiCommsRequest_t> &list)
 {
   int nreq=list.size();

@@ -361,7 +361,7 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
                                            int from,
                                            int bytes)
 {
-  std::vector<CommsRequest_t> reqs(0);
+  std::vector<MpiCommsRequest_t> reqs(0);
   unsigned long  xcrc = crc32(0L, Z_NULL, 0);
   unsigned long  rcrc = crc32(0L, Z_NULL, 0);

@@ -404,6 +404,29 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
                                                           int from,int dor,
                                                           int xbytes,int rbytes,int dir)
 {
+/*
+ * Bring sequence from Stencil.h down to lower level.
+ * Assume using XeLink is ok
+#warning "Using COPY VIA HOST BUFFERS IN STENCIL"
+    // Introduce a host buffer with a cheap slab allocator and zero cost wipe all
+    Packets[i].host_send_buf = _grid->HostBufferMalloc(Packets[i].xbytes);
+    Packets[i].host_recv_buf = _grid->HostBufferMalloc(Packets[i].rbytes);
+    if ( Packets[i].do_send ) {
+      acceleratorCopyFromDevice(Packets[i].send_buf, Packets[i].host_send_buf,Packets[i].xbytes);
+    }
+    _grid->StencilSendToRecvFromBegin(MpiReqs,
+                                      Packets[i].host_send_buf,
+                                      Packets[i].to_rank,Packets[i].do_send,
+                                      Packets[i].host_recv_buf,
+                                      Packets[i].from_rank,Packets[i].do_recv,
+                                      Packets[i].xbytes,Packets[i].rbytes,i);
+  }
+  for(int i=0;i<Packets.size();i++){
+    if ( Packets[i].do_recv ) {
+    }
+  }
+  _grid->HostBufferFreeAll();
+*/
   int ncomm  =communicator_halo.size();
   int commdir=dir%ncomm;

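The commented-out fragment above mentions "a host buffer with a cheap slab allocator and zero cost wipe all" behind HostBufferMalloc/HostBufferFreeAll. A minimal sketch of that allocation scheme, assuming a single preallocated host region and a bump pointer; the class name and 64-byte alignment are illustrative, not Grid's implementation:

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Bump ("slab") allocator over one preallocated host region:
// Malloc just advances an offset, FreeAll resets it, so wiping is O(1).
class HostSlab_sketch {
  uint8_t *base;      // start of the preallocated host (ideally pinned) buffer
  size_t   capacity;  // total bytes available
  size_t   offset;    // current bump position
public:
  HostSlab_sketch(size_t bytes)
    : base((uint8_t *)std::malloc(bytes)), capacity(bytes), offset(0) { assert(base); }
  ~HostSlab_sketch() { std::free(base); }

  void *HostBufferMalloc(size_t bytes) {
    size_t aligned = (bytes + 63) & ~size_t(63);   // keep 64-byte alignment
    assert(offset + aligned <= capacity);
    void *p = base + offset;
    offset += aligned;
    return p;
  }
  void HostBufferFreeAll(void) { offset = 0; }     // "zero cost wipe all"
};

Each halo exchange can then HostBufferMalloc once per packet and reset the whole region with a single HostBufferFreeAll after completion, which is how the communicator code in this commit uses it.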
@@ -421,28 +444,60 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
   double off_node_bytes=0.0;
   int tag;

+  void * host_recv = NULL;
+  void * host_xmit = NULL;
+
   if ( dor ) {
     if ( (gfrom ==MPI_UNDEFINED) || Stencil_force_mpi ) {
       tag= dir+from*32;
+#ifdef ACCELERATOR_AWARE_MPI
       ierr=MPI_Irecv(recv, rbytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
       assert(ierr==0);
       list.push_back(rrq);
+#else
+      host_recv = this->HostBufferMalloc(rbytes);
+      ierr=MPI_Irecv(host_recv, rbytes, MPI_CHAR,from,tag,communicator_halo[commdir],&rrq);
+      assert(ierr==0);
+      CommsRequest_t srq;
+      srq.PacketType = InterNodeRecv;
+      srq.bytes = rbytes;
+      srq.req = rrq;
+      srq.host_buf = host_recv;
+      srq.device_buf = recv;
+      list.push_back(srq);
+#endif
       off_node_bytes+=rbytes;
-    }
+    } else{
 #ifdef NVLINK_GET
       void *shm = (void *) this->ShmBufferTranslate(from,xmit);
       assert(shm!=NULL);
       acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes);
 #endif
+    }
   }

   if (dox) {
     //  rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
     if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
       tag= dir+_processor*32;
+#ifdef ACCELERATOR_AWARE_MPI
       ierr =MPI_Isend(xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
       assert(ierr==0);
       list.push_back(xrq);
+#else
+      std::cout << " send via host bounce "<<std::endl;
+      host_xmit = this->HostBufferMalloc(xbytes);
+      acceleratorCopyFromDevice(xmit, host_xmit,xbytes);
+      ierr =MPI_Isend(host_xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
+      assert(ierr==0);
+      CommsRequest_t srq;
+      srq.PacketType = InterNodeXmit;
+      srq.bytes = xbytes;
+      srq.req = xrq;
+      srq.host_buf = host_xmit;
+      srq.device_buf = xmit;
+      list.push_back(srq);
+#endif
       off_node_bytes+=xbytes;
     } else {
 #ifndef NVLINK_GET
@@ -463,11 +518,25 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsRequest_t> &list,
   acceleratorCopySynchronise();

   if (nreq==0) return;
+#ifdef ACCELERATOR_AWARE_MPI
   std::vector<MPI_Status> status(nreq);
   int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
   assert(ierr==0);
   list.resize(0);
+#else
+  // Wait individually and immediately copy receives to device
+  // Promotion to Asynch copy and single wait is easy
+  MPI_Status status;
+  for(int r=0;r<nreq;r++){
+    int ierr = MPI_Wait(&list[r].req,&status);
+    assert(ierr==0);
+    if ( list[r].PacketType==InterNodeRecv ) {
+      acceleratorCopyToDevice(list[r].host_buf,list[r].device_buf,list[r].bytes);
+    }
+  }
+  list.resize(0);
+  this->HostBufferFreeAll();
+#endif
 }
 void CartesianCommunicator::StencilBarrier(void)
 {
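The non-accelerator-aware completion path above waits on each request and copies InterNodeRecv payloads to the device synchronously; its comment observes that promotion to asynchronous copies with a single wait is easy. A self-contained sketch of what that promotion could look like; the struct mirrors the CommsRequest_t introduced in the typedef hunk below, and enqueueCopyToDeviceAsynch_sketch/copySynchronise_sketch stand in for an asynchronous accelerator copy and its fence:

#include <mpi.h>
#include <cassert>
#include <cstring>
#include <vector>

enum PacketType_t { InterNodeXmit, InterNodeRecv, IntraNodeXmit, IntraNodeRecv };
struct CommsRequest_t_sketch {
  PacketType_t PacketType;
  void *host_buf;
  void *device_buf;
  unsigned long bytes;
  MPI_Request req;
};

// Stand-in for an asynchronous host-to-device copy; a real backend would
// enqueue this on a device queue/stream instead of copying immediately.
static void enqueueCopyToDeviceAsynch_sketch(void *host, void *device, unsigned long bytes) {
  std::memcpy(device, host, bytes);
}
// Stand-in for the fence (acceleratorCopySynchronise would block here).
static void copySynchronise_sketch(void) {}

void StencilSendToRecvFromComplete_sketch(std::vector<CommsRequest_t_sketch> &list)
{
  MPI_Status status;
  for (size_t r = 0; r < list.size(); r++) {
    int ierr = MPI_Wait(&list[r].req, &status);   // still one wait per request
    assert(ierr == 0);
    if (list[r].PacketType == InterNodeRecv) {    // only receives need staging back
      enqueueCopyToDeviceAsynch_sketch(list[r].host_buf, list[r].device_buf, list[r].bytes);
    }
  }
  copySynchronise_sketch();                       // single wait for all queued copies
  list.resize(0);
}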
@@ -46,8 +46,22 @@ NAMESPACE_BEGIN(Grid);

 #if defined (GRID_COMMS_MPI3)
 typedef MPI_Comm Grid_MPI_Comm;
+typedef MPI_Request MpiCommsRequest_t;
+#ifdef ACCELERATOR_AWARE_MPI
 typedef MPI_Request CommsRequest_t;
+#else
+enum PacketType_t { InterNodeXmit, InterNodeRecv, IntraNodeXmit, IntraNodeRecv };
+typedef struct {
+  PacketType_t PacketType;
+  void *host_buf;
+  void *device_buf;
+  unsigned long bytes;
+  MpiCommsRequest_t req;
+} CommsRequest_t;
+#endif
+
 #else
+typedef int MpiCommsRequest_t;
 typedef int CommsRequest_t;
 typedef int Grid_MPI_Comm;
 #endif
@@ -543,7 +543,7 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
   ///////////////////////////////////////////////////////////////////////////////////////////////////////////
 #ifndef ACCELERATOR_AWARE_MPI
   printf("Host buffer allocate for GPU non-aware MPI\n");
-  HostCommBuf= malloc(bytes);
+  HostCommBuf= malloc(bytes); /// CHANGE THIS TO malloc_host
 #ifdef NUMA_PLACE_HOSTBUF
   int numa;
   char *numa_name=(char *)getenv("MPI_BUF_NUMA");
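The "/// CHANGE THIS TO malloc_host" note suggests replacing the plain malloc with a pinned, device-registered host allocation. A sketch of how that could look with SYCL, assuming the SYCL backend used on Aurora; the queue handling and buffer size are illustrative, not Grid's SharedMemory code:

#include <sycl/sycl.hpp>
#include <cstdio>

int main(void) {
  sycl::queue q;                                       // default device selection (illustrative)
  const size_t bytes = 256UL * 1024UL * 1024UL;        // e.g. 256 MiB staging region

  // Pinned, device-visible host allocation instead of plain malloc():
  void *HostCommBuf = sycl::malloc_host(bytes, q);
  if (HostCommBuf == nullptr) { std::printf("malloc_host failed\n"); return 1; }
  std::printf("Host buffer allocate for GPU non-aware MPI (pinned)\n");

  // ... use HostCommBuf as the MPI staging area ...

  sycl::free(HostCommBuf, q);
  return 0;
}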
@@ -467,8 +467,8 @@ public:
     send_buf.resize(buffer_size*2*depth);
     recv_buf.resize(buffer_size*2*depth);

-    std::vector<CommsRequest_t> fwd_req;
-    std::vector<CommsRequest_t> bwd_req;
+    std::vector<MpiCommsRequest_t> fwd_req;
+    std::vector<MpiCommsRequest_t> bwd_req;

     int words = buffer_size;
     int bytes = words * sizeof(vobj);
@@ -368,7 +368,6 @@ public:
     //    accelerator_barrier(); // All kernels should ALREADY be complete
     //    _grid->StencilBarrier(); // Everyone is here, so noone running slow and still using receive buffer
     //    But the HaloGather had a barrier too.
-#ifdef ACCELERATOR_AWARE_MPI
     for(int i=0;i<Packets.size();i++){
       _grid->StencilSendToRecvFromBegin(MpiReqs,
                                         Packets[i].send_buf,
@@ -377,23 +376,6 @@ public:
                                         Packets[i].from_rank,Packets[i].do_recv,
                                         Packets[i].xbytes,Packets[i].rbytes,i);
     }
-#else
-#warning "Using COPY VIA HOST BUFFERS IN STENCIL"
-    for(int i=0;i<Packets.size();i++){
-      // Introduce a host buffer with a cheap slab allocator and zero cost wipe all
-      Packets[i].host_send_buf = _grid->HostBufferMalloc(Packets[i].xbytes);
-      Packets[i].host_recv_buf = _grid->HostBufferMalloc(Packets[i].rbytes);
-      if ( Packets[i].do_send ) {
-        acceleratorCopyFromDevice(Packets[i].send_buf, Packets[i].host_send_buf,Packets[i].xbytes);
-      }
-      _grid->StencilSendToRecvFromBegin(MpiReqs,
-                                        Packets[i].host_send_buf,
-                                        Packets[i].to_rank,Packets[i].do_send,
-                                        Packets[i].host_recv_buf,
-                                        Packets[i].from_rank,Packets[i].do_recv,
-                                        Packets[i].xbytes,Packets[i].rbytes,i);
-    }
-#endif
     // Get comms started then run checksums
     // Having this PRIOR to the dslash seems to make Sunspot work... (!)
     for(int i=0;i<Packets.size();i++){
@@ -411,15 +393,6 @@ public:
     else DslashLogFull();
     //    acceleratorCopySynchronise();// is in the StencilSendToRecvFromComplete
     //    accelerator_barrier();
-#ifndef ACCELERATOR_AWARE_MPI
-#warning "Using COPY VIA HOST BUFFERS IN STENCIL"
-    for(int i=0;i<Packets.size();i++){
-      if ( Packets[i].do_recv ) {
-        acceleratorCopyToDevice(Packets[i].host_recv_buf, Packets[i].recv_buf,Packets[i].rbytes);
-      }
-    }
-    _grid->HostBufferFreeAll();
-#endif // run any checksums
     _grid->StencilBarrier();
     for(int i=0;i<Packets.size();i++){
       if ( Packets[i].do_recv )
@@ -29,7 +29,7 @@ export MPICH_OFI_NIC_POLICY=GPU

 CMD="mpiexec -np 12 -ppn 12 -envall \
              ./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.1.2.3 --grid 32.32.64.96 \
-             --shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 8 "
+             --shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "

 echo $CMD
 $CMD
@@ -1,58 +1,36 @@
 #!/bin/bash

-#PBS -q EarlyAppAccess
+##PBS -q EarlyAppAccess
+#PBS -q debug
 #PBS -l select=2
 #PBS -l walltime=00:20:00
 #PBS -A LatticeQCD_aesp_CNDA

-#export OMP_PROC_BIND=spread
-#unset OMP_PLACES

 cd $PBS_O_WORKDIR

 source ../sourceme.sh
-#module load pti-gpu


 cp $PBS_NODEFILE nodefile

 export OMP_NUM_THREADS=4
-export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+export MPICH_OFI_NIC_POLICY=GPU

+#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
 #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
 #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
 #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
-export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
-export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
-export MPICH_OFI_NIC_POLICY=GPU

-# 12 ppn, 2 nodes, 24 ranks
-#
-CMD="mpiexec -np 24 -ppn 12 -envall \
-             ./gpu_tile.sh \
-             ./Benchmark_comms_host_device --mpi 2.2.2.3 --grid 24.32.32.24 \
-             --shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
-#$CMD | tee 2node.comms.hbm


 CMD="mpiexec -np 24 -ppn 12 -envall \
-             ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.32.64.48 \
-             --shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap --debug-signals"
+             ./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.64.64.96 \
+             --shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "

-#for f in 1 2 3 4 5 6 7 8
-for f in 1
-do
 echo $CMD
-$CMD | tee 2node.32.32.64.48.dwf.hbm.$f
+$CMD
-done

-CMD="mpiexec -np 24 -ppn 12 -envall \
-             ./gpu_tile.sh \
-             ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 64.64.64.96 \
-             --shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
-#$CMD | tee 2node.64.64.64.96.dwf.hbm

@@ -28,8 +28,8 @@ echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK"

 if [ $PALS_RANKID = "0" ]
 then
-#  numactl -m $NUMAM -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
+  numactl -m $NUMAM -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
-  numactl -m $NUMAM -N $NUMAP "$@"
+#  numactl -m $NUMAM -N $NUMAP "$@"
 else
   numactl -m $NUMAM -N $NUMAP "$@"
 fi