Mirror of https://github.com/paboyle/Grid.git

Pipeline mode getting better -- 2 nodes @ 10TF/s per node on Aurora

commit d6b2727f86 (parent 74a4f43946)
Date: 2025-01-29 09:22:21 +00:00
9 changed files with 112 additions and 78 deletions


@@ -29,7 +29,7 @@ export MPICH_OFI_NIC_POLICY=GPU
 CMD="mpiexec -np 12 -ppn 12 -envall \
 	./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.1.2.3 --grid 32.32.64.96 \
-	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 8 "
+	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "
 echo $CMD
 $CMD
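The gpu_tile.sh wrapper invoked above is not shown in full in this excerpt, but the echo line in the third diff below indicates it pins each local rank to one GPU tile via ZE_AFFINITY_MASK. A minimal sketch of such a wrapper, assuming Aurora's 6 PVC GPUs with 2 tiles each (12 ranks per node, matching -ppn 12); everything except PALS_RANKID, PALS_LOCAL_RANKID and ZE_AFFINITY_MASK is illustrative, not the repo's actual script:

#!/bin/bash
# Hypothetical gpu_tile.sh-style wrapper: map local rank -> GPU tile.
NTILE=2                                 # assumption: 2 tiles per PVC GPU
GPU=$(( PALS_LOCAL_RANKID / NTILE ))    # GPU index for this local rank
TILE=$(( PALS_LOCAL_RANKID % NTILE ))   # tile index on that GPU
export ZE_AFFINITY_MASK=$GPU.$TILE      # Level Zero exposes only this tile
echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK"
exec "$@"                               # replace the shell with the benchmark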


@@ -1,58 +1,36 @@
 #!/bin/bash
-#PBS -q EarlyAppAccess
+##PBS -q EarlyAppAccess
+#PBS -q debug
 #PBS -l select=2
 #PBS -l walltime=00:20:00
 #PBS -A LatticeQCD_aesp_CNDA
 #export OMP_PROC_BIND=spread
 #unset OMP_PLACES
 cd $PBS_O_WORKDIR
 source ../sourceme.sh
+#module load pti-gpu
 cp $PBS_NODEFILE nodefile
 export OMP_NUM_THREADS=4
 export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
 export MPICH_OFI_NIC_POLICY=GPU
-#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
-#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
-#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
-#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
 export MPICH_OFI_NIC_POLICY=GPU
 # 12 ppn, 2 nodes, 24 ranks
 #
-CMD="mpiexec -np 24 -ppn 12 -envall \
-	./gpu_tile.sh \
-	./Benchmark_comms_host_device --mpi 2.2.2.3 --grid 24.32.32.24 \
-	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
-#$CMD | tee 2node.comms.hbm
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
-#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
-#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
 CMD="mpiexec -np 24 -ppn 12 -envall \
-	./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.32.64.48 \
-	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap --debug-signals"
+	./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.64.64.96 \
+	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "
 #for f in 1 2 3 4 5 6 7 8
 for f in 1
 do
 echo $CMD
 $CMD | tee 2node.32.32.64.48.dwf.hbm.$f
 done
 CMD="mpiexec -np 24 -ppn 12 -envall \
 	./gpu_tile.sh \
 	./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 64.64.64.96 \
 	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
 #$CMD | tee 2node.64.64.64.96.dwf.hbm
 $CMD
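The MPIR_CVAR_CH4_OFI_GPU_PIPELINE_* variables promoted from comments to live exports above steer MPICH's chunked staging of large GPU messages through host buffers over OFI. A hedged annotation of the values chosen here (semantics as described in MPICH's CVAR documentation; defaults vary by build):

# Annotated copy of the pipeline settings enabled by this commit.
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1                  # switch the pipelined GPU path on
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072          # pipeline messages of 128 KiB and up
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576         # stage through 1 MiB chunks
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16  # staging buffers allocated per chunk
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16        # upper bound on staging buffers
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0         # copy-engine selection, device-to-host
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0         # copy-engine selection, host-to-device
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1               # use Level Zero immediate command lists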


@@ -28,8 +28,8 @@ echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_A
 if [ $PALS_RANKID = "0" ]
 then
-# numactl -m $NUMAM -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
-  numactl -m $NUMAM -N $NUMAP "$@"
+  numactl -m $NUMAM -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
+# numactl -m $NUMAM -N $NUMAP "$@"
 else
   numactl -m $NUMAM -N $NUMAP "$@"
 fi
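This last change turns tracing on: rank 0 now runs under unitrace (Intel's PTI-GPU tracer, cf. the #module load pti-gpu line in the job script above) while the remaining ranks run untouched, so Chrome-trace logs of one rank's kernels, MPI and SYCL calls can be collected without perturbing the whole job. The pattern in isolation, as a hedged sketch (NUMAM and NUMAP are assumed to be set earlier in the wrapper as the NUMA memory/CPU domains for this rank):

#!/bin/bash
# Profile-one-rank pattern: only rank 0 pays the tracing overhead.
if [ "$PALS_RANKID" = "0" ]; then
    numactl -m $NUMAM -N $NUMAP \
        unitrace --chrome-kernel-logging --chrome-mpi-logging \
                 --chrome-sycl-logging --demangle "$@"
else
    numactl -m $NUMAM -N $NUMAP "$@"   # all other ranks: NUMA binding only
fi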