#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=2
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#export OMP_NUM_THREADS=3
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

#echo Jobid: $PBS_JOBID
#echo Running on host `hostname`
#echo Running on nodes `cat $PBS_NODEFILE`
#echo NODES
#cat $PBS_NODEFILE

NNODES=`wc -l < $PBS_NODEFILE`
NRANKS=12                  # Number of MPI ranks per node
NDEPTH=3                   # Number of hardware threads per rank, spacing between MPI ranks on a node
NTHREADS=$OMP_NUM_THREADS  # Number of OMP threads per rank, given to OMP_NUM_THREADS

NTOTRANKS=$(( NNODES * NRANKS ))

CMD="mpiexec -np 2 -ppn 1 -d ${NDEPTH} -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 1.1.1.2 --grid 32.24.32.192 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
#$CMD | tee 1-to-1.comms.hmem0
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 1-to-1.comms.hmem1

CMD="mpiexec -np 4 -ppn 2 -d ${NDEPTH} --cpu-bind=depth -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 2.2.1.1 --grid 32.24.32.96 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 2-to-2.comms.hmem1
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
$CMD | tee 2-to-2.comms.hmem0

CMD="mpiexec -np 6 -ppn 3 -d ${NDEPTH} --cpu-bind=depth -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 3.2.1.1 --grid 32.24.32.96 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 3-to-3.comms.hmem1
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
#$CMD | tee 3-to-3.comms.hmem0

CMD="mpiexec -np 8 -ppn 4 -d ${NDEPTH} --cpu-bind=depth -envall \
	./gpu_tile_compact4a.sh \
	./Benchmark_comms_host_device --mpi 2.2.2.1 --grid 32.24.32.96 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 4-to-4.comms.hmem1.nic-affinity
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
$CMD | tee 4-to-4.comms.hmem0

#mpiexec -np 1 --ppn 1 -d 1 numactl -H | tee numa.log

CMD="mpiexec -np 12 -ppn 6 -d ${NDEPTH} --cpu-bind=depth -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 3.2.2.1 --grid 32.24.32.96 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 6-to-6.comms.hmem1
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
$CMD | tee 6-to-6.comms.hmem0

CMD="mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} --cpu-bind=depth -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 3.2.2.2 --grid 32.24.32.192 \
	--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
#$CMD | tee 12-to-12.comms.hmem1
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=0
$CMD | tee 12-to-12.comms.hmem0