Mirror of https://github.com/paboyle/Grid.git

Commit: Merge branch 'develop' into feature/scidac-wp1
@@ -1,16 +1,16 @@
 TOOLS=$HOME/tools
 ../../configure \
 	--enable-simd=GPU \
 	--enable-gen-simd-width=64 \
 	--enable-comms=mpi-auto \
 	--enable-accelerator-cshift \
 	--disable-gparity \
 	--disable-fermion-reps \
 	--enable-shm=nvlink \
 	--enable-accelerator=sycl \
 	--enable-accelerator-aware-mpi=no \
 	--enable-unified=no \
 	MPICXX=mpicxx \
 	CXX=icpx \
-	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/ -L${MKLROOT}/lib -qmkl=parallel " \
-	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include -qmkl=parallel"
+	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -lsycl" \
+	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -qmkl=parallel"
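The net change above links MKL through -qmkl=parallel and an explicit -lsycl instead of pointing at a private $TOOLS tree. A quick preflight for what the new flags assume (an illustrative check, not part of the commit):

    # hypothetical sanity check before running the configure line above
    command -v icpx mpicxx || echo "load the oneAPI and MPICH modules first"
    test -n "$MKLROOT" || echo "MKLROOT unset: -qmkl=parallel and -L\${MKLROOT}/lib need it"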

systems/Aurora/sourceme-sunspot-deterministic.sh (new file, +2)
@@ -0,0 +1,2 @@
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
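As a usage sketch (illustrative, not part of the commit): sourcing this file pins the deterministic toolchain before building or running, and MPICH's standard mpichversion utility can confirm which build is active, assuming the loaded module provides it.

    source systems/Aurora/sourceme-sunspot-deterministic.sh
    mpichversion | head -n 1    # should report the icc-all-deterministic build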

systems/Aurora/tests/repro128.pbs (new file, +41)
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00
+
+#PBS -q EarlyAppAccess
+#PBS -l select=128
+#PBS -l walltime=02:00:00
+#PBS -A LatticeQCD_aesp_CNDA
+
+#export OMP_PROC_BIND=spread
+#unset OMP_PLACES
+
+cd $PBS_O_WORKDIR
+
+source ../sourceme.sh
+
+cat $PBS_NODEFILE
+
+export OMP_NUM_THREADS=3
+export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+
+#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
+#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
+#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
+
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+export MPICH_OFI_NIC_POLICY=GPU
+
+# 12 ppn, 16 nodes, 192 ranks
+# 12 ppn, 128 nodes, 1536 ranks
+CMD="mpiexec -np 1536 -ppn 12 -envall \
+	./gpu_tile_compact.sh \
+	./Test_dwf_mixedcg_prec --mpi 4.4.4.24 --grid 128.128.128.384 \
+	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 7000 --comms-overlap "
+$CMD
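For orientation (a worked check, not part of the commit): the processor grid and lattice in this job are consistent with the resource request. In bash arithmetic:

    # 4.4.4.24 ranks over a 128.128.128.384 lattice
    mpi=(4 4 4 24); grid=(128 128 128 384)
    echo $(( mpi[0]*mpi[1]*mpi[2]*mpi[3] ))              # 1536 ranks = 128 nodes x 12 ppn
    for d in 0 1 2 3; do
      echo "dim $d: local extent $(( grid[d] / mpi[d] ))"  # 32.32.32.16 per rank
    done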

@@ -2,26 +2,39 @@
 
 ## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00
 
 #PBS -q EarlyAppAccess
-#PBS -l select=16
-#PBS -l walltime=01:00:00
+#PBS -l select=16:system=sunspot,place=scatter
 #PBS -A LatticeQCD_aesp_CNDA
+#PBS -l walltime=01:00:00
+#PBS -N dwf
+#PBS -k doe
 
 #export OMP_PROC_BIND=spread
 #unset OMP_PLACES
 
 cd $PBS_O_WORKDIR
 
-source ../sourceme.sh
+#source ../sourceme.sh
 
 cat $PBS_NODEFILE
 
+#export MPICH_COLL_SYNC=1
+#export MPICH_ENV_DISPLAY=1
+export MPICH_
 export OMP_NUM_THREADS=3
 export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
+#export LD_LIBRARY_PATH=/soft/restricted/CNDA/updates/2023.05.15.001/oneapi/compiler/eng-20230512/compiler/linux/lib/:$LD_LIBRARY_PATH
 
 #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
 #unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
 #unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
+export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
+unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
 
 export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
 export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
@@ -32,9 +45,17 @@ export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
 export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
 export MPICH_OFI_NIC_POLICY=GPU
 
 # 12 ppn, 16 nodes, 192 ranks
+DIR=repro.$PBS_JOBID
+mkdir $DIR
+cd $DIR
+
 CMD="mpiexec -np 192 -ppn 12 -envall \
-	./gpu_tile_compact.sh \
-	./Test_dwf_mixedcg_prec --mpi 2.4.4.6 --grid 64.128.128.192 \
-	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000"
+	../gpu_tile_compact.sh \
+	../Test_dwf_mixedcg_prec --mpi 2.4.4.6 --grid 64.128.128.192 \
+	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000 --debug-stdout --log Message,Iterative"
+#--comms-overlap
 $CMD
+
+grep Oops Grid.stderr.* > failures.$PBS_JOBID
+rm core.*
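A note on the reproducibility settings added above (intent inferred from the variable names, not documented in the commit): forcing MPICH's allreduce/reduce off the device and onto a fixed recursive-doubling algorithm pins the floating-point summation order, and unsetting the collective-selection tuning JSONs stops MPICH from switching algorithms with message size, so repeated runs should reduce in a bitwise-identical order. After a run, the failure summary the script writes can be inspected like this (illustrative; "Oops" is evidently the marker Grid prints on a mismatch):

    cd repro.<jobid>
    wc -l failures.<jobid>            # non-empty => some rank reported a mismatch
    grep -h Oops Grid.stderr.* | head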

systems/Aurora/tests/repro1gpu.pbs (new file, +82)
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+#PBS -l select=16:system=sunspot,place=scatter
+#PBS -A LatticeQCD_aesp_CNDA
+#PBS -l walltime=02:00:00
+#PBS -N repro1gpu
+#PBS -k doe
+
+#export OMP_PROC_BIND=spread
+#unset OMP_PLACES
+
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
+
+# 56 cores / 6 threads ~9
+export OMP_NUM_THREADS=6
+export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+export MPICH_OFI_NIC_POLICY=GPU
+
+export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
+unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
+export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
+
+cd $PBS_O_WORKDIR
+
+NN=`cat $PBS_NODEFILE | wc -l`
+echo $PBS_NODEFILE
+cat $PBS_NODEFILE
+
+echo $NN nodes in node file
+for n in `eval echo {1..$NN}`
+do
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+echo Node $n is $THIS_NODE
+
+
+for g in {0..11}
+do
+export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 )
+export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 )
+export GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 )
+
+export numa=${NUMA_MAP[$g]}
+export gpu_id=${GPU_MAP[$g]}
+export tile_id=${TILE_MAP[$g]}
+export gpu=$gpu_id.$tile_id
+
+cd $PBS_O_WORKDIR
+
+DIR=repro.1gpu.$PBS_JOBID/node-$n-$THIS_NODE-GPU-$gpu
+mkdir -p $DIR
+cd $DIR
+
+echo $THIS_NODE > nodefile
+echo $gpu > gpu
+
+export ZE_AFFINITY_MASK=$gpu
+export ONEAPI_DEVICE_FILTER=gpu,level_zero
+
+CMD="mpiexec -np 1 -ppn 1 -envall --hostfile nodefile \
+	numactl -N $numa -m $numa ../../Test_dwf_mixedcg_prec --mpi 1.1.1.1 --grid 16.16.32.32 \
+	--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message"
+echo $CMD
+$CMD &
+
+done
+done
+
+wait
+
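The three map arrays above encode a fixed slot-to-hardware layout: loop indices 0-5 land on tile 0 of GPUs 0-5, indices 6-11 on tile 1 of the same GPUs, and GPUs 0-2 sit in NUMA domain 0 with GPUs 3-5 in domain 1. The same map in arithmetic form (an equivalent sketch, not part of the commit):

    for g in {0..11}; do
      gpu_id=$(( g % 6 ))      # six GPUs per node
      tile_id=$(( g / 6 ))     # two tiles per GPU
      numa=$(( (g / 3) % 2 ))  # NUMA domain alternates every three slots
      echo "slot $g -> GPU $gpu_id tile $tile_id NUMA $numa"
    done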

systems/Aurora/tests/reproN.pbs (new file, +98)
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+#PBS -l select=32:system=sunspot,place=scatter
+#PBS -A LatticeQCD_aesp_CNDA
+#PBS -l walltime=02:00:00
+#PBS -N reproN
+#PBS -k doe
+
+#export OMP_PROC_BIND=spread
+#unset OMP_PLACES
+
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
+
+# 56 cores / 6 threads ~9
+export OMP_NUM_THREADS=6
+export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1
+export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
+export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
+
+export GRID_PRINT_ENTIRE_LOG=0
+export GRID_CHECKSUM_RECV_BUF=0
+export GRID_CHECKSUM_SEND_BUF=0
+
+export MPICH_OFI_NIC_POLICY=GPU
+
+export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
+unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
+
+cd $PBS_O_WORKDIR
+
+NN=`cat $PBS_NODEFILE | wc -l`
+echo $PBS_NODEFILE
+cat $PBS_NODEFILE
+
+echo $NN nodes in node file
+for n in `eval echo {1..$NN}`
+do
+
+cd $PBS_O_WORKDIR
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+echo Node $n is $THIS_NODE
+
+DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE
+
+mkdir -p $DIR
+cd $DIR
+
+echo $THIS_NODE > nodefile
+
+#CMD="mpiexec -np 12 -ppn 12 -envall --hostfile nodefile \
+# ../../gpu_tile_compact.sh \
+# ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \
+# --shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap"
+
+CMD="mpiexec -np 12 -ppn 12 -envall --hostfile nodefile \
+	../../gpu_tile_compact.sh \
+	../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \
+	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap"
+
+echo $CMD > command-line
+env > environment
+$CMD &
+
+done
+
+# Suspicious wait is allowing jobs to collide and knock out
+#wait
+
+sleep 6500
+
+for n in ` eval echo {1..$NN} `
+do
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE
+
+cd $DIR
+
+grep Oops Grid.stderr.* > failures.$PBS_JOBID
+rm core.*
+
+done
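The fixed "sleep 6500" above stands in for the commented-out "wait", which (per the script's own comment) was letting jobs collide. A hypothetical alternative that keeps a hard deadline but still reaps each node's background run (a sketch under that assumption, not part of the commit):

    deadline=$(( SECONDS + 6500 ))
    for pid in $(jobs -p); do
      while kill -0 $pid 2>/dev/null && [ $SECONDS -lt $deadline ]; do
        sleep 30                  # poll until the run exits or time is up
      done
      kill $pid 2>/dev/null       # stop any straggler past the deadline
    done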

@@ -36,5 +36,5 @@ export MPICH_OFI_NIC_POLICY=GPU
 CMD="mpiexec -np 192 -ppn 12 -envall \
 	./gpu_tile_compact.sh \
 	./Test_staggered_cg_prec --mpi 2.4.4.6 --grid 128.128.128.192 \
-	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000"
+	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000 --comms-overlap"
 $CMD

@@ -1,4 +1,4 @@
 TOOLS=$HOME/tools
 ../../configure \
 	--enable-simd=GPU \
 	--enable-gen-simd-width=64 \
@@ -11,6 +11,6 @@ TOOLS=$HOME/tools
 	--enable-unified=no \
 	MPICXX=mpicxx \
 	CXX=icpx \
-	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/" \
-	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include"
+	LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -lsycl" \
+	CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -qmkl=parallel"

systems/Sunspot/sourceme.sh (new file, +2)
@@ -0,0 +1,2 @@
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu

systems/Sunspot/tests/repro1gpu.pbs (new file, +81)
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+#PBS -l select=16:system=sunspot,place=scatter
+#PBS -A LatticeQCD_aesp_CNDA
+#PBS -l walltime=02:00:00
+#PBS -N repro1gpu
+#PBS -k doe
+
+#export OMP_PROC_BIND=spread
+#unset OMP_PLACES
+
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
+
+# 56 cores / 6 threads ~9
+export OMP_NUM_THREADS=6
+export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+export MPICH_OFI_NIC_POLICY=GPU
+
+export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
+unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
+
+cd $PBS_O_WORKDIR
+
+NN=`cat $PBS_NODEFILE | wc -l`
+echo $PBS_NODEFILE
+cat $PBS_NODEFILE
+
+echo $NN nodes in node file
+for n in `eval echo {1..$NN}`
+do
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+echo Node $n is $THIS_NODE
+
+
+for g in {0..11}
+do
+export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 )
+export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 )
+export GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 )
+
+export numa=${NUMA_MAP[$g]}
+export gpu_id=${GPU_MAP[$g]}
+export tile_id=${TILE_MAP[$g]}
+export gpu=$gpu_id.$tile_id
+
+cd $PBS_O_WORKDIR
+
+DIR=repro.1gpu.$PBS_JOBID/node-$n-$THIS_NODE-GPU-$gpu
+mkdir -p $DIR
+cd $DIR
+
+echo $THIS_NODE > nodefile
+echo $gpu > gpu
+
+export ZE_AFFINITY_MASK=$gpu
+export ONEAPI_DEVICE_FILTER=gpu,level_zero
+
+CMD="mpiexec -np 1 -ppn 1 -envall --hostfile nodefile \
+	numactl -N $numa -m $numa ../../Test_dwf_mixedcg_prec --mpi 1.1.1.1 --grid 16.16.32.32 \
+	--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message"
+echo $CMD
+$CMD &
+
+done
+done
+
+wait
+
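Each single-rank run above is pinned to one GPU tile via ZE_AFFINITY_MASK: a mask like "3.1" exposes only tile 1 of GPU 3, which the process then sees as its sole device. What a given mask exposes can be checked with the oneAPI sycl-ls utility (illustrative, assuming the modules loaded above provide it):

    ZE_AFFINITY_MASK=3.1 sycl-ls    # should list a single Level Zero GPU device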

systems/Sunspot/tests/reproN.pbs (new file, +97)
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+#PBS -l select=32:system=sunspot,place=scatter
+#PBS -A LatticeQCD_aesp_CNDA
+#PBS -l walltime=02:00:00
+#PBS -N reproN
+#PBS -k doe
+
+#export OMP_PROC_BIND=spread
+#unset OMP_PLACES
+
+module load oneapi/eng-compiler/2023.05.15.003
+module load mpich/51.2/icc-all-deterministic-pmix-gpu
+
+# 56 cores / 6 threads ~9
+export OMP_NUM_THREADS=6
+export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
+#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
+#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
+
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1
+export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
+
+export GRID_PRINT_ENTIRE_LOG=0
+export GRID_CHECKSUM_RECV_BUF=1
+export GRID_CHECKSUM_SEND_BUF=0
+
+export MPICH_OFI_NIC_POLICY=GPU
+
+export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
+export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
+unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
+unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
+
+cd $PBS_O_WORKDIR
+
+NN=`cat $PBS_NODEFILE | wc -l`
+echo $PBS_NODEFILE
+cat $PBS_NODEFILE
+
+echo $NN nodes in node file
+for n in `eval echo {1..$NN}`
+do
+
+cd $PBS_O_WORKDIR
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+echo Node $n is $THIS_NODE
+
+DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE
+
+mkdir -p $DIR
+cd $DIR
+
+echo $THIS_NODE > nodefile
+
+#CMD="mpiexec -np 12 -ppn 12 -envall --hostfile nodefile \
+# ../../gpu_tile_compact.sh \
+# ../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \
+# --shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap"
+
+CMD="mpiexec -np 12 -ppn 12 -envall --hostfile nodefile \
+	../../gpu_tile_compact.sh \
+	../../Test_dwf_mixedcg_prec --mpi 1.2.2.3 --grid 32.64.64.96 \
+	--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --comms-overlap"
+
+echo $CMD > command-line
+env > environment
+$CMD &
+
+done
+
+# Suspicious wait is allowing jobs to collide and knock out
+#wait
+
+sleep 6500
+
+for n in ` eval echo {1..$NN} `
+do
+
+THIS_NODE=`head -n$n $PBS_NODEFILE | tail -n1 `
+DIR=reproN.$PBS_JOBID/node-$n-$THIS_NODE
+
+cd $DIR
+
+grep Oops Grid.stderr.* > failures.$PBS_JOBID
+rm core.*
+
+done
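Unlike the Aurora variant, this script runs with GRID_CHECKSUM_RECV_BUF=1, checksumming receive buffers to catch corrupt transfers on the receiving side. To attribute a corruption to the sender versus the fabric, one could presumably enable both sides (assumption: Grid checksums the corresponding buffer when the variable is set to 1):

    export GRID_CHECKSUM_SEND_BUF=1
    export GRID_CHECKSUM_RECV_BUF=1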