mirror of https://github.com/paboyle/Grid.git (synced 2025-06-12 20:27:06 +01:00)

Merge branch 'feature/deprecate-uvm' into develop

Committed 2025-01-31 16:32:36 +00:00
129 changed files with 3562 additions and 3000 deletions


@@ -0,0 +1,23 @@
#Ahead of time compile for PVC
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -fsycl-targets=spir64_gen -Xs -device -Xs pvc "
#JIT compile
#export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
#export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions "
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-debug \
--disable-gparity \
--disable-fermion-reps \
--with-lime=$CLIME \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-accelerator-aware-mpi=yes \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx


@@ -0,0 +1,15 @@
#module load oneapi/release/2023.12.15.001
#module load mpich/icc-all-debug-pmix-gpu/52.2
#module load mpich-config/mode/deterministic
#module load intel_compute_runtime/release/821.35
source ~/spack/share/spack/setup-env.sh
spack load c-lime
spack load openssl
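# CLIME = install prefix of c-lime (second whitespace column of "spack find --paths"),
# consumed by the --with-lime=$CLIME configure flag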
export CLIME=`spack find --paths c-lime | grep ^c-lime | awk '{print $2}' `
export HTTP_PROXY=http://proxy.alcf.anl.gov:3128
export HTTPS_PROXY=http://proxy.alcf.anl.gov:3128
export http_proxy=http://proxy.alcf.anl.gov:3128
export https_proxy=http://proxy.alcf.anl.gov:3128
git config --global http.proxy http://proxy.alcf.anl.gov:3128
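# JIT-compile SYCL kernels with the large GPU register file: fewer register spills at the cost of occupancy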
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"


@@ -0,0 +1,74 @@
#!/bin/bash
#PBS -l select=512
#PBS -q EarlyAppAccess
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=6:00:00
#PBS -N reproBigJob
#PBS -k doe
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
#module load oneapi/eng-compiler/2023.05.15.003
#module load mpich/51.2/icc-all-deterministic-pmix-gpu
# 56 cores / 6 threads per rank -> ~9 ranks
export OMP_NUM_THREADS=6
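# MPICH GPU pipelining: stage device messages larger than 128kB (THRESHOLD) through
# 16 host bounce buffers of 10MB each (BUFFER_SZ)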
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=10485760
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
#export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
export GRID_PRINT_ENTIRE_LOG=0
export GRID_CHECKSUM_RECV_BUF=0
export GRID_CHECKSUM_SEND_BUF=0
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
#export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
#export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
#unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
#unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
#unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
cd $PBS_O_WORKDIR
cp $PBS_NODEFILE nodefile
DIR=reproBigJob.$PBS_JOBID
mkdir -p $DIR
cd $DIR
cp $PBS_NODEFILE nodefile
BINARY=../Test_dwf_mixedcg_prec
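# pingjob (written below) only prints one "ssh <node> killall -s USR1 ..." line per node;
# run its output to signal all ranks (presumably poking Grid's --debug-signals handlers)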
cat > pingjob <<'EOF'
while read node ;
do
echo ssh $node killall -s USR1 -- ../Test_dwf_mixedcg_prec
done < nodefile
EOF
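# 512 nodes x 12 ranks = 6144; MPI 8.8.8.12 over a 128.128.128.288 lattice -> 16.16.16.24 local volume per rank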
CMD="mpiexec -np 6144 -ppn 12 -envall --hostfile nodefile \
../gpu_tile_compact.sh \
$BINARY --mpi 8.8.8.12 --grid 128.128.128.288 \
--shm-mpi 0 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 18000 --debug-stdout --log Message --debug-signals --comms-overlap"
echo $CMD > command-line
env > environment
$CMD
grep Oops Grid.stderr.* > failures.$PBS_JOBID
rm core.*


@@ -1,27 +1,24 @@
#!/bin/bash
##PBS -q EarlyAppAccess
#PBS -q debug
#PBS -l select=1
#PBS -l walltime=00:20:00
#PBS -A LatticeQCD_aesp_CNDA
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
cd $PBS_O_WORKDIR
source ../sourceme.sh
module load pti-gpu
#cat $PBS_NODEFILE
cp $PBS_NODEFILE nodefile
export OMP_NUM_THREADS=4
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
@@ -29,39 +26,11 @@ export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU
# 12 ppn, 1 node, 12 ranks
#
CMD="mpiexec -np 1 -ppn 1 -envall \
./gpu_tile_compact.sh \
./Benchmark_usqcd --mpi 1.1.1.1 --grid 24.32.32.24 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
$CMD | tee usqcd.log
CMD="mpiexec -np 1 -ppn 1 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 "
#$CMD | tee 1tile.dwf
CMD="mpiexec -np 12 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 32.32.32.48 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 1node.32.32.32.48.dwf
CMD="mpiexec -np 12 -ppn 12 -envall \
./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.1.2.3 --grid 32.32.64.96 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 8 "
CMD="mpiexec -np 12 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.64.32.96 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 1node.64.64.32.96.dwf
CMD="mpiexec -np 12 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.32.32.48 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 1node.64.32.32.48.dwf
echo $CMD
$CMD


@@ -1,55 +1,48 @@
#!/bin/bash
#PBS -q workq
##PBS -q EarlyAppAccess
#PBS -q debug
#PBS -l select=2
#PBS -l walltime=00:20:00
#PBS -A LatticeQCD_aesp_CNDA
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
cd $PBS_O_WORKDIR
source ../sourceme.sh
module load pti-gpu
#cat $PBS_NODEFILE
cp $PBS_NODEFILE nodefile
export OMP_NUM_THREADS=4
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU
# 12 ppn, 2 nodes, 24 ranks
#
# Local vol 16.16.16.32
#
#VOL=32.64.64.96
for VOL in 32.32.32.96 32.64.64.96
do
for AT in 32
do
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 2.2.2.3 --grid 24.32.32.24 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
$CMD | tee 2node.comms
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile.sh ./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid $VOL \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads $AT --comms-overlap "
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.32.64.48 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 2node.32.32.64.48.dwf
CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 64.64.64.96 \
--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 2node.64.64.64.96.dwf
echo $CMD
$CMD
done
done


@@ -4,10 +4,12 @@
#export NUMA_MAP=(0 0 1 1 0 0 1 1 0 0 1 1);
#export GPU_MAP=(0.0 0.1 3.0 3.1 1.0 1.1 4.0 4.1 2.0 2.1 5.0 5.1)
export NUMA_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 );
export NUMA_PMAP=(0 0 0 1 1 1 0 0 0 1 1 1 );
export NUMA_HMAP=(2 2 2 3 3 3 3 2 2 2 2 3 3 3 );
export GPU_MAP=(0.0 1.0 2.0 3.0 4.0 5.0 0.1 1.1 2.1 3.1 4.1 5.1 )
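# one entry per local rank: PALS_LOCAL_RANKID indexes this rank's NUMA domains and GPU tile below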
export NUMA=${NUMA_MAP[$PALS_LOCAL_RANKID]}
export NUMAP=${NUMA_PMAP[$PALS_LOCAL_RANKID]}
export NUMAH=${NUMA_HMAP[$PALS_LOCAL_RANKID]}
export gpu_id=${GPU_MAP[$PALS_LOCAL_RANKID]}
unset EnableWalkerPartition
@@ -17,18 +19,19 @@ export ONEAPI_DEVICE_FILTER=gpu,level_zero
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:5
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:3
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1
#export MPI_BUF_NUMA=$NUMAH
echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NUMA $NUMA "
if [ "$PALS_RANKID" = "0" ]
then
#  numactl -m $NUMA -N $NUMA onetrace --chrome-device-timeline "$@"
#  numactl -m $NUMA -N $NUMA unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
  numactl -p $NUMAP -N $NUMAP unitrace --chrome-kernel-logging --chrome-mpi-logging --chrome-sycl-logging --demangle "$@"
#  numactl -p $NUMAP -N $NUMAP "$@"
else
#  numactl -m $NUMA -N $NUMA "$@"
  numactl -p $NUMAP -N $NUMAP "$@"
fi


@@ -1,8 +1,15 @@
#Ahead of time compile for PVC
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-targets=spir64_gen -Xs -device -Xs pvc -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl -lnuma -L/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/lib"
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions -I/opt/aurora/24.180.3/spack/unified/0.8.0/install/linux-sles15-x86_64/oneapi-2024.07.30.002/numactl-2.0.14-7v6edad/include/"
#JIT compile
#export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
#export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -Wno-tautological-compare -qmkl=parallel -fsycl -fno-exceptions "
export LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L${MKLROOT}/lib -qmkl=parallel -fsycl -lsycl "
export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -qmkl=parallel -fsycl -fno-exceptions "
../../configure \
--enable-simd=GPU \
--enable-reduction=grid \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-debug \
@@ -11,7 +18,7 @@ export CXXFLAGS="-O3 -fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -
--with-lime=$CLIME \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-accelerator-aware-mpi=yes \
--enable-accelerator-aware-mpi=no \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx


@@ -1,40 +1,16 @@
#module load oneapi/release/2023.12.15.001
#module load mpich/icc-all-debug-pmix-gpu/52.2
#module load mpich-config/mode/deterministic
#module load intel_compute_runtime/release/821.35
module load pti-gpu
source ~/spack/share/spack/setup-env.sh
spack load c-lime
spack load openssl
export CLIME=`spack find --paths c-lime | grep ^c-lime | awk '{print $2}' `
#spack load libefence
#export EFENCE=`spack find --paths libefence | grep ^libefence | awk '{print $2}' `
#export LD_LIBRARY_PATH=${EFENCE}/lib:$LD_LIBRARY_PATH
#spack load gperftools
export TCMALLOC=/home/paboyle/gperftools/install
export LD_LIBRARY_PATH=${TCMALLOC}/lib:$LD_LIBRARY_PATH
export INTELGT_AUTO_ATTACH_DISABLE=1
#export ONEAPI_DEVICE_SELECTOR=level_zero:0.0
#module load oneapi/release/2023.12.15.001
#module use /soft/modulefiles
#module load intel_compute_runtime/release/agama-devel-682.22
#export FI_CXI_DEFAULT_CQ_SIZE=131072
#export FI_CXI_CQ_FILL_PERCENT=20
#export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
#export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-intel-enable-auto-large-GRF-mode"
#
# -ftarget-register-alloc-mode=pvc:default
# -ftarget-register-alloc-mode=pvc:small
# -ftarget-register-alloc-mode=pvc:large
# -ftarget-register-alloc-mode=pvc:auto
#export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
export HTTP_PROXY=http://proxy.alcf.anl.gov:3128
export HTTPS_PROXY=http://proxy.alcf.anl.gov:3128
export http_proxy=http://proxy.alcf.anl.gov:3128
export https_proxy=http://proxy.alcf.anl.gov:3128
git config --global http.proxy http://proxy.alcf.anl.gov:3128
#source ~/spack/share/spack/setup-env.sh
#spack load gperftools
#export TCMALLOC=`spack find --paths gperftools | grep ^gperftools | awk '{print $2}' `
#export LD_LIBRARY_PATH=${TCMALLOC}/lib:$LD_LIBRARY_PATH
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"


@@ -2,7 +2,8 @@
## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00
#PBS -l select=16:system=sunspot,place=scatter
#PBS -l select=16
#PBS -q EarlyAppAccess
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=01:00:00
#PBS -N dwf
@@ -13,19 +14,14 @@
cd $PBS_O_WORKDIR
#source ../sourceme.sh
source ../sourceme.sh
cat $PBS_NODEFILE
#export MPICH_COLL_SYNC=1
#export MPICH_ENV_DISPLAY=1
#export MPICH_
export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
module load oneapi/eng-compiler/2023.05.15.003
module load mpich/51.2/icc-all-deterministic-pmix-gpu
#export LD_LIBRARY_PATH=/soft/restricted/CNDA/updates/2023.05.15.001/oneapi/compiler/eng-20230512/compiler/linux/lib/:$LD_LIBRARY_PATH
#module load mpich/51.2/icc-all-deterministic-pmix-gpu
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST


@@ -1,6 +1,7 @@
#!/bin/bash
#PBS -l select=16:system=sunspot,place=scatter
#PBS -l select=16
#PBS -q EarlyAppAccess
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=02:00:00
#PBS -N repro1gpu
@@ -9,8 +10,9 @@
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
module load oneapi/eng-compiler/2023.05.15.003
module load mpich/51.2/icc-all-deterministic-pmix-gpu
#module load oneapi/eng-compiler/2023.05.15.003
#module load mpich/51.2/icc-all-deterministic-pmix-gpu
# 56 cores / 6 threads per rank -> ~9 ranks
export OMP_NUM_THREADS=6
@@ -34,6 +36,8 @@ export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
cd $PBS_O_WORKDIR
source ../sourceme.sh
NN=`cat $PBS_NODEFILE | wc -l`
echo $PBS_NODEFILE
cat $PBS_NODEFILE


@@ -0,0 +1,74 @@
#!/bin/bash
#PBS -l select=32
#PBS -q EarlyAppAccess
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=02:00:00
#PBS -N reproBigJob
#PBS -k doe
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
#module load oneapi/eng-compiler/2023.05.15.003
#module load mpich/51.2/icc-all-deterministic-pmix-gpu
# 56 cores / 6 threads per rank -> ~9 ranks
export OMP_NUM_THREADS=6
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=10485760
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
#export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
export GRID_PRINT_ENTIRE_LOG=0
export GRID_CHECKSUM_RECV_BUF=0
export GRID_CHECKSUM_SEND_BUF=0
export MPICH_OFI_NIC_POLICY=GPU
#export MPIR_CVAR_ALLREDUCE_DEVICE_COLLECTIVE=0
#export MPIR_CVAR_REDUCE_DEVICE_COLLECTIVE=0
#export MPIR_CVAR_ALLREDUCE_INTRA_ALGORITHM=recursive_doubling
#unset MPIR_CVAR_CH4_COLL_SELECTION_TUNING_JSON_FILE
#unset MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE
#unset MPIR_CVAR_CH4_POSIX_COLL_SELECTION_TUNING_JSON_FILE
cd $PBS_O_WORKDIR
cp $PBS_NODEFILE nodefile
DIR=reproBigJob.$PBS_JOBID
mkdir -p $DIR
cd $DIR
cp $PBS_NODEFILE nodefile
BINARY=../Test_dwf_mixedcg_prec
cat > pingjob <<'EOF'
while read node ;
do
echo ssh $node killall -s USR1 -- ../Test_dwf_mixedcg_prec
done < nodefile
EOF
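# 32 nodes x 12 ranks = 384; MPI 4.4.4.6 over a 128.128.128.96 lattice -> 32.32.32.16 local volume per rank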
CMD="mpiexec -np 384 -ppn 12 -envall --hostfile nodefile \
../gpu_tile_compact.sh \
$BINARY --mpi 4.4.4.6 --grid 128.128.128.96 \
--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 6000 --debug-stdout --log Message --debug-signals"
echo $CMD > command-line
env > environment
$CMD
grep Oops Grid.stderr.* > failures.$PBS_JOBID
rm core.*


@@ -1,6 +1,7 @@
#!/bin/bash
#PBS -l select=32:system=sunspot,place=scatter
#PBS -l select=16
#PBS -q EarlyAppAccess
#PBS -A LatticeQCD_aesp_CNDA
#PBS -l walltime=02:00:00
#PBS -N reproN
@@ -9,8 +10,8 @@
#export OMP_PROC_BIND=spread
#unset OMP_PLACES
module load oneapi/eng-compiler/2023.05.15.003
module load mpich/51.2/icc-all-deterministic-pmix-gpu
#module load oneapi/eng-compiler/2023.05.15.003
#module load mpich/51.2/icc-all-deterministic-pmix-gpu
# 56 cores / 6 threads per rank -> ~9 ranks
export OMP_NUM_THREADS=6


@@ -0,0 +1,18 @@
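# CUDA build: nvcc wraps mpicxx as host compiler; compute_80/sm_80 targets NVIDIA Ampere (e.g. A100)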
../../configure \
--enable-comms=mpi \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-shm=nvlink \
--with-lime=$CLIME \
--with-hdf5=$HDF5 \
--with-fftw=$FFTW \
--with-gmp=$GMP \
--with-mpfr=$MPFR \
--enable-accelerator=cuda \
--disable-gparity \
--disable-fermion-reps \
--disable-unified \
CXX=nvcc \
LDFLAGS="-cudart shared -L$NVIDIALIB -lcublas" \
CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++17 -cudart shared"


@@ -0,0 +1,16 @@
. /home/paboyle/spack/share/spack/setup-env.sh
spack load cuda@12.0.0
spack load c-lime
spack load gmp
spack load mpfr
spack load hdf5
spack load fftw
spack load openmpi
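# record install prefixes; "cut -c N-" strips the fixed-width package-name column from "spack find --paths" output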
export FFTW=`spack find --paths fftw | grep fftw | cut -c 14-`
export HDF5=`spack find --paths hdf5 | grep hdf5 | cut -c 14-`
export CUDA=`spack find --paths cuda@12.0.0 | grep cuda | cut -c 14-`
export CLIME=`spack find --paths c-lime | grep c-lime| cut -c 15-`
export GMP=`spack find --paths gmp | grep gmp | cut -c 12-`
export MPFR=`spack find --paths mpfr | grep mpfr | cut -c 13-`
export NVIDIALIB=$CUDA/targets/x86_64-linux/lib/
export LD_LIBRARY_PATH=$NVIDIALIB:$LD_LIBRARY_PATH:$HDF5/lib:$FFTW/lib:$CLIME/lib/:$MPFR/lib


@@ -1,7 +1,7 @@
spack load c-lime
spack load gmp
spack load mpfr
CLIME=`spack find --paths c-lime | grep c-lime| cut -c 15-`
CLIME=`spack find --paths c-lime | grep c-lime| cut -c 13-`
GMP=`spack find --paths gmp | grep gmp | cut -c 12-`
MPFR=`spack find --paths mpfr | grep mpfr | cut -c 13-`
echo clime X$CLIME