Mirror of https://github.com/paboyle/Grid.git (synced 2025-06-13 04:37:05 +01:00)

Commit: Merge branch 'develop' into feature/scidac-wp1
systems/Aurora/benchmarks/bench1024.pbs (new file, 60 lines)
@@ -0,0 +1,60 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=1024
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU
export FI_CXI_CQ_FILL_PERCENT=10
export FI_CXI_DEFAULT_CQ_SIZE=262144
#export FI_CXI_DEFAULT_CQ_SIZE=131072
#export FI_CXI_CQ_FILL_PERCENT=20

# 12 ppn, 1024 nodes, 12288 ranks
#
CMD="mpiexec -np 12288 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 8.6.16.16 --grid 64.48.64.384 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 12288 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 8.8.8.24 --grid 128.128.128.384 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 1024node.dwf.small.cq

CMD="mpiexec -np 12288 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 16.8.8.12 --grid 256.256.256.384 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 1024node.dwf.cq
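The MPIR_CVAR_CH4_OFI_GPU_PIPELINE_* block above recurs in every Aurora script in this commit. As a rough guide to what the knobs do (assuming the usual MPICH CH4/OFI semantics, where GPU-resident messages larger than the threshold are staged through a pool of host bounce buffers), here is a hypothetical back-of-envelope sketch, not part of the commit:

    #!/bin/bash
    # Sketch only: assumes THRESHOLD gates pipelining and BUFFER_SZ is the
    # size of each host bounce buffer, per MPICH's CVAR descriptions.
    THRESHOLD=131072    # MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD
    BUFFER_SZ=1048576   # MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ
    for MSG in 65536 262144 4194304; do
      if [ $MSG -le $THRESHOLD ]; then
        echo "$MSG bytes: sent directly (below threshold)"
      else
        echo "$MSG bytes: staged through $(( (MSG + BUFFER_SZ - 1) / BUFFER_SZ )) bounce buffer(s)"
      fi
    done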
systems/Aurora/benchmarks/bench12.pbs (new file, 60 lines)
@@ -0,0 +1,60 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=2
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 2.3.2.2 --grid 32.24.32.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

#$CMD

CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.3.2.2 --grid 64.96.64.64 --comms-overlap \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

#$CMD

CMD="mpiexec -np 1 -ppn 1 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf --mpi 1.1.1.1 --grid 16.32.32.32 --comms-sequential \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 1 -ppn 1 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 --comms-sequential \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD
systems/Aurora/benchmarks/bench2048.pbs (new file, 56 lines)
@@ -0,0 +1,56 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=2048
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 2048 nodes, 24576 ranks
#
CMD="mpiexec -np 24576 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 8.12.16.16 --grid 64.48.64.384 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 24576 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 16.8.8.24 --grid 128.128.128.384 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 2048node.dwf.small

CMD="mpiexec -np 24576 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 16.8.8.24 --grid 256.256.256.768 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 2048node.dwf
systems/Aurora/benchmarks/bench256.pbs (new file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=256
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 256 nodes, 3072 ranks
#
CMD="mpiexec -np 3072 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 8.6.8.8 --grid 32.24.32.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 3072 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 8.8.4.12 --grid 128.128.128.768 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 256node.dwf.large
systems/Aurora/benchmarks/bench512.pbs (new file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=512
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 512 nodes, 6144 ranks
#
CMD="mpiexec -np 6144 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 8.6.8.16 --grid 32.24.32.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 6144 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 8.8.8.12 --grid 256.128.128.768 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 512node.dwf.large
systems/Aurora/benchmarks/bench_scaling.pbs (new file, 80 lines)
@@ -0,0 +1,80 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=32
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 32 nodes, 384 ranks
#
CMD="mpiexec -np 384 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_comms_host_device --mpi 4.6.4.4 --grid 32.24.32.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32"

$CMD

CMD="mpiexec -np 12 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 1.2.2.3 --grid 16.64.64.96 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 1node.dwf

CMD="mpiexec -np 24 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.2.3 --grid 32.64.64.96 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 2node.dwf

CMD="mpiexec -np 48 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.2.6 --grid 32.64.64.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 4node.dwf

CMD="mpiexec -np 96 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.2.4.6 --grid 32.64.128.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 8node.dwf

CMD="mpiexec -np 192 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 2.4.4.6 --grid 32.128.128.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 16node.dwf

CMD="mpiexec -np 384 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Benchmark_dwf_fp32 --mpi 4.4.4.6 --grid 64.128.128.192 \
--shm-mpi 1 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 32node.dwf
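The six runs above form a weak-scaling ladder: each time the rank count doubles, one --mpi dimension and the matching --grid dimension double with it. A hypothetical helper (not part of the commit) confirms the per-rank local volume stays fixed at 16.32.32.32 throughout:

    #!/bin/bash
    # Sketch only: divide each --grid dimension by the matching --mpi
    # dimension to show the local volume per rank is constant up the ladder.
    check () {
      local IFS=.
      local m=($1) g=($2)
      echo "--mpi $1 --grid $2 -> local volume $((g[0]/m[0])).$((g[1]/m[1])).$((g[2]/m[2])).$((g[3]/m[3]))"
    }
    check 1.2.2.3 16.64.64.96     # 1 node,  12 ranks
    check 2.2.2.3 32.64.64.96     # 2 nodes, 24 ranks
    check 2.2.2.6 32.64.64.192    # 4 nodes, 48 ranks
    check 2.2.4.6 32.64.128.192   # 8 nodes, 96 ranks
    check 2.4.4.6 32.128.128.192  # 16 nodes, 192 ranks
    check 4.4.4.6 64.128.128.192  # 32 nodes, 384 ranks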
systems/Aurora/benchmarks/gpu_tile_compact.sh (new executable file, 33 lines)
@@ -0,0 +1,33 @@
#!/bin/bash

export NUMA_MAP=(2 2 2 3 3 3 2 2 2 3 3 3 )
#export NUMA_MAP=(0 0 0 1 1 1 0 0 0 1 1 1 )
export NUMA_PMAP=(0 0 0 1 1 1 0 0 0 1 1 1 )
export NIC_MAP=(0 1 2 4 5 6 0 1 2 4 5 6 )
export GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5 )
export TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1 )

export NUMA=${NUMA_MAP[$PALS_LOCAL_RANKID]}
export NUMAP=${NUMA_PMAP[$PALS_LOCAL_RANKID]}
export NIC=${NIC_MAP[$PALS_LOCAL_RANKID]}
export gpu_id=${GPU_MAP[$PALS_LOCAL_RANKID]}
export tile_id=${TILE_MAP[$PALS_LOCAL_RANKID]}

#export GRID_MPICH_NIC_BIND=$NIC
#export ONEAPI_DEVICE_SELECTOR=level_zero:$gpu_id.$tile_id

unset EnableWalkerPartition
export EnableImplicitScaling=0
export ZE_AFFINITY_MASK=$gpu_id.$tile_id
export ONEAPI_DEVICE_FILTER=gpu,level_zero

#export ZE_ENABLE_PCI_ID_DEVICE_ORDER=1
#export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
#export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
#export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1

#echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NUMA $NUMA "

numactl -m $NUMA -N $NUMAP "$@"
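The maps above implement a compact placement: local ranks 0-5 take tile 0 of GPUs 0-5, ranks 6-11 take tile 1 of the same GPUs, and memory binding alternates between NUMA domains 2 and 3 in triples. A hypothetical dry run (not part of the commit) makes the mapping visible without a PALS launch:

    #!/bin/bash
    # Sketch only: emulate gpu_tile_compact.sh for all 12 local ranks and
    # print the GPU.tile and NUMA domain each rank would be pinned to.
    GPU_MAP=(0 1 2 3 4 5 0 1 2 3 4 5)
    TILE_MAP=(0 0 0 0 0 0 1 1 1 1 1 1)
    NUMA_MAP=(2 2 2 3 3 3 2 2 2 3 3 3)
    for PALS_LOCAL_RANKID in $(seq 0 11); do
      echo "local rank $PALS_LOCAL_RANKID -> ZE_AFFINITY_MASK=${GPU_MAP[$PALS_LOCAL_RANKID]}.${TILE_MAP[$PALS_LOCAL_RANKID]} (NUMA ${NUMA_MAP[$PALS_LOCAL_RANKID]})"
    done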
systems/Aurora/benchmarks/gpu_tile_compact4.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash

export NUMA_MAP=(2 2 3 3 2 2 3 3 )
export PROC_MAP=(0 0 1 1 0 0 1 1 )
export NIC_MAP=(0 0 4 4 1 1 5 5 )
export GPU_MAP=(0 1 3 4 0 1 3 4 )
export TILE_MAP=(0 0 0 0 1 1 1 1 )
export NUMA=${NUMA_MAP[$PALS_LOCAL_RANKID]}
export PROC=${PROC_MAP[$PALS_LOCAL_RANKID]}
export NIC=${NIC_MAP[$PALS_LOCAL_RANKID]}
export gpu_id=${GPU_MAP[$PALS_LOCAL_RANKID]}
export tile_id=${TILE_MAP[$PALS_LOCAL_RANKID]}

#export GRID_MPICH_NIC_BIND=$NIC

unset EnableWalkerPartition
export EnableImplicitScaling=0
export ZE_ENABLE_PCI_ID_DEVICE_ORDER=1
export ZE_AFFINITY_MASK=$gpu_id.$tile_id
#export ONEAPI_DEVICE_SELECTOR=level_zero:$gpu_id.$tile_id
export ONEAPI_DEVICE_FILTER=gpu,level_zero
export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=0
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0:2
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE_FOR_D2D_COPY=1
#export SYCL_PI_LEVEL_ZERO_USM_RESIDENT=1

echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK ; NIC $GRID_MPICH_NIC_BIND ; NUMA domain $NUMA"

# Per-rank CPU binding: the script originally passed -N $PROC_MAP, which bash
# expands to the first array element only; index by local rank instead.
numactl -m $NUMA -N $PROC "$@"
systems/Aurora/config-command (new file, 16 lines)
@@ -0,0 +1,16 @@
TOOLS=$HOME/tools
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--enable-accelerator-cshift \
--disable-gparity \
--disable-fermion-reps \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-unified=no \
MPICXX=mpicxx \
CXX=icpx \
LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/ -L${MKLROOT}/lib -qmkl=parallel " \
CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include -qmkl=parallel"
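Since the script invokes ../../configure, it is evidently meant to be run from a directory two levels below the Grid source root, i.e. from systems/Aurora itself (or a sibling build directory). A hypothetical invocation, with paths assumed:

    # Sketch only, paths assumed: build Grid for Aurora with the flags above.
    cd Grid/systems/Aurora
    source ./sourceme.sh     # modules, proxies, SYCL compile options
    bash ./config-command    # runs ../../configure at the source root
    make -j 16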
systems/Aurora/proxies.sh (new file, 9 lines)
@@ -0,0 +1,9 @@
export HTTP_PROXY=http://proxy.alcf.anl.gov:3128
export HTTPS_PROXY=http://proxy.alcf.anl.gov:3128
export http_proxy=http://proxy.alcf.anl.gov:3128
export https_proxy=http://proxy.alcf.anl.gov:3128
export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
git config --global http.proxy http://proxy.alcf.anl.gov:3128
module use /soft/modulefiles
module load intel_compute_runtime/release/agama-devel-682.22
systems/Aurora/sourceme.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
#export ONEAPI_DEVICE_SELECTOR=level_zero:0.0

module use /soft/modulefiles
module load intel_compute_runtime/release/agama-devel-682.22

export FI_CXI_DEFAULT_CQ_SIZE=131072
export FI_CXI_CQ_FILL_PERCENT=20

export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
#export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-intel-enable-auto-large-GRF-mode"

#
# -ftarget-register-alloc-mode=pvc:default
# -ftarget-register-alloc-mode=pvc:small
# -ftarget-register-alloc-mode=pvc:large
# -ftarget-register-alloc-mode=pvc:auto
#

export HTTP_PROXY=http://proxy.alcf.anl.gov:3128
export HTTPS_PROXY=http://proxy.alcf.anl.gov:3128
export http_proxy=http://proxy.alcf.anl.gov:3128
export https_proxy=http://proxy.alcf.anl.gov:3128
#export MPIR_CVAR_CH4_OFI_ENABLE_HMEM=1
git config --global http.proxy http://proxy.alcf.anl.gov:3128

export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
systems/Aurora/tests/repro16.pbs (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=16
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 16 nodes, 192 ranks
CMD="mpiexec -np 192 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Test_dwf_mixedcg_prec --mpi 2.4.4.6 --grid 64.128.128.192 \
--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000"
$CMD
systems/Aurora/tests/solver/stag16.pbs (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/bash

## qsub -q EarlyAppAccess -A Aurora_Deployment -I -l select=1 -l walltime=60:00

#PBS -q EarlyAppAccess
#PBS -l select=16
#PBS -l walltime=01:00:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../../sourceme.sh

cat $PBS_NODEFILE

export OMP_NUM_THREADS=3
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 16 nodes, 192 ranks
CMD="mpiexec -np 192 -ppn 12 -envall \
./gpu_tile_compact.sh \
./Test_staggered_cg_prec --mpi 2.4.4.6 --grid 128.128.128.192 \
--shm-mpi 1 --shm 4096 --device-mem 32000 --accelerator-threads 32 --seconds 3000"
$CMD
systems/Booster/benchmarks/Benchmark_usqcd.csv (new file, 70 lines)
@@ -0,0 +1,70 @@
Memory Bandwidth

Bytes, GB/s per node
3145728, 225.900365
50331648, 2858.859504
254803968, 4145.556367
805306368, 4905.772480
1966080000, 4978.312557


GEMM

M, N, K, BATCH, GF/s per rank
16, 8, 16, 256, 1.713639
16, 16, 16, 256, 288.268316
16, 32, 16, 256, 597.053950
32, 8, 32, 256, 557.382591
32, 16, 32, 256, 1100.145311
32, 32, 32, 256, 1885.080449
64, 8, 64, 256, 1725.163599
64, 16, 64, 256, 3389.336566
64, 32, 64, 256, 4168.252422
16, 8, 256, 256, 1326.262134
16, 16, 256, 256, 2318.095475
16, 32, 256, 256, 3555.436503
32, 8, 256, 256, 1920.139170
32, 16, 256, 256, 3486.174753
32, 32, 256, 256, 5320.821724
64, 8, 256, 256, 2539.597502
64, 16, 256, 256, 5003.456775
64, 32, 256, 256, 7837.531562
8, 256, 16, 256, 1427.848170
16, 256, 16, 256, 2222.147815
32, 256, 16, 256, 2877.121715
8, 256, 32, 256, 1922.890086
16, 256, 32, 256, 3199.469082
32, 256, 32, 256, 4845.405343
8, 256, 64, 256, 2639.483343
16, 256, 64, 256, 5012.800299
32, 256, 64, 256, 7216.006882


Communications

Packet bytes, direction, GB/s per node
4718592, 2, 206.570734
4718592, 3, 207.501847
4718592, 6, 189.730277
4718592, 7, 204.301218
15925248, 2, 307.882997
15925248, 3, 287.901076
15925248, 6, 295.603109
15925248, 7, 300.682033
37748736, 2, 331.740364
37748736, 3, 338.610627
37748736, 6, 332.580657
37748736, 7, 336.336579


Per node summary table

L, Wilson, DWF4, Staggered  (GF/s per node)

8, 16, 1165, 10
12, 473, 4901, 163
16, 1436, 8464, 442
24, 4133, 10139, 1530
32, 5726, 11487, 2518
@@ -5,10 +5,12 @@ LIME=/p/home/jusers/boyle2/juwels/gm2dwf/boyle/
 --enable-gen-simd-width=64 \
 --enable-shm=nvlink \
 --enable-accelerator=cuda \
 --disable-gparity \
 --disable-fermion-reps \
 --with-lime=$LIME \
---disable-accelerator-cshift \
+--enable-accelerator-cshift \
 --disable-unified \
 CXX=nvcc \
 LDFLAGS="-cudart shared " \
-CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++14 -cudart shared"
+CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++17 -cudart shared -lcublas"
@@ -1,5 +1,5 @@
-module load GCC/9.3.0
-module load GMP/6.2.0
-module load MPFR/4.1.0
-module load OpenMPI/4.1.0rc1
-module load CUDA/11.3
+module load GCC
+module load GMP
+module load MPFR
+module load OpenMPI
+module load CUDA
@@ -3,7 +3,7 @@ spack load c-lime
 #export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/sw/crusher/spack-envs/base/opt/cray-sles15-zen3/gcc-11.2.0/gperftools-2.9.1-72ubwtuc5wcz2meqltbfdb76epufgzo2/lib
 module load emacs
 module load PrgEnv-gnu
-module load rocm/5.3.0
+module load rocm
 module load cray-mpich/8.1.23
 module load gmp
 module load cray-fftw
systems/Lumi/HMC/32cube/fthmc3gev.slurm (new file, 57 lines)
@@ -0,0 +1,57 @@
#!/bin/bash -l
#SBATCH --job-name=fthmc3ge
#SBATCH --partition=small-g
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
##SBATCH --cpus-per-task=8
#SBATCH --gpus-per-node=8
#SBATCH --time=2:00:00
#SBATCH --account=project_465000546
#SBATCH --gpu-bind=none
#SBATCH --exclusive
#SBATCH --mem=0

#sbatch --dependency=afterany:$SLURM_JOBID fthmc3gev.slurm

CPU_BIND="map_ldom:3,3,1,1,0,0,2,2"
MEM_BIND="map_mem:3,3,1,1,0,0,2,2"
echo $CPU_BIND

cat << EOF > ./select_gpu
#!/bin/bash
export GPU_MAP=(0 1 2 3 4 5 6 7)
export NUMA_MAP=(3 3 1 1 0 0 2 2)
export GPU=\${GPU_MAP[\$SLURM_LOCALID]}
export NUM=\${NUMA_MAP[\$SLURM_LOCALID]}
#export HIP_VISIBLE_DEVICES=\$GPU
export ROCR_VISIBLE_DEVICES=\$GPU
echo RANK \$SLURM_LOCALID using GPU \$GPU
echo NUMA \$SLURM_LOCALID using NUMA \${NUM}
echo numactl -m \$NUM -N \$NUM \$*
exec numactl -m \$NUM -N \$NUM \$*
EOF
cat ./select_gpu

chmod +x ./select_gpu

root=/scratch/project_465000546/boylepet/Grid/systems/Lumi
source ${root}/sourceme.sh

export OMP_NUM_THREADS=7
export MPICH_SMP_SINGLE_COPY_MODE=CMA
export MPICH_GPU_SUPPORT_ENABLED=1

#cfg=`ls -rt ckpoint_*lat* | tail -n 1 `
#traj="${cfg#*.}"
traj=0

vol=32.32.32.64
mpi=1.2.2.2
PARAMS="--mpi $mpi --accelerator-threads 16 --comms-sequential --shm 2048 --shm-mpi 0 --grid $vol"
#HMCPARAMS="--StartingType CheckpointStart --StartingTrajectory $traj --Trajectories 200"
HMCPARAMS="--StartingType ColdStart --StartingTrajectory $traj --Trajectories 20"

srun ./select_gpu ../FTHMC2p1f_3GeV $HMCPARAMS $PARAMS
@@ -23,7 +23,7 @@ echo mpfr X$MPFR
 --disable-fermion-reps \
 --disable-gparity \
 CXX=hipcc MPICXX=mpicxx \
-CXXFLAGS="-fPIC --offload-arch=gfx90a -I/opt/rocm/include/ -std=c++14 -I/opt/cray/pe/mpich/8.1.23/ofi/gnu/9.1/include" \
+CXXFLAGS="-fPIC --offload-arch=gfx90a -I/opt/rocm/include/ -std=c++17 -I/opt/cray/pe/mpich/8.1.23/ofi/gnu/9.1/include" \
 LDFLAGS="-L/opt/cray/pe/mpich/8.1.23/ofi/gnu/9.1/lib -lmpi -L/opt/cray/pe/mpich/8.1.23/gtl/lib -lmpi_gtl_hsa -lamdhip64 -fopenmp"
(deleted file)
@@ -1,3 +0,0 @@
-export https_proxy=http://proxy-chain.intel.com:911
-module load intel-release
-module load intel/mpich
@@ -1,9 +1,8 @@
 #!/bin/bash

 num_tile=2

-gpu_id=$(( (MPI_LOCAL_RANKID % num_tile ) ))
-tile_id=$((MPI_LOCAL_RANKID / num_tile))
+gpu_id=$(( (MPI_LOCALRANKID / num_tile ) ))
+tile_id=$((MPI_LOCALRANKID % num_tile))

 export ZE_AFFINITY_MASK=$gpu_id.$tile_id
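Besides correcting the variable name (Intel MPI sets MPI_LOCALRANKID; the old MPI_LOCAL_RANKID would be unset and evaluate to 0 for every rank), swapping / and % changes the placement order: consecutive local ranks now fill both tiles of one GPU before moving to the next. A hypothetical comparison, not part of the commit:

    #!/bin/bash
    # Sketch only: old vs new ZE_AFFINITY_MASK for the first four local ranks.
    num_tile=2
    for r in 0 1 2 3; do
      old="$(( r % num_tile )).$(( r / num_tile ))"   # before: 0.0 1.0 0.1 1.1
      new="$(( r / num_tile )).$(( r % num_tile ))"   # after:  0.0 0.1 1.0 1.1
      echo "local rank $r: old=$old new=$new"
    done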
systems/PVC-OEM/setup.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
export https_proxy=http://proxy-chain.intel.com:911
module load intel-release
module load intel/mpich
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1
export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file"
(deleted file)
@@ -1,62 +0,0 @@
#!/bin/sh
##SBATCH -p PVC-SPR-QZEH
##SBATCH -p PVC-ICX-QZNW
#SBATCH -p QZ1J-ICX-PVC
##SBATCH -p QZ1J-SPR-PVC-2C

#source /nfs/site/home/paboylex/ATS/GridNew/Grid/systems/PVC-nightly/setup.sh

export NT=8

export I_MPI_OFFLOAD=1
export I_MPI_OFFLOAD_TOPOLIB=level_zero
export I_MPI_OFFLOAD_DOMAIN_SIZE=-1

# export IGC_EnableLSCFenceUGMBeforeEOT=0
# export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file=False"
export SYCL_DEVICE_FILTER=gpu,level_zero
#export IGC_ShaderDumpEnable=1
#export IGC_DumpToCurrentDir=1
export I_MPI_OFFLOAD_CELL=tile
export EnableImplicitScaling=0
export EnableWalkerPartition=0
export ZE_AFFINITY_MASK=0.0
mpiexec -launcher ssh -n 1 -host localhost ./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 32.32.32.32 --accelerator-threads $NT --comms-sequential --shm-mpi 1 --device-mem 32768

export ZE_AFFINITY_MASK=0
export I_MPI_OFFLOAD_CELL=device
export EnableImplicitScaling=1
export EnableWalkerPartition=1

#mpiexec -launcher ssh -n 2 -host localhost vtune -collect gpu-hotspots -knob gpu-sampling-interval=1 -data-limit=0 -r ./vtune_run4 -- ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-overlap --shm-mpi 1

#mpiexec -launcher ssh -n 1 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-overlap --shm-mpi 1

#mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-sequential --shm-mpi 1

#mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-overlap --shm-mpi 1

#mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-sequential --shm-mpi 0

#mpirun -np 2 ./wrap.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.2 --grid 16.32.32.64 --accelerator-threads $NT --comms-sequential --shm-mpi 0
#mpirun -np 2 ./wrap.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.2 --grid 32.32.32.64 --accelerator-threads $NT --comms-sequential --shm-mpi 1
(deleted file)
@@ -1,33 +0,0 @@
#!/bin/bash
##SBATCH -p PVC-SPR-QZEH
##SBATCH -p PVC-ICX-QZNW

#SBATCH -p QZ1J-ICX-PVC

#source /nfs/site/home/paboylex/ATS/GridNew/Grid/systems/PVC-nightly/setup.sh

export NT=16

# export IGC_EnableLSCFenceUGMBeforeEOT=0
# export SYCL_PROGRAM_COMPILE_OPTIONS="-ze-opt-large-register-file=False"
#export IGC_ShaderDumpEnable=1
#export IGC_DumpToCurrentDir=1
export I_MPI_OFFLOAD=1
export I_MPI_OFFLOAD_TOPOLIB=level_zero
export I_MPI_OFFLOAD_DOMAIN_SIZE=-1
export SYCL_DEVICE_FILTER=gpu,level_zero
export I_MPI_OFFLOAD_CELL=tile
export EnableImplicitScaling=0
export EnableWalkerPartition=0
#export SYCL_PI_LEVEL_ZERO_DEVICE_SCOPE_EVENTS=1
#export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_PI_LEVEL_ZERO_USE_COPY_ENGINE=0

for i in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
do
mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 1.1.1.2 --grid 32.32.32.64 --accelerator-threads $NT --shm-mpi 0 --device-mem 32768 > 1.1.1.2.log$i
mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --shm-mpi 0 --device-mem 32768 > 2.1.1.1.log$i
done

mpiexec -launcher ssh -n 2 -host localhost ./wrap.sh ./Benchmark_dwf_fp32 --mpi 2.1.1.1 --grid 64.32.32.32 --accelerator-threads $NT --comms-sequential --shm-mpi 0
(deleted file)
@@ -1,9 +0,0 @@
#!/bin/sh

export ZE_AFFINITY_MASK=0.$MPI_LOCALRANKID

echo Rank $MPI_LOCALRANKID ZE_AFFINITY_MASK is $ZE_AFFINITY_MASK

$@
(deleted file)
@@ -1,16 +0,0 @@
INSTALL=/nfs/site/home/paboylx/prereqs/
../../configure \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-comms=mpi-auto \
--disable-accelerator-cshift \
--disable-gparity \
--disable-fermion-reps \
--enable-shm=nvlink \
--enable-accelerator=sycl \
--enable-unified=no \
MPICXX=mpicxx \
CXX=dpcpp \
LDFLAGS="-fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$INSTALL/lib" \
CXXFLAGS="-fsycl-unnamed-lambda -fsycl -no-fma -I$INSTALL/include -Wno-tautological-compare"
(deleted file)
@@ -1,18 +0,0 @@
export https_proxy=http://proxy-chain.intel.com:911
#export LD_LIBRARY_PATH=/nfs/site/home/azusayax/install/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$HOME/prereqs/lib/:$LD_LIBRARY_PATH

module load intel-release
module load intel-comp-rt/embargo-ci-neo

#source /opt/intel/oneapi/PVC_setup.sh
#source /opt/intel/oneapi/ATS_setup.sh
#module load intel-nightly/20230331
#module load intel-comp-rt/ci-neo-master/026093

#module load intel/mpich
module load intel/mpich/pvc45.3
export PATH=~/ATS/pti-gpu/tools/onetrace/:$PATH

#clsh embargo-ci-neo-022845
#source /opt/intel/vtune_amplifier/amplxe-vars.sh
systems/SDCC-A100/bench.slurm (new file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/bash
#SBATCH --partition csi
#SBATCH --time=00:10:00
#SBATCH -A csigeneral
#SBATCH --exclusive
#SBATCH --nodes=1
#SBATCH --ntasks=4
#SBATCH --qos csi
#SBATCH --gres=gpu:4

source sourceme.sh

cat << EOF > select_gpu
#!/bin/bash
export GPU_MAP=(0 1 2 3)
export GPU=\${GPU_MAP[\$SLURM_LOCALID]}
export CUDA_VISIBLE_DEVICES=\$GPU
unset ROCR_VISIBLE_DEVICES
echo RANK \$SLURM_LOCALID using GPU \$GPU
exec \$*
EOF
chmod +x ./select_gpu

export OMP_NUM_THREADS=4
export OMPI_MCA_btl=^uct,openib
export UCX_TLS=cuda,gdr_copy,rc,rc_x,sm,cuda_copy,cuda_ipc
export UCX_RNDV_SCHEME=put_zcopy
export UCX_RNDV_THRESH=16384
export UCX_IB_GPU_DIRECT_RDMA=no
export UCX_MEMTYPE_CACHE=n

export OMP_NUM_THREADS=8
#srun -N1 -n1 nvidia-smi
#srun -N1 -n1 numactl -H > numa.txt
srun -N1 -n1 lstopo A100-topo.pdf

# 4.35 TF/s
#srun -N1 -n1 ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 --shm 2048 --shm-mpi 0 --accelerator-threads 16

srun -N1 -n4 ./select_gpu ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.2.2 --grid 32.32.64.64 --shm 2048 --shm-mpi 0 --accelerator-threads 16
systems/SDCC-A100/config-command (new file, 17 lines)
@@ -0,0 +1,17 @@
../../configure \
--enable-comms=mpi-auto \
--enable-unified=no \
--enable-shm=nvlink \
--enable-accelerator=cuda \
--enable-gen-simd-width=64 \
--enable-simd=GPU \
--disable-accelerator-cshift \
--disable-fermion-reps \
--disable-gparity \
CXX=nvcc \
MPICXX=mpicxx \
LDFLAGS="-cudart shared " \
CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++17 -cudart shared"
systems/SDCC-A100/sourceme.sh (new file, 2 lines)
@@ -0,0 +1,2 @@
module load cuda/12.2
module load openmpi
systems/SDCC-ARM/config-command-mpi (new file, 6 lines)
@@ -0,0 +1,6 @@
HDF=$HOME/paboyle/install

LDFLAGS=-L$HDF/lib CXX=clang++ ../../configure --enable-simd=NEONv8 --enable-comms=none --enable-unified=yes --disable-fermion-reps --disable-gparity --disable-debug --with-hdf5=$HDF
#LDFLAGS=-L$HDF/lib CXX=clang++ ../../configure --enable-simd=GEN --enable-comms=none --enable-unified=yes --disable-fermion-reps --disable-gparity --disable-debug --with-hdf5=$HDF
systems/SDCC-ICE/bench.slurm (new file, 31 lines)
@@ -0,0 +1,31 @@
#!/bin/bash
#SBATCH --partition lqcd
#SBATCH --time=00:20:00
#SBATCH -A lqcdtest
#SBATCH --exclusive
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --qos lqcd

source sourceme.sh

export OMP_NUM_THREADS=24
#srun -N1 -n1 numactl -H > numa.txt
#srun -N1 -n1 lstopo ice-topo.pdf

cat << EOF > select_socket
#!/bin/bash
export NUMA_MAP=(0 1)
export NUMA=\${NUMA_MAP[\$SLURM_LOCALID]}
exec \$*
EOF
chmod +x ./select_socket

#for vol in 8.8.8.16 8.8.8.32 8.8.8.64
#for vol in 8.8.16.16 8.8.16.32 8.8.16.64
for vol in 8.16.16.16 8.16.16.32 8.16.16.64 16.16.16.32 16.16.16.64 24.24.24.64 32.32.32.32
do
srun --cpu-bind=ldoms -N1 -n2 ./select_socket ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.1.2 --grid $vol --dslash-asm > $vol.2socket.out
srun --cpu-bind=ldoms -N1 -n1 ./select_socket ./benchmarks/Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid $vol --dslash-asm > $vol.1socket.out
done
systems/SDCC-ICE/config-command (new file, 19 lines)
@@ -0,0 +1,19 @@
../../configure \
--enable-debug \
--enable-comms=mpi-auto \
--enable-unified=yes \
--enable-shm=shmopen \
--enable-shm-fast-path=shmopen \
--enable-accelerator=none \
--enable-simd=AVX512 \
--disable-accelerator-cshift \
--disable-fermion-reps \
--disable-gparity \
CXX=clang++ \
MPICXX=mpicxx \
LDFLAGS=-L/direct/sdcc+u/paboyle/spack/opt/spack/linux-almalinux8-icelake/gcc-8.5.0/hwloc-2.9.1-hgkscnt5pferhtde4ahctlupb6qf3vtl/lib/ \
LIBS=-lhwloc \
CXXFLAGS="-std=c++17"
systems/SDCC-ICE/sourceme.sh (new file, 2 lines)
@@ -0,0 +1,2 @@
export LD_LIBRARY_PATH=/direct/sdcc+u/paboyle/spack/opt/spack/linux-almalinux8-icelake/gcc-8.5.0/llvm-12.0.1-agey6vtuw3e375rewhhobvkznjh5ltz4/lib/:$LD_LIBRARY_PATH
module load openmpi
@@ -20,7 +20,7 @@ unset OMP_PLACES

 cd $PBS_O_WORKDIR

-qsub jobscript.pbs
+#qsub jobscript.pbs

 echo Jobid: $PBS_JOBID
 echo Running on host `hostname`
@@ -44,3 +44,4 @@ CMD="mpiexec -np ${NTOTRANKS} -ppn ${NRANKS} -d ${NDEPTH} --cpu-bind=depth -enva
 ./Benchmark_dwf_fp32 --mpi 1.1.2.6 --grid 16.32.64.192 --comms-overlap \
 --shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"

+$CMD
@@ -45,8 +45,8 @@ echo "rank $PALS_RANKID ; local rank $PALS_LOCAL_RANKID ; ZE_AFFINITY_MASK=$ZE_A

 if [ $PALS_LOCAL_RANKID = 0 ]
 then
-  onetrace --chrome-device-timeline "$@"
-#  "$@"
+#  onetrace --chrome-device-timeline "$@"
+  "$@"
 else
   "$@"
 fi
@@ -11,6 +11,6 @@ TOOLS=$HOME/tools
 --enable-unified=no \
 MPICXX=mpicxx \
 CXX=icpx \
-LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -lapmidg -L$TOOLS/lib64/" \
+LDFLAGS="-fiopenmp -fsycl -fsycl-device-code-split=per_kernel -fsycl-device-lib=all -lze_loader -L$TOOLS/lib64/" \
 CXXFLAGS="-fiopenmp -fsycl-unnamed-lambda -fsycl -I$INSTALL/include -Wno-tautological-compare -I$HOME/ -I$TOOLS/include"
@@ -1,3 +1,2 @@
 BREW=/opt/local/
-MPICXX=mpicxx ../../configure --enable-simd=GEN --enable-comms=mpi-auto --enable-unified=yes --prefix $HOME/QCD/GridInstall --with-lime=/Users/peterboyle/QCD/SciDAC/install/ --with-openssl=$BREW --disable-fermion-reps --disable-gparity --disable-debug
+CXXFLAGS=-I/opt/local/include LDFLAGS=-L/opt/local/lib/ CXX=c++-13 MPICXX=mpicxx ../../configure --enable-simd=GEN --enable-comms=mpi-auto --enable-unified=yes --prefix $HOME/QCD/GridInstall --with-lime=/Users/peterboyle/QCD/SciDAC/install/ --with-openssl=$BREW --disable-fermion-reps --disable-gparity --disable-debug