#!/bin/bash
#PBS -q debug
#PBS -l select=1
#PBS -l walltime=00:20:00
#PBS -A LatticeQCD_aesp_CNDA

#export OMP_PROC_BIND=spread
#unset OMP_PLACES

cd $PBS_O_WORKDIR

source ../sourceme.sh

module load pti-gpu

#cat $PBS_NODEFILE

export OMP_NUM_THREADS=4

# Enable the GPU-aware pipeline in MPICH's OFI netmod; the tuning knobs
# below are left commented out for experimentation.
export MPIR_CVAR_CH4_OFI_ENABLE_GPU_PIPELINE=1

#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE
#unset MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE
#unset MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST

#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_D2H_ENGINE_TYPE=0
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_H2D_ENGINE_TYPE=0
#export MPIR_CVAR_GPU_USE_IMMEDIATE_COMMAND_LIST=1
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_BUFFER_SZ=1048576
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_THRESHOLD=131072
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_NUM_BUFFERS_PER_CHUNK=16
#export MPIR_CVAR_CH4_OFI_GPU_PIPELINE_MAX_NUM_BUFFERS=16

# Bind each rank to the NIC closest to the GPU it uses.
export MPICH_OFI_NIC_POLICY=GPU

# 12 ppn, 1 node, 12 ranks
#
# Host/device communications benchmark (disabled).
CMD="mpiexec -np 12 -ppn 12 -envall \
	./gpu_tile_compact.sh \
	./Benchmark_comms_host_device --mpi 2.2.1.3 --grid 24.32.32.24 \
	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
#$CMD | tee 1node.comms

# Single-tile DWF fp32 benchmark (disabled).
CMD="mpiexec -np 1 -ppn 1 -envall \
	./gpu_tile_compact.sh \
	./Benchmark_dwf_fp32 --mpi 1.1.1.1 --grid 16.32.32.32 \
	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32"
#$CMD | tee 1tile.dwf

# Full-node DWF fp32 benchmark with communication/compute overlap (enabled).
CMD="mpiexec -np 12 -ppn 12 -envall \
	./gpu_tile_compact.sh \
	./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 32.32.32.48 \
	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
$CMD | tee 1node.32.32.32.48.dwf

# Larger-volume variants (disabled).
CMD="mpiexec -np 12 -ppn 12 -envall \
	./gpu_tile_compact.sh \
	./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.64.32.96 \
	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 1node.64.64.32.96.dwf

CMD="mpiexec -np 12 -ppn 12 -envall \
	./gpu_tile_compact.sh \
	./Benchmark_dwf_fp32 --mpi 2.2.1.3 --grid 64.32.32.48 \
	--shm-mpi 0 --shm 2048 --device-mem 32000 --accelerator-threads 32 --comms-overlap"
#$CMD | tee 1node.64.32.32.48.dwf
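
# Usage sketch (assumptions: this file is saved as, e.g., bench_dwf.pbs in the
# benchmarks directory next to gpu_tile_compact.sh, sourceme.sh, and the Grid
# benchmark binaries; the filename is hypothetical):
#
#   qsub bench_dwf.pbs            # submit to the PBS "debug" queue
#   qstat -u $USER                # monitor the job
#   cat 1node.32.32.32.48.dwf     # inspect the enabled benchmark's output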