mirror of https://github.com/paboyle/Grid.git

Update

This commit is contained in:
parent 1fb6aaf150
commit e188c0512e
systems/Booster/comms.4node.perf (new file, 129 lines)
@@ -0,0 +1,129 @@
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device identifier: NVIDIA A100-SXM4-40GB
AcceleratorCudaInit[0]: totalGlobalMem: 42505273344
AcceleratorCudaInit[0]: managedMemory: 1
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
AcceleratorCudaInit[0]: warpSize: 32
AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
SharedMemoryMpi: World communicator of size 16
SharedMemoryMpi: Node communicator of size 4
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x14e740000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|_ | | | | | | | | | | | | _|__
__|_ _|__
__|_ GGGG RRRR III DDDD _|__
__|_ G R R I D D _|__
__|_ G R R I D D _|__
__|_ G GG RRRR I D D _|__
__|_ G G R R I D D _|__
__|_ GGGG R R III DDDD _|__
__|_ _|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
| | | | | | | | | | | | | |


Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Current Grid git commit hash=f660dc67e4b193afc4015bc5e5fe47cfdbb0356e: (HEAD -> develop, origin/develop, origin/HEAD) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 0.706584 s : Grid is setup to use 4 threads
Grid : Message : 0.706591 s : Number of iterations to average: 250
Grid : Message : 0.706592 s : ====================================================================================================
Grid : Message : 0.706593 s : = Benchmarking sequential halo exchange from host memory
Grid : Message : 0.706594 s : ====================================================================================================
Grid : Message : 0.706595 s : L Ls bytes MB/s uni (err/min/max) MB/s bidi (err/min/max)
Grid : Message : 0.744123 s : 8 8 393216 45765.4 91530.7
Grid : Message : 0.760079 s : 8 8 393216 49399.0 98798.0
Grid : Message : 0.776168 s : 8 8 393216 48904.4 97808.8
Grid : Message : 0.793391 s : 8 8 393216 45680.3 91360.6
Grid : Message : 0.841483 s : 12 8 1327104 61988.1 123976.3
Grid : Message : 0.881324 s : 12 8 1327104 66673.6 133347.1
Grid : Message : 0.923429 s : 12 8 1327104 63049.8 126099.6
Grid : Message : 0.965199 s : 12 8 1327104 63558.6 127117.2
Grid : Message : 1.759350 s : 16 8 3145728 63505.2 127010.3
Grid : Message : 1.162325 s : 16 8 3145728 72851.5 145703.0
Grid : Message : 1.252374 s : 16 8 3145728 69873.2 139746.5
Grid : Message : 1.343308 s : 16 8 3145728 69193.9 138387.8
Grid : Message : 1.537929 s : 20 8 6144000 69283.6 138567.2
Grid : Message : 1.689674 s : 20 8 6144000 80997.7 161995.4
Grid : Message : 1.844366 s : 20 8 6144000 79440.4 158880.8
Grid : Message : 2.585000 s : 20 8 6144000 78662.8 157325.7
Grid : Message : 2.296310 s : 24 8 10616832 78318.9 156637.8
Grid : Message : 2.552185 s : 24 8 10616832 82996.5 165993.0
Grid : Message : 2.810117 s : 24 8 10616832 82325.6 164651.2
Grid : Message : 3.708760 s : 24 8 10616832 81433.0 162866.1
Grid : Message : 3.519278 s : 28 8 16859136 81498.1 162996.1
Grid : Message : 3.919983 s : 28 8 16859136 84154.5 168309.0
Grid : Message : 4.324104 s : 28 8 16859136 83438.8 166877.5
Grid : Message : 4.726446 s : 28 8 16859136 83807.5 167615.0
Grid : Message : 5.379742 s : 32 8 25165824 83183.5 166366.9
Grid : Message : 5.973767 s : 32 8 25165824 84735.1 169470.3
Grid : Message : 6.566572 s : 32 8 25165824 84905.7 169811.3
Grid : Message : 7.161794 s : 32 8 25165824 84561.2 169122.3
Grid : Message : 7.162522 s : ====================================================================================================
Grid : Message : 7.162527 s : = Benchmarking sequential halo exchange from GPU memory
Grid : Message : 7.162528 s : ====================================================================================================
Grid : Message : 7.162529 s : L Ls bytes MB/s uni (err/min/max) MB/s bidi (err/min/max)
Grid : Message : 7.222442 s : 8 8 393216 14172.2 28344.5
Grid : Message : 7.253802 s : 8 8 393216 25092.0 50183.9
Grid : Message : 7.278690 s : 8 8 393216 31614.1 63228.2
Grid : Message : 7.304138 s : 8 8 393216 30915.6 61831.3
Grid : Message : 7.347792 s : 12 8 1327104 109873.2 219746.5
Grid : Message : 7.370904 s : 12 8 1327104 114900.8 229801.6
Grid : Message : 7.418730 s : 12 8 1327104 55509.9 111019.9
Grid : Message : 7.466445 s : 12 8 1327104 55639.1 111278.2
Grid : Message : 7.531724 s : 16 8 3145728 168680.8 337361.6
Grid : Message : 7.567205 s : 16 8 3145728 177379.0 354758.0
Grid : Message : 7.669544 s : 16 8 3145728 61483.2 122966.5
Grid : Message : 7.771543 s : 16 8 3145728 61687.6 123375.2
Grid : Message : 7.852470 s : 20 8 6144000 232313.7 464627.4
Grid : Message : 7.905813 s : 20 8 6144000 230410.1 460820.2
Grid : Message : 8.979520 s : 20 8 6144000 63957.7 127915.4
Grid : Message : 8.287586 s : 20 8 6144000 64802.3 129604.5
Grid : Message : 8.393759 s : 24 8 10616832 272264.9 544529.7
Grid : Message : 8.471189 s : 24 8 10616832 274276.5 548553.0
Grid : Message : 8.794453 s : 24 8 10616832 65688.1 131376.1
Grid : Message : 9.122233 s : 24 8 10616832 64782.8 129565.6
Grid : Message : 9.263925 s : 28 8 16859136 300457.8 600915.5
Grid : Message : 9.375294 s : 28 8 16859136 302794.4 605588.7
Grid : Message : 9.757443 s : 28 8 16859136 88236.5 176473.1
Grid : Message : 10.134856 s : 28 8 16859136 89343.4 178686.7
Grid : Message : 10.318775 s : 32 8 25165824 327347.5 654695.1
Grid : Message : 10.470617 s : 32 8 25165824 331500.0 663000.0
Grid : Message : 11.235800 s : 32 8 25165824 91024.0 182048.1
Grid : Message : 11.576311 s : 32 8 25165824 91062.1 182124.2
Grid : Message : 11.592542 s : ====================================================================================================
Grid : Message : 11.592548 s : = All done; Bye Bye
Grid : Message : 11.592549 s : ====================================================================================================
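The byte counts in the tables above are consistent with a single-precision 4-spinor halo face: assuming 96 bytes per 4d site (4 spins x 3 colours x 8-byte single-precision complex, an assumption not printed by the benchmark), each packet is L^3 x Ls x 96 bytes, and the bidirectional column is exactly twice the unidirectional one. A quick shell check of that assumption:

# Assumed packet-size model: L^3 * Ls * 96 bytes, with Ls = 8 as in the table
for L in 8 12 16 20 24 28 32; do
  echo "L=$L bytes=$(( L*L*L * 8 * 96 ))"
done
# Prints 393216, 1327104, 3145728, 6144000, 10616832, 16859136, 25165824,
# matching the 'bytes' column above.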
systems/Booster/config-command (new file, 14 lines)
@@ -0,0 +1,14 @@
LIME=/p/home/jusers/boyle2/juwels/gm2dwf/boyle/
../../configure \
--enable-comms=mpi \
--enable-simd=GPU \
--enable-gen-simd-width=64 \
--enable-shm=nvlink \
--enable-accelerator=cuda \
--with-lime=$LIME \
--disable-accelerator-cshift \
--disable-unified \
CXX=nvcc \
LDFLAGS="-cudart shared " \
CXXFLAGS="-ccbin mpicxx -gencode arch=compute_80,code=sm_80 -std=c++14 -cudart shared"
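A plausible way to use this on Booster is to load the toolchain from sourceme.sh (added below), run this file from a build directory two levels below the source root (the ../../configure path assumes that layout), then build; the LIME path points at a personal directory and would need adapting. A sketch, with an illustrative build directory:

mkdir -p build/Booster && cd build/Booster   # illustrative layout, two levels under the Grid source root
source ../../systems/Booster/sourceme.sh     # load GCC/GMP/MPFR/OpenMPI/CUDA modules
bash ../../systems/Booster/config-command    # runs ../../configure with the flags above
make -j                                      # build Grid and the benchmarks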
systems/Booster/dwf.16node.perf (new file, 156 lines)
@@ -0,0 +1,156 @@
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device identifier: NVIDIA A100-SXM4-40GB
AcceleratorCudaInit[0]: totalGlobalMem: 42505273344
AcceleratorCudaInit[0]: managedMemory: 1
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
AcceleratorCudaInit[0]: warpSize: 32
AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
SharedMemoryMpi: World communicator of size 64
SharedMemoryMpi: Node communicator of size 4
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x14ac40000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|_ | | | | | | | | | | | | _|__
__|_ _|__
__|_ GGGG RRRR III DDDD _|__
__|_ G R R I D D _|__
__|_ G R R I D D _|__
__|_ G GG RRRR I D D _|__
__|_ G G R R I D D _|__
__|_ GGGG R R III DDDD _|__
__|_ _|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
| | | | | | | | | | | | | |


Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Current Grid git commit hash=f660dc67e4b193afc4015bc5e5fe47cfdbb0356e: (HEAD -> develop, origin/develop, origin/HEAD) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 0.910318 s : Grid Layout
Grid : Message : 0.910320 s : Global lattice size : 64 64 64 256
Grid : Message : 0.910325 s : OpenMP threads : 4
Grid : Message : 0.910326 s : MPI tasks : 2 2 2 8
Grid : Message : 0.973956 s : Making s innermost grids
Grid : Message : 1.198830 s : Initialising 4d RNG
Grid : Message : 1.119813 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 1.119870 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 2.683307 s : Initialising 5d RNG
Grid : Message : 4.220535 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 4.220563 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 37.198140 s : Initialised RNGs
Grid : Message : 39.952612 s : Drawing gauge field
Grid : Message : 40.488019 s : Random gauge initialised
Grid : Message : 42.659220 s : Setting up Cshift based reference
Grid : Message : 47.622210 s : *****************************************************************
Grid : Message : 47.622236 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 47.622237 s : *****************************************************************
Grid : Message : 47.622238 s : *****************************************************************
Grid : Message : 47.622239 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 47.622240 s : * Vectorising space-time by 8
Grid : Message : 47.622241 s : * VComplexF size is 64 B
Grid : Message : 47.622242 s : * SINGLE precision
Grid : Message : 47.622243 s : * Using Overlapped Comms/Compute
Grid : Message : 47.622244 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 47.622245 s : *****************************************************************
Grid : Message : 48.950210 s : Called warmup
Grid : Message : 77.311124 s : Called Dw 3000 times in 2.83592e+07 us
Grid : Message : 77.311181 s : mflop/s = 1.49934e+08
Grid : Message : 77.311184 s : mflop/s per rank = 2.34273e+06
Grid : Message : 77.311185 s : mflop/s per node = 9.37091e+06
Grid : Message : 77.311186 s : RF GiB/s (base 2) = 304663
Grid : Message : 77.311187 s : mem GiB/s (base 2) = 190415
Grid : Message : 77.314752 s : norm diff 1.03478e-13
Grid : Message : 77.349587 s : #### Dhop calls report
Grid : Message : 77.349591 s : WilsonFermion5D Number of DhopEO Calls : 6002
Grid : Message : 77.349613 s : WilsonFermion5D TotalTime /Calls : 4761.53 us
Grid : Message : 77.349615 s : WilsonFermion5D CommTime /Calls : 3363.09 us
Grid : Message : 77.349616 s : WilsonFermion5D FaceTime /Calls : 469.094 us
Grid : Message : 77.349617 s : WilsonFermion5D ComputeTime1/Calls : 26.8794 us
Grid : Message : 77.349618 s : WilsonFermion5D ComputeTime2/Calls : 949.276 us
Grid : Message : 77.349702 s : Average mflops/s per call : 2.68569e+10
Grid : Message : 77.349710 s : Average mflops/s per call per rank : 4.1964e+08
Grid : Message : 77.349711 s : Average mflops/s per call per node : 1.67856e+09
Grid : Message : 77.349712 s : Average mflops/s per call (full) : 1.51538e+08
Grid : Message : 77.349713 s : Average mflops/s per call per rank (full): 2.36779e+06
Grid : Message : 77.349714 s : Average mflops/s per call per node (full): 9.47115e+06
Grid : Message : 77.349715 s : WilsonFermion5D Stencil
Grid : Message : 77.349716 s : WilsonFermion5D StencilEven
Grid : Message : 77.349717 s : WilsonFermion5D StencilOdd
Grid : Message : 77.349718 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 77.349719 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 77.349720 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 104.883719 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 104.883743 s : Called DwDag
Grid : Message : 104.883744 s : norm dag result 12.0421
Grid : Message : 104.901901 s : norm dag ref 12.0421
Grid : Message : 104.917822 s : norm dag diff 7.63254e-14
Grid : Message : 104.957229 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 105.334551 s : src_e0.499998
Grid : Message : 105.416616 s : src_o0.500002
Grid : Message : 105.486729 s : *********************************************************
Grid : Message : 105.486732 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 105.486733 s : * Vectorising space-time by 8
Grid : Message : 105.486734 s : * SINGLE precision
Grid : Message : 105.486739 s : * Using Overlapped Comms/Compute
Grid : Message : 105.486740 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 105.486741 s : *********************************************************
Grid : Message : 119.695464 s : Deo mflop/s = 1.5039e+08
Grid : Message : 119.695494 s : Deo mflop/s per rank 2.34984e+06
Grid : Message : 119.695496 s : Deo mflop/s per node 9.39937e+06
Grid : Message : 119.695502 s : #### Dhop calls report
Grid : Message : 119.695503 s : WilsonFermion5D Number of DhopEO Calls : 3001
Grid : Message : 119.695505 s : WilsonFermion5D TotalTime /Calls : 4734.45 us
Grid : Message : 119.695507 s : WilsonFermion5D CommTime /Calls : 3287.23 us
Grid : Message : 119.695508 s : WilsonFermion5D FaceTime /Calls : 537.724 us
Grid : Message : 119.695509 s : WilsonFermion5D ComputeTime1/Calls : 16.0483 us
Grid : Message : 119.695510 s : WilsonFermion5D ComputeTime2/Calls : 939.854 us
Grid : Message : 119.695533 s : Average mflops/s per call : 4.50726e+10
Grid : Message : 119.695535 s : Average mflops/s per call per rank : 7.04259e+08
Grid : Message : 119.695536 s : Average mflops/s per call per node : 2.81703e+09
Grid : Message : 119.695537 s : Average mflops/s per call (full) : 1.52405e+08
Grid : Message : 119.695538 s : Average mflops/s per call per rank (full): 2.38133e+06
Grid : Message : 119.695539 s : Average mflops/s per call per node (full): 9.52532e+06
Grid : Message : 119.695540 s : WilsonFermion5D Stencil
Grid : Message : 119.695541 s : WilsonFermion5D StencilEven
Grid : Message : 119.695542 s : WilsonFermion5D StencilOdd
Grid : Message : 119.695543 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 119.695544 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 119.695545 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 119.752707 s : r_e6.02108
Grid : Message : 119.759448 s : r_o6.02101
Grid : Message : 119.765382 s : res12.0421
Grid : Message : 120.419093 s : norm diff 0
Grid : Message : 120.829772 s : norm diff even 0
Grid : Message : 120.909078 s : norm diff odd 0
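The per-rank and per-node summary lines follow directly from the 64-rank layout on 16 nodes (4 GPUs per node); dividing the printed total through reproduces them up to rounding of the displayed values. A quick check in shell:

# 1.49934e+08 mflop/s total over 64 ranks / 16 nodes, as printed above:
awk 'BEGIN { t=1.49934e8; printf "per rank %.5e  per node %.5e\n", t/64, t/16 }'
# ~2.343e+06 per rank and ~9.371e+06 per node, consistent with the report.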
systems/Booster/dwf.4node.perf (new file, 156 lines)
@@ -0,0 +1,156 @@
OPENMPI detected
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device Number : 0
AcceleratorCudaInit[0]: ========================
AcceleratorCudaInit[0]: Device identifier: NVIDIA A100-SXM4-40GB
AcceleratorCudaInit[0]: totalGlobalMem: 42505273344
AcceleratorCudaInit[0]: managedMemory: 1
AcceleratorCudaInit[0]: isMultiGpuBoard: 0
AcceleratorCudaInit[0]: warpSize: 32
AcceleratorCudaInit[0]: pciBusID: 3
AcceleratorCudaInit[0]: pciDeviceID: 0
AcceleratorCudaInit[0]: maxGridSize (2147483647,65535,65535)
AcceleratorCudaInit: using default device
AcceleratorCudaInit: assume user either uses a) IBM jsrun, or
AcceleratorCudaInit: b) invokes through a wrapping script to set CUDA_VISIBLE_DEVICES, UCX_NET_DEVICES, and numa binding
AcceleratorCudaInit: Configure options --enable-summit, --enable-select-gpu=no
AcceleratorCudaInit: ================================================
SharedMemoryMpi: World communicator of size 16
SharedMemoryMpi: Node communicator of size 4
0SharedMemoryMpi: SharedMemoryMPI.cc acceleratorAllocDevice 2147483648bytes at 0x14b7a0000000 for comms buffers
Setting up IPC

__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|_ | | | | | | | | | | | | _|__
__|_ _|__
__|_ GGGG RRRR III DDDD _|__
__|_ G R R I D D _|__
__|_ G R R I D D _|__
__|_ G GG RRRR I D D _|__
__|_ G G R R I D D _|__
__|_ GGGG R R III DDDD _|__
__|_ _|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
__|__|__|__|__|__|__|__|__|__|__|__|__|__|__
| | | | | | | | | | | | | |


Copyright (C) 2015 Peter Boyle, Azusa Yamaguchi, Guido Cossu, Antonin Portelli and other authors

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Current Grid git commit hash=f660dc67e4b193afc4015bc5e5fe47cfdbb0356e: (HEAD -> develop, origin/develop, origin/HEAD) uncommited changes

Grid : Message : ================================================
Grid : Message : MPI is initialised and logging filters activated
Grid : Message : ================================================
Grid : Message : Requested 2147483648 byte stencil comms buffers
Grid : Message : MemoryManager Cache 34004218675 bytes
Grid : Message : MemoryManager::Init() setting up
Grid : Message : MemoryManager::Init() cache pool for recent allocations: SMALL 32 LARGE 8
Grid : Message : MemoryManager::Init() Non unified: Caching accelerator data in dedicated memory
Grid : Message : MemoryManager::Init() Using cudaMalloc
Grid : Message : 0.751288 s : Grid Layout
Grid : Message : 0.751291 s : Global lattice size : 64 64 64 64
Grid : Message : 0.751296 s : OpenMP threads : 4
Grid : Message : 0.751297 s : MPI tasks : 2 2 2 2
Grid : Message : 0.792527 s : Making s innermost grids
Grid : Message : 0.835495 s : Initialising 4d RNG
Grid : Message : 0.940402 s : Intialising parallel RNG with unique string 'The 4D RNG'
Grid : Message : 0.940421 s : Seed SHA256: 49db4542db694e3b1a74bf2592a8c1b83bfebbe18401693c2609a4c3af1
Grid : Message : 1.336448 s : Initialising 5d RNG
Grid : Message : 2.956230 s : Intialising parallel RNG with unique string 'The 5D RNG'
Grid : Message : 2.956251 s : Seed SHA256: b6316f2fac44ce14111f93e0296389330b077bfd0a7b359f781c58589f8a
Grid : Message : 11.242345 s : Initialised RNGs
Grid : Message : 13.415017 s : Drawing gauge field
Grid : Message : 13.937529 s : Random gauge initialised
Grid : Message : 15.529056 s : Setting up Cshift based reference
Grid : Message : 21.472100 s : *****************************************************************
Grid : Message : 21.472690 s : * Kernel options --dslash-generic, --dslash-unroll, --dslash-asm
Grid : Message : 21.472700 s : *****************************************************************
Grid : Message : 21.472710 s : *****************************************************************
Grid : Message : 21.472720 s : * Benchmarking DomainWallFermionR::Dhop
Grid : Message : 21.472730 s : * Vectorising space-time by 8
Grid : Message : 21.472740 s : * VComplexF size is 64 B
Grid : Message : 21.472750 s : * SINGLE precision
Grid : Message : 21.472760 s : * Using Overlapped Comms/Compute
Grid : Message : 21.472770 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 21.472780 s : *****************************************************************
Grid : Message : 22.302206 s : Called warmup
Grid : Message : 53.261410 s : Called Dw 3000 times in 3.07237e+07 us
Grid : Message : 53.261970 s : mflop/s = 3.45988e+07
Grid : Message : 53.261990 s : mflop/s per rank = 2.16243e+06
Grid : Message : 53.262000 s : mflop/s per node = 8.64971e+06
Grid : Message : 53.262010 s : RF GiB/s (base 2) = 70304
Grid : Message : 53.262020 s : mem GiB/s (base 2) = 43940
Grid : Message : 53.297930 s : norm diff 1.03481e-13
Grid : Message : 53.647310 s : #### Dhop calls report
Grid : Message : 53.647360 s : WilsonFermion5D Number of DhopEO Calls : 6002
Grid : Message : 53.647410 s : WilsonFermion5D TotalTime /Calls : 5149.34 us
Grid : Message : 53.647430 s : WilsonFermion5D CommTime /Calls : 3752.8 us
Grid : Message : 53.647440 s : WilsonFermion5D FaceTime /Calls : 474.579 us
Grid : Message : 53.647450 s : WilsonFermion5D ComputeTime1/Calls : 46.5708 us
Grid : Message : 53.647460 s : WilsonFermion5D ComputeTime2/Calls : 926.982 us
Grid : Message : 53.647550 s : Average mflops/s per call : 3.82643e+09
Grid : Message : 53.647580 s : Average mflops/s per call per rank : 2.39152e+08
Grid : Message : 53.647590 s : Average mflops/s per call per node : 9.56608e+08
Grid : Message : 53.647610 s : Average mflops/s per call (full) : 3.50314e+07
Grid : Message : 53.647620 s : Average mflops/s per call per rank (full): 2.18946e+06
Grid : Message : 53.647630 s : Average mflops/s per call per node (full): 8.75785e+06
Grid : Message : 53.647640 s : WilsonFermion5D Stencil
Grid : Message : 53.647650 s : WilsonFermion5D StencilEven
Grid : Message : 53.647660 s : WilsonFermion5D StencilOdd
Grid : Message : 53.647670 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 53.647680 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 53.647690 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 80.460422 s : Compare to naive wilson implementation Dag to verify correctness
Grid : Message : 80.460440 s : Called DwDag
Grid : Message : 80.460441 s : norm dag result 12.0421
Grid : Message : 80.481170 s : norm dag ref 12.0421
Grid : Message : 80.497179 s : norm dag diff 7.63236e-14
Grid : Message : 80.536902 s : Calling Deo and Doe and //assert Deo+Doe == Dunprec
Grid : Message : 80.917100 s : src_e0.499997
Grid : Message : 80.993190 s : src_o0.500003
Grid : Message : 81.634480 s : *********************************************************
Grid : Message : 81.634500 s : * Benchmarking DomainWallFermionF::DhopEO
Grid : Message : 81.634510 s : * Vectorising space-time by 8
Grid : Message : 81.634520 s : * SINGLE precision
Grid : Message : 81.634530 s : * Using Overlapped Comms/Compute
Grid : Message : 81.634540 s : * Using GENERIC Nc WilsonKernels
Grid : Message : 81.634550 s : *********************************************************
Grid : Message : 96.356262 s : Deo mflop/s = 3.49003e+07
Grid : Message : 96.356288 s : Deo mflop/s per rank 2.18127e+06
Grid : Message : 96.356290 s : Deo mflop/s per node 8.72508e+06
Grid : Message : 96.356297 s : #### Dhop calls report
Grid : Message : 96.356298 s : WilsonFermion5D Number of DhopEO Calls : 3001
Grid : Message : 96.356300 s : WilsonFermion5D TotalTime /Calls : 5095.74 us
Grid : Message : 96.356302 s : WilsonFermion5D CommTime /Calls : 3589.98 us
Grid : Message : 96.356303 s : WilsonFermion5D FaceTime /Calls : 552.086 us
Grid : Message : 96.356304 s : WilsonFermion5D ComputeTime1/Calls : 52.4692 us
Grid : Message : 96.356305 s : WilsonFermion5D ComputeTime2/Calls : 963.892 us
Grid : Message : 96.356324 s : Average mflops/s per call : 3.42222e+09
Grid : Message : 96.356326 s : Average mflops/s per call per rank : 2.13889e+08
Grid : Message : 96.356327 s : Average mflops/s per call per node : 8.55556e+08
Grid : Message : 96.356328 s : Average mflops/s per call (full) : 3.53999e+07
Grid : Message : 96.356329 s : Average mflops/s per call per rank (full): 2.21249e+06
Grid : Message : 96.356330 s : Average mflops/s per call per node (full): 8.84997e+06
Grid : Message : 96.356331 s : WilsonFermion5D Stencil
Grid : Message : 96.356332 s : WilsonFermion5D StencilEven
Grid : Message : 96.356333 s : WilsonFermion5D StencilOdd
Grid : Message : 96.356334 s : WilsonFermion5D Stencil Reporti()
Grid : Message : 96.356335 s : WilsonFermion5D StencilEven Reporti()
Grid : Message : 96.356336 s : WilsonFermion5D StencilOdd Reporti()
Grid : Message : 96.415562 s : r_e6.02111
Grid : Message : 96.422386 s : r_o6.02102
Grid : Message : 96.428364 s : res12.0421
Grid : Message : 97.865450 s : norm diff 0
Grid : Message : 97.490738 s : norm diff even 0
Grid : Message : 97.561591 s : norm diff odd 0
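Both runs keep the same 32.32.32.32 local 4d volume per rank (--grid divided by --mpi in the job scripts below), so the 4-node and 16-node logs can be read side by side as a weak-scaling comparison. The summary lines can be pulled out directly:

grep "mflop/s per node" dwf.4node.perf dwf.16node.perf
# dwf.4node.perf:  mflop/s per node = 8.64971e+06   (Deo: 8.72508e+06)
# dwf.16node.perf: mflop/s per node = 9.37091e+06   (Deo: 9.39937e+06)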
systems/Booster/dwf16.slurm (new file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/sh
#SBATCH --account=gm2dwf
#SBATCH --nodes=16
#SBATCH --ntasks=64
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=0:30:00
#SBATCH --partition=booster
#SBATCH --gres=gpu:4

export OMP_NUM_THREADS=4
export OMPI_MCA_btl=^uct,openib
export UCX_TLS=gdr_copy,rc,rc_x,sm,cuda_copy,cuda_ipc
export UCX_RNDV_SCHEME=put_zcopy
export UCX_RNDV_THRESH=16384
export UCX_IB_GPU_DIRECT_RDMA=yes
export UCX_MEMTYPE_CACHE=n
OPT="--comms-overlap --comms-concurrent"


srun -N 16 -n $SLURM_NTASKS \
./benchmarks/Benchmark_dwf_fp32 \
$OPT \
--mpi 2.2.2.8 \
--accelerator-threads 8 \
--grid 64.64.64.256 \
--shm 2048 > dwf.16node.perf
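Submission is the standard SLURM workflow (a sketch; the gm2dwf account and booster partition are whatever the script requests, and the log lands in the submission directory via the shell redirect):

sbatch systems/Booster/dwf16.slurm   # queue the 16-node, 64-rank job
squeue -u $USER                      # watch the queue
tail -f dwf.16node.perf              # follow the benchmark output once the job starts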
systems/Booster/dwf4.slurm (new file, 40 lines)
@@ -0,0 +1,40 @@
#!/bin/sh
#SBATCH --account=gm2dwf
#SBATCH --nodes=4
#SBATCH --ntasks=16
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=2:00:00
#SBATCH --partition=develbooster
#SBATCH --gres=gpu:4

export OMP_NUM_THREADS=4
export OMPI_MCA_btl=^uct,openib
export UCX_TLS=gdr_copy,rc,rc_x,sm,cuda_copy,cuda_ipc
export UCX_RNDV_SCHEME=put_zcopy
export UCX_RNDV_THRESH=16384
export UCX_IB_GPU_DIRECT_RDMA=yes
export UCX_MEMTYPE_CACHE=n

OPT="--comms-overlap --comms-concurrent"

srun -N 4 -n $SLURM_NTASKS \
./benchmarks/Benchmark_dwf_fp32 \
$OPT \
--mpi 2.2.2.2 \
--shm-mpi 0 \
--accelerator-threads 8 \
--grid 64.64.64.64 \
--shm 2048 > dwf.4node.perf


srun -N 4 -n $SLURM_NTASKS \
./benchmarks/Benchmark_comms_host_device \
--mpi 2.2.2.2 \
--accelerator-threads 8 \
--grid 64.64.64.64 \
--shm 2048 > comms.4node.perf
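These scripts rely on the default device selection reported by AcceleratorCudaInit above ("using default device ... invokes through a wrapping script to set CUDA_VISIBLE_DEVICES"). A hypothetical wrapper of that kind, not part of this commit, could pin each node-local rank to its own GPU before exec'ing the benchmark:

#!/bin/sh
# gpu-wrapper.sh (illustrative only): bind each MPI rank to one GPU using its node-local rank id
export CUDA_VISIBLE_DEVICES=$SLURM_LOCALID
exec "$@"
# usage sketch: srun ... ./gpu-wrapper.sh ./benchmarks/Benchmark_dwf_fp32 ...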
systems/Booster/sourceme.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
module load GCC/9.3.0
module load GMP/6.2.0
module load MPFR/4.1.0
module load OpenMPI/4.1.0rc1
module load CUDA/11.3
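The module list is presumably meant to be sourced into the current shell before configuring, building, and submitting jobs, so the same toolchain is active at build and run time (a sketch):

source systems/Booster/sourceme.sh   # GCC 9.3.0, GMP 6.2.0, MPFR 4.1.0, OpenMPI 4.1.0rc1, CUDA 11.3
module list                          # confirm the loaded toolchain before running config-command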