mirror of https://github.com/paboyle/Grid.git synced 2025-06-13 20:57:06 +01:00

Compare commits


353 Commits

Author SHA1 Message Date
ccf147d6c1 Select the compiler that gives better performance on sunspot 2024-05-07 18:45:56 +00:00
7aa12b446f New config command for sunspot 2024-05-07 18:45:40 +00:00
c293228102 layout control 2024-05-07 18:45:21 +00:00
5c4c9f721a Remove pbs file and replace with bench1 and bench2 for 1 and 2 nodes 2024-05-07 18:44:49 +00:00
057f86c1de 2 queues works ok in performance 2024-05-07 18:42:50 +00:00
cd52e3cbc2 Jobs on sunspot 2024-05-07 18:38:15 +00:00
24602e1259 Accidental synchronise 2024-05-07 17:28:38 +00:00
8a098889fc Update FlightRecorder.cc 2024-04-30 21:15:08 +01:00
ff2ea5de18 Update Tensor_traits.h 2024-04-11 14:25:45 -04:00
da59379612 Large reg file for double 2024-03-26 17:03:20 +00:00
3ef2a41518 ifdef guard omitted 2024-03-26 14:50:32 +00:00
aa96f420c6 Accelerator aware MPI guard on the Unix domain sockets 2024-03-26 14:41:25 +00:00
49e9e4ed0e Fences 2024-03-26 14:14:06 +00:00
f7b8163016 Deterministic MPI reduce options 2024-03-26 14:11:40 +00:00
93769eacd3 Updated configure for bounce through host 2024-03-26 14:10:24 +00:00
59b0cc11df Reduce the time in single 2024-03-26 00:42:40 +00:00
f32c275376 Updated config options for MPI not being aware of GPU 2024-03-26 00:42:00 +00:00
5404fc66ab Merge needs a fence on SYCL 2024-03-26 00:38:41 +00:00
1f53458af8 Options to bounce through a host buffer if
--disable-accelerator-aware-mpi
2024-03-26 00:37:19 +00:00
434c3e7f1d We have a choice of GET or PUT across NVlink 2024-03-25 14:32:44 +00:00
500b119f3d Deterministic MPI 2024-03-22 15:55:23 +00:00
4b87259c1b New config command for sunspot 2024-03-22 15:43:49 +00:00
503dec34ef This appears working now on Sunspot 2024-03-22 15:43:30 +00:00
d1e9fe50d2 Xor csum for repro testing 2024-03-22 15:42:57 +00:00
d01e5fa838 Improved FlightRecorder 2024-03-22 15:42:32 +00:00
a477c25e8c Sunspot repro tests 2024-03-22 15:42:11 +00:00
1bd20cd9e8 FlightRecorder 2024-03-22 15:40:01 +00:00
e49e95b037 Upgrade of the Britney test with flight recorder and fast xor checksum 2024-03-22 15:39:27 +00:00
6f59fed563 Flight recorder, resurrecting the "world famous" Britney test 2024-03-22 15:32:32 +00:00
60b7f6c99d Flight recorder, resurrecting the "world famous" Britney test 2024-03-22 15:32:26 +00:00
b92dfcc8d3 Flight recorder, resurrecting the "world famous" Britney test 2024-03-22 15:30:27 +00:00
f6fd6dd053 Flight recorder, resurrecting the "world famous" Britney test 2024-03-22 15:30:01 +00:00
79ad567dd5 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2024-03-19 15:43:42 +00:00
fab1efb48c More britney logging improvements 2024-03-19 14:36:21 +00:00
660eb76d93 FFTW from OneAPI 2024-03-19 14:28:33 +00:00
62e7bf024a Updated flight logging for Britney test 2024-03-12 20:10:04 +00:00
95f3d69cf9 Extra hardware test hook 2024-03-12 20:09:37 +00:00
89c0519f83 Repro test 2024-03-12 16:11:33 +00:00
2704b82084 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2024-03-12 15:16:24 +00:00
cf8632bbac Britney test option 2024-03-12 15:15:35 +00:00
d224297972 PBS scripts 2024-03-12 15:15:16 +00:00
a4d11a630f Merge pull request #458 from paboyle/fix/HOST_NAME_MAX
fallback to _POSIX_HOST_NAME_MAX if HOST_NAME_MAX is not defined
2024-03-07 07:50:25 -05:00
2b4399f8b1 more HOST_NAME_MAX fix 2024-03-07 15:26:01 +09:00
f17b8de907 fallback to _POSIX_HOST_NAME_MAX if HOST_NAME_MAX is not defined 2024-03-07 15:22:08 +09:00
7e5bd46dd3 Booster update 2024-03-06 19:03:45 +01:00
228bbb9d81 Benchmark results 2024-03-06 19:03:35 +01:00
b812a7b4c6 Staggered launch script 2024-03-06 01:32:40 +00:00
891a366f73 Repro CG script 2024-03-06 01:22:55 +00:00
10116b3be8 Force device copyable and tell SYCL to shut it. 2024-03-06 01:13:27 +00:00
a46a0f0882 force device copyable and don't take crap from SYCL 2024-03-06 01:12:49 +00:00
a26a8a38f4 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2024-03-06 00:05:00 +00:00
7435315d50 More blasted shell variables 2024-03-06 00:03:59 +00:00
9b5f741e85 Reproducing CG can be more useful now 2024-03-06 00:03:16 +00:00
517822fdd2 SPR HBM benchmarking right and also PVC batched GEMM 2024-03-06 00:02:27 +00:00
1b93a9be88 Print out the hostname 2024-03-06 00:01:58 +00:00
783a66b348 Deterministic reduction please 2024-03-06 00:01:37 +00:00
976c3e9b59 Hack for flight logging CG inner products.
Can be made to work, but could put in some more serious infrastructure
for repro testing and blame attribution (Britney test) if necessary
2024-03-05 23:59:57 +00:00
f8ca971dae Use of a bare PRECISION macro is not namespace safe and collides with
SYCL
2024-03-05 23:59:13 +00:00
21bc8c24df OneMKL batched blas starting 2024-03-05 23:58:20 +00:00
30228214f7 SYCL conflict with Eigen 2024-03-05 23:56:10 +00:00
2ae980ae43 Update sourceme.sh 2024-03-05 13:39:18 -05:00
6153dec2e4 Update setup.sh 2024-03-05 13:38:32 -05:00
c805f86343 USQCD benchmark 2024-03-01 00:05:04 -05:00
04ca065281 Only one rank opens 2024-02-29 20:09:11 -05:00
88d8fa43d7 Benchmark development 2024-02-29 20:01:44 -05:00
3c49762875 Propagate in the blas routine 2024-02-29 15:33:06 -05:00
436bf1d9d3 Merge pull request #455 from clarkedavida/hisq_fat_links
Hisq fat links
2024-02-29 15:29:39 -05:00
f70df6e195 changed NO_SHIFT and BACKWARD_CONST from define to enum 2024-02-29 12:29:30 -07:00
fce3852dff Merge pull request #451 from paboyle/feature/eigen-3.4.0-update
updating Eigen to 3.4.0
2024-02-28 18:03:37 -05:00
ee1b8bbdbd Merge pull request #454 from edbennett/adjoint-broke
fix HMC for non-fundamental representations
2024-02-28 14:05:27 -05:00
3f1636637d Merge pull request #453 from dbollweg/feature/sliceSum_gpu
Feature/slice sum gpu
2024-02-28 14:04:43 -05:00
2e570f5300 Merge pull request #457 from lehner/feature/gpt
Import GPT-related updates
2024-02-28 13:59:04 -05:00
9f89486df5 remove unnecessary code path 2024-02-28 19:56:23 +01:00
22b43b86cb Make GPT test suite work with SYCL 2024-02-28 12:57:17 +01:00
3c9012676a CUDA cub refuses to reduce vSpinColourMatrix, breaking up into smaller parts like already done for HIP case. 2024-02-27 12:41:45 -05:00
b507fe209c Added SpinColourMatrix case to sliceSum Test 2024-02-27 11:28:32 -05:00
6cd2d8fcd5 Replace cuda/hip memcpy with Grid functions 2024-02-26 09:55:07 -05:00
b02d022993 fixed race condition (thx michael) 2024-02-23 17:14:28 -07:00
94581e3c7a accelerator_for is broken 2024-02-23 15:58:33 -07:00
88b52cc045 Merge branch 'develop' into hisq_fat_links 2024-02-23 14:47:15 -07:00
0a816b5509 Merge branch 'feature/sliceSum_gpu' of https://github.com/dbollweg/Grid into feature/sliceSum_gpu 2024-02-22 21:43:06 -05:00
1c8b807c2e free malloc'd memory 2024-02-22 21:42:44 -05:00
66391f84f2 Merge branch 'feature/gpt' of ../Grid into develop 2024-02-21 19:05:00 +01:00
97f7a9ecb3 fix HMC for non-fundamental representations 2024-02-21 08:27:55 +00:00
15878f7613 sliceSumReduction_cub_large now also faster than CPU on Frontier 2024-02-16 13:55:21 -05:00
e0d5e3c6c7 Merge branch 'paboyle:develop' into feature/sliceSum_gpu 2024-02-16 13:16:37 -05:00
6f3455900e Adding sliceSumReduction_cub_small/large since hipcub cannot deal with arb. large vobjs 2024-02-16 13:15:02 -05:00
56827d6ad6 accelerator_inline bug 2024-02-14 13:56:57 -07:00
73c0b29535 Merge branch 'develop' of https://github.com/paboyle/Grid into develop 2024-02-13 20:19:32 +00:00
303b83cdb8 Scaling benchmarks, verbosity and MPICH aware in acceleratorInit()
For some reason Dirichlet benchmark fails on several nodes; need to
debug this.
2024-02-13 19:48:03 +00:00
5ef4da3f29 Silence verbose 2024-02-13 19:47:36 +00:00
1502860004 Benchmark scripts 2024-02-13 19:47:02 +00:00
585efc6f3f More benchmark scripts 2024-02-13 19:40:49 +00:00
62055e04dd missing semicolon generates error with some compilers 2024-02-13 18:18:27 +01:00
e4a641b64e removing old Eigen tensor patch 2024-02-13 10:37:14 +01:00
8849f187f1 updating Eigen to 3.4.0 2024-02-13 10:30:22 +01:00
db420525b3 fix Simd::Nsimd typo 2024-02-12 15:03:53 -07:00
b5659d106e more test cases 2024-02-09 13:37:14 -05:00
4b43307402 Undo include path changes for level zero api header 2024-02-09 13:07:56 -05:00
09af8c25a2 Merge branch 'paboyle:develop' into feature/sliceSum_gpu 2024-02-09 13:02:59 -05:00
9514035b87 refactor slicesum: slicesum uses GPU version by default now 2024-02-09 13:02:28 -05:00
2da09ae99b acceleration compiles and doesn't break scalar mode 2024-02-06 18:40:13 -07:00
a38fb0e04a first effort toward accelerators 2024-02-06 18:24:55 -07:00
7019916294 RNG seed change safer for large volumes; this is a long term solution 2024-02-07 00:56:39 +00:00
1514b4f137 slicesum_sycl passes test 2024-02-06 19:08:44 -05:00
91cf5ee312 Updated bench script 2024-02-06 23:45:10 +00:00
0a6e2f42c5 small amount of cleanup 2024-02-06 16:32:07 -07:00
ab2de131bd work towards sliceSum for sycl backend 2024-02-06 13:24:45 -05:00
5bfa88be85 Aurora MPI standalone benchmark and options that work well 2024-02-06 16:28:40 +00:00
5af8da76d7 Fix cuda compilation of Lattice_slicesum_gpu.h 2024-02-01 18:02:30 -05:00
b8b9dc952d Async memcpy's and cleanup 2024-02-01 17:55:35 -05:00
79a6ed32d8 Use accelerator_for2d and DeviceSegmentedRecude to avoid kernel launch latencies 2024-02-01 16:41:03 -05:00
caa5f97723 Add sliceSum gpu using cub/hipcub 2024-01-31 16:50:06 -05:00
4924b3209e projectU3 yields a unitary matrix 2024-01-23 14:43:58 -07:00
00f24f8765 already found some bugs in projection, still needs testing 2024-01-22 05:50:16 -07:00
f5b3d582b0 first attempt at U3 projection 2024-01-22 02:49:40 -07:00
981c93d67a update Test_fatLinks to accept Naik 2024-01-21 21:09:19 -07:00
c020b78e02 Merge branch 'develop' into hisq_fat_links 2024-01-21 20:21:08 -07:00
2a0d75bac2 Aurora files 2023-12-21 23:20:17 +00:00
f48298ad4e Bug fix 2023-12-11 20:57:02 -05:00
645e47c1ba Config for Ampere Altra ARM 2023-12-08 16:17:56 -05:00
d1d9827263 Integrator logging update 2023-12-08 12:14:00 -05:00
14643c0aab SDCC benchmarking scripts for A100 nodes and IceLake nodes (AVX512) 2023-12-04 15:45:57 -05:00
b77a9b8947 SDCC compiles starting 2023-11-30 14:31:51 -05:00
7d077fe493 Frontier compile 2023-11-09 13:58:44 -05:00
9cd4128833 fix naik bug 2023-11-03 14:11:38 -06:00
c8b17c9526 Naik to CShift 2023-11-02 12:43:22 -06:00
2ae2a81e85 attempt to fix Naik 2023-10-31 13:54:55 -06:00
69c869d345 fixed stupid typo 2023-10-30 17:41:52 -06:00
df9b958c40 naik now returns separately 2023-10-30 17:40:53 -06:00
3d3376d1a3 LePage works, trying Naik 2023-10-27 16:26:31 -06:00
f2648e94b9 getHostPointer added to Lattice 2023-10-23 13:47:41 +02:00
21ed6ac0f4 added floating-point support 2023-10-20 13:54:26 -06:00
7bb8ab7000 improve smearing templating 2023-10-20 08:41:02 -06:00
2c824c2641 Merge branch 'develop' into hisq_fat_links 2023-10-17 16:03:59 -06:00
391fd9cc6a try lepage term 2023-10-17 14:57:15 -06:00
51051df62c 3GeV run setup 2023-10-16 20:49:52 +03:00
33097681b9 FTHMC compiled and merged to develop 2023-10-14 00:42:55 +03:00
07e4900218 FTHMC commit 2023-10-13 18:21:57 +03:00
36ab567d67 FTHMC 3 Gev 2023-10-13 18:21:57 +03:00
e19171523b FTHMC Status at lattice conference commit 2023-10-13 18:21:56 +03:00
9626a2c7c0 Asynch handling 2023-10-13 18:21:56 +03:00
e936f5b80b IfGridTensor shorthand 2023-10-13 18:21:56 +03:00
ffc0639cb9 Running in HMC tests 2023-10-13 18:21:56 +03:00
c5b43b322c traceProduct eliminates non-contributing intermediate terms 2023-10-13 18:21:56 +03:00
c9c4576237 Improved frontier cshift 2023-10-13 18:21:56 +03:00
bf4369f72d clean up HISQSmear with decltypes 2023-10-12 12:41:06 -06:00
36600899e2 working 7-link; Grid_log; generalShift 2023-10-12 11:11:39 -06:00
b9c70d156b Merge branch 'develop' into hisq_fat_links 2023-10-10 22:44:17 -06:00
eb89579fe7 Merge remote-tracking branch 'origin/develop' into develop 2023-10-10 22:43:51 -06:00
0cfd13d18b 7-link working 2023-10-10 22:41:52 -06:00
e6ed516052 merged 2023-10-08 09:00:37 +02:00
e2a3dae1f2 Option for multiple simultaneous CartesianStencils 2023-10-08 08:58:44 +02:00
6d0c2de399 Deprecate the PVC directory and make a PVC-OEM generic PVC target with
no queueing system dependency -- just interactive scripts
2023-10-03 17:04:20 +00:00
7786ea9921 Bug fix in script 2023-10-03 09:58:44 -07:00
d93eac7b1c Performance regressed and is OK in icpx 2023.2 2023-10-03 15:53:14 +00:00
afc316f501 Rename headers 2023-10-02 16:25:11 -04:00
f14bfd5c1b Relocate sub includes 2023-10-02 16:23:38 -04:00
c5f1420dea Merge remote-tracking branch 'LupoA/develop' into LupoA-develop 2023-10-02 16:22:35 -04:00
018e6da872 Merge pull request #440 from giltirn/feature/paddedcellgauge
Feature/paddedcellgauge
2023-10-02 10:00:42 -04:00
b77bccfac2 Merge pull request #444 from mmphys/feature/docX
Update doc complete list of Macports needed to build Grid on a fresh Mac
2023-10-02 09:57:11 -04:00
80359e0d49 Bland SYCL compile 2023-09-26 13:20:27 -07:00
3d437c5cc4 Making SYCL happy 2023-09-26 13:19:42 -07:00
63d9b8e8a3 Merge remote-tracking branch 'origin/develop' into hisq_fat_links 2023-09-16 23:20:31 -06:00
d247031c98 try 7-link 2023-09-16 23:18:16 -06:00
affff3865f Merge branch 'develop' into hisq_fat_links 2023-08-11 23:08:04 -06:00
9c22655b5a Merge remote-tracking branch 'origin/develop' into develop 2023-08-11 23:06:42 -06:00
99d879ea7f 5-link first attempt 2023-08-11 22:56:30 -06:00
bd56c95a6f Update documentation with complete list of Macports needed to build Grid on a fresh Mac 2023-07-14 13:50:06 +01:00
dbd8bb49dc Merge pull request #32 from LupoA/sp2n/develop
Sp2n/develop
2023-07-04 15:23:43 +00:00
3a29af0ce4 Fixed linker error 2023-07-04 16:08:44 +01:00
f7b79cdd45 Added test for ProjectSpn 2023-07-03 18:00:32 +01:00
075b9d22d0 adjoint rep implemented as 2indx symmetric 2023-07-02 13:58:31 +01:00
b92428f05f better test 2023-07-02 13:34:03 +01:00
34b11864b6 prettiest tests 2023-07-02 13:25:57 +01:00
1dfaa08afb The stencils for the staple and rect-staple padded cell implementations are now created and stored by workspace classes that allow for reuse providing the grids remain consistent
The workspaces are now used by the plaq+rectangle gauge action resulting in a further 2x performance improvement as measured on a 16^4 local volume for 2 nodes (16 ranks) of Crusher
2023-06-28 15:11:24 -04:00
9d263d9a7d fix bug in HISQSmearing; move benchmark b/c i don't understand how makefiles work 2023-06-28 10:05:34 -06:00
9015c229dc add benchmark to see whether matrix multiplication is slower than read from object 2023-06-27 21:28:26 -06:00
f44dce390f Implemented accelerator-optimized versions of localCopyRegion and insertSliceLocal to speed up padding
Fixed const correctness on PaddedCell methods
Fixed compile issues on Crusher
Added timing breakdowns for PaddedCell::Expand and the padded implementations of the staples, visible under --log Performance
Optimized kernel for StaplePadded
Test_iwasaki_action_newstaple now repeats the calculation 10 times and reports average timings
2023-06-27 14:58:10 -04:00
bb71e9a96a Added PaddedCell and GeneralisedLocalStencil header includes to standard base headers
Moved versions of the padded-cell implementations of staple and rect-staple from test code to WilsonLoops header
Added StapleAndRectStapleAll which is now called by the plaq+rectangle action class. Under the hood it uses the padded cell implementations with maximal reuse of the padded gauge links
2023-06-27 11:23:30 -04:00
a7eabaad56 rudimentary appendShift convenience method, which allows the user to append an arbitrary shift in one line 2023-06-26 23:59:28 -06:00
eeb4703b84 develop wrappers to make the stencils easier to construct 2023-06-26 17:45:35 -06:00
a07421b3d3 Merge branch 'develop' into hisq_fat_links 2023-06-26 13:51:32 -06:00
cda53b4068 Merge remote-tracking branch 'origin/develop' into develop 2023-06-26 13:51:06 -06:00
6f6844ccf1 Added new StapleAll and RectStapleAll functions that return the staples for all mu as an array
Modified plaq+rectangle gauge actions to use the above
Added a test code to confirm the above changes
2023-06-26 15:48:47 -04:00
4c6613d72c Modified RectStapleDouble and RectStapleOptimised to use Gauge-BC respecting CshiftLink
Added test code tests/debug/Test_optimized_staple_gaugebc demonstrating equivalence of above to RectStapleUnoptimised for cconj gauge BCs
Removed optimized staple only being used for periodic gauge BCs; it is now always used
2023-06-26 10:20:23 -04:00
559257bbe9 better documentation and filelist names 2023-06-23 16:16:48 +01:00
cff1f8d3b8 rm unused variables and formatting 2023-06-23 16:04:18 +01:00
f27d2083cd adjustments in SUn and Sp2n impl 2023-06-23 15:34:08 +01:00
36cc9c524f Threaded the constructor of GeneralLocalStencil 2023-06-23 09:57:38 -04:00
2822487450 rm unnecessary line 2023-06-23 14:55:23 +01:00
e07fafe46a minor adjustments to twoindex 2023-06-23 12:18:04 +01:00
063d290bd8 missing function 2023-06-23 11:11:20 +01:00
4e6194d92a Avoid code duplication in ProjectSUn 2023-06-23 11:03:50 +01:00
de30c4e22a minor improvements 2023-06-23 10:49:41 +01:00
df99f227c1 include missing staple orientations; invert path direction, which was backwards 2023-06-22 14:57:10 -06:00
4241c7d4a3 Imported coalescedReadGeneralPermute GPU implementation from Christoph
Fixed bug in padded staple code where extract was being called on the result before the GPU view was closed
Fixed compile issue with pointer cast in padded staple code
Added timing summaries of padded staple code and timing breakdown of staple implementation to Test_padded_cell_staple
2023-06-21 16:01:01 -04:00
d536c67b9d add HISQSmearing to Smearing.h 2023-06-20 16:04:48 -06:00
f44f005dad rename _lvl1 --> _linkTreatment 2023-06-20 15:48:27 -06:00
26b2caf570 add template parameter to Smear_HISQ_fat for MILC interfacing 2023-06-20 15:37:54 -06:00
7b11075102 The user can now specify the implementation of Cshift used by the PaddedCell class through a virtual base class API. Implementations for default (regular Cshift) and for gauge links (which respects the gauge BCs)
Fixed const-correctness for PaddedCell and ConjugateGimpl::setDirections
Modified test code for padded-cell implementation of staple, rect-staple to use cconj BCs
2023-06-20 17:09:56 -04:00
abc658dca5 Added coalescedReadGeneralPermute CPU implementation based on Christoph's GPT code
In a test code, implemented a padded-cell version of the staple and rectangular-staple calculation
2023-06-20 16:14:25 -04:00
8bb078db25 Merge branch 'develop' into hisq_fat_links 2023-06-20 13:05:00 -06:00
b61ba40023 Merge remote-tracking branch 'origin/develop' into develop 2023-06-20 13:04:53 -06:00
452bf2e907 Accelerator basisRotate also on HIP 2023-06-20 20:36:24 +03:00
2372275b2c Merge pull request #36 from LupoA/sp2n/gpu-bugfix
Sp2n/gpu bugfix [close #30]
2023-06-20 13:46:00 +01:00
ef736e8aa4 Merge pull request #35 from LupoA/sp2n/enableSp
consistent enable sp config flag
2023-06-20 10:41:09 +00:00
5e539e2d54 Forgot some follow-ups on changed signature 2023-06-18 12:37:51 +01:00
96773f5254 Apparently forgot to remove one Lattice version 2023-06-18 12:21:39 +01:00
d80df09f3b consistent enable sp config flag 2023-06-16 19:16:46 +01:00
621e612c30 Fix non-zero ret on device bug 2023-06-16 16:27:49 +01:00
8c3792721b ClangFormat 2023-06-16 15:58:23 +01:00
c95bbd3948 Remove accelerated lattice version 2023-06-16 15:50:26 +01:00
e28ab7a732 Re-included instantiations for symmetric 2Index AS Sp 2023-06-16 14:20:37 +01:00
c797cbe737 deal with post-merge trauma 2023-06-16 14:20:37 +01:00
e09dfbf1c2 definitely the right merge upstream/develop 2023-06-16 14:19:46 +01:00
116d90b0ee First attempt on #30 2023-06-15 15:09:37 +01:00
b0646ca187 Remove some unused variables 2023-06-15 15:09:09 +01:00
14d352ea4f added smearParams struct 2023-06-12 16:55:44 -06:00
1cf9ec1cce now compiles 2023-06-09 16:27:45 -06:00
4895ff260e Merge pull request #28 from LupoA/sp2n/config
compile sp2n fermion impl only if declared at config time
2023-06-09 13:07:48 +00:00
4b994a1bc7 trouble with compilation 2023-06-08 17:37:25 -06:00
e506d6d369 Merge branch 'develop' into hisq_fat_links 2023-06-07 21:16:20 -06:00
ab56ad8d7a fix 3-link stencil 2023-06-07 21:14:58 -06:00
470d93006a compile sp2n fermion impl only if declared at config time 2023-06-07 12:53:33 +01:00
2f3d03f188 Merge pull request #27 from LupoA/sp2n/documentation
documentation for gaugegroup and sp2n
2023-06-01 16:42:27 +00:00
8db7c23bee improve documentation 2023-06-01 17:39:10 +01:00
69dc5172dc Merge pull request #26 from LupoA/sp2n/irreps
Sp2n/irreps
2023-06-01 16:28:15 +00:00
fd72eb6546 Merge branch 'sp2n/algorithm' into sp2n/irreps 2023-06-01 17:24:01 +01:00
e8c29e2fe5 Merge pull request #31 from paboyle/develop
Sync
2023-05-28 16:13:12 +02:00
b405767569 make private methods private 2023-05-26 17:02:16 +01:00
fe88a0c12f cleaner twoindex class, cleaner tests 2023-05-26 16:55:30 +01:00
e61a9ed2b4 partial revert 2023-05-26 13:54:26 +01:00
de8daa3824 group is SUn by default 2023-05-26 13:44:41 +01:00
3a50fb29cb directly call sp helper 2023-05-26 13:28:47 +01:00
6647d2656f rm unnecessary specialisation 2023-05-26 12:27:22 +01:00
a6f4dbeb6d remove redundant template parameter 2023-05-26 12:13:40 +01:00
92a282f2d8 Merge pull request #24 from LupoA/sp2n/fix_static_assert_symmetric
Move static_assert inside of function
2023-05-26 11:13:50 +01:00
ca2fd9fc7b documentation for gaugegroup and sp2n 2023-05-25 18:40:54 +01:00
3825329f8e Merge branch 'develop' into hisq_fat_links 2023-05-24 15:37:25 -06:00
be1a4f5860 implement TwoIndexSymm for sp2n 2023-05-22 17:21:03 +01:00
5897b93dd4 debug tests, fix dimension 2023-05-22 13:42:21 +01:00
af091e0881 DimensionHelper for 2index irreps 2023-05-21 16:56:06 +01:00
3c1e5e9517 Merge pull request #25 from LupoA/sp2n/unify_representations
Sp2n/unify representations [close #3]
2023-05-21 14:55:27 +01:00
85b2cb7a8a changing some hardcoded SUn lines 2023-05-21 14:50:28 +01:00
c7bdf2c0e4 3-link test at least gives an answer 2023-05-21 04:33:20 -06:00
da9cbfc7cc Suppress BuildSurfaceList verbosity in Stencil.h 2023-05-19 20:22:20 +02:00
6b9f07c1ed Merge pull request #30 from paboyle/develop
Merge upstream
2023-05-19 20:20:58 +02:00
b8bdc2eefb Unified two index representations 2023-05-18 18:36:29 +01:00
0078826ff1 Move static_assert inside of function 2023-05-18 18:14:53 +01:00
e855c41772 Unified spfundamental.h with fundamental.h 2023-05-18 18:11:20 +01:00
d169c275b6 Merge pull request #22 from LupoA/sp2n/unify_twoindex
Unify TwoIndex
2023-05-18 14:55:02 +00:00
a5125e23f4 Typo 2023-05-18 15:41:35 +01:00
7b83c80757 Merge branch 'sp2n/unify_twoindex' of github.com:LupoA/Grid into sp2n/unify_twoindex 2023-05-18 15:36:14 +01:00
e41821e206 Disable two index symmetric 2023-05-18 15:29:55 +01:00
bf91778550 verbose plaquette example; fat link test frame 2023-05-17 15:15:54 -06:00
5a75ab15a2 typo in 2S dim 2023-05-17 20:47:57 +01:00
932c783fbf 2AS for every Nc! 2023-05-17 20:22:05 +01:00
55f9cce577 Revert "Added automated HMC test for Nc=4"
This reverts commit eee27b8b30.
2023-05-17 09:17:48 +01:00
b3533ca847 correct tests (failing) 2023-05-16 17:43:52 +01:00
fd2a637010 test 2index 2023-05-16 14:10:39 +01:00
eee27b8b30 Added automated HMC test for Nc=4 2023-05-15 18:37:33 +01:00
8522352aa3 ClangFormat 2023-05-15 18:36:05 +01:00
3beb8f4091 fixing typo, getting pre-changes physics 2023-05-15 16:00:15 +01:00
12a706e9b1 de-hardcode the number of generators 2023-05-15 15:48:21 +01:00
170aa7df01 fix (dimension to be improved) 2023-05-15 15:20:18 +01:00
e8ad1fef53 Unify TwoIndex 2023-05-12 14:35:50 +01:00
aa9df63a05 rename group projections based on determinants 2023-05-10 14:50:52 +01:00
3953312a93 Merge pull request #20 from LupoA/sp2n/unify_gaugeimpltypes
Sp2n/unify gaugeimpltypes
2023-05-03 15:17:10 +00:00
6e62f4f616 ClangFormat 2023-05-03 16:15:12 +01:00
6a7bdca53b Take over additional algebra tests from Alessandro 2023-05-03 16:02:02 +01:00
c7fba9aace Take over additional group tests from Alessandro 2023-05-03 16:01:48 +01:00
ac6c7cb8d6 Merge in Alessandro's changes [test fails] 2023-05-03 02:53:03 +01:00
c5924833a1 ClangFormat 2023-05-03 02:39:36 +01:00
ac0a74be0d Taken care of algebra tests 2023-05-03 02:32:42 +01:00
42b0e1125d Naming and argument types 2023-05-03 01:51:46 +01:00
339c4fda79 Extracted is_element_of Sp2n 2023-05-02 15:44:34 +01:00
9b85bf9402 better projection test 2023-05-02 15:42:20 +01:00
86b02c3cd8 cleaning up requested by Julian 2023-05-02 13:31:17 +01:00
7b3b7093fa cleaning up requested by Ed 2023-05-02 12:50:57 +01:00
881b08a465 Correct implementation of SpTa 2023-04-27 18:17:06 +01:00
3ee5444c69 Remove commented out stuff 2023-04-21 08:08:18 +01:00
5e28fe56d2 Remove code duplication: Iterating through vectors 2023-04-21 08:08:06 +01:00
5aabe074fe Rename Sympl* to Sp* 2023-04-18 11:50:20 +01:00
dace904c10 fix typo 2023-04-14 18:06:18 +01:00
be98d26610 small change I missed in previous commit 2023-04-13 17:48:43 +01:00
5f75735dab Add M and Mdag to WilsonTMFermion 2023-04-06 18:25:05 +02:00
178376f24b minor stylistic changes 2023-04-06 12:08:17 +01:00
6a0eb466ee Merge pull request #19 from LupoA/refactoring_sp2n
refactoring sp2n
2023-04-05 10:50:58 +00:00
4ea29b8f0f Template group into GaugeImplTypes. Closing #2 2023-04-04 17:49:28 +01:00
778291230a expand ProjecOnGaugeGroup, change ProjectOnSp2nAlgebra into SpTa, fixing some of its issues 2023-04-04 17:48:13 +01:00
026e736dfa Projection on algebra can now be templated. Fix #12 2023-04-03 16:31:19 +01:00
4275b3f431 Fix typo and remove unnecessary lines 2023-04-03 12:01:52 +01:00
1b8176e2c0 fix code duplication 2023-03-17 14:58:00 +00:00
cbc053c3db Revert "projection on Sp2n algebra, to be used instead of Ta"
This reverts commit ba7f9d7b70.
2023-03-17 11:36:58 +00:00
cdf3f6ef6e Merge branch 'refactoring_sp2n' of https://github.com/LupoA/Grid into refactoring_sp2n 2023-03-15 15:59:50 +00:00
ba7f9d7b70 projection on Sp2n algebra, to be used instead of Ta 2023-03-15 15:55:12 +00:00
371fd123fb consequence of iSUnMatrix being no longer a member of the SU class 2023-03-14 10:47:07 +00:00
d6ff644aab Towards the day all tests compile 2023-03-14 10:43:25 +00:00
29586f6b5e Deactivate some tests for Nc!=3 2023-03-13 08:17:14 +00:00
fd057c838f add ProjectOnGaugeGroup and ProjectGn to allow future templating in GaugeImplTypes 2023-03-10 12:10:46 +00:00
f51222086c Move functions from GaugeGroup to group specific implementations 2023-03-09 16:22:20 +00:00
f73691ec47 Merge pull request #18 from nickforce989/sp2n/newbranch
Sp2n/newbranch
2023-02-13 10:22:27 +01:00
7ebda3e9ec Merge commit 'b10e1b7bc8bec809f874e9e48a3ccc7b2619c9d1' into sp2n/newbranch 2023-01-19 12:10:18 +00:00
b10e1b7bc8 Fixed files giving zero force computation on GPU, issue #8 2023-01-18 18:04:47 +00:00
d7dea44ce7 Merge pull request #17 from chillenzer/unify_gauge_groups
Fix compilation error in nvcc (closes #15)
2022-12-19 16:24:03 +00:00
37b6b82869 Fix file extensions 2022-12-18 16:12:56 +00:00
92ad5b8f74 Compiler error fix: NVCC requires names for templ. par. 2022-12-18 15:50:19 +00:00
8c80f1c168 Merge pull request #14 from chillenzer/unify_gauge_groups
Unify gauge groups (closes #5)
2022-12-01 17:35:46 +00:00
0af7d5a793 Rename Grid/qcd/utils/<Group>_impl.h -> Grid/qcd/utils/<Group>.h 2022-11-30 17:12:00 +00:00
505fa49983 Renamed SUn.h -> GaugeGroup.h 2022-11-30 17:09:48 +00:00
7bcf33def9 Removed Sp2n.h 2022-11-30 16:59:46 +00:00
a13820656a Removed iSUnMatrix, etc. 2022-11-30 15:09:03 +00:00
fa71b46a41 Hide nsp 2022-11-30 14:44:23 +00:00
b8b3ae6ac1 Make helper functions private 2022-11-30 13:29:14 +00:00
55c008da21 Removed forward declaration 2022-11-30 13:12:21 +00:00
2507606bd0 With function overloading (still dirty). 2022-11-30 12:54:36 +00:00
7c2ad4f8c8 Attempt with SFINAE (failed) 2022-11-30 11:57:39 +00:00
54c8025aad Remove unnecessary pwd in scripts/filelist 2022-11-28 17:50:38 +00:00
921e23e83c Separated out everything SU specific 2022-11-28 17:47:50 +00:00
6e750ecb0e Remove apparently forgotten file 2022-11-28 16:33:46 +00:00
b8f1f5d2a3 Introduce GaugeGroup 2022-11-25 17:45:32 +00:00
9273f2937c Autoformat google style 2022-11-25 17:44:08 +00:00
1aa28b47ae Add existing test to check 2022-11-25 17:40:40 +00:00
629cb2987a Fix typo in Makefile.am 2022-11-25 17:40:21 +00:00
03235d6368 Fixed typo in configure.ac 2022-11-25 16:57:40 +00:00
22064c7e4c Fixing #11 2022-11-25 13:10:29 +00:00
2de03e5172 Revert "Revert "Fixing issue #11: consistent use of ncolour and nsp""
This reverts commit 3af4929dda.
2022-11-23 19:40:28 +00:00
3af4929dda Revert "Fixing issue #11: consistent use of ncolour and nsp"
This reverts commit 1ba429345b.
2022-11-23 19:34:59 +00:00
1ba429345b Fixing issue #11: consistent use of ncolour and nsp 2022-11-23 18:45:01 +00:00
88bdd4344b 2indx antisymm representation of sp2n 2021-11-04 18:27:35 +00:00
4044536eea add projection on sp2n algebra 2021-10-26 10:20:44 +01:00
4d8ae6221c fix projection 2021-10-22 10:44:54 +01:00
4e31e4e094 Better tests 2021-10-13 15:07:23 +01:00
0d6674e489 hot start for sp2n 2021-10-12 18:53:54 +01:00
b145fd4f5b necessary to merge 2021-10-12 17:08:46 +01:00
8a5b794f25 necessary change to merge with upstrm 2021-10-12 16:04:03 +01:00
291e80f88a sp2n as config option 2021-10-12 16:00:32 +01:00
1ace5850ae first hmc 2021-10-12 16:00:32 +01:00
283f14b7c1 fix sp2n projection 2021-10-12 16:00:32 +01:00
1d6e708083 tests! 2021-10-12 16:00:32 +01:00
89457e25e3 sp fermion instantiation 2021-10-12 16:00:32 +01:00
7e3b298d3d project on sp2n 2021-10-12 16:00:32 +01:00
7ff3e5eed4 gauge and fermion implementation for sp2n 2021-10-12 16:00:32 +01:00
19eb51cf41 sp2n generators 2021-10-12 15:53:33 +01:00
470d4dcc6d sp2n as config option 2021-10-12 15:47:56 +01:00
ed03bfd555 first hmc 2021-10-12 12:16:47 +01:00
8c0fbcccae fix sp2n projection 2021-10-12 12:12:16 +01:00
d4866157fe tests! 2021-10-12 09:06:15 +01:00
b6496b6cb5 sp fermion instantiation 2021-10-11 16:32:10 +01:00
4f5fe57920 project on sp2n 2021-10-11 16:28:15 +01:00
11fb943b1e gauge and fermion implementation for sp2n 2021-10-11 16:21:25 +01:00
046a23121e sp2n generators 2021-10-05 15:51:22 +01:00
201 changed files with 13452 additions and 2332 deletions
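For reference, the HOST_NAME_MAX fallback described in commits f17b8de907 and 2b4399f8b1 above is the standard POSIX guard pattern; a minimal self-contained sketch, not the exact patch text (PrintHostname is an illustrative name):

#include <limits.h>   // may or may not define HOST_NAME_MAX
#include <unistd.h>   // gethostname(), _POSIX_HOST_NAME_MAX
#include <cstdio>

// macOS and some libcs omit HOST_NAME_MAX; fall back to the POSIX minimum.
#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX _POSIX_HOST_NAME_MAX
#endif

void PrintHostname(void) {
  char name[HOST_NAME_MAX + 1];
  if (gethostname(name, sizeof(name)) == 0) std::printf("%s\n", name);
}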

.gitignore

@@ -1,3 +1,7 @@
+# Doxygen stuff
+html/*
+latex/*
+
 # Compiled Object files #
 #########################
 *.slo


@@ -34,7 +34,7 @@
 #pragma push_macro("__SYCL_DEVICE_ONLY__")
 #undef __SYCL_DEVICE_ONLY__
 #define EIGEN_DONT_VECTORIZE
-//#undef EIGEN_USE_SYCL
+#undef EIGEN_USE_SYCL
 #define __SYCL__REDEFINE__
 #endif
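
The hunk above stops commenting out the #undef, so EIGEN_USE_SYCL is now forcibly disabled while Eigen is compiled. A minimal sketch of the surrounding save/undefine/restore macro dance, with the include and the restore step assumed rather than shown in this hunk:

// Hide the SYCL device-compilation macro so Eigen is compiled as plain host
// code inside a SYCL translation unit, then restore it afterwards.
#pragma push_macro("__SYCL_DEVICE_ONLY__")
#undef __SYCL_DEVICE_ONLY__
#define EIGEN_DONT_VECTORIZE
#undef EIGEN_USE_SYCL             // do not let Eigen pick its SYCL backend

#include <Eigen/Dense>            // assumed include; not part of this hunk

#pragma pop_macro("__SYCL_DEVICE_ONLY__")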

Makefile.am

@@ -66,6 +66,10 @@ if BUILD_FERMION_REPS
 extra_sources+=$(ADJ_FERMION_FILES)
 extra_sources+=$(TWOIND_FERMION_FILES)
 endif
+if BUILD_SP
+extra_sources+=$(SP_FERMION_FILES)
+extra_sources+=$(SP_TWOIND_FERMION_FILES)
+endif
 lib_LIBRARIES = libGrid.a

FFT.h

@@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
 #define _GRID_FFT_H_
 #ifdef HAVE_FFTW
-#ifdef USE_MKL
+#if defined(USE_MKL) || defined(GRID_SYCL)
 #include <fftw/fftw3.h>
 #else
 #include <fftw3.h>

Zolotarev.cc

@@ -293,7 +293,7 @@ static void sncndnFK(INTERNAL_PRECISION u, INTERNAL_PRECISION k,
 * Set type = 0 for the Zolotarev approximation, which is zero at x = 0, and
 * type = 1 for the approximation which is infinite at x = 0. */
-zolotarev_data* zolotarev(PRECISION epsilon, int n, int type) {
+zolotarev_data* zolotarev(ZOLO_PRECISION epsilon, int n, int type) {
 INTERNAL_PRECISION A, c, cp, kp, ksq, sn, cn, dn, Kp, Kj, z, z0, t, M, F,
 l, invlambda, xi, xisq, *tv, s, opl;
 int m, czero, ts;
@@ -375,12 +375,12 @@ zolotarev_data* zolotarev(PRECISION epsilon, int n, int type) {
 construct_partfrac(d);
 construct_contfrac(d);
-/* Converting everything to PRECISION for external use only */
+/* Converting everything to ZOLO_PRECISION for external use only */
 zd = (zolotarev_data*) malloc(sizeof(zolotarev_data));
-zd -> A = (PRECISION) d -> A;
-zd -> Delta = (PRECISION) d -> Delta;
-zd -> epsilon = (PRECISION) d -> epsilon;
+zd -> A = (ZOLO_PRECISION) d -> A;
+zd -> Delta = (ZOLO_PRECISION) d -> Delta;
+zd -> epsilon = (ZOLO_PRECISION) d -> epsilon;
 zd -> n = d -> n;
 zd -> type = d -> type;
 zd -> dn = d -> dn;
@@ -390,24 +390,24 @@ zolotarev_data* zolotarev(PRECISION epsilon, int n, int type) {
 zd -> deg_num = d -> deg_num;
 zd -> deg_denom = d -> deg_denom;
-zd -> a = (PRECISION*) malloc(zd -> dn * sizeof(PRECISION));
-for (m = 0; m < zd -> dn; m++) zd -> a[m] = (PRECISION) d -> a[m];
+zd -> a = (ZOLO_PRECISION*) malloc(zd -> dn * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> dn; m++) zd -> a[m] = (ZOLO_PRECISION) d -> a[m];
 free(d -> a);
-zd -> ap = (PRECISION*) malloc(zd -> dd * sizeof(PRECISION));
-for (m = 0; m < zd -> dd; m++) zd -> ap[m] = (PRECISION) d -> ap[m];
+zd -> ap = (ZOLO_PRECISION*) malloc(zd -> dd * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> dd; m++) zd -> ap[m] = (ZOLO_PRECISION) d -> ap[m];
 free(d -> ap);
-zd -> alpha = (PRECISION*) malloc(zd -> da * sizeof(PRECISION));
-for (m = 0; m < zd -> da; m++) zd -> alpha[m] = (PRECISION) d -> alpha[m];
+zd -> alpha = (ZOLO_PRECISION*) malloc(zd -> da * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> da; m++) zd -> alpha[m] = (ZOLO_PRECISION) d -> alpha[m];
 free(d -> alpha);
-zd -> beta = (PRECISION*) malloc(zd -> db * sizeof(PRECISION));
-for (m = 0; m < zd -> db; m++) zd -> beta[m] = (PRECISION) d -> beta[m];
+zd -> beta = (ZOLO_PRECISION*) malloc(zd -> db * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> db; m++) zd -> beta[m] = (ZOLO_PRECISION) d -> beta[m];
 free(d -> beta);
-zd -> gamma = (PRECISION*) malloc(zd -> n * sizeof(PRECISION));
-for (m = 0; m < zd -> n; m++) zd -> gamma[m] = (PRECISION) d -> gamma[m];
+zd -> gamma = (ZOLO_PRECISION*) malloc(zd -> n * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> n; m++) zd -> gamma[m] = (ZOLO_PRECISION) d -> gamma[m];
 free(d -> gamma);
 free(d);
@@ -426,7 +426,7 @@ void zolotarev_free(zolotarev_data *zdata)
 }
-zolotarev_data* higham(PRECISION epsilon, int n) {
+zolotarev_data* higham(ZOLO_PRECISION epsilon, int n) {
 INTERNAL_PRECISION A, M, c, cp, z, z0, t, epssq;
 int m, czero;
 zolotarev_data *zd;
@@ -481,9 +481,9 @@ zolotarev_data* higham(PRECISION epsilon, int n) {
 /* Converting everything to PRECISION for external use only */
 zd = (zolotarev_data*) malloc(sizeof(zolotarev_data));
-zd -> A = (PRECISION) d -> A;
-zd -> Delta = (PRECISION) d -> Delta;
-zd -> epsilon = (PRECISION) d -> epsilon;
+zd -> A = (ZOLO_PRECISION) d -> A;
+zd -> Delta = (ZOLO_PRECISION) d -> Delta;
+zd -> epsilon = (ZOLO_PRECISION) d -> epsilon;
 zd -> n = d -> n;
 zd -> type = d -> type;
 zd -> dn = d -> dn;
@@ -493,24 +493,24 @@ zolotarev_data* higham(PRECISION epsilon, int n) {
 zd -> deg_num = d -> deg_num;
 zd -> deg_denom = d -> deg_denom;
-zd -> a = (PRECISION*) malloc(zd -> dn * sizeof(PRECISION));
-for (m = 0; m < zd -> dn; m++) zd -> a[m] = (PRECISION) d -> a[m];
+zd -> a = (ZOLO_PRECISION*) malloc(zd -> dn * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> dn; m++) zd -> a[m] = (ZOLO_PRECISION) d -> a[m];
 free(d -> a);
-zd -> ap = (PRECISION*) malloc(zd -> dd * sizeof(PRECISION));
-for (m = 0; m < zd -> dd; m++) zd -> ap[m] = (PRECISION) d -> ap[m];
+zd -> ap = (ZOLO_PRECISION*) malloc(zd -> dd * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> dd; m++) zd -> ap[m] = (ZOLO_PRECISION) d -> ap[m];
 free(d -> ap);
-zd -> alpha = (PRECISION*) malloc(zd -> da * sizeof(PRECISION));
-for (m = 0; m < zd -> da; m++) zd -> alpha[m] = (PRECISION) d -> alpha[m];
+zd -> alpha = (ZOLO_PRECISION*) malloc(zd -> da * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> da; m++) zd -> alpha[m] = (ZOLO_PRECISION) d -> alpha[m];
 free(d -> alpha);
-zd -> beta = (PRECISION*) malloc(zd -> db * sizeof(PRECISION));
-for (m = 0; m < zd -> db; m++) zd -> beta[m] = (PRECISION) d -> beta[m];
+zd -> beta = (ZOLO_PRECISION*) malloc(zd -> db * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> db; m++) zd -> beta[m] = (ZOLO_PRECISION) d -> beta[m];
 free(d -> beta);
-zd -> gamma = (PRECISION*) malloc(zd -> n * sizeof(PRECISION));
-for (m = 0; m < zd -> n; m++) zd -> gamma[m] = (PRECISION) d -> gamma[m];
+zd -> gamma = (ZOLO_PRECISION*) malloc(zd -> n * sizeof(ZOLO_PRECISION));
+for (m = 0; m < zd -> n; m++) zd -> gamma[m] = (ZOLO_PRECISION) d -> gamma[m];
 free(d -> gamma);
 free(d);
@@ -523,17 +523,17 @@ NAMESPACE_END(Grid);
 #ifdef TEST
 #undef ZERO
-#define ZERO ((PRECISION) 0)
+#define ZERO ((ZOLO_PRECISION) 0)
 #undef ONE
-#define ONE ((PRECISION) 1)
+#define ONE ((ZOLO_PRECISION) 1)
 #undef TWO
-#define TWO ((PRECISION) 2)
+#define TWO ((ZOLO_PRECISION) 2)
 /* Evaluate the rational approximation R(x) using the factored form */
-static PRECISION zolotarev_eval(PRECISION x, zolotarev_data* rdata) {
+static ZOLO_PRECISION zolotarev_eval(ZOLO_PRECISION x, zolotarev_data* rdata) {
 int m;
-PRECISION R;
+ZOLO_PRECISION R;
 if (rdata -> type == 0) {
 R = rdata -> A * x;
@@ -551,9 +551,9 @@ static PRECISION zolotarev_eval(PRECISION x, zolotarev_data* rdata) {
 /* Evaluate the rational approximation R(x) using the partial fraction form */
-static PRECISION zolotarev_partfrac_eval(PRECISION x, zolotarev_data* rdata) {
+static ZOLO_PRECISION zolotarev_partfrac_eval(ZOLO_PRECISION x, zolotarev_data* rdata) {
 int m;
-PRECISION R = rdata -> alpha[rdata -> da - 1];
+ZOLO_PRECISION R = rdata -> alpha[rdata -> da - 1];
 for (m = 0; m < rdata -> dd; m++)
 R += rdata -> alpha[m] / (x * x - rdata -> ap[m]);
 if (rdata -> type == 1) R += rdata -> alpha[rdata -> dd] / (x * x);
@@ -568,18 +568,18 @@ static PRECISION zolotarev_partfrac_eval(PRECISION x, zolotarev_data* rdata) {
 * non-signalling overflow this will work correctly since 1/(1/0) = 1/INF = 0,
 * but with signalling overflow you will get an error message. */
-static PRECISION zolotarev_contfrac_eval(PRECISION x, zolotarev_data* rdata) {
+static ZOLO_PRECISION zolotarev_contfrac_eval(ZOLO_PRECISION x, zolotarev_data* rdata) {
 int m;
-PRECISION R = rdata -> beta[0] * x;
+ZOLO_PRECISION R = rdata -> beta[0] * x;
 for (m = 1; m < rdata -> db; m++) R = rdata -> beta[m] * x + ONE / R;
 return R;
 }
 /* Evaluate the rational approximation R(x) using Cayley form */
-static PRECISION zolotarev_cayley_eval(PRECISION x, zolotarev_data* rdata) {
+static ZOLO_PRECISION zolotarev_cayley_eval(ZOLO_PRECISION x, zolotarev_data* rdata) {
 int m;
-PRECISION T;
+ZOLO_PRECISION T;
 T = rdata -> type == 0 ? ONE : -ONE;
 for (m = 0; m < rdata -> n; m++)
@@ -607,7 +607,7 @@ int main(int argc, char** argv) {
 int m, n, plotpts = 5000, type = 0;
 float eps, x, ypferr, ycferr, ycaylerr, maxypferr, maxycferr, maxycaylerr;
 zolotarev_data *rdata;
-PRECISION y;
+ZOLO_PRECISION y;
 FILE *plot_function, *plot_error,
 *plot_partfrac, *plot_contfrac, *plot_cayley;
@@ -626,13 +626,13 @@ int main(int argc, char** argv) {
 }
 rdata = type == 2
-? higham((PRECISION) eps, n)
-: zolotarev((PRECISION) eps, n, type);
+? higham((ZOLO_PRECISION) eps, n)
+: zolotarev((ZOLO_PRECISION) eps, n, type);
 printf("Zolotarev Test: R(epsilon = %g, n = %d, type = %d)\n\t"
 STRINGIFY(VERSION) "\n\t" STRINGIFY(HVERSION)
 "\n\tINTERNAL_PRECISION = " STRINGIFY(INTERNAL_PRECISION)
-"\tPRECISION = " STRINGIFY(PRECISION)
+"\tZOLO_PRECISION = " STRINGIFY(ZOLO_PRECISION)
 "\n\n\tRational approximation of degree (%d,%d), %s at x = 0\n"
 "\tDelta = %g (maximum error)\n\n"
 "\tA = %g (overall factor)\n",
@@ -681,15 +681,15 @@ int main(int argc, char** argv) {
 x = 2.4 * (float) m / plotpts - 1.2;
 if (rdata -> type == 0 || fabs(x) * (float) plotpts > 1.0) {
 /* skip x = 0 for type 1, as R(0) is singular */
-y = zolotarev_eval((PRECISION) x, rdata);
+y = zolotarev_eval((ZOLO_PRECISION) x, rdata);
 fprintf(plot_function, "%g %g\n", x, (float) y);
 fprintf(plot_error, "%g %g\n",
 x, (float)((y - ((x > 0.0 ? ONE : -ONE))) / rdata -> Delta));
-ypferr = (float)((zolotarev_partfrac_eval((PRECISION) x, rdata) - y)
+ypferr = (float)((zolotarev_partfrac_eval((ZOLO_PRECISION) x, rdata) - y)
 / rdata -> Delta);
-ycferr = (float)((zolotarev_contfrac_eval((PRECISION) x, rdata) - y)
+ycferr = (float)((zolotarev_contfrac_eval((ZOLO_PRECISION) x, rdata) - y)
 / rdata -> Delta);
-ycaylerr = (float)((zolotarev_cayley_eval((PRECISION) x, rdata) - y)
+ycaylerr = (float)((zolotarev_cayley_eval((ZOLO_PRECISION) x, rdata) - y)
 / rdata -> Delta);
 if (fabs(x) < 1.0 && fabs(x) > rdata -> epsilon) {
 maxypferr = MAX(maxypferr, fabs(ypferr));

Zolotarev.h

@@ -9,10 +9,10 @@ NAMESPACE_BEGIN(Approx);
 #define HVERSION Header Time-stamp: <14-OCT-2004 09:26:51.00 adk@MISSCONTRARY>
 #ifndef ZOLOTAREV_INTERNAL
-#ifndef PRECISION
-#define PRECISION double
+#ifndef ZOLO_PRECISION
+#define ZOLO_PRECISION double
 #endif
-#define ZPRECISION PRECISION
+#define ZPRECISION ZOLO_PRECISION
 #define ZOLOTAREV_DATA zolotarev_data
 #endif
@@ -77,8 +77,8 @@ typedef struct {
 * zolotarev_data structure. The arguments must satisfy the constraints that
 * epsilon > 0, n > 0, and type = 0 or 1. */
-ZOLOTAREV_DATA* higham(PRECISION epsilon, int n) ;
-ZOLOTAREV_DATA* zolotarev(PRECISION epsilon, int n, int type);
+ZOLOTAREV_DATA* higham(ZOLO_PRECISION epsilon, int n) ;
+ZOLOTAREV_DATA* zolotarev(ZOLO_PRECISION epsilon, int n, int type);
 void zolotarev_free(zolotarev_data *zdata);
 #endif
@@ -86,3 +86,4 @@ void zolotarev_free(zolotarev_data *zdata);
 NAMESPACE_END(Approx);
 NAMESPACE_END(Grid);
 #endif
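
The PRECISION -> ZOLO_PRECISION rename matters because a bare PRECISION macro leaks into every including translation unit and collides with SYCL headers (commit f8ca971dae in the log above). Since the default stays guarded, callers can still override it; a hypothetical usage sketch, not taken from the repository:

// Request a single-precision external Zolotarev interface: defining the
// guarded macro before the include skips the default "double".
#define ZOLO_PRECISION float
#include "Zolotarev.h"   // illustrative path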

Grid/algorithms/blas/BatchedBlas.cc

@@ -0,0 +1,34 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: BatchedBlas.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#include <Grid/GridCore.h>
#include <Grid/algorithms/blas/BatchedBlas.h>
NAMESPACE_BEGIN(Grid);
gridblasHandle_t GridBLAS::gridblasHandle;
int GridBLAS::gridblasInit;
NAMESPACE_END(Grid);
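
The two definitions above are the usual C++ idiom of giving static data members declared in a header exactly one definition in a .cc file; a generic sketch of the same pattern (names are illustrative):

// registry.h
struct Registry { static int count; };   // declaration only
// registry.cc -- exactly one translation unit provides the storage
int Registry::count = 0;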

Grid/algorithms/blas/BatchedBlas.h

@@ -0,0 +1,727 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: BatchedBlas.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#ifdef GRID_HIP
#include <hipblas/hipblas.h>
#endif
#ifdef GRID_CUDA
#include <cublas_v2.h>
#endif
#ifdef GRID_SYCL
#include <oneapi/mkl.hpp>
#endif
#if 0
#define GRID_ONE_MKL
#endif
#ifdef GRID_ONE_MKL
#include <oneapi/mkl.hpp>
#endif
///////////////////////////////////////////////////////////////////////
// Need to rearrange lattice data to be in the right format for a
// batched multiply. Might as well make these static, dense packed
///////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#ifdef GRID_HIP
typedef hipblasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_CUDA
typedef cublasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_SYCL
typedef cl::sycl::queue *gridblasHandle_t;
#endif
#ifdef GRID_ONE_MKL
typedef cl::sycl::queue *gridblasHandle_t;
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP) && !defined(GRID_ONE_MKL)
typedef int32_t gridblasHandle_t;
#endif
enum GridBLASOperation_t { GridBLAS_OP_N, GridBLAS_OP_T, GridBLAS_OP_C } ;
class GridBLAS {
public:
static gridblasHandle_t gridblasHandle;
static int gridblasInit;
static void Init(void)
{
if ( ! gridblasInit ) {
#ifdef GRID_CUDA
std::cout << "cublasCreate"<<std::endl;
cublasCreate(&gridblasHandle);
cublasSetPointerMode(gridblasHandle, CUBLAS_POINTER_MODE_DEVICE);
#endif
#ifdef GRID_HIP
std::cout << "hipblasCreate"<<std::endl;
hipblasCreate(&gridblasHandle);
#endif
#ifdef GRID_SYCL
gridblasHandle = theGridAccelerator;
#endif
#ifdef GRID_ONE_MKL
cl::sycl::cpu_selector selector;
cl::sycl::device selectedDevice { selector };
gridblasHandle =new sycl::queue (selectedDevice);
#endif
gridblasInit=1;
}
}
// Force construct once
GridBLAS() { Init(); };
~GridBLAS() { };
/////////////////////////////////////////////////////////////////////////////////////
// BLAS GEMM conventions:
/////////////////////////////////////////////////////////////////////////////////////
// - C = alpha A * B + beta C
// Dimensions:
// - C_m.n
// - A_m.k
// - B_k.n
// - Flops = 8 M N K
// - Bytes = 2*sizeof(word) * (MN+MK+KN)
// M=60, N=12
// Flop/Byte = 8 . 60.60.12 / (60.12+60.60+60.12)/16 = 4 so expect about 4 TF/s on a GCD
/////////////////////////////////////////////////////////////////////////////////////
void synchronise(void)
{
#ifdef GRID_HIP
auto err = hipDeviceSynchronize();
assert(err==hipSuccess);
#endif
#ifdef GRID_CUDA
auto err = cudaDeviceSynchronize();
assert(err==cudaSuccess);
#endif
#ifdef GRID_SYCL
accelerator_barrier();
#endif
#ifdef GRID_ONE_MKL
gridblasHandle->wait();
#endif
}
void gemmBatched(int m,int n, int k,
ComplexD alpha,
deviceVector<ComplexD*> &Amk, // pointer list to matrices
deviceVector<ComplexD*> &Bkn,
ComplexD beta,
deviceVector<ComplexD*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
ComplexF alpha,
deviceVector<ComplexF*> &Amk, // pointer list to matrices
deviceVector<ComplexF*> &Bkn,
ComplexF beta,
deviceVector<ComplexF*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
RealD alpha,
deviceVector<RealD*> &Amk, // pointer list to matrices
deviceVector<RealD*> &Bkn,
RealD beta,
deviceVector<RealD*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
RealF alpha,
deviceVector<RealF*> &Amk, // pointer list to matrices
deviceVector<RealF*> &Bkn,
RealF beta,
deviceVector<RealF*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
ComplexD alpha,
deviceVector<ComplexD*> &Amk, // pointer list to matrices
deviceVector<ComplexD*> &Bkn,
ComplexD beta,
deviceVector<ComplexD*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x b column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<ComplexD> alpha_p(1);
static deviceVector<ComplexD> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
RealD t0=usecond();
// std::cout << "ZgemmBatched mnk "<<m<<","<<n<<","<<k<<" count "<<batchCount<<std::endl;
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasZgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(hipblasDoubleComplex *) &alpha_p[0],
(hipblasDoubleComplex **)&Amk[0], lda,
(hipblasDoubleComplex **)&Bkn[0], ldb,
(hipblasDoubleComplex *) &beta_p[0],
(hipblasDoubleComplex **)&Cmn[0], ldc,
batchCount);
// std::cout << " hipblas return code " <<(int)err<<std::endl;
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasZgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(cuDoubleComplex *) &alpha_p[0],
(cuDoubleComplex **)&Amk[0], lda,
(cuDoubleComplex **)&Bkn[0], ldb,
(cuDoubleComplex *) &beta_p[0],
(cuDoubleComplex **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Need a default/reference implementation
int sda = lda*k;
int sdb = ldb*k;
int sdc = ldc*n;
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda ] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = (alpha)*c_mn + (beta)*Cmn[p][mm + nn*ldc ];
}
}
}
#endif
// synchronise();
RealD t1=usecond();
RealD flops = 8.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(ComplexD)*(m*k+k*n+m*n)*batchCount;
// std::cout <<GridLogMessage<< " batched Blas copy "<<(t0-t2)/1.e3 <<" ms "<<std::endl;
// std::cout <<GridLogMessage<< " batched Blas zGemm call "<<m<<","<<n<<","<<k<<" "<< flops/(t1-t0)/1.e3 <<" GF/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
// std::cout <<GridLogMessage<< " batched Blas zGemm call "<<m<<","<<n<<","<<k<<" "<< bytes/(t1-t0)/1.e3 <<" GB/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
}
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
ComplexF alpha,
deviceVector<ComplexF*> &Amk, // pointer list to matrices
deviceVector<ComplexF*> &Bkn,
ComplexF beta,
deviceVector<ComplexF*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x b column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<ComplexF> alpha_p(1);
static deviceVector<ComplexF> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexF));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexF));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasCgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(hipblasComplex *) &alpha_p[0],
(hipblasComplex **)&Amk[0], lda,
(hipblasComplex **)&Bkn[0], ldb,
(hipblasComplex *) &beta_p[0],
(hipblasComplex **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasCgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(cuComplex *) &alpha_p[0],
(cuComplex **)&Amk[0], lda,
(cuComplex **)&Bkn[0], ldb,
(cuComplex *) &beta_p[0],
(cuComplex **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
int sda = lda*k;
int sdb = ldb*k;
int sdc = ldc*n;
ComplexF alphaf(real(alpha),imag(alpha));
ComplexF betaf(real(beta),imag(beta));
// Need a default/reference implementation
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexF c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda ] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = (alphaf)*c_mn + (betaf)*Cmn[p][mm + nn*ldc ];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 8.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(ComplexF)*(m*k+k*n+m*n)*batchCount;
}
///////////////////////////////////////////////////////////////////////////
// Single precision real GEMM
///////////////////////////////////////////////////////////////////////////
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
RealF alpha,
deviceVector<RealF*> &Amk, // pointer list to matrices
deviceVector<RealF*> &Bkn,
RealF beta,
deviceVector<RealF*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x b column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<RealF> alpha_p(1);
static deviceVector<RealF> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(RealF));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(RealF));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasSgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(float *) &alpha_p[0],
(float **)&Amk[0], lda,
(float **)&Bkn[0], ldb,
(float *) &beta_p[0],
(float **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasSgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(float *) &alpha_p[0],
(float **)&Amk[0], lda,
(float **)&Bkn[0], ldb,
(float *) &beta_p[0],
(float **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
int sda = lda*k;
int sdb = ldb*k;
int sdc = ldc*n;
// Need a default/reference implementation
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
RealD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda ] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = (alpha)*c_mn + (beta)*Cmn[p][mm + nn*ldc ];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 2.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(RealF)*(m*k+k*n+m*n)*batchCount;
}
///////////////////////////////////////////////////////////////////////////
// Double precision real GEMM
///////////////////////////////////////////////////////////////////////////
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
RealD alpha,
deviceVector<RealD*> &Amk, // pointer list to matrices
deviceVector<RealD*> &Bkn,
RealD beta,
deviceVector<RealD*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x b column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<RealD> alpha_p(1);
static deviceVector<RealD> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(RealD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(RealD));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasDgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasDgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
/*
int64_t m64=m;
int64_t n64=n;
int64_t k64=k;
int64_t batchCount64=batchCount;
oneapi::mkl::blas::column_major::gemm_batch(*theGridAccelerator,
oneapi::mkl::transpose::N,
oneapi::mkl::transpose::N,
&m64,&n64,&k64,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
1,&batchCount64);
*/
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
int sda = lda*k;
int sdb = ldb*k;
int sdc = ldc*n;
// Default/reference implementation; note it assumes OpA==OpB==GridBLAS_OP_N
// (the transposed cases are not handled in this fallback)
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
RealD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda ] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = (alpha)*c_mn + (beta)*Cmn[p][mm + nn*ldc ];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 2.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(RealD)*(m*k+k*n+m*n)*batchCount;
}
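The commented-out block above is close to, but not exactly, oneMKL's USM group API: the namespace is oneapi::mkl rather than onemkl, and the transpose, size and leading-dimension arguments must be passed as arrays with one entry per group. A hedged, untested sketch of what the missing GRID_SYCL branch might look like, reusing the enclosing function's arguments and assuming gridblasHandle is the sycl::queue* used by the strided call further down:

  int64_t m64=m, n64=n, k64=k;
  int64_t lda64=lda, ldb64=ldb, ldc64=ldc;
  int64_t batchCount64=batchCount;
  oneapi::mkl::transpose notransp = oneapi::mkl::transpose::N; // ignores OpA/OpB for brevity
  oneapi::mkl::blas::column_major::gemm_batch(*gridblasHandle,
      &notransp, &notransp,
      &m64, &n64, &k64,
      &alpha,
      (const double **)&Amk[0], &lda64,
      (const double **)&Bkn[0], &ldb64,
      &beta,
      (double **)&Cmn[0], &ldc64,
      1, &batchCount64);        // a single group of batchCount GEMMs
  gridblasHandle->wait();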
////////////////////////////////////////////////////////////////////////////////////////////////
// Strided case used by benchmark, but generally unused in Grid
// Keep a code example in double complex, but don't generate the single and real variants for now
////////////////////////////////////////////////////////////////////////////////////////////////
void gemmStridedBatched(int m,int n, int k,
ComplexD alpha,
ComplexD* Amk, // pointer list to matrices
ComplexD* Bkn,
ComplexD beta,
ComplexD* Cmn,
int batchCount)
{
// Column-major storage throughout; no transposes are taken
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
int sda = m*k;
int sdb = k*n;
int sdc = m*n;
deviceVector<ComplexD> alpha_p(1);
deviceVector<ComplexD> beta_p(1);
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
// std::cout << "blasZgemmStridedBatched mnk "<<m<<","<<n<<","<<k<<" count "<<batchCount<<std::endl;
// std::cout << "blasZgemmStridedBatched ld "<<lda<<","<<ldb<<","<<ldc<<std::endl;
// std::cout << "blasZgemmStridedBatched sd "<<sda<<","<<sdb<<","<<sdc<<std::endl;
#ifdef GRID_HIP
auto err = hipblasZgemmStridedBatched(gridblasHandle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
m,n,k,
(hipblasDoubleComplex *) &alpha_p[0],
(hipblasDoubleComplex *) Amk, lda, sda,
(hipblasDoubleComplex *) Bkn, ldb, sdb,
(hipblasDoubleComplex *) &beta_p[0],
(hipblasDoubleComplex *) Cmn, ldc, sdc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasZgemmStridedBatched(gridblasHandle,
CUBLAS_OP_N,
CUBLAS_OP_N,
m,n,k,
(cuDoubleComplex *) &alpha_p[0],
(cuDoubleComplex *) Amk, lda, sda,
(cuDoubleComplex *) Bkn, ldb, sdb,
(cuDoubleComplex *) &beta_p[0],
(cuDoubleComplex *) Cmn, ldc, sdc,
batchCount);
#endif
#if defined(GRID_SYCL) || defined(GRID_ONE_MKL)
oneapi::mkl::blas::column_major::gemm_batch(*gridblasHandle,
oneapi::mkl::transpose::N,
oneapi::mkl::transpose::N,
m,n,k,
alpha,
(const ComplexD *)Amk,lda,sda,
(const ComplexD *)Bkn,ldb,sdb,
beta,
(ComplexD *)Cmn,ldc,sdc,
batchCount);
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP) && !defined(GRID_ONE_MKL)
// Need a default/reference implementation
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[mm + kk*lda + p*sda] * Bkn[kk + nn*ldb + p*sdb];
Cmn[mm + nn*ldc + p*sdc] = (alpha)*c_mn + (beta)*Cmn[mm + nn*ldc + p*sdc];
}
}
}
#endif
}
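For contrast with the pointer-list interface above, the strided form needs no device-side pointer tables: the p-th matrices are simply Amk+p*sda, Bkn+p*sdb and Cmn+p*sdc. A hypothetical call (sizes invented; blas stands for an instance of the surrounding class):

  template<class BLAS> void exampleStridedUsage(BLAS &blas)
  {
    const int m=8, n=8, k=8, nbatch=32;
    deviceVector<ComplexD> A(m*k*nbatch), B(k*n*nbatch), C(m*n*nbatch);
    blas.gemmStridedBatched(m,n,k,
                            ComplexD(1.0), &A[0], &B[0],
                            ComplexD(0.0), &C[0],
                            nbatch);
    blas.synchronise();   // synchronise() assumed, as used in benchmark() below
  }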
double benchmark(int M, int N, int K, int BATCH)
{
int32_t N_A = M*K*BATCH;
int32_t N_B = K*N*BATCH;
int32_t N_C = M*N*BATCH;
deviceVector<ComplexD> A(N_A); acceleratorMemSet(&A[0],0,N_A*sizeof(ComplexD));
deviceVector<ComplexD> B(N_B); acceleratorMemSet(&B[0],0,N_B*sizeof(ComplexD));
deviceVector<ComplexD> C(N_C); acceleratorMemSet(&C[0],0,N_C*sizeof(ComplexD));
ComplexD alpha(1.0);
ComplexD beta (1.0);
RealD flops = 8.0*M*N*K*BATCH; // 8 real flops per complex multiply-add
int ncall=10;
RealD t0 = usecond();
for(int i=0;i<ncall;i++){
gemmStridedBatched(M,N,K,
alpha,
&A[0], // m x k
&B[0], // k x n
beta,
&C[0], // m x n
BATCH);
}
synchronise();
RealD t1 = usecond();
RealD bytes = 1.0*sizeof(ComplexD)*(M*N*2+N*K+M*K)*BATCH;
flops = 8.0*M*N*K*BATCH*ncall;
flops = flops/(t1-t0)/1.e3;
return flops; // Returns gigaflops
}
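// Worked unit check for the return value (hypothetical numbers): with
// M=N=K=64, BATCH=256, ncall=10, flops = 8*64^3*256*10 ~= 5.37e9.
// usecond() is in microseconds, so if t1-t0 = 5000 us then
// flops/(t1-t0) ~= 1.07e6 flop/us = 1.07e12 flop/s; dividing by 1.e3
// returns ~1074, i.e. gigaflop/s, matching the comment above.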
};
NAMESPACE_END(Grid);

View File

@ -176,6 +176,7 @@ template<class T> using cshiftAllocator = std::allocator<T>;
template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using deviceVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;
NAMESPACE_END(Grid);

View File

@ -348,6 +348,7 @@ double CartesianCommunicator::StencilSendToRecvFrom( void *xmit,
return offbytes;
}
#undef NVLINK_GET // Define to use get instead of put DMA
double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,int dox,
@ -380,9 +381,15 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
list.push_back(rrq);
off_node_bytes+=rbytes;
}
#ifdef NVLINK_GET
void *shm = (void *) this->ShmBufferTranslate(from,xmit);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes);
#endif
}
if (dox) {
// rcrc = crc32(rcrc,(unsigned char *)recv,bytes);
if ( (gdest == MPI_UNDEFINED) || Stencil_force_mpi ) {
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, xbytes, MPI_CHAR,dest,tag,communicator_halo[commdir],&xrq);
@ -390,9 +397,12 @@ double CartesianCommunicator::StencilSendToRecvFromBegin(std::vector<CommsReques
list.push_back(xrq);
off_node_bytes+=xbytes;
} else {
#ifndef NVLINK_GET
void *shm = (void *) this->ShmBufferTranslate(dest,recv);
assert(shm!=NULL);
acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes);
#endif
}
}
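Read as prose, the NVLINK_GET conditionals in this hunk select between two intra-node DMA directions:

// PUT (default):    the sender maps the peer's recv buffer with
//                   ShmBufferTranslate(dest,recv) and pushes via
//                   acceleratorCopyDeviceToDeviceAsynch(xmit,shm,xbytes).
// GET (NVLINK_GET): the receiver maps the peer's xmit buffer with
//                   ShmBufferTranslate(from,xmit) and pulls via
//                   acceleratorCopyDeviceToDeviceAsynch(shm,recv,rbytes).
// Either way, the asynchronous copies are fenced by the
// acceleratorCopySynchronise() in StencilSendToRecvFromComplete (next hunk).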
@ -402,6 +412,8 @@ void CartesianCommunicator::StencilSendToRecvFromComplete(std::vector<CommsReque
{
int nreq=list.size();
acceleratorCopySynchronise();
if (nreq==0) return;
std::vector<MPI_Status> status(nreq);

View File

@ -40,6 +40,9 @@ int GlobalSharedMemory::_ShmAlloc;
uint64_t GlobalSharedMemory::_ShmAllocBytes;
std::vector<void *> GlobalSharedMemory::WorldShmCommBufs;
#ifndef ACCELERATOR_AWARE_MPI
void * GlobalSharedMemory::HostCommBuf;
#endif
Grid_MPI_Comm GlobalSharedMemory::WorldShmComm;
int GlobalSharedMemory::WorldShmRank;
@ -66,6 +69,26 @@ void GlobalSharedMemory::SharedMemoryFree(void)
/////////////////////////////////
// Alloc, free shmem region
/////////////////////////////////
#ifndef ACCELERATOR_AWARE_MPI
void *SharedMemory::HostBufferMalloc(size_t bytes){
void *ptr = (void *)host_heap_top;
host_heap_top += bytes;
host_heap_bytes+= bytes;
if (host_heap_bytes >= host_heap_size) {
std::cout<< " HostBufferMalloc exceeded heap size -- try increasing with --shm <MB> flag" <<std::endl;
std::cout<< " Parameter specified in units of MB (megabytes) " <<std::endl;
std::cout<< " Current alloc is " << (bytes/(1024*1024)) <<"MB"<<std::endl;
std::cout<< " Current bytes is " << (host_heap_bytes/(1024*1024)) <<"MB"<<std::endl;
std::cout<< " Current heap is " << (host_heap_size/(1024*1024)) <<"MB"<<std::endl;
assert(host_heap_bytes<host_heap_size);
}
return ptr;
}
void SharedMemory::HostBufferFreeAll(void) {
host_heap_top =(size_t)HostCommBuf;
host_heap_bytes=0;
}
#endif
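A hypothetical staging helper showing how these two calls pair up when MPI is not accelerator aware (exampleStagedSend and its arguments are invented; acceleratorCopyFromDevice is assumed to take (device,host,bytes), mirroring acceleratorCopyToDevice):

  void exampleStagedSend(SharedMemory &shm, void *xmit_d, int dest, size_t bytes, MPI_Comm comm)
  {
    void *host = shm.HostBufferMalloc(bytes);            // bump-allocate staging space
    acceleratorCopyFromDevice(xmit_d, host, bytes);      // device -> host
    MPI_Send(host, (int)bytes, MPI_CHAR, dest, 0, comm); // MPI sees host memory only
  }
  // ... later, once every staged request has completed:
  //   shm.HostBufferFreeAll();  // rewinds the whole bump heap in one go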
void *SharedMemory::ShmBufferMalloc(size_t bytes){
// bytes = (bytes+sizeof(vRealD))&(~(sizeof(vRealD)-1));// align up bytes
void *ptr = (void *)heap_top;

View File

@ -75,7 +75,9 @@ public:
static int Hugepages;
static std::vector<void *> WorldShmCommBufs;
#ifndef ACCELERATOR_AWARE_MPI
static void *HostCommBuf;
#endif
static Grid_MPI_Comm WorldComm;
static int WorldRank;
static int WorldSize;
@ -120,6 +122,13 @@ private:
size_t heap_bytes;
size_t heap_size;
#ifndef ACCELERATOR_AWARE_MPI
size_t host_heap_top; // set in free all
size_t host_heap_bytes;// set in free all
void *HostCommBuf; // set in SetCommunicator
size_t host_heap_size; // set in SetCommunicator
#endif
protected:
Grid_MPI_Comm ShmComm; // for barriers
@ -151,7 +160,10 @@ public:
void *ShmBufferTranslate(int rank,void * local_p);
void *ShmBufferMalloc(size_t bytes);
void ShmBufferFreeAll(void) ;
#ifndef ACCELERATOR_AWARE_MPI
void *HostBufferMalloc(size_t bytes);
void HostBufferFreeAll(void);
#endif
//////////////////////////////////////////////////////////////////////////
// Make info on Nodes & ranks and Shared memory available
//////////////////////////////////////////////////////////////////////////

View File

@ -39,9 +39,11 @@ Author: Christoph Lehner <christoph@lhnr.de>
#include <hip/hip_runtime_api.h>
#endif
#ifdef GRID_SYCL
#ifdef ACCELERATOR_AWARE_MPI
#define GRID_SYCL_LEVEL_ZERO_IPC
#define SHM_SOCKETS
#endif
#include <syscall.h>
#define SHM_SOCKETS
#endif
#include <sys/socket.h>
@ -512,46 +514,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
// Hugetlbfs mapping intended
////////////////////////////////////////////////////////////////////////////////////////////
#if defined(GRID_CUDA) ||defined(GRID_HIP) || defined(GRID_SYCL)
//if defined(GRID_SYCL)
#if 0
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
assert(_ShmSetup==1);
assert(_ShmAlloc==0);
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// allocate the pointer array for shared windows for our group
//////////////////////////////////////////////////////////////////////////////////////////////////////////
MPI_Barrier(WorldShmComm);
WorldShmCommBufs.resize(WorldShmSize);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
exit(EXIT_FAILURE);
}
std::cout << WorldRank << Mheader " SharedMemoryMPI.cc acceleratorAllocDevice "<< bytes
<< "bytes at "<< std::hex<< ShmCommBuf <<std::dec<<" for comms buffers " <<std::endl;
SharedMemoryZero(ShmCommBuf,bytes);
assert(WorldShmSize == 1);
for(int r=0;r<WorldShmSize;r++){
WorldShmCommBufs[r] = ShmCommBuf;
}
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#endif
#if defined(GRID_CUDA) ||defined(GRID_HIP) ||defined(GRID_SYCL)
void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
{
void * ShmCommBuf ;
@ -574,6 +536,9 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Each MPI rank should allocate our own buffer
///////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifndef ACCELERATOR_AWARE_MPI
HostCommBuf= malloc(bytes);
#endif
ShmCommBuf = acceleratorAllocDevice(bytes);
if (ShmCommBuf == (void *)NULL ) {
std::cerr << " SharedMemoryMPI.cc acceleratorAllocDevice failed NULL pointer for " << bytes<<" bytes " << std::endl;
@ -604,8 +569,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_context());
ze_ipc_mem_handle_t ihandle;
clone_mem_t handle;
@ -738,7 +703,6 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
_ShmAllocBytes=bytes;
_ShmAlloc=1;
}
#endif
#else
#ifdef GRID_MPI3_SHMMMAP
@ -962,6 +926,12 @@ void SharedMemory::SetCommunicator(Grid_MPI_Comm comm)
}
ShmBufferFreeAll();
#ifndef ACCELERATOR_AWARE_MPI
host_heap_size = heap_size;
HostCommBuf= GlobalSharedMemory::HostCommBuf;
HostBufferFreeAll();
#endif
/////////////////////////////////////////////////////////////////////
// find comm ranks in our SHM group (i.e. which ranks are on our node)
/////////////////////////////////////////////////////////////////////

View File

@ -29,8 +29,27 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
NAMESPACE_BEGIN(Grid);
extern Vector<std::pair<int,int> > Cshift_table;
extern std::vector<std::pair<int,int> > Cshift_table;
extern commVector<std::pair<int,int> > Cshift_table_device;
inline std::pair<int,int> *MapCshiftTable(void)
{
// GPU version
#ifdef ACCELERATOR_CSHIFT
uint64_t sz=Cshift_table.size();
if (Cshift_table_device.size()!=sz ) {
Cshift_table_device.resize(sz);
}
acceleratorCopyToDevice((void *)&Cshift_table[0],
(void *)&Cshift_table_device[0],
sizeof(Cshift_table[0])*sz);
return &Cshift_table_device[0];
#else
return &Cshift_table[0];
#endif
// CPU version uses the identity map
}
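// note: the host-side table is re-copied to the device on every call here;
// only the device allocation is reused (guarded by the size check above)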
///////////////////////////////////////////////////////////////////
// Gather for when there is no need to SIMD split
///////////////////////////////////////////////////////////////////
@ -74,8 +93,8 @@ Gather_plane_simple (const Lattice<vobj> &rhs,cshiftVector<vobj> &buffer,int dim
}
{
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
auto table = MapCshiftTable();
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
accelerator_for(i,ent,vobj::Nsimd(),{
coalescedWrite(buffer_p[table[i].first],coalescedRead(rhs_v[table[i].second]));
@ -225,7 +244,7 @@ template<class vobj> void Scatter_plane_simple (Lattice<vobj> &rhs,cshiftVector<
{
auto buffer_p = & buffer[0];
auto table = &Cshift_table[0];
auto table = MapCshiftTable();
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorWrite);
accelerator_for(i,ent,vobj::Nsimd(),{
@ -297,30 +316,6 @@ template<class vobj> void Scatter_plane_merge(Lattice<vobj> &rhs,ExtractPointerA
}
}
#if (defined(GRID_CUDA) || defined(GRID_HIP)) && defined(ACCELERATOR_CSHIFT)
template <typename T>
T iDivUp(T a, T b) // Round a / b to nearest higher integer value
{ return (a % b != 0) ? (a / b + 1) : (a / b); }
template <typename T>
__global__ void populate_Cshift_table(T* vector, T lo, T ro, T e1, T e2, T stride)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= e1*e2) return;
int n, b, o;
n = idx / e2;
b = idx % e2;
o = n*stride + b;
vector[2*idx + 0] = lo + o;
vector[2*idx + 1] = ro + o;
}
#endif
//////////////////////////////////////////////////////
// local to node block strided copies
//////////////////////////////////////////////////////
@ -345,20 +340,12 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
int ent=0;
if(cbmask == 0x3 ){
#if (defined(GRID_CUDA) || defined(GRID_HIP)) && defined(ACCELERATOR_CSHIFT)
ent = e1*e2;
dim3 blockSize(acceleratorThreads());
dim3 gridSize(iDivUp((unsigned int)ent, blockSize.x));
populate_Cshift_table<<<gridSize, blockSize>>>(&Cshift_table[0].first, lo, ro, e1, e2, stride);
accelerator_barrier();
#else
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride+b;
Cshift_table[ent++] = std::pair<int,int>(lo+o,ro+o);
}
}
#endif
} else {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
@ -372,7 +359,7 @@ template<class vobj> void Copy_plane(Lattice<vobj>& lhs,const Lattice<vobj> &rhs
}
{
auto table = &Cshift_table[0];
auto table = MapCshiftTable();
#ifdef ACCELERATOR_CSHIFT
autoView(rhs_v , rhs, AcceleratorRead);
autoView(lhs_v , lhs, AcceleratorWrite);
@ -409,19 +396,11 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
int ent=0;
if ( cbmask == 0x3 ) {
#if (defined(GRID_CUDA) || defined(GRID_HIP)) && defined(ACCELERATOR_CSHIFT)
ent = e1*e2;
dim3 blockSize(acceleratorThreads());
dim3 gridSize(iDivUp((unsigned int)ent, blockSize.x));
populate_Cshift_table<<<gridSize, blockSize>>>(&Cshift_table[0].first, lo, ro, e1, e2, stride);
accelerator_barrier();
#else
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int o =n*stride;
Cshift_table[ent++] = std::pair<int,int>(lo+o+b,ro+o+b);
}}
#endif
} else {
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
@ -432,7 +411,7 @@ template<class vobj> void Copy_plane_permute(Lattice<vobj>& lhs,const Lattice<vo
}
{
auto table = &Cshift_table[0];
auto table = MapCshiftTable();
#ifdef ACCELERATOR_CSHIFT
autoView( rhs_v, rhs, AcceleratorRead);
autoView( lhs_v, lhs, AcceleratorWrite);

View File

@ -52,7 +52,8 @@ template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension
int comm_dim = rhs.Grid()->_processors[dimension] >1 ;
int splice_dim = rhs.Grid()->_simd_layout[dimension]>1 && (comm_dim);
RealD t1,t0;
t0=usecond();
if ( !comm_dim ) {
//std::cout << "CSHIFT: Cshift_local" <<std::endl;
Cshift_local(ret,rhs,dimension,shift); // Handles checkerboarding
@ -63,6 +64,8 @@ template<class vobj> Lattice<vobj> Cshift(const Lattice<vobj> &rhs,int dimension
//std::cout << "CSHIFT: Cshift_comms" <<std::endl;
Cshift_comms(ret,rhs,dimension,shift);
}
t1=usecond();
// std::cout << GridLogPerformance << "Cshift took "<< (t1-t0)/1e3 << " ms"<<std::endl;
return ret;
}
@ -127,16 +130,20 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
int cb= (cbmask==0x2)? Odd : Even;
int sshift= rhs.Grid()->CheckerBoardShiftForCB(rhs.Checkerboard(),dimension,shift,cb);
RealD tcopy=0.0;
RealD tgather=0.0;
RealD tscatter=0.0;
RealD tcomms=0.0;
uint64_t xbytes=0;
for(int x=0;x<rd;x++){
int sx = (x+sshift)%rd;
int comm_proc = ((x+sshift)/rd)%pd;
if (comm_proc==0) {
tcopy-=usecond();
Copy_plane(ret,rhs,dimension,x,sx,cbmask);
tcopy+=usecond();
} else {
int words = buffer_size;
@ -144,26 +151,39 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
int bytes = words * sizeof(vobj);
tgather-=usecond();
Gather_plane_simple (rhs,send_buf,dimension,sx,cbmask);
tgather+=usecond();
// int rank = grid->_processor;
int recv_from_rank;
int xmit_to_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
tcomms-=usecond();
// grid->Barrier();
grid->SendToRecvFrom((void *)&send_buf[0],
xmit_to_rank,
(void *)&recv_buf[0],
recv_from_rank,
bytes);
xbytes+=bytes;
// grid->Barrier();
tcomms+=usecond();
grid->Barrier();
tscatter-=usecond();
Scatter_plane_simple (ret,recv_buf,dimension,x,cbmask);
tscatter+=usecond();
}
}
/*
std::cout << GridLogPerformance << " Cshift copy "<<tcopy/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift gather "<<tgather/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift scatter "<<tscatter/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift comm "<<tcomms/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
*/
}
template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
@ -190,6 +210,12 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
assert(shift>=0);
assert(shift<fd);
RealD tcopy=0.0;
RealD tgather=0.0;
RealD tscatter=0.0;
RealD tcomms=0.0;
uint64_t xbytes=0;
int permute_type=grid->PermuteType(dimension);
///////////////////////////////////////////////
@ -227,7 +253,9 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
pointers[i] = &send_buf_extract[i][0];
}
int sx = (x+sshift)%rd;
tgather-=usecond();
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
tgather+=usecond();
for(int i=0;i<Nsimd;i++){
@ -252,7 +280,8 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
tcomms-=usecond();
// grid->Barrier();
send_buf_extract_mpi = &send_buf_extract[nbr_lane][0];
recv_buf_extract_mpi = &recv_buf_extract[i][0];
@ -262,7 +291,9 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
recv_from_rank,
bytes);
grid->Barrier();
xbytes+=bytes;
// grid->Barrier();
tcomms+=usecond();
rpointers[i] = &recv_buf_extract[i][0];
} else {
@ -270,9 +301,17 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
}
}
tscatter-=usecond();
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
tscatter+=usecond();
}
/*
std::cout << GridLogPerformance << " Cshift (s) copy "<<tcopy/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) gather "<<tgather/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) scatter "<<tscatter/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) comm "<<tcomms/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
*/
}
#else
template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
@ -292,6 +331,11 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
assert(comm_dim==1);
assert(shift>=0);
assert(shift<fd);
RealD tcopy=0.0;
RealD tgather=0.0;
RealD tscatter=0.0;
RealD tcomms=0.0;
uint64_t xbytes=0;
int buffer_size = rhs.Grid()->_slice_nblock[dimension]*rhs.Grid()->_slice_block[dimension];
static cshiftVector<vobj> send_buf_v; send_buf_v.resize(buffer_size);
@ -315,7 +359,9 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
if (comm_proc==0) {
tcopy-=usecond();
Copy_plane(ret,rhs,dimension,x,sx,cbmask);
tcopy+=usecond();
} else {
@ -324,7 +370,9 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
int bytes = words * sizeof(vobj);
tgather-=usecond();
Gather_plane_simple (rhs,send_buf_v,dimension,sx,cbmask);
tgather+=usecond();
// int rank = grid->_processor;
int recv_from_rank;
@ -332,7 +380,8 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
tcomms-=usecond();
// grid->Barrier();
acceleratorCopyDeviceToDevice((void *)&send_buf_v[0],(void *)&send_buf[0],bytes);
grid->SendToRecvFrom((void *)&send_buf[0],
@ -340,13 +389,24 @@ template<class vobj> void Cshift_comms(Lattice<vobj> &ret,const Lattice<vobj> &r
(void *)&recv_buf[0],
recv_from_rank,
bytes);
xbytes+=bytes;
acceleratorCopyDeviceToDevice((void *)&recv_buf[0],(void *)&recv_buf_v[0],bytes);
grid->Barrier();
// grid->Barrier();
tcomms+=usecond();
tscatter-=usecond();
Scatter_plane_simple (ret,recv_buf_v,dimension,x,cbmask);
tscatter+=usecond();
}
}
/*
std::cout << GridLogPerformance << " Cshift copy "<<tcopy/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift gather "<<tgather/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift scatter "<<tscatter/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift comm "<<tcomms/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s "<<2*xbytes<< " Bytes "<<std::endl;
*/
}
template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vobj> &rhs,int dimension,int shift,int cbmask)
@ -372,6 +432,11 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
assert(simd_layout==2);
assert(shift>=0);
assert(shift<fd);
RealD tcopy=0.0;
RealD tgather=0.0;
RealD tscatter=0.0;
RealD tcomms=0.0;
uint64_t xbytes=0;
int permute_type=grid->PermuteType(dimension);
@ -414,8 +479,10 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
for(int i=0;i<Nsimd;i++){
pointers[i] = &send_buf_extract[i][0];
}
tgather-=usecond();
int sx = (x+sshift)%rd;
Gather_plane_extract(rhs,pointers,dimension,sx,cbmask);
tgather+=usecond();
for(int i=0;i<Nsimd;i++){
@ -440,7 +507,8 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
if(nbr_proc){
grid->ShiftedRanks(dimension,nbr_proc,xmit_to_rank,recv_from_rank);
grid->Barrier();
tcomms-=usecond();
// grid->Barrier();
acceleratorCopyDeviceToDevice((void *)&send_buf_extract[nbr_lane][0],(void *)send_buf_extract_mpi,bytes);
grid->SendToRecvFrom((void *)send_buf_extract_mpi,
@ -449,17 +517,28 @@ template<class vobj> void Cshift_comms_simd(Lattice<vobj> &ret,const Lattice<vo
recv_from_rank,
bytes);
acceleratorCopyDeviceToDevice((void *)recv_buf_extract_mpi,(void *)&recv_buf_extract[i][0],bytes);
xbytes+=bytes;
grid->Barrier();
// grid->Barrier();
tcomms+=usecond();
rpointers[i] = &recv_buf_extract[i][0];
} else {
rpointers[i] = &send_buf_extract[nbr_lane][0];
}
}
tscatter-=usecond();
Scatter_plane_merge(ret,rpointers,dimension,x,cbmask);
}
tscatter+=usecond();
}
/*
std::cout << GridLogPerformance << " Cshift (s) copy "<<tcopy/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) gather "<<tgather/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) scatter "<<tscatter/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift (s) comm "<<tcomms/1e3<<" ms"<<std::endl;
std::cout << GridLogPerformance << " Cshift BW "<<(2.0*xbytes)/tcomms<<" MB/s"<<std::endl;
*/
}
#endif
NAMESPACE_END(Grid);

View File

@ -1,4 +1,5 @@
#include <Grid/GridCore.h>
NAMESPACE_BEGIN(Grid);
Vector<std::pair<int,int> > Cshift_table;
std::vector<std::pair<int,int> > Cshift_table;
commVector<std::pair<int,int> > Cshift_table_device;
NAMESPACE_END(Grid);

View File

@ -35,6 +35,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_transpose.h>
#include <Grid/lattice/Lattice_local.h>
#include <Grid/lattice/Lattice_reduction.h>
#include <Grid/lattice/Lattice_crc.h>
#include <Grid/lattice/Lattice_peekpoke.h>
#include <Grid/lattice/Lattice_reality.h>
#include <Grid/lattice/Lattice_real_imag.h>
@ -46,4 +47,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_unary.h>
#include <Grid/lattice/Lattice_transfer.h>
#include <Grid/lattice/Lattice_basis.h>
#include <Grid/lattice/Lattice_crc.h>
#include <Grid/lattice/PaddedCell.h>

View File

@ -345,7 +345,9 @@ GridUnopClass(UnaryNot, Not(a));
GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnarySpTa, SpTa(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryProjectOnSpGroup, ProjectOnSpGroup(a));
GridUnopClass(UnaryTimesI, timesI(a));
GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
GridUnopClass(UnaryAbs, abs(a));
@ -456,7 +458,9 @@ GRID_DEF_UNOP(operator!, UnaryNot);
GRID_DEF_UNOP(trace, UnaryTrace);
GRID_DEF_UNOP(transpose, UnaryTranspose);
GRID_DEF_UNOP(Ta, UnaryTa);
GRID_DEF_UNOP(SpTa, UnarySpTa);
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
GRID_DEF_UNOP(ProjectOnSpGroup, UnaryProjectOnSpGroup);
GRID_DEF_UNOP(timesI, UnaryTimesI);
GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
GRID_DEF_UNOP(abs, UnaryAbs); // abs overloaded in cmath C++98; DON'T do the

View File

@ -270,5 +270,42 @@ RealD axpby_norm(Lattice<vobj> &ret,sobj a,sobj b,const Lattice<vobj> &x,const L
return axpby_norm_fast(ret,a,b,x,y);
}
/// Trace product
template<class obj> auto traceProduct(const Lattice<obj> &rhs_1,const Lattice<obj> &rhs_2)
-> Lattice<decltype(trace(obj()))>
{
typedef decltype(trace(obj())) robj;
Lattice<robj> ret_i(rhs_1.Grid());
autoView( rhs1 , rhs_1, AcceleratorRead);
autoView( rhs2 , rhs_2, AcceleratorRead);
autoView( ret , ret_i, AcceleratorWrite);
ret.Checkerboard() = rhs_1.Checkerboard();
accelerator_for(ss,rhs1.size(),obj::Nsimd(),{
coalescedWrite(ret[ss],traceProduct(rhs1(ss),rhs2(ss)));
});
return ret_i;
}
template<class obj1,class obj2> auto traceProduct(const Lattice<obj1> &rhs_1,const obj2 &rhs2)
-> Lattice<decltype(trace(obj1()))>
{
typedef decltype(trace(obj1())) robj;
Lattice<robj> ret_i(rhs_1.Grid());
autoView( rhs1 , rhs_1, AcceleratorRead);
autoView( ret , ret_i, AcceleratorWrite);
ret.Checkerboard() = rhs_1.Checkerboard();
accelerator_for(ss,rhs1.size(),obj1::Nsimd(),{
coalescedWrite(ret[ss],traceProduct(rhs1(ss),rhs2));
});
return ret_i;
}
template<class obj1,class obj2> auto traceProduct(const obj2 &rhs_2,const Lattice<obj1> &rhs_1)
-> Lattice<decltype(trace(obj1()))>
{
return traceProduct(rhs_1,rhs_2);
}
NAMESPACE_END(Grid);
#endif
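A minimal usage sketch for traceProduct (illustrative; assumes a GridCartesian grid and a seeded GridParallelRNG pRNG). The point of the fused form is that trace(A*B) only needs the diagonal of A*B, so the full matrix product is never formed:

  LatticeColourMatrixD A(&grid), B(&grid);
  gaussian(pRNG, A);  gaussian(pRNG, B);
  LatticeComplexD tAB = traceProduct(A, B);   // same value as:
  LatticeComplexD ref = trace(A*B);           // ...but without the intermediate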

View File

@ -62,7 +62,7 @@ void basisRotate(VField &basis,Matrix& Qt,int j0, int j1, int k0,int k1,int Nm)
basis_v.push_back(basis[k].View(AcceleratorWrite));
}
#if ( (!defined(GRID_CUDA)) )
#if ( !(defined(GRID_CUDA) || defined(GRID_HIP) || defined(GRID_SYCL)) )
int max_threads = thread_max();
Vector < vobj > Bt(Nm * max_threads);
thread_region

View File

@ -42,13 +42,13 @@ template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1
}
}
template<class vobj> uint32_t crc(Lattice<vobj> & buf)
template<class vobj> uint32_t crc(const Lattice<vobj> & buf)
{
autoView( buf_v , buf, CpuRead);
return ::crc32(0L,(unsigned char *)&buf_v[0],(size_t)sizeof(vobj)*buf.oSites());
}
#define CRC(U) std::cout << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;
#define CRC(U) std::cerr << "FingerPrint "<<__FILE__ <<" "<< __LINE__ <<" "<< #U <<" "<<crc(U)<<std::endl;
NAMESPACE_END(Grid);

View File

@ -31,6 +31,7 @@ Author: Christoph Lehner <christoph@lhnr.de>
#if defined(GRID_SYCL)
#include <Grid/lattice/Lattice_reduction_sycl.h>
#endif
#include <Grid/lattice/Lattice_slicesum_core.h>
NAMESPACE_BEGIN(Grid);
@ -280,11 +281,29 @@ inline ComplexD rankInnerProduct(const Lattice<vobj> &left,const Lattice<vobj> &
return nrm;
}
template<class vobj>
inline ComplexD innerProduct(const Lattice<vobj> &left,const Lattice<vobj> &right) {
GridBase *grid = left.Grid();
#ifdef GRID_SYCL
uint64_t csum=0;
if ( FlightRecorder::LoggingMode != FlightRecorder::LoggingModeNone)
{
// Hack
// Fast integer xor checksum. Can also be used in comms now.
autoView(l_v,left,AcceleratorRead);
Integer words = left.Grid()->oSites()*sizeof(vobj)/sizeof(uint64_t);
uint64_t *base= (uint64_t *)&l_v[0];
csum=svm_xor(base,words);
}
FlightRecorder::CsumLog(csum);
#endif
ComplexD nrm = rankInnerProduct(left,right);
RealD local = real(nrm);
FlightRecorder::NormLog(real(nrm));
grid->GlobalSum(nrm);
FlightRecorder::ReductionLog(local,real(nrm));
return nrm;
}
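// Hedged reading of the instrumentation above: per call the FlightRecorder
// captures (i) a fast xor checksum of the left operand's raw words,
// (ii) the pre-reduction local norm, and (iii) the value after GlobalSum,
// so a rank that fails to reproduce can be pinpointed by diffing logs
// across runs or ranks.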
@ -448,19 +467,10 @@ template<class vobj> inline void sliceSum(const Lattice<vobj> &Data,std::vector<
int e1= grid->_slice_nblock[orthogdim];
int e2= grid->_slice_block [orthogdim];
int stride=grid->_slice_stride[orthogdim];
// sum over reduced dimension planes, breaking out orthog dir
// Parallel over orthog direction
autoView( Data_v, Data, CpuRead);
thread_for( r,rd, {
int so=r*grid->_ostride[orthogdim]; // base offset for start of plane
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int ss= so+n*stride+b;
lvSum[r]=lvSum[r]+Data_v[ss];
}
}
});
int ostride=grid->_ostride[orthogdim];
//Reduce Data down to lvSum
sliceSumReduction(Data,lvSum,rd, e1,e2,stride,ostride,Nsimd);
// Sum across simd lanes in the plane, breaking out orthog dir.
Coordinate icoor(Nd);
@ -504,6 +514,7 @@ sliceSum(const Lattice<vobj> &Data,int orthogdim)
return result;
}
template<class vobj>
static void sliceInnerProductVector( std::vector<ComplexD> & result, const Lattice<vobj> &lhs,const Lattice<vobj> &rhs,int orthogdim)
{

View File

@ -30,7 +30,7 @@ int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &
cudaGetDevice(&device);
#endif
#ifdef GRID_HIP
hipGetDevice(&device);
auto r=hipGetDevice(&device);
#endif
Iterator warpSize = gpu_props[device].warpSize;

View File

@ -69,29 +69,30 @@ inline typename vobj::scalar_object sum_gpu_large(const vobj *lat, Integer osite
return result;
}
NAMESPACE_END(Grid);
/*
template<class Double> Double svm_reduce(Double *vec,uint64_t L)
template<class Word> Word svm_xor(Word *vec,uint64_t L)
{
Double sumResult; zeroit(sumResult);
Double *d_sum =(Double *)cl::sycl::malloc_shared(sizeof(Double),*theGridAccelerator);
Double identity; zeroit(identity);
Word xorResult; xorResult = 0;
Word *d_sum =(Word *)cl::sycl::malloc_shared(sizeof(Word),*theGridAccelerator);
Word identity; identity=0;
theGridAccelerator->submit([&](cl::sycl::handler &cgh) {
auto Reduction = cl::sycl::reduction(d_sum,identity,std::plus<>());
auto Reduction = cl::sycl::reduction(d_sum,identity,std::bit_xor<>());
cgh.parallel_for(cl::sycl::range<1>{L},
Reduction,
[=] (cl::sycl::id<1> index, auto &sum) {
sum +=vec[index];
sum ^=vec[index];
});
});
theGridAccelerator->wait();
Double ret = d_sum[0];
Word ret = d_sum[0];
free(d_sum,*theGridAccelerator);
std::cout << " svm_reduce finished "<<L<<" sites sum = " << ret <<std::endl;
return ret;
}
NAMESPACE_END(Grid);
/*
template <class vobj>
inline typename vobj::scalar_objectD sumD_gpu_repack(const vobj *lat, Integer osites)
{

View File

@ -152,6 +152,7 @@ public:
#ifdef RNG_FAST_DISCARD
static void Skip(RngEngine &eng,uint64_t site)
{
#if 0
/////////////////////////////////////////////////////////////////////////////////////
// Skip by 2^40 elements between successive lattice sites
// This goes by 10^12.
@ -162,9 +163,9 @@ public:
// tens of seconds per trajectory so this is clean in all reasonable cases,
// and margin of safety is orders of magnitude.
// We could hack Sitmo to skip in the higher order words of state if necessary
//
// Replace with 2^30 ; avoid problem on large volumes
//
/////////////////////////////////////////////////////////////////////////////////////
// uint64_t skip = site+1; // Old init Skipped then drew. Checked compat with faster init
const int shift = 30;
@ -179,6 +180,9 @@ public:
assert((skip >> shift)==site); // check for overflow
eng.discard(skip);
#else
eng.discardhi(site);
#endif
// std::cout << " Engine " <<site << " state " <<eng<<std::endl;
}
#endif
@ -407,7 +411,7 @@ public:
std::cout << GridLogMessage << "Seed SHA256: " << GridChecksum::sha256_string(seeds) << std::endl;
SeedFixedIntegers(seeds);
}
void SeedFixedIntegers(const std::vector<int> &seeds){
void SeedFixedIntegers(const std::vector<int> &seeds, int britney=0){
// Everyone generates the same seed_seq based on input seeds
CartesianCommunicator::BroadcastWorld(0,(void *)&seeds[0],sizeof(int)*seeds.size());
@ -424,7 +428,6 @@ public:
// MT implementation does not implement fast discard even though
// in principle this is possible
////////////////////////////////////////////////
#if 1
thread_for( lidx, _grid->lSites(), {
int gidx;
@ -445,29 +448,12 @@ public:
int l_idx=generator_idx(o_idx,i_idx);
_generators[l_idx] = master_engine;
Skip(_generators[l_idx],gidx); // Skip to next RNG sequence
});
#else
// Everybody loops over global volume.
thread_for( gidx, _grid->_gsites, {
// Where is it?
int rank;
int o_idx;
int i_idx;
Coordinate gcoor;
_grid->GlobalIndexToGlobalCoor(gidx,gcoor);
_grid->GlobalCoorToRankIndex(rank,o_idx,i_idx,gcoor);
// If this is one of mine we take it
if( rank == _grid->ThisRank() ){
int l_idx=generator_idx(o_idx,i_idx);
_generators[l_idx] = master_engine;
if ( britney ) {
Skip(_generators[l_idx],l_idx); // Skip to next RNG sequence
} else {
Skip(_generators[l_idx],gidx); // Skip to next RNG sequence
}
});
#endif
#else
////////////////////////////////////////////////////////////////
// Machine and thread decomposition dependent seeding is efficient

View File

@ -0,0 +1,213 @@
#pragma once
#include <type_traits>
#if defined(GRID_CUDA)
#include <cub/cub.cuh>
#define gpucub cub
#define gpuError_t cudaError_t
#define gpuSuccess cudaSuccess
#elif defined(GRID_HIP)
#include <hipcub/hipcub.hpp>
#define gpucub hipcub
#define gpuError_t hipError_t
#define gpuSuccess hipSuccess
#endif
NAMESPACE_BEGIN(Grid);
#if defined(GRID_CUDA) || defined(GRID_HIP)
template<class vobj> inline void sliceSumReduction_cub_small(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) {
size_t subvol_size = e1*e2;
commVector<vobj> reduction_buffer(rd*subvol_size);
auto rb_p = &reduction_buffer[0];
vobj zero_init;
zeroit(zero_init);
void *temp_storage_array = NULL;
size_t temp_storage_bytes = 0;
vobj *d_out;
int* d_offsets;
std::vector<int> offsets(rd+1,0);
for (int i = 0; i < offsets.size(); i++) {
offsets[i] = i*subvol_size;
}
//Allocate memory for output and offset arrays on device
d_out = static_cast<vobj*>(acceleratorAllocDevice(rd*sizeof(vobj)));
d_offsets = static_cast<int*>(acceleratorAllocDevice((rd+1)*sizeof(int)));
//copy offsets to device
acceleratorCopyToDeviceAsync(&offsets[0],d_offsets,sizeof(int)*(rd+1),computeStream);
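//standard cub idiom: this first call with temp_storage_array==NULL performs no
//reduction, it only writes the required scratch size into temp_storage_bytes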
gpuError_t gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p,d_out, rd, d_offsets, d_offsets+1, ::gpucub::Sum(), zero_init, computeStream);
if (gpuErr!=gpuSuccess) {
std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce (setup)! Error: " << gpuErr <<std::endl;
exit(EXIT_FAILURE);
}
//allocate memory for temp_storage_array
temp_storage_array = acceleratorAllocDevice(temp_storage_bytes);
//prepare buffer for reduction
//use non-blocking accelerator_for to avoid syncs (ok because we submit to same computeStream)
//use 2d accelerator_for to avoid launch latencies found when serially looping over rd
accelerator_for2dNB( s,subvol_size, r,rd, Nsimd,{
int n = s / e2;
int b = s % e2;
int so=r*ostride; // base offset for start of plane
int ss= so+n*stride+b;
coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data[ss]));
});
//issue segmented reductions in computeStream
gpuErr = gpucub::DeviceSegmentedReduce::Reduce(temp_storage_array, temp_storage_bytes, rb_p, d_out, rd, d_offsets, d_offsets+1,::gpucub::Sum(), zero_init, computeStream);
if (gpuErr!=gpuSuccess) {
std::cout << GridLogError << "Lattice_slicesum_gpu.h: Encountered error during gpucub::DeviceSegmentedReduce::Reduce! Error: " << gpuErr <<std::endl;
exit(EXIT_FAILURE);
}
acceleratorCopyFromDeviceAsync(d_out,&lvSum[0],rd*sizeof(vobj),computeStream);
//sync after copy
accelerator_barrier();
acceleratorFreeDevice(temp_storage_array);
acceleratorFreeDevice(d_out);
acceleratorFreeDevice(d_offsets);
}
template<class vobj> inline void sliceSumReduction_cub_large(const vobj *Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd) {
typedef typename vobj::vector_type vector;
const int words = sizeof(vobj)/sizeof(vector);
const int osites = rd*e1*e2;
commVector<vector>buffer(osites);
vector *dat = (vector *)Data;
vector *buf = &buffer[0];
Vector<vector> lvSum_small(rd);
vector *lvSum_ptr = (vector *)&lvSum[0];
for (int w = 0; w < words; w++) {
accelerator_for(ss,osites,1,{
buf[ss] = dat[ss*words+w];
});
sliceSumReduction_cub_small(buf,lvSum_small,rd,e1,e2,stride, ostride,Nsimd);
for (int r = 0; r < rd; r++) {
lvSum_ptr[w+words*r]=lvSum_small[r];
}
}
}
template<class vobj> inline void sliceSumReduction_cub(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int rd, const int e1, const int e2, const int stride, const int ostride, const int Nsimd)
{
autoView(Data_v, Data, AcceleratorRead); //hipcub/cub cannot deal with large vobjs so we split into small/large case.
if constexpr (sizeof(vobj) <= 256) {
sliceSumReduction_cub_small(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
}
else {
sliceSumReduction_cub_large(&Data_v[0], lvSum, rd, e1, e2, stride, ostride, Nsimd);
}
}
#endif
#if defined(GRID_SYCL)
template<class vobj> inline void sliceSumReduction_sycl(const Lattice<vobj> &Data, Vector <vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
typedef typename vobj::scalar_object sobj;
size_t subvol_size = e1*e2;
vobj *mysum = (vobj *) malloc_shared(sizeof(vobj),*theGridAccelerator);
vobj vobj_zero;
zeroit(vobj_zero);
commVector<vobj> reduction_buffer(rd*subvol_size);
auto rb_p = &reduction_buffer[0];
autoView(Data_v, Data, AcceleratorRead);
//prepare reduction buffer
accelerator_for2d( s,subvol_size, r,rd, (size_t)Nsimd,{
int n = s / e2;
int b = s % e2;
int so=r*ostride; // base offset for start of plane
int ss= so+n*stride+b;
coalescedWrite(rb_p[r*subvol_size+s], coalescedRead(Data_v[ss]));
});
for (int r = 0; r < rd; r++) {
mysum[0] = vobj_zero; //dirty hack: cannot pass vobj_zero as identity to sycl::reduction as it's not device_copyable
theGridAccelerator->submit([&](cl::sycl::handler &cgh) {
auto Reduction = cl::sycl::reduction(mysum,std::plus<>());
cgh.parallel_for(cl::sycl::range<1>{subvol_size},
Reduction,
[=](cl::sycl::id<1> item, auto &sum) {
auto s = item[0];
sum += rb_p[r*subvol_size+s];
});
});
theGridAccelerator->wait();
lvSum[r] = mysum[0];
}
free(mysum,*theGridAccelerator);
}
#endif
template<class vobj> inline void sliceSumReduction_cpu(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
// sum over reduced dimension planes, breaking out orthog dir
// Parallel over orthog direction
autoView( Data_v, Data, CpuRead);
thread_for( r,rd, {
int so=r*ostride; // base offset for start of plane
for(int n=0;n<e1;n++){
for(int b=0;b<e2;b++){
int ss= so+n*stride+b;
lvSum[r]=lvSum[r]+Data_v[ss];
}
}
});
}
template<class vobj> inline void sliceSumReduction(const Lattice<vobj> &Data, Vector<vobj> &lvSum, const int &rd, const int &e1, const int &e2, const int &stride, const int &ostride, const int &Nsimd)
{
#if defined(GRID_CUDA) || defined(GRID_HIP)
sliceSumReduction_cub(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#elif defined(GRID_SYCL)
sliceSumReduction_sycl(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#else
sliceSumReduction_cpu(Data, lvSum, rd, e1, e2, stride, ostride, Nsimd);
#endif
}
NAMESPACE_END(Grid);
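The dispatcher above is internal; user code keeps calling sliceSum exactly as before. A minimal sketch (assuming a GridCartesian grid and Grid's Tdir/TComplex conventions):

  LatticeComplex f(&grid);
  // ... fill f ...
  std::vector<TComplex> sums;
  sliceSum(f, sums, Tdir);   // sums[t] = sum of f over the t-th time slice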

View File

@ -66,6 +66,65 @@ inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<
return ret;
};
template<int N, class Vec>
Lattice<iScalar<iScalar<iScalar<Vec> > > > Determinant(const Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iScalar<Vec> > > > ret(grid);
typedef typename Vec::scalar_type scalar;
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<scalar, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
scalar tmp= Us()()(i,j);
ComplexD ztmp(real(tmp),imag(tmp));
EigenU(i,j)=ztmp;
}}
ComplexD detD = EigenU.determinant();
typename Vec::scalar_type det(detD.real(),detD.imag());
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
template<int N>
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > Inverse(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
iScalar<iScalar<iMatrix<ComplexD, N> > > Ui;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
Eigen::MatrixXcd EigenUinv = EigenU.inverse();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
Ui()()(i,j) = EigenUinv(i,j);
}}
pokeLocalSite(Ui,ret_v,lcoor);
});
return ret;
}
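Both helpers run per-site Eigen on host views (CpuRead/CpuWrite), so they suit setup and analysis rather than hot loops. A usage sketch (illustrative; note Inverse is only provided for vComplexD):

  LatticeColourMatrixD U(&grid);
  // ... fill U ...
  auto detU = Determinant(U);   // lattice of per-site determinants
  auto Uinv = Inverse(U);       // per-site matrix inverse via Eigen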
NAMESPACE_END(Grid);
#endif

View File

@ -469,15 +469,13 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
Coordinate fine_rdimensions = fine->_rdimensions;
Coordinate coarse_rdimensions = coarse->_rdimensions;
vobj zz = Zero();
accelerator_for(sc,coarse->oSites(),1,{
// One thread per sub block
Coordinate coor_c(_ndimension);
Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions); // Block coordinate
vobj cd = zz;
vobj cd = Zero();
for(int sb=0;sb<blockVol;sb++){
@ -697,8 +695,68 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
for(int d=0;d<nd;d++){
assert(Fg->_processors[d] == Tg->_processors[d]);
}
// the above should guarantee that the operations are local
#if 1
size_t nsite = 1;
for(int i=0;i<nd;i++) nsite *= RegionSize[i];
size_t tbytes = 4*nsite*sizeof(int);
int *table = (int*)malloc(tbytes);
thread_for(idx, nsite, {
Coordinate from_coor, to_coor;
size_t rem = idx;
for(int i=0;i<nd;i++){
size_t base_i = rem % RegionSize[i]; rem /= RegionSize[i];
from_coor[i] = base_i + FromLowerLeft[i];
to_coor[i] = base_i + ToLowerLeft[i];
}
int foidx = Fg->oIndex(from_coor);
int fiidx = Fg->iIndex(from_coor);
int toidx = Tg->oIndex(to_coor);
int tiidx = Tg->iIndex(to_coor);
int* tt = table + 4*idx;
tt[0] = foidx;
tt[1] = fiidx;
tt[2] = toidx;
tt[3] = tiidx;
});
int* table_d = (int*)acceleratorAllocDevice(tbytes);
acceleratorCopyToDevice(table,table_d,tbytes);
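// table layout, 4 ints per site: {from_oidx, from_lane, to_oidx, to_lane};
// the device loop below moves one vector_type word at a time between SIMD
// lanes with getlane/putlane, avoiding host-side peekLocalSite/pokeLocalSite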
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
autoView(from_v,From,AcceleratorRead);
autoView(to_v,To,AcceleratorWrite);
accelerator_for(idx,nsite,1,{
static const int words=sizeof(vobj)/sizeof(vector_type);
int* tt = table_d + 4*idx;
int from_oidx = *tt++;
int from_lane = *tt++;
int to_oidx = *tt++;
int to_lane = *tt;
const vector_type* from = (const vector_type *)&from_v[from_oidx];
vector_type* to = (vector_type *)&to_v[to_oidx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], from_lane);
putlane(to[w], stmp, to_lane);
}
});
acceleratorFreeDevice(table_d);
free(table);
#else
Coordinate ldf = Fg->_ldimensions;
Coordinate rdf = Fg->_rdimensions;
Coordinate isf = Fg->_istride;
@ -738,6 +796,8 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
#endif
}
});
#endif
}
@ -830,6 +890,8 @@ void ExtractSlice(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slic
}
//Insert subvolume orthogonal to direction 'orthog' with slice index 'slice_lo' from 'lowDim' onto slice index 'slice_hi' of higherDim
//The local dimensions of both 'lowDim' and 'higherDim' orthogonal to 'orthog' should be the same
template<class vobj>
void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
{
@ -851,6 +913,65 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int
}
}
#if 1
size_t nsite = lg->lSites()/lg->LocalDimensions()[orthog];
size_t tbytes = 4*nsite*sizeof(int);
int *table = (int*)malloc(tbytes);
thread_for(idx,nsite,{
Coordinate lcoor(nl);
Coordinate hcoor(nh);
lcoor[orthog] = slice_lo;
hcoor[orthog] = slice_hi;
size_t rem = idx;
for(int mu=0;mu<nl;mu++){
if(mu != orthog){
int xmu = rem % lg->LocalDimensions()[mu]; rem /= lg->LocalDimensions()[mu];
lcoor[mu] = hcoor[mu] = xmu;
}
}
int loidx = lg->oIndex(lcoor);
int liidx = lg->iIndex(lcoor);
int hoidx = hg->oIndex(hcoor);
int hiidx = hg->iIndex(hcoor);
int* tt = table + 4*idx;
tt[0] = loidx;
tt[1] = liidx;
tt[2] = hoidx;
tt[3] = hiidx;
});
int* table_d = (int*)acceleratorAllocDevice(tbytes);
acceleratorCopyToDevice(table,table_d,tbytes);
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
autoView(lowDim_v,lowDim,AcceleratorRead);
autoView(higherDim_v,higherDim,AcceleratorWrite);
accelerator_for(idx,nsite,1,{
static const int words=sizeof(vobj)/sizeof(vector_type);
int* tt = table_d + 4*idx;
int from_oidx = *tt++;
int from_lane = *tt++;
int to_oidx = *tt++;
int to_lane = *tt;
const vector_type* from = (const vector_type *)&lowDim_v[from_oidx];
vector_type* to = (vector_type *)&higherDim_v[to_oidx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], from_lane);
putlane(to[w], stmp, to_lane);
}
});
acceleratorFreeDevice(table_d);
free(table);
#else
// the above should guarantee that the operations are local
autoView(lowDimv,lowDim,CpuRead);
autoView(higherDimv,higherDim,CpuWrite);
@ -866,6 +987,7 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int
pokeLocalSite(s,higherDimv,hcoor);
}
});
#endif
}

View File

@ -45,6 +45,7 @@ public:
};
// Host only
GridBase * getGrid(void) const { return _grid; };
vobj* getHostPointer(void) const { return _odata; };
};
/////////////////////////////////////////////////////////////////////////////////////////

View File

@ -26,14 +26,32 @@ Author: Peter Boyle pboyle@bnl.gov
/* END LEGAL */
#pragma once
#include<Grid/cshift/Cshift.h>
NAMESPACE_BEGIN(Grid);
//Allow the user to specify how the C-shift is performed, e.g. to respect the appropriate boundary conditions
template<typename vobj>
struct CshiftImplBase{
virtual Lattice<vobj> Cshift(const Lattice<vobj> &in, int dir, int shift) const = 0;
virtual ~CshiftImplBase(){}
};
template<typename vobj>
struct CshiftImplDefault: public CshiftImplBase<vobj>{
Lattice<vobj> Cshift(const Lattice<vobj> &in, int dir, int shift) const override{ return Grid::Cshift(in,dir,shift); }
};
template<typename Gimpl>
struct CshiftImplGauge: public CshiftImplBase<typename Gimpl::GaugeLinkField::vector_object>{
typename Gimpl::GaugeLinkField Cshift(const typename Gimpl::GaugeLinkField &in, int dir, int shift) const override{ return Gimpl::CshiftLink(in,dir,shift); }
};
class PaddedCell {
public:
GridCartesian * unpadded_grid;
int dims;
int depth;
std::vector<GridCartesian *> grids;
~PaddedCell()
{
DeleteGrids();
@ -77,7 +95,7 @@ public:
}
};
template<class vobj>
inline Lattice<vobj> Extract(Lattice<vobj> &in)
inline Lattice<vobj> Extract(const Lattice<vobj> &in) const
{
Lattice<vobj> out(unpadded_grid);
@ -88,19 +106,19 @@ public:
return out;
}
template<class vobj>
inline Lattice<vobj> Exchange(Lattice<vobj> &in)
inline Lattice<vobj> Exchange(const Lattice<vobj> &in, const CshiftImplBase<vobj> &cshift = CshiftImplDefault<vobj>()) const
{
GridBase *old_grid = in.Grid();
int dims = old_grid->Nd();
Lattice<vobj> tmp = in;
for(int d=0;d<dims;d++){
tmp = Expand(d,tmp); // rvalue && assignment
tmp = Expand(d,tmp,cshift); // rvalue && assignment
}
return tmp;
}
// expand up one dim at a time
template<class vobj>
inline Lattice<vobj> Expand(int dim,Lattice<vobj> &in)
inline Lattice<vobj> Expand(int dim, const Lattice<vobj> &in, const CshiftImplBase<vobj> &cshift = CshiftImplDefault<vobj>()) const
{
GridBase *old_grid = in.Grid();
GridCartesian *new_grid = grids[dim];//These are new grids
@ -112,20 +130,40 @@ public:
else conformable(old_grid,grids[dim-1]);
std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl;
double tins=0, tshift=0;
// Middle bit
double t = usecond();
for(int x=0;x<local[dim];x++){
InsertSliceLocal(in,padded,x,depth+x,dim);
}
tins += usecond() - t;
// High bit
shifted = Cshift(in,dim,depth);
t = usecond();
shifted = cshift.Cshift(in,dim,depth);
tshift += usecond() - t;
t=usecond();
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim);
}
tins += usecond() - t;
// Low bit
shifted = Cshift(in,dim,-depth);
t = usecond();
shifted = cshift.Cshift(in,dim,-depth);
tshift += usecond() - t;
t = usecond();
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,x,x,dim);
}
tins += usecond() - t;
std::cout << GridLogPerformance << "PaddedCell::Expand timings: cshift:" << tshift/1000 << "ms, insert-slice:" << tins/1000 << "ms" << std::endl;
return padded;
}
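A hypothetical end-to-end use of the new policy hook (the PaddedCell constructor signature (depth, grid) is assumed): passing CshiftImplGauge makes links that wrap the lattice boundary pick up the gauge implementation's boundary conditions instead of the plain Cshift.

  template<class Gimpl>
  void examplePaddedExchange(typename Gimpl::GaugeLinkField &U, GridCartesian *grid)
  {
    PaddedCell cell(1, grid);                    // depth-1 halo (ctor assumed)
    CshiftImplGauge<Gimpl> gauge_cshift;         // BC-respecting shift policy
    auto Upad  = cell.Exchange(U, gauge_cshift); // pads every dimension
    auto Uback = cell.Extract(Upad);             // round-trips to the unpadded grid
  }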

View File

@ -179,11 +179,11 @@ extern GridLogger GridLogSolver;
extern GridLogger GridLogError;
extern GridLogger GridLogWarning;
extern GridLogger GridLogMessage;
extern GridLogger GridLogDebug ;
extern GridLogger GridLogDebug;
extern GridLogger GridLogPerformance;
extern GridLogger GridLogDslash;
extern GridLogger GridLogIterative ;
extern GridLogger GridLogIntegrator ;
extern GridLogger GridLogIterative;
extern GridLogger GridLogIntegrator;
extern GridLogger GridLogHMC;
extern GridLogger GridLogMemory;
extern GridLogger GridLogTracing;
@ -191,6 +191,41 @@ extern Colours GridLogColours;
std::string demangle(const char* name) ;
template<typename... Args>
inline std::string sjoin(Args&&... args) noexcept {
std::ostringstream msg;
(msg << ... << args);
return msg.str();
}
/*! @brief make log messages work like python print */
template <typename... Args>
inline void Grid_log(Args&&... args) {
std::string msg = sjoin(std::forward<Args>(args)...);
std::cout << GridLogMessage << msg << std::endl;
}
/*! @brief make warning messages work like python print */
template <typename... Args>
inline void Grid_warn(Args&&... args) {
std::string msg = sjoin(std::forward<Args>(args)...);
std::cout << "\033[33m" << GridLogWarning << msg << "\033[0m" << std::endl;
}
/*! @brief make error messages work like python print */
template <typename... Args>
inline void Grid_error(Args&&... args) {
std::string msg = sjoin(std::forward<Args>(args)...);
std::cout << "\033[31m" << GridLogError << msg << "\033[0m" << std::endl;
}
/*! @brief make pass messages work like python print */
template <typename... Args>
inline void Grid_pass(Args&&... args) {
std::string msg = sjoin(std::forward<Args>(args)...);
std::cout << "\033[32m" << GridLogMessage << msg << "\033[0m" << std::endl;
}
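Hypothetical calls showing the intended variadic, python-print style of the helpers above (values invented):

//   Grid_log ("CG converged in ", iter, " iterations");
//   Grid_warn("residual ", resid, " above target");   // printed in yellow
//   Grid_error("assertion failed on rank ", rank);    // printed in red
//   Grid_pass ("plaquette check passed");             // printed in green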
#define _NBACKTRACE (256)
extern void * Grid_backtrace_buffer[_NBACKTRACE];

View File

@ -34,7 +34,7 @@ class GridTracer {
};
inline void tracePush(const char *name) { roctxRangePushA(name); }
inline void tracePop(const char *name) { roctxRangePop(); }
inline int traceStart(const char *name) { roctxRangeStart(name); }
inline int traceStart(const char *name) { return roctxRangeStart(name); }
inline void traceStop(int ID) { roctxRangeStop(ID); }
#endif

View File

@ -129,6 +129,22 @@ public:
virtual ~Action(){}
};
template <class GaugeField >
class EmptyAction : public Action <GaugeField>
{
virtual void refresh(const GaugeField& U, GridSerialRNG &sRNG, GridParallelRNG& pRNG) { assert(0);}; // refresh pseudofermions
virtual RealD S(const GaugeField& U) { return 0.0;}; // evaluate the action
virtual void deriv(const GaugeField& U, GaugeField& dSdU) { assert(0); }; // evaluate the action derivative
///////////////////////////////
// Logging
///////////////////////////////
virtual std::string action_name() { return std::string("Level Force Log"); };
virtual std::string LogParameters() { return std::string("No parameters");};
};
NAMESPACE_END(Grid);
#endif // ACTION_BASE_H

View File

@ -126,6 +126,16 @@ typedef WilsonFermion<WilsonTwoIndexSymmetricImplD> WilsonTwoIndexSymmetricFermi
typedef WilsonFermion<WilsonTwoIndexAntiSymmetricImplF> WilsonTwoIndexAntiSymmetricFermionF;
typedef WilsonFermion<WilsonTwoIndexAntiSymmetricImplD> WilsonTwoIndexAntiSymmetricFermionD;
// Sp(2n)
typedef WilsonFermion<SpWilsonImplF> SpWilsonFermionF;
typedef WilsonFermion<SpWilsonImplD> SpWilsonFermionD;
typedef WilsonFermion<SpWilsonTwoIndexAntiSymmetricImplF> SpWilsonTwoIndexAntiSymmetricFermionF;
typedef WilsonFermion<SpWilsonTwoIndexAntiSymmetricImplD> SpWilsonTwoIndexAntiSymmetricFermionD;
typedef WilsonFermion<SpWilsonTwoIndexSymmetricImplF> SpWilsonTwoIndexSymmetricFermionF;
typedef WilsonFermion<SpWilsonTwoIndexSymmetricImplD> SpWilsonTwoIndexSymmetricFermionD;
// Twisted mass fermion
typedef WilsonTMFermion<WilsonImplD2> WilsonTMFermionD2;
typedef WilsonTMFermion<WilsonImplF> WilsonTMFermionF;

View File

@ -261,6 +261,22 @@ typedef WilsonImpl<vComplex, TwoIndexAntiSymmetricRepresentation, CoeffReal > W
typedef WilsonImpl<vComplexF, TwoIndexAntiSymmetricRepresentation, CoeffReal > WilsonTwoIndexAntiSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, TwoIndexAntiSymmetricRepresentation, CoeffReal > WilsonTwoIndexAntiSymmetricImplD; // Double
//sp 2n
typedef WilsonImpl<vComplex, SpFundamentalRepresentation, CoeffReal > SpWilsonImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpFundamentalRepresentation, CoeffReal > SpWilsonImplF; // Float
typedef WilsonImpl<vComplexD, SpFundamentalRepresentation, CoeffReal > SpWilsonImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplR; // Real.. whichever prec // adj = 2indx symmetric for Sp(2N)
typedef WilsonImpl<vComplexF, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplF; // Float // adj = 2indx symmetric for Sp(2N)
typedef WilsonImpl<vComplexD, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplD; // Double // adj = 2indx symmetric for Sp(2N)
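// Why the adjoint reuses the two-index symmetric implementation for Sp(2N):
// the dimensions coincide,
//   dim adj Sp(2N) = N(2N+1) = 2N(2N+1)/2 = dim(two-index symmetric),
// e.g. Sp(4) (N = 2) gives 10 in both cases.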
NAMESPACE_END(Grid);

View File

@ -63,7 +63,9 @@ public:
virtual void MooeeDag(const FermionField &in, FermionField &out) ;
virtual void MooeeInv(const FermionField &in, FermionField &out) ;
virtual void MooeeInvDag(const FermionField &in, FermionField &out) ;
virtual void M(const FermionField &in, FermionField &out) ;
virtual void Mdag(const FermionField &in, FermionField &out) ;
private:
RealD mu; // TwistedMass parameter

View File

@ -280,20 +280,16 @@ void StaggeredKernels<Impl>::DhopImproved(StencilImpl &st, LebesgueOrder &lo,
if( interior && exterior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGeneric,1); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHand,1); return;}
#ifndef GRID_CUDA
if (Opt == OptInlineAsm ) { ASM_CALL(DhopSiteAsm); return;}
#endif
} else if( interior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGenericInt,1); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHandInt,1); return;}
#endif
} else if( exterior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGenericExt,1); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHandExt,1); return;}
#endif
}
assert(0 && " Kernel optimisation case not covered ");
}
@ -322,19 +318,13 @@ void StaggeredKernels<Impl>::DhopNaive(StencilImpl &st, LebesgueOrder &lo,
if( interior && exterior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGeneric,0); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHand,0); return;}
#endif
} else if( interior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGenericInt,0); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHandInt,0); return;}
#endif
} else if( exterior ) {
if (Opt == OptGeneric ) { KERNEL_CALL(DhopSiteGenericExt,0); return;}
#ifndef GRID_CUDA
if (Opt == OptHandUnroll ) { KERNEL_CALL(DhopSiteHandExt,0); return;}
#endif
}
}

View File

@ -462,6 +462,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
autoView(st_v , st,AcceleratorRead);
if( interior && exterior ) {
acceleratorFenceComputeStream();
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSite); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSite); return;}
#ifndef GRID_CUDA
@ -495,6 +496,7 @@ void WilsonKernels<Impl>::DhopKernel(int Opt,StencilImpl &st, DoubledGaugeField
autoView(st_v ,st,AcceleratorRead);
if( interior && exterior ) {
acceleratorFenceComputeStream();
if (Opt == WilsonKernelsStatic::OptGeneric ) { KERNEL_CALL(GenericDhopSiteDag); return;}
if (Opt == WilsonKernelsStatic::OptHandUnroll ) { KERNEL_CALL(HandDhopSiteDag); return;}
#ifndef GRID_CUDA

View File

@ -93,5 +93,25 @@ void WilsonTMFermion<Impl>::MooeeInvDag(const FermionField &in, FermionField &ou
RealD b = tm /sq;
axpibg5x(out,in,a,b);
}
template<class Impl>
void WilsonTMFermion<Impl>::M(const FermionField &in, FermionField &out) {
out.Checkerboard() = in.Checkerboard();
this->Dhop(in, out, DaggerNo);
FermionField tmp(out.Grid());
RealD a = 4.0+this->mass;
RealD b = this->mu;
axpibg5x(tmp,in,a,b);
axpy(out, 1.0, tmp, out);
}
template<class Impl>
void WilsonTMFermion<Impl>::Mdag(const FermionField &in, FermionField &out) {
out.Checkerboard() = in.Checkerboard();
this->Dhop(in, out, DaggerYes);
FermionField tmp(out.Grid());
RealD a = 4.0+this->mass;
RealD b = -this->mu;
axpibg5x(tmp,in,a,b);
axpy(out, 1.0, tmp, out);
}
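// In formulas, with axpibg5x(out, in, a, b) computing out = a*in + i*b*g5*in,
// the routines above implement
//   M        psi = Dhop psi        + (4+m) psi + i mu g5 psi ,
//   M^dagger psi = Dhop^dagger psi + (4+m) psi - i mu g5 psi ,
// the sign flip on mu being the only difference between M and Mdag.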
NAMESPACE_END(Grid);

View File

@ -0,0 +1 @@
../WilsonCloverFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonKernelsInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonTMFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonImplD

View File

@ -0,0 +1 @@
../WilsonCloverFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonKernelsInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonTMFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonImplF

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexAntiSymmetricImplD

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexAntiSymmetricImplF

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexSymmetricImplD

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexSymmetricImplF

View File

@ -10,12 +10,18 @@ WILSON_IMPL_LIST=" \
WilsonImplF \
WilsonImplD \
WilsonImplD2 \
SpWilsonImplF \
SpWilsonImplD \
WilsonAdjImplF \
WilsonAdjImplD \
WilsonTwoIndexSymmetricImplF \
WilsonTwoIndexSymmetricImplD \
WilsonTwoIndexAntiSymmetricImplF \
WilsonTwoIndexAntiSymmetricImplD \
SpWilsonTwoIndexAntiSymmetricImplF \
SpWilsonTwoIndexAntiSymmetricImplD \
SpWilsonTwoIndexSymmetricImplF \
SpWilsonTwoIndexSymmetricImplD \
GparityWilsonImplF \
GparityWilsonImplD "

View File

@ -39,6 +39,9 @@ NAMESPACE_BEGIN(Grid);
typedef WilsonGaugeAction<PeriodicGimplR> WilsonGaugeActionR;
typedef WilsonGaugeAction<PeriodicGimplF> WilsonGaugeActionF;
typedef WilsonGaugeAction<PeriodicGimplD> WilsonGaugeActionD;
typedef WilsonGaugeAction<SpPeriodicGimplR> SpWilsonGaugeActionR;
typedef WilsonGaugeAction<SpPeriodicGimplF> SpWilsonGaugeActionF;
typedef WilsonGaugeAction<SpPeriodicGimplD> SpWilsonGaugeActionD;
typedef PlaqPlusRectangleAction<PeriodicGimplR> PlaqPlusRectangleActionR;
typedef PlaqPlusRectangleAction<PeriodicGimplF> PlaqPlusRectangleActionF;
typedef PlaqPlusRectangleAction<PeriodicGimplD> PlaqPlusRectangleActionD;

View File

@ -61,7 +61,7 @@ NAMESPACE_BEGIN(Grid);
typedef typename Impl::Field Field;
// hardcodes the exponential approximation in the template
template <class S, int Nrepresentation = Nc, int Nexp = 12 > class GaugeImplTypes {
template <class S, int Nrepresentation = Nc, int Nexp = 12, class Group = SU<Nc> > class GaugeImplTypes {
public:
typedef S Simd;
typedef typename Simd::scalar_type scalar_type;
@ -78,8 +78,6 @@ public:
typedef Lattice<SiteLink> LinkField;
typedef Lattice<SiteField> Field;
typedef SU<Nrepresentation> Group;
// Guido: we could probably separate the types from the HMC functions;
// this would create two kinds of implementations,
// probably confusing the users.
@ -119,6 +117,7 @@ public:
//
LinkField Pmu(P.Grid());
Pmu = Zero();
for (int mu = 0; mu < Nd; mu++) {
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
RealD scale = ::sqrt(HMC_MOMENTUM_DENOMINATOR) ;
@ -126,8 +125,12 @@ public:
PokeIndex<LorentzIndex>(P, Pmu, mu);
}
}
static inline Field projectForce(Field &P) { return Ta(P); }
static inline Field projectForce(Field &P) {
Field ret(P.Grid());
Group::taProj(P, ret);
return ret;
}
static inline void update_field(Field& P, Field& U, double ep){
//static std::chrono::duration<double> diff;
@ -137,14 +140,15 @@ public:
autoView(P_v,P,AcceleratorRead);
accelerator_for(ss, P.Grid()->oSites(),1,{
for (int mu = 0; mu < Nd; mu++) {
U_v[ss](mu) = ProjectOnGroup(Exponentiate(P_v[ss](mu), ep, Nexp) * U_v[ss](mu));
U_v[ss](mu) = Exponentiate(P_v[ss](mu), ep, Nexp) * U_v[ss](mu);
U_v[ss](mu) = Group::ProjectOnGeneralGroup(U_v[ss](mu));
}
});
//auto end = std::chrono::high_resolution_clock::now();
// diff += end - start;
// std::cout << "Time to exponentiate matrix " << diff.count() << " s\n";
}
static inline RealD FieldSquareNorm(Field& U){
LatticeComplex Hloc(U.Grid());
Hloc = Zero();
@ -157,7 +161,7 @@ public:
}
static inline void Project(Field &U) {
ProjectSUn(U);
Group::ProjectOnSpecialGroup(U);
}
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
@ -171,6 +175,7 @@ public:
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::ColdConfiguration(pRNG, U);
}
};
@ -178,10 +183,17 @@ typedef GaugeImplTypes<vComplex, Nc> GimplTypesR;
typedef GaugeImplTypes<vComplexF, Nc> GimplTypesF;
typedef GaugeImplTypes<vComplexD, Nc> GimplTypesD;
typedef GaugeImplTypes<vComplex, Nc, 12, Sp<Nc> > SpGimplTypesR;
typedef GaugeImplTypes<vComplexF, Nc, 12, Sp<Nc> > SpGimplTypesF;
typedef GaugeImplTypes<vComplexD, Nc, 12, Sp<Nc> > SpGimplTypesD;
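// Threading Group through GaugeImplTypes means momentum refresh, force
// projection and reunitarisation all dispatch to the chosen group. A minimal
// sketch (generate_momenta signature assumed as in the SU(N) path):
//   SpGimplTypesD::Field U(grid), P(grid);
//   SpGimplTypesD::generate_momenta(P, sRNG, pRNG);  // gaussian sp(2N) momenta
//   SpGimplTypesD::Field F = SpGimplTypesD::projectForce(P); // Sp<Nc>::taProj
//   SpGimplTypesD::Project(U);                // Sp<Nc>::ProjectOnSpecialGroup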
typedef GaugeImplTypes<vComplex, SU<Nc>::AdjointDimension> GimplAdjointTypesR;
typedef GaugeImplTypes<vComplexF, SU<Nc>::AdjointDimension> GimplAdjointTypesF;
typedef GaugeImplTypes<vComplexD, SU<Nc>::AdjointDimension> GimplAdjointTypesD;
NAMESPACE_END(Grid);
#endif // GRID_GAUGE_IMPL_TYPES_H

View File

@ -176,7 +176,7 @@ public:
return PeriodicBC::CshiftLink(Link,mu,shift);
}
static inline void setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline void setDirections(const std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline std::vector<int> getDirections(void) { return _conjDirs; }
static inline bool isPeriodicGaugeField(void) { return false; }
};
@ -193,6 +193,11 @@ typedef ConjugateGaugeImpl<GimplTypesR> ConjugateGimplR; // Real.. whichever pre
typedef ConjugateGaugeImpl<GimplTypesF> ConjugateGimplF; // Float
typedef ConjugateGaugeImpl<GimplTypesD> ConjugateGimplD; // Double
typedef PeriodicGaugeImpl<SpGimplTypesR> SpPeriodicGimplR; // Real.. whichever prec
typedef PeriodicGaugeImpl<SpGimplTypesF> SpPeriodicGimplF; // Float
typedef PeriodicGaugeImpl<SpGimplTypesD> SpPeriodicGimplD; // Double
NAMESPACE_END(Grid);
#endif

View File

@ -43,7 +43,7 @@ public:
private:
RealD c_plaq;
RealD c_rect;
typename WilsonLoops<Gimpl>::StapleAndRectStapleAllWorkspace workspace;
public:
PlaqPlusRectangleAction(RealD b,RealD c): c_plaq(b),c_rect(c){};
@ -79,27 +79,18 @@ public:
GridBase *grid = Umu.Grid();
std::vector<GaugeLinkField> U (Nd,grid);
std::vector<GaugeLinkField> U2(Nd,grid);
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
WilsonLoops<Gimpl>::RectStapleDouble(U2[mu],U[mu],mu);
}
std::vector<GaugeLinkField> RectStaple(Nd,grid), Staple(Nd,grid);
WilsonLoops<Gimpl>::StapleAndRectStapleAll(Staple, RectStaple, U, workspace);
GaugeLinkField dSdU_mu(grid);
GaugeLinkField staple(grid);
for (int mu=0; mu < Nd; mu++){
// Staple in direction mu
WilsonLoops<Gimpl>::Staple(staple,Umu,mu);
dSdU_mu = Ta(U[mu]*staple)*factor_p;
WilsonLoops<Gimpl>::RectStaple(Umu,staple,U2,U,mu);
dSdU_mu = dSdU_mu + Ta(U[mu]*staple)*factor_r;
dSdU_mu = Ta(U[mu]*Staple[mu])*factor_p;
dSdU_mu = dSdU_mu + Ta(U[mu]*RectStaple[mu])*factor_r;
PokeIndex<LorentzIndex>(dSdU, dSdU_mu, mu);
}

View File

@ -225,6 +225,18 @@ template <class RepresentationsPolicy,
using GenericHMCRunnerHirep =
HMCWrapperTemplate<PeriodicGimplR, Integrator, RepresentationsPolicy>;
// sp2n
template <template <typename, typename, typename> class Integrator>
using GenericSpHMCRunner = HMCWrapperTemplate<SpPeriodicGimplR, Integrator>;
template <class RepresentationsPolicy,
template <typename, typename, typename> class Integrator>
using GenericSpHMCRunnerHirep =
HMCWrapperTemplate<SpPeriodicGimplR, Integrator, RepresentationsPolicy>;
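// Usage sketch, mirroring how the SU(N) runners are driven (integrator choice
// illustrative):
//   typedef GenericSpHMCRunner<MinimumNorm2> HMCWrapper; // Sp(2N) fundamental
//   HMCWrapper TheHMC;
//   // resources, action set and TheHMC.Run() follow the usual pattern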
template <class Implementation, class RepresentationsPolicy,
template <typename, typename, typename> class Integrator>
using GenericHMCRunnerTemplate = HMCWrapperTemplate<Implementation, Integrator, RepresentationsPolicy>;

View File

@ -87,6 +87,8 @@ public:
const ActionSet<Field, RepresentationPolicy> as;
ActionSet<Field,RepresentationPolicy> LevelForces;
//Get a pointer to a shared static instance of the "do-nothing" momentum filter to serve as a default
static MomentumFilterBase<MomentaField> const* getDefaultMomFilter(){
static MomentumFilterNone<MomentaField> filter;
@ -124,6 +126,9 @@ public:
// input U actually not used in the fundamental case
// Fundamental updates, include smearing
assert(as.size()==LevelForces.size());
Field level_force(U.Grid()); level_force =Zero();
for (int a = 0; a < as[level].actions.size(); ++a) {
double start_full = usecond();
@ -144,7 +149,10 @@ public:
MomFilter->applyFilter(force);
std::cout << GridLogIntegrator << " update_P : Level [" << level <<"]["<<a <<"] "<<name<<" dt "<<ep<< std::endl;
// track the total
level_force = level_force+force;
Real force_abs = std::sqrt(norm2(force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
@ -167,6 +175,16 @@ public:
}
{
// total force
Real force_abs = std::sqrt(norm2(level_force)/U.Grid()->gSites()); //average per-site norm. nb. norm2(latt) = \sum_x norm2(latt[x])
Real impulse_abs = force_abs * ep * HMC_MOMENTUM_DENOMINATOR;
Real force_max = std::sqrt(maxLocalNorm2(level_force));
Real impulse_max = force_max * ep * HMC_MOMENTUM_DENOMINATOR;
LevelForces[level].actions.at(0)->deriv_log(force_abs,force_max,impulse_abs,impulse_max);
}
// Force from the other representations
as[level].apply(update_P_hireps, Representations, Mom, U, ep);
@ -216,6 +234,16 @@ public:
//Default the momentum filter to "do-nothing"
MomFilter = getDefaultMomFilter();
for (int level = 0; level < as.size(); ++level) {
int multiplier = as.at(level).multiplier;
ActionLevel<Field, RepresentationPolicy> * Level = new ActionLevel<Field, RepresentationPolicy>(multiplier);
Level->push_back(new EmptyAction<Field>);
LevelForces.push_back(*Level);
// does it copy by value or by reference?
// - answer: it copies by value, BUT the action level contains a reference that is NOT updated.
// Unsafe code in Guido's area; note the newed ActionLevel is also never deleted.
}
};
virtual ~Integrator() {}
@ -233,10 +261,14 @@ public:
void reset_timer(void)
{
assert(as.size()==LevelForces.size());
for (int level = 0; level < as.size(); ++level) {
for (int actionID = 0; actionID < as[level].actions.size(); ++actionID) {
as[level].actions.at(actionID)->reset_timer();
}
int actionID=0;
assert(LevelForces.at(level).actions.size()==1);
LevelForces.at(level).actions.at(actionID)->reset_timer();
}
}
void print_timer(void)
@ -298,6 +330,16 @@ public:
<<" calls " << as[level].actions.at(actionID)->deriv_num
<< std::endl;
}
int actionID=0;
std::cout << GridLogMessage
<< LevelForces[level].actions.at(actionID)->action_name()
<<"["<<level<<"]["<< actionID<<"] :\n\t\t "
<<" force max " << LevelForces[level].actions.at(actionID)->deriv_max_average()
<<" norm " << LevelForces[level].actions.at(actionID)->deriv_norm_average()
<<" Fdt max " << LevelForces[level].actions.at(actionID)->Fdt_max_average()
<<" Fdt norm " << LevelForces[level].actions.at(actionID)->Fdt_norm_average()
<<" calls " << LevelForces[level].actions.at(actionID)->deriv_num
<< std::endl;
}
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
}
@ -319,6 +361,13 @@ public:
std::cout << as[level].actions.at(actionID)->LogParameters();
}
}
std::cout << " [Integrator] Total Force loggers: "<< LevelForces.size() <<std::endl;
for (int level = 0; level < LevelForces.size(); ++level) {
std::cout << GridLogMessage << "[Integrator] ---- Level: "<< level << std::endl;
for (int actionID = 0; actionID < LevelForces[level].actions.size(); ++actionID) {
std::cout << GridLogMessage << "["<< LevelForces[level].actions.at(actionID)->action_name() << "] ID: " << actionID << std::endl;
}
}
std::cout << GridLogMessage << ":::::::::::::::::::::::::::::::::::::::::"<< std::endl;
}
@ -400,6 +449,7 @@ public:
RealD S(Field& U)
{ // here also U not used
assert(as.size()==LevelForces.size());
std::cout << GridLogIntegrator << "Integrator action\n";
RealD H = - FieldImplementation::FieldSquareNorm(P)/HMC_MOMENTUM_DENOMINATOR; // - trace (P*P)/denom

View File

@ -13,7 +13,7 @@ NAMESPACE_BEGIN(Grid);
* Empty since HMC updates already the fundamental representation
*/
template <int ncolour>
template <int ncolour, class group_name>
class FundamentalRep {
public:
static const int Dimension = ncolour;
@ -21,7 +21,7 @@ public:
// typedef to be used by the Representations class in HMC to get the
// types for the higher representation fields
typedef typename SU<ncolour>::LatticeMatrix LatticeMatrix;
typedef typename GaugeGroup<ncolour,group_name>::LatticeMatrix LatticeMatrix;
typedef LatticeGaugeField LatticeField;
explicit FundamentalRep(GridBase* grid) {} //do nothing
@ -45,7 +45,8 @@ public:
typedef FundamentalRep<Nc> FundamentalRepresentation;
typedef FundamentalRep<Nc,GroupName::SU> FundamentalRepresentation;
typedef FundamentalRep<Nc,GroupName::Sp> SpFundamentalRepresentation;
NAMESPACE_END(Grid);

View File

@ -20,14 +20,14 @@ NAMESPACE_BEGIN(Grid);
* in the SUnTwoIndex.h file
*/
template <int ncolour, TwoIndexSymmetry S>
template <int ncolour, TwoIndexSymmetry S, class group_name = GroupName::SU>
class TwoIndexRep {
public:
// typedef to be used by the Representations class in HMC to get the
// types for the higher representation fields
typedef typename SU_TwoIndex<ncolour, S>::LatticeTwoIndexMatrix LatticeMatrix;
typedef typename SU_TwoIndex<ncolour, S>::LatticeTwoIndexField LatticeField;
static const int Dimension = ncolour * (ncolour + S) / 2;
typedef typename GaugeGroupTwoIndex<ncolour, S, group_name>::LatticeTwoIndexMatrix LatticeMatrix;
typedef typename GaugeGroupTwoIndex<ncolour, S, group_name>::LatticeTwoIndexField LatticeField;
static const int Dimension = GaugeGroupTwoIndex<ncolour,S,group_name>::Dimension;
static const bool isFundamental = false;
LatticeField U;
@ -43,10 +43,10 @@ public:
U = Zero();
LatticeColourMatrix tmp(Uin.Grid());
Vector<typename SU<ncolour>::Matrix> eij(Dimension);
Vector<typename GaugeGroup<ncolour,group_name>::Matrix> eij(Dimension);
for (int a = 0; a < Dimension; a++)
SU_TwoIndex<ncolour, S>::base(a, eij[a]);
GaugeGroupTwoIndex<ncolour, S, group_name>::base(a, eij[a]);
for (int mu = 0; mu < Nd; mu++) {
auto Uin_mu = peekLorentz(Uin, mu);
@ -71,7 +71,7 @@ public:
out_mu = Zero();
typename SU<ncolour>::LatticeAlgebraVector h(in.Grid());
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector h(in.Grid());
projectOnAlgebra(h, in_mu, double(Nc + 2 * S)); // factor T(r)/T(fund)
FundamentalLieAlgebraMatrix(h, out_mu); // apply scale only once
pokeLorentz(out, out_mu, mu);
@ -80,20 +80,23 @@ public:
}
private:
void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out,
void projectOnAlgebra(typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeMatrix &in, Real scale = 1.0) const {
SU_TwoIndex<ncolour, S>::projectOnAlgebra(h_out, in, scale);
GaugeGroupTwoIndex<ncolour, S,group_name>::projectOnAlgebra(h_out, in, scale);
}
void FundamentalLieAlgebraMatrix(
typename SU<ncolour>::LatticeAlgebraVector &h,
typename SU<ncolour>::LatticeMatrix &out, Real scale = 1.0) const {
SU<ncolour>::FundamentalLieAlgebraMatrix(h, out, scale);
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h,
typename GaugeGroup<ncolour, group_name>::LatticeMatrix &out, Real scale = 1.0) const {
GaugeGroup<ncolour,group_name>::FundamentalLieAlgebraMatrix(h, out, scale);
}
};
typedef TwoIndexRep<Nc, Symmetric> TwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric> TwoIndexAntiSymmetricRepresentation;
typedef TwoIndexRep<Nc, Symmetric, GroupName::SU> TwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric, GroupName::SU> TwoIndexAntiSymmetricRepresentation;
typedef TwoIndexRep<Nc, Symmetric, GroupName::Sp> SpTwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric, GroupName::Sp> SpTwoIndexAntiSymmetricRepresentation;
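// Hedged sketch: these typedefs plug into the hirep HMC machinery through the
// Representations policy, e.g.
//   typedef Representations<SpFundamentalRepresentation,
//                           SpTwoIndexAntiSymmetricRepresentation> TheReps;
//   // then GenericSpHMCRunnerHirep<TheReps, MinimumNorm2> as the runner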
NAMESPACE_END(Grid);

View File

@ -1,3 +1,4 @@
/*!
@file GaugeConfiguration.h
@brief Declares the GaugeConfiguration class
@ -6,6 +7,15 @@
NAMESPACE_BEGIN(Grid);
template<class T> void Dump(const Lattice<T> & lat,
std::string s,
Coordinate site = Coordinate({0,0,0,0}))
{
typename T::scalar_object tmp;
peekSite(tmp,lat,site);
std::cout << " Dump "<<s<<" "<<tmp<<std::endl;
}
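// Usage sketch (U any lattice field in scope):
//   Dump(U, "U after smearing");                      // site (0,0,0,0) default
//   Dump(U, "U at (1,0,0,0)", Coordinate({1,0,0,0})); // any chosen site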
/*!
@brief Smeared configuration masked container
Modified for a multi-subset smearing (aka Luscher Flowed HMC)
@ -28,6 +38,101 @@ private:
typedef typename SU3Adjoint::LatticeAdjMatrix AdjMatrixField;
typedef typename SU3Adjoint::LatticeAdjVector AdjVectorField;
void BaseSmearDerivative(GaugeField& SigmaTerm,
const GaugeField& iLambda,
const GaugeField& U,
int mmu, RealD rho)
{
// Reference
// Morningstar, Peardon, Phys.Rev.D69,054501(2004)
// Equation 75
// Computing Sigma_mu, derivative of S[fat links] with respect to the thin links
// Output SigmaTerm
GridBase *grid = U.Grid();
WilsonLoops<Gimpl> WL;
GaugeLinkField staple(grid), u_tmp(grid);
GaugeLinkField iLambda_mu(grid), iLambda_nu(grid);
GaugeLinkField U_mu(grid), U_nu(grid);
GaugeLinkField sh_field(grid), temp_Sigma(grid);
Real rho_munu, rho_numu;
rho_munu = rho;
rho_numu = rho;
for(int mu = 0; mu < Nd; ++mu){
U_mu = peekLorentz( U, mu);
iLambda_mu = peekLorentz(iLambda, mu);
for(int nu = 0; nu < Nd; ++nu){
if(nu==mu) continue;
U_nu = peekLorentz( U, nu);
// Nd(Nd-1) = 12 staples normally;
// in the FTHMC case we must compute only 6 of these.
if ( (mu==mmu)||(nu==mmu) )
WL.StapleUpper(staple, U, mu, nu);
if(nu==mmu) {
iLambda_nu = peekLorentz(iLambda, nu);
temp_Sigma = -rho_numu*staple*iLambda_nu; //ok
//-r_numu*U_nu(x+mu)*Udag_mu(x+nu)*Udag_nu(x)*Lambda_nu(x)
Gimpl::AddLink(SigmaTerm, temp_Sigma, mu);
sh_field = Cshift(iLambda_nu, mu, 1);// general also for Gparity?
temp_Sigma = rho_numu*sh_field*staple; //ok
//r_numu*Lambda_nu(mu)*U_nu(x+mu)*Udag_mu(x+nu)*Udag_nu(x)
Gimpl::AddLink(SigmaTerm, temp_Sigma, mu);
}
if ( mu == mmu ) {
sh_field = Cshift(iLambda_mu, nu, 1);
temp_Sigma = -rho_munu*staple*U_nu*sh_field*adj(U_nu); //ok
//-r_munu*U_nu(x+mu)*Udag_mu(x+nu)*Lambda_mu(x+nu)*Udag_nu(x)
Gimpl::AddLink(SigmaTerm, temp_Sigma, mu);
}
// staple = Zero();
sh_field = Cshift(U_nu, mu, 1);
temp_Sigma = Zero();
if ( mu == mmu )
temp_Sigma = -rho_munu*adj(sh_field)*adj(U_mu)*iLambda_mu*U_nu;
if ( nu == mmu ) {
temp_Sigma += rho_numu*adj(sh_field)*adj(U_mu)*iLambda_nu*U_nu;
u_tmp = adj(U_nu)*iLambda_nu;
sh_field = Cshift(u_tmp, mu, 1);
temp_Sigma += -rho_numu*sh_field*adj(U_mu)*U_nu;
}
sh_field = Cshift(temp_Sigma, nu, -1);
Gimpl::AddLink(SigmaTerm, sh_field, mu);
}
}
}
void BaseSmear(GaugeLinkField& Cup, const GaugeField& U,int mu,RealD rho) {
GridBase *grid = U.Grid();
GaugeLinkField tmp_stpl(grid);
WilsonLoops<Gimpl> WL;
Cup = Zero();
for(int nu=0; nu<Nd; ++nu){
if (nu != mu) {
// get the staple in direction mu, nu
WL.Staple(tmp_stpl, U, mu, nu); //nb staple conventions of IroIro and Grid differ by a dagger
Cup += adj(tmp_stpl*rho);
}
}
}
// Adjoint vector to GaugeField force
void InsertForce(GaugeField &Fdet,AdjVectorField &Fdet_nu,int nu)
{
@ -47,27 +152,54 @@ private:
GaugeLinkField UtaU(PlaqL.Grid());
GaugeLinkField D(PlaqL.Grid());
AdjMatrixField Dbc(PlaqL.Grid());
AdjMatrixField Dbc_opt(PlaqL.Grid());
LatticeComplex tmp(PlaqL.Grid());
const int Ngen = SU3Adjoint::Dimension;
Complex ci(0,1);
ColourMatrix ta,tb,tc;
RealD t=0;
RealD tp=0;
RealD tta=0;
RealD tpk=0;
t-=usecond();
for(int a=0;a<Ngen;a++) {
tta-=usecond();
SU3::generator(a, ta);
ta = 2.0 * ci * ta;
// Qlat Tb = 2i Tb^Grid
UtaU= 2.0*ci*adj(PlaqL)*ta*PlaqR;
UtaU= adj(PlaqL)*ta*PlaqR; // 6ms
tta+=usecond();
////////////////////////////////////////////
// Could add this entire C-loop to a projection routine
// for performance. Could also pick checkerboard on UtaU
// and set checkerboard on result for 2x perf
////////////////////////////////////////////
for(int c=0;c<Ngen;c++) {
SU3::generator(c, tc);
D = Ta( (2.0)*ci*tc *UtaU);
tc = 2.0*ci*tc;
tp-=usecond();
D = Ta( tc *UtaU); // 2ms
#if 1
SU3::LieAlgebraProject(Dbc_opt,D,c); // 5.5ms
#else
for(int b=0;b<Ngen;b++){
SU3::generator(b, tb);
tmp =-trace(ci*tb*D);
PokeIndex<ColourIndex>(Dbc,tmp,b,c); // Adjoint rep
}
#endif
tp+=usecond();
}
tmp = trace(MpInvJx * Dbc);
// Dump(Dbc_opt,"Dbc_opt");
// Dump(Dbc,"Dbc");
tpk-=usecond();
tmp = trace(MpInvJx * Dbc_opt);
PokeIndex<ColourIndex>(Fdet2,tmp,a);
tpk+=usecond();
}
t+=usecond();
std::cout << GridLogPerformance << " Compute_MpInvJx_dNxxdSy " << t/1e3 << " ms proj "<<tp/1e3<< " ms"
<< " ta "<<tta/1e3<<" ms" << " poke "<<tpk/1e3<< " ms"<<std::endl;
}
void ComputeNxy(const GaugeLinkField &PlaqL,const GaugeLinkField &PlaqR,AdjMatrixField &NxAd)
@ -79,12 +211,17 @@ private:
ColourMatrix tc;
for(int b=0;b<Ngen;b++) {
SU3::generator(b, tb);
Nx = (2.0)*Ta( adj(PlaqL)*ci*tb * PlaqR );
tb = 2.0 * ci * tb;
Nx = Ta( adj(PlaqL)*tb * PlaqR );
#if 1
SU3::LieAlgebraProject(NxAd,Nx,b);
#else
for(int c=0;c<Ngen;c++) {
SU3::generator(c, tc);
auto tmp =closure( -trace(ci*tc*Nx));
PokeIndex<ColourIndex>(NxAd,tmp,c,b);
}
#endif
}
}
void ApplyMask(GaugeField &U,int smr)
@ -164,8 +301,7 @@ public:
// Computes ALL the staples -- could compute one only and do it here
RealD time;
time=-usecond();
this->StoutSmearing->BaseSmear(C, U);
Cmu = peekLorentz(C, mu);
BaseSmear(Cmu, U,mu,rho);
//////////////////////////////////////////////////////////////////
// Assemble Luscher exp diff map J matrix
@ -209,6 +345,36 @@ public:
// dJ(x)/dxe
//////////////////////////////////////
time=-usecond();
#if 1
std::vector<AdjMatrixField> dJdX; dJdX.resize(8,grid);
std::vector<AdjMatrix> TRb_s; TRb_s.resize(8);
AdjMatrixField tbXn(grid);
AdjMatrixField sumXtbX(grid);
AdjMatrixField t2(grid);
AdjMatrixField dt2(grid);
AdjMatrixField t3(grid);
AdjMatrixField dt3(grid);
AdjMatrixField aunit(grid);
for(int b=0;b<8;b++){
SU3Adjoint::generator(b, TRb_s[b]);
dJdX[b] = TRb_s[b];
}
aunit = ComplexD(1.0);
// Could put into an accelerator_for
X = (-1.0)*ZxAd;
t2 = X;
for (int j = 12; j > 1; --j) {
t3 = t2*(1.0 / (j + 1)) + aunit;
t2 = X * t3;
for(int b=0;b<8;b++){
dJdX[b]= TRb_s[b] * t3 + X * dJdX[b]*(1.0 / (j + 1));
}
}
for(int b=0;b<8;b++){
dJdX[b] = -dJdX[b];
}
#else
std::vector<AdjMatrixField> dJdX; dJdX.resize(8,grid);
AdjMatrixField tbXn(grid);
AdjMatrixField sumXtbX(grid);
@ -224,14 +390,15 @@ public:
X = (-1.0)*ZxAd;
t2 = X;
dt2 = TRb;
for (int j = 20; j > 1; --j) {
t3 = t2*(1.0 / (j + 1)) + aunit;
for (int j = 12; j > 1; --j) {
t3 = t2*(1.0 / (j + 1)) + aunit;
dt3 = dt2*(1.0 / (j + 1));
t2 = X * t3;
dt2 = TRb * t3 + X * dt3;
}
dJdX[b] = -dt2;
}
#endif
time+=usecond();
std::cout << GridLogMessage << "dJx took "<<time<< " us"<<std::endl;
/////////////////////////////////////////////////////////////////
@ -281,8 +448,8 @@ public:
for(int e =0 ; e<8 ; e++){
LatticeComplexD tr(grid);
ColourMatrix te;
SU3::generator(e, te);
// ColourMatrix te;
// SU3::generator(e, te);
tr = trace(dJdX[e] * nMpInv);
pokeColour(dJdXe_nMpInv,tr,e);
}
@ -493,20 +660,25 @@ public:
//////////////////////////////////////////////////////////////////
// Assemble the N matrix
//////////////////////////////////////////////////////////////////
// Computes ALL the staples -- could compute one only here
this->StoutSmearing->BaseSmear(C, U);
Cmu = peekLorentz(C, mu);
double rho=this->StoutSmearing->SmearRho[1];
BaseSmear(Cmu, U,mu,rho);
Umu = peekLorentz(U, mu);
Complex ci(0,1);
for(int b=0;b<Ngen;b++) {
SU3::generator(b, Tb);
// Qlat Tb = 2i Tb^Grid
Nb = (2.0)*Ta( ci*Tb * Umu * adj(Cmu));
// FIXME -- replace this with LieAlgebraProject
#if 0
SU3::LieAlgebraProject(Ncb,tmp,b);
#else
for(int c=0;c<Ngen;c++) {
SU3::generator(c, Tc);
auto tmp = -trace(ci*Tc*Nb); // Luchang's norm: (2Tc) (2Td) N^db = -2 delta cd N^db // - was important
PokeIndex<ColourIndex>(Ncb,tmp,c,b);
}
#endif
}
//////////////////////////////////////////////////////////////////
@ -693,15 +865,19 @@ private:
const GaugeField& GaugeK,int level)
{
GridBase* grid = GaugeK.Grid();
GaugeField C(grid), SigmaK(grid), iLambda(grid);
GaugeField SigmaK(grid), iLambda(grid);
GaugeField SigmaKPrimeA(grid);
GaugeField SigmaKPrimeB(grid);
GaugeLinkField iLambda_mu(grid);
GaugeLinkField iQ(grid), e_iQ(grid);
GaugeLinkField SigmaKPrime_mu(grid);
GaugeLinkField GaugeKmu(grid), Cmu(grid);
this->StoutSmearing->BaseSmear(C, GaugeK);
int mmu= (level/2) %Nd;
int cb= (level%2);
double rho=this->StoutSmearing->SmearRho[1];
// Can override this to do one direction only.
SigmaK = Zero();
iLambda = Zero();
@ -712,18 +888,38 @@ private:
// Could get away with computing only one polarisation here
// int mu= (smr/2) %Nd;
// SigmaKprime_A has only one component
for (int mu = 0; mu < Nd; mu++)
#if 0
BaseSmear(Cmu, GaugeK,mu,rho);
GaugeKmu = peekLorentz(GaugeK, mu);
SigmaKPrime_mu = peekLorentz(SigmaKPrimeA, mu);
iQ = Ta(Cmu * adj(GaugeKmu));
this->set_iLambda(iLambda_mu, e_iQ, iQ, SigmaKPrime_mu, GaugeKmu);
pokeLorentz(SigmaK, SigmaKPrime_mu * e_iQ + adj(Cmu) * iLambda_mu, mu);
pokeLorentz(iLambda, iLambda_mu, mu);
BaseSmearDerivative(SigmaK, iLambda,GaugeK,mu,rho); // derivative of SmearBase
#else
// GaugeField C(grid);
// this->StoutSmearing->BaseSmear(C, GaugeK);
// for (int mu = 0; mu < Nd; mu++)
int mu =mmu;
BaseSmear(Cmu, GaugeK,mu,rho);
{
Cmu = peekLorentz(C, mu);
// Cmu = peekLorentz(C, mu);
GaugeKmu = peekLorentz(GaugeK, mu);
SigmaKPrime_mu = peekLorentz(SigmaKPrimeA, mu);
iQ = Ta(Cmu * adj(GaugeKmu));
this->set_iLambda(iLambda_mu, e_iQ, iQ, SigmaKPrime_mu, GaugeKmu);
pokeLorentz(SigmaK, SigmaKPrime_mu * e_iQ + adj(Cmu) * iLambda_mu, mu);
pokeLorentz(iLambda, iLambda_mu, mu);
std::cout << " mu "<<mu<<" SigmaKPrime_mu"<<norm2(SigmaKPrime_mu)<< " iLambda_mu " <<norm2(iLambda_mu)<<std::endl;
}
this->StoutSmearing->derivative(SigmaK, iLambda,GaugeK); // derivative of SmearBase
// GaugeField SigmaKcopy(grid);
// SigmaKcopy = SigmaK;
BaseSmearDerivative(SigmaK, iLambda,GaugeK,mu,rho); // derivative of SmearBase
// this->StoutSmearing->derivative(SigmaK, iLambda,GaugeK); // derivative of SmearBase
// SigmaKcopy = SigmaKcopy - SigmaK;
// std::cout << " BaseSmearDerivative fast path error" <<norm2(SigmaKcopy)<<std::endl;
#endif
////////////////////////////////////////////////////////////////////////////////////
// propagate the rest of the force as identity map, just add back
////////////////////////////////////////////////////////////////////////////////////

View File

@ -0,0 +1,389 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/smearing/HISQSmearing.h
Copyright (C) 2023
Author: D. A. Clarke <clarke.davida@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/*
@file HISQSmearing.h
@brief Declares classes related to HISQ smearing
*/
#pragma once
#include <Grid/Grid.h>
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
NAMESPACE_BEGIN(Grid);
// TODO: find a way to fold this into the stencil header. need to access grid to get
// Nd, since you don't want to inherit from QCD.h
/*! @brief append arbitrary shift path to shifts */
template<typename... Args>
void appendShift(std::vector<Coordinate>& shifts, int dir, Args... args) {
Coordinate shift(Nd,0);
generalShift(shift, dir, args...);
// push_back creates an element at the end of shifts and
// assigns the data in the argument to it.
shifts.push_back(shift);
}
/*! @brief figure out the stencil index from mu and nu */
accelerator_inline int stencilIndex(int mu, int nu) {
// Nshifts depends on how you built the stencil
int Nshifts = 6;
return Nshifts*nu + Nd*Nshifts*mu;
}
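// Worked example (Nd = 4, Nshifts = 6): the (mu,nu) block starts at
// 6*nu + 24*mu; its six entries, in the order appended in smear() below, are
// x+mu, x+nu, x, x+mu-nu, x-nu, x-mu.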
/*! @brief structure holding the link treatment */
struct SmearingParameters{
SmearingParameters(){}
Real c_1; // 1 link
Real c_naik; // Naik term
Real c_3; // 3 link
Real c_5; // 5 link
Real c_7; // 7 link
Real c_lp; // 5 link Lepage
SmearingParameters(Real c1, Real cnaik, Real c3, Real c5, Real c7, Real clp)
: c_1(c1),
c_naik(cnaik),
c_3(c3),
c_5(c5),
c_7(c7),
c_lp(clp){}
};
/*! @brief create fat links from link variables */
template<class Gimpl>
class Smear_HISQ : public Gimpl {
private:
GridCartesian* const _grid;
SmearingParameters _linkTreatment;
public:
INHERIT_GIMPL_TYPES(Gimpl);
typedef typename Gimpl::GaugeField GF;
typedef typename Gimpl::GaugeLinkField LF;
typedef typename Gimpl::ComplexField CF;
// Don't allow default values here.
Smear_HISQ(GridCartesian* grid, Real c1, Real cnaik, Real c3, Real c5, Real c7, Real clp)
: _grid(grid),
_linkTreatment(c1,cnaik,c3,c5,c7,clp) {
assert(Nc == 3 && "HISQ smearing currently implemented only for Nc==3");
assert(Nd == 4 && "HISQ smearing only defined for Nd==4");
}
// Allow passing a pointer to a C-style double array, for MILC convenience
Smear_HISQ(GridCartesian* grid, double* coeff)
: _grid(grid),
_linkTreatment(coeff[0],coeff[1],coeff[2],coeff[3],coeff[4],coeff[5]) {
assert(Nc == 3 && "HISQ smearing currently implemented only for Nc==3");
assert(Nd == 4 && "HISQ smearing only defined for Nd==4");
}
~Smear_HISQ() {}
// Intent: OUT--u_smr, u_naik
// IN--u_thin
void smear(GF& u_smr, GF& u_naik, GF& u_thin) const {
SmearingParameters lt = this->_linkTreatment;
auto grid = this->_grid;
// Create a padded cell of extra padding depth=1 and fill the padding.
int depth = 1;
PaddedCell Ghost(depth,grid);
GF Ughost = Ghost.Exchange(u_thin);
// This is where auxiliary N-link fields and the final smear will be stored.
GF Ughost_fat(Ughost.Grid());
GF Ughost_3link(Ughost.Grid());
GF Ughost_5linkA(Ughost.Grid());
GF Ughost_5linkB(Ughost.Grid());
// mu-nu plane stencil. We allow mu==nu to make indexing the stencil easier,
// but these entries will not be used.
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++)
for(int nu=0;nu<Nd;nu++) {
appendShift(shifts,mu);
appendShift(shifts,nu);
appendShift(shifts,shiftSignal::NO_SHIFT);
appendShift(shifts,mu,Back(nu));
appendShift(shifts,Back(nu));
appendShift(shifts,Back(mu));
}
// A GeneralLocalStencil has two indices: a site and stencil index
GeneralLocalStencil gStencil(Ughost.Grid(),shifts);
// This is where contributions from the smearing get added together
Ughost_fat=Zero();
// This loop handles 3-, 5-, and 7-link constructs, minus Lepage and Naik.
for(int mu=0;mu<Nd;mu++) {
// TODO: This approach is slightly memory inefficient. It uses 25% extra memory
Ughost_3link =Zero();
Ughost_5linkA=Zero();
Ughost_5linkB=Zero();
// Create the accessors
autoView(U_v , Ughost , AcceleratorRead);
autoView(U_fat_v , Ughost_fat , AcceleratorWrite);
autoView(U_3link_v , Ughost_3link , AcceleratorWrite);
autoView(U_5linkA_v, Ughost_5linkA, AcceleratorWrite);
autoView(U_5linkB_v, Ughost_5linkB, AcceleratorWrite);
// We infer some types that will be needed in the calculation.
typedef decltype(gStencil.GetEntry(0,0)) stencilElement;
typedef decltype(coalescedReadGeneralPermute(U_v[0](0),gStencil.GetEntry(0,0)->_permute,Nd)) U3matrix;
int Nsites = U_v.size();
auto gStencil_v = gStencil.View();
accelerator_for(site,Nsites,Simd::Nsimd(),{ // ----------- 3-link constructs
stencilElement SE0, SE1, SE2, SE3, SE4, SE5;
U3matrix U0, U1, U2, U3, U4, U5, W;
for(int nu=0;nu<Nd;nu++) {
if(nu==mu) continue;
int s = stencilIndex(mu,nu);
// The stencil gives us support points in the mu-nu plane that we will use to
// grab the links we need.
SE0 = gStencil_v.GetEntry(s+0,site); int x_p_mu = SE0->_offset;
SE1 = gStencil_v.GetEntry(s+1,site); int x_p_nu = SE1->_offset;
SE2 = gStencil_v.GetEntry(s+2,site); int x = SE2->_offset;
SE3 = gStencil_v.GetEntry(s+3,site); int x_p_mu_m_nu = SE3->_offset;
SE4 = gStencil_v.GetEntry(s+4,site); int x_m_nu = SE4->_offset;
SE5 = gStencil_v.GetEntry(s+5,site); int x_m_mu = SE5->_offset;
// When you're deciding whether to take an adjoint, the question is: how is the
// stored link oriented compared to the one you want? If I imagine myself travelling
// with the to-be-updated link, I have two possible, alternative 3-link paths I can
// take, one starting by going to the left, the other starting by going to the right.
U0 = coalescedReadGeneralPermute(U_v[x_p_mu ](nu),SE0->_permute,Nd);
U1 = coalescedReadGeneralPermute(U_v[x_p_nu ](mu),SE1->_permute,Nd);
U2 = coalescedReadGeneralPermute(U_v[x ](nu),SE2->_permute,Nd);
U3 = coalescedReadGeneralPermute(U_v[x_p_mu_m_nu](nu),SE3->_permute,Nd);
U4 = coalescedReadGeneralPermute(U_v[x_m_nu ](mu),SE4->_permute,Nd);
U5 = coalescedReadGeneralPermute(U_v[x_m_nu ](nu),SE4->_permute,Nd);
// "left" "right"
W = U2*U1*adj(U0) + adj(U5)*U4*U3;
// Save 3-link construct for later and add to smeared field.
coalescedWrite(U_3link_v[x](nu), W);
// The index operator (x) returns the coalesced read on GPU. The view [] index returns
// a reference to the vector object. The [x](mu) returns a reference to the densely
// packed (contiguous in memory) mu-th element of the vector object. On CPU,
// coalescedRead/Write is the identity mapping assigning vector object to vector object.
// But on GPU it's non-trivial and maps scalar object to vector object and vice versa.
coalescedWrite(U_fat_v[x](mu), U_fat_v(x)(mu) + lt.c_3*W);
}
})
accelerator_for(site,Nsites,Simd::Nsimd(),{ // ----------- 5-link
stencilElement SE0, SE1, SE2, SE3, SE4, SE5;
U3matrix U0, U1, U2, U3, U4, U5, W;
int sigmaIndex = 0;
for(int nu=0;nu<Nd;nu++) {
if(nu==mu) continue;
int s = stencilIndex(mu,nu);
for(int rho=0;rho<Nd;rho++) {
if (rho == mu || rho == nu) continue;
SE0 = gStencil_v.GetEntry(s+0,site); int x_p_mu = SE0->_offset;
SE1 = gStencil_v.GetEntry(s+1,site); int x_p_nu = SE1->_offset;
SE2 = gStencil_v.GetEntry(s+2,site); int x = SE2->_offset;
SE3 = gStencil_v.GetEntry(s+3,site); int x_p_mu_m_nu = SE3->_offset;
SE4 = gStencil_v.GetEntry(s+4,site); int x_m_nu = SE4->_offset;
U0 = coalescedReadGeneralPermute( U_v[x_p_mu ](nu ),SE0->_permute,Nd);
U1 = coalescedReadGeneralPermute(U_3link_v[x_p_nu ](rho),SE1->_permute,Nd);
U2 = coalescedReadGeneralPermute( U_v[x ](nu ),SE2->_permute,Nd);
U3 = coalescedReadGeneralPermute( U_v[x_p_mu_m_nu](nu ),SE3->_permute,Nd);
U4 = coalescedReadGeneralPermute(U_3link_v[x_m_nu ](rho),SE4->_permute,Nd);
U5 = coalescedReadGeneralPermute( U_v[x_m_nu ](nu ),SE4->_permute,Nd);
W = U2*U1*adj(U0) + adj(U5)*U4*U3;
if(sigmaIndex<3) {
coalescedWrite(U_5linkA_v[x](rho), W);
} else {
coalescedWrite(U_5linkB_v[x](rho), W);
}
coalescedWrite(U_fat_v[x](mu), U_fat_v(x)(mu) + lt.c_5*W);
sigmaIndex++;
}
}
})
accelerator_for(site,Nsites,Simd::Nsimd(),{ // ----------- 7-link
stencilElement SE0, SE1, SE2, SE3, SE4, SE5;
U3matrix U0, U1, U2, U3, U4, U5, W;
int sigmaIndex = 0;
for(int nu=0;nu<Nd;nu++) {
if(nu==mu) continue;
int s = stencilIndex(mu,nu);
for(int rho=0;rho<Nd;rho++) {
if (rho == mu || rho == nu) continue;
SE0 = gStencil_v.GetEntry(s+0,site); int x_p_mu = SE0->_offset;
SE1 = gStencil_v.GetEntry(s+1,site); int x_p_nu = SE1->_offset;
SE2 = gStencil_v.GetEntry(s+2,site); int x = SE2->_offset;
SE3 = gStencil_v.GetEntry(s+3,site); int x_p_mu_m_nu = SE3->_offset;
SE4 = gStencil_v.GetEntry(s+4,site); int x_m_nu = SE4->_offset;
U0 = coalescedReadGeneralPermute(U_v[x_p_mu](nu),SE0->_permute,Nd);
if(sigmaIndex<3) {
U1 = coalescedReadGeneralPermute(U_5linkB_v[x_p_nu](rho),SE1->_permute,Nd);
} else {
U1 = coalescedReadGeneralPermute(U_5linkA_v[x_p_nu](rho),SE1->_permute,Nd);
}
U2 = coalescedReadGeneralPermute(U_v[x](nu),SE2->_permute,Nd);
U3 = coalescedReadGeneralPermute(U_v[x_p_mu_m_nu](nu),SE3->_permute,Nd);
if(sigmaIndex<3) {
U4 = coalescedReadGeneralPermute(U_5linkB_v[x_m_nu](rho),SE4->_permute,Nd);
} else {
U4 = coalescedReadGeneralPermute(U_5linkA_v[x_m_nu](rho),SE4->_permute,Nd);
}
U5 = coalescedReadGeneralPermute(U_v[x_m_nu](nu),SE4->_permute,Nd);
W = U2*U1*adj(U0) + adj(U5)*U4*U3;
coalescedWrite(U_fat_v[x](mu), U_fat_v(x)(mu) + lt.c_7*W);
sigmaIndex++;
}
}
})
} // end mu loop
// c1, c3, c5, c7 construct contributions
u_smr = Ghost.Extract(Ughost_fat) + lt.c_1*u_thin;
// Load up U and V std::vectors to access thin and smeared links.
std::vector<LF> U(Nd, grid);
std::vector<LF> V(Nd, grid);
std::vector<LF> Vnaik(Nd, grid);
for (int mu = 0; mu < Nd; mu++) {
U[mu] = PeekIndex<LorentzIndex>(u_thin, mu);
V[mu] = PeekIndex<LorentzIndex>(u_smr, mu);
}
for(int mu=0;mu<Nd;mu++) {
// Naik
Vnaik[mu] = lt.c_naik*Gimpl::CovShiftForward(U[mu],mu,
Gimpl::CovShiftForward(U[mu],mu,
Gimpl::CovShiftIdentityForward(U[mu],mu)));
// LePage
for (int nu_h=1;nu_h<Nd;nu_h++) {
int nu=(mu+nu_h)%Nd;
// nu, nu, mu, Back(nu), Back(nu)
V[mu] = V[mu] + lt.c_lp*Gimpl::CovShiftForward(U[nu],nu,
Gimpl::CovShiftForward(U[nu],nu,
Gimpl::CovShiftForward(U[mu],mu,
Gimpl::CovShiftBackward(U[nu],nu,
Gimpl::CovShiftIdentityBackward(U[nu],nu)))))
// Back(nu), Back(nu), mu, nu, nu
+ lt.c_lp*Gimpl::CovShiftBackward(U[nu],nu,
Gimpl::CovShiftBackward(U[nu],nu,
Gimpl::CovShiftForward(U[mu],mu,
Gimpl::CovShiftForward(U[nu],nu,
Gimpl::CovShiftIdentityForward(U[nu],nu)))));
}
}
// Put V back into u_smr.
for (int mu = 0; mu < Nd; mu++) {
PokeIndex<LorentzIndex>(u_smr , V[mu] , mu);
PokeIndex<LorentzIndex>(u_naik, Vnaik[mu], mu);
}
};
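// In formulas, the Naik and Lepage pieces assembled above are
//   Vnaik_mu(x) = c_naik * U_mu(x) U_mu(x+mu) U_mu(x+2mu)
// and, for each nu != mu, the five-link Lepage staples
//   c_lp * U_nu(x) U_nu(x+nu) U_mu(x+2nu) U_nu^dag(x+nu+mu) U_nu^dag(x+mu)
// plus the mirror path below the axis (nu -> -nu).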
// Intent: OUT--u_proj
// IN--u_mu
void projectU3(GF& u_proj, GF& u_mu) const {
auto grid = this->_grid;
LF V(grid), Q(grid), sqrtQinv(grid), id_3(grid), diff(grid);
CF c0(grid), c1(grid), c2(grid), g0(grid), g1(grid), g2(grid), S(grid), R(grid), theta(grid),
u(grid), v(grid), w(grid), den(grid), f0(grid), f1(grid), f2(grid);
// Follow MILC 10.1103/PhysRevD.82.074501, eqs (B2-B3) and (C1-C8)
for (int mu = 0; mu < Nd; mu++) {
V = PeekIndex<LorentzIndex>(u_mu, mu);
Q = adj(V)*V;
c0 = real(trace(Q));
c1 = (1/2.)*real(trace(Q*Q));
c2 = (1/3.)*real(trace(Q*Q*Q));
S = (1/3.)*c1-(1/18.)*c0*c0;
if (norm2(S)<1e-28) {
g0 = (1/3.)*c0; g1 = g0; g2 = g1;
} else {
R = (1/2.)*c2-(1/3. )*c0*c1+(1/27.)*c0*c0*c0;
theta = acos(R*pow(S,-1.5));
g0 = (1/3.)*c0+2.*sqrt(S)*cos((1/3.)*theta-2*M_PI/3.);
g1 = (1/3.)*c0+2.*sqrt(S)*cos((1/3.)*theta );
g2 = (1/3.)*c0+2.*sqrt(S)*cos((1/3.)*theta+2*M_PI/3.);
}
// if (fabs(Q.determinant()/(g0*g1*g2)-1.0) > 1e-5) { SVD }
u = sqrt(g0) + sqrt(g1) + sqrt(g2);
v = sqrt(g0*g1) + sqrt(g0*g2) + sqrt(g1*g2);
w = sqrt(g0*g1*g2);
den = w*(u*v-w);
f0 = (-w*(u*u+v)+u*v*v)/den;
f1 = (-w-u*u*u+2.*u*v)/den;
f2 = u/den;
id_3 = 1.;
sqrtQinv = f0*id_3 + f1*Q + f2*Q*Q;
PokeIndex<LorentzIndex>(u_proj, V*sqrtQinv, mu);
}
};
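// The projection above follows MILC eqs (B2-B3), (C1-C8): each link V is
// replaced by its U(3) polar projection
//   W = V Q^{-1/2},  Q = V^dag V,  Q^{-1/2} = f0*1 + f1*Q + f2*Q^2,
// where, with g_i the eigenvalues of Q,
//   u = sum_i sqrt(g_i),  v = sum_{i<j} sqrt(g_i g_j),  w = sqrt(g0 g1 g2),
// and f0, f1, f2 are the Cayley-Hamilton coefficients computed in the loop.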
// void derivative(const GaugeField& Gauge) const {
// };
};
NAMESPACE_END(Grid);

View File

@ -5,4 +5,5 @@
#include <Grid/qcd/smearing/StoutSmearing.h>
#include <Grid/qcd/smearing/GaugeConfiguration.h>
#include <Grid/qcd/smearing/WilsonFlow.h>
#include <Grid/qcd/smearing/HISQSmearing.h>

View File

@ -69,7 +69,7 @@ public:
/*! Construct stout smearing object from explicitly specified rho matrix */
Smear_Stout(const std::vector<double>& rho_)
: OwnedBase{new Smear_APE<Gimpl>(rho_)}, SmearBase{OwnedBase.get()} {
std::cout << GridLogDebug << "Stout smearing constructor : Smear_Stout(const std::vector<double>& " << rho_ << " )" << std::endl
std::cout << GridLogDebug << "Stout smearing constructor : Smear_Stout(const std::vector<double>& " << rho_ << " )" << std::endl;
assert(Nc == 3 && "Stout smearing currently implemented only for Nc==3");
}

View File

@ -37,13 +37,14 @@ NAMESPACE_BEGIN(Grid);
// Make these members of an Impl class for BC's.
namespace PeriodicBC {
//Out(x) = Link(x)*field(x+mu)
template<class covariant,class gauge> Lattice<covariant> CovShiftForward(const Lattice<gauge> &Link,
int mu,
const Lattice<covariant> &field)
{
return Link*Cshift(field,mu,1);// moves towards negative mu
}
//Out(x) = Link^dag(x-mu)*field(x-mu)
template<class covariant,class gauge> Lattice<covariant> CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const Lattice<covariant> &field)
@ -52,19 +53,19 @@ namespace PeriodicBC {
tmp = adj(Link)*field;
return Cshift(tmp,mu,-1);// moves towards positive mu
}
//Out(x) = Link^dag(x-mu)
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)
{
return Cshift(adj(Link), mu, -1);
}
//Out(x) = Link(x)
template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu)
{
return Link;
}
//Link(x) = Link(x+mu)
template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{

Grid/qcd/utils/GaugeGroup.h (new file, 528 lines)
View File

@ -0,0 +1,528 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/utils/GaugeGroup.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_UTIL_GAUGEGROUP_H
#define QCD_UTIL_GAUGEGROUP_H
// Important detail: nvcc requires all template parameters to have names.
// This is the only reason why the second template parameter has a name.
#define ONLY_IF_SU \
typename dummy_name = group_name, \
typename named_dummy = std::enable_if_t < \
std::is_same<dummy_name, group_name>::value && \
is_su<dummy_name>::value >
#define ONLY_IF_Sp \
typename dummy_name = group_name, \
typename named_dummy = std::enable_if_t < \
std::is_same<dummy_name, group_name>::value && \
is_sp<dummy_name>::value >
NAMESPACE_BEGIN(Grid);
namespace GroupName {
class SU {};
class Sp {};
} // namespace GroupName
template <typename group_name>
struct is_su {
static const bool value = false;
};
template <>
struct is_su<GroupName::SU> {
static const bool value = true;
};
template <typename group_name>
struct is_sp {
static const bool value = false;
};
template <>
struct is_sp<GroupName::Sp> {
static const bool value = true;
};
template <typename group_name>
constexpr int compute_adjoint_dimension(int ncolour);
template <>
constexpr int compute_adjoint_dimension<GroupName::SU>(int ncolour) {
return ncolour * ncolour - 1;
}
template <>
constexpr int compute_adjoint_dimension<GroupName::Sp>(int ncolour) {
return ncolour / 2 * (ncolour + 1);
}
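// Worked checks, following directly from the definitions above:
static_assert(compute_adjoint_dimension<GroupName::SU>(3) == 8,
              "su(3) has 8 generators");
static_assert(compute_adjoint_dimension<GroupName::Sp>(4) == 10,
              "sp(4) has 10 generators");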
template <int ncolour, class group_name>
class GaugeGroup {
public:
static const int Dimension = ncolour;
static const int AdjointDimension =
compute_adjoint_dimension<group_name>(ncolour);
static const int AlgebraDimension =
compute_adjoint_dimension<group_name>(ncolour);
template <typename vtype>
using iSU2Matrix = iScalar<iScalar<iMatrix<vtype, 2> > >;
template <typename vtype>
using iGroupMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
template <typename vtype>
using iAlgebraVector = iScalar<iScalar<iVector<vtype, AdjointDimension> > >;
template <typename vtype>
using iSUnAlgebraMatrix =
iScalar<iScalar<iMatrix<vtype, AdjointDimension> > >;
static int su2subgroups(void) { return su2subgroups(group_name()); }
//////////////////////////////////////////////////////////////////////////////////////////////////
// Types can be accessed as SU<2>::Matrix , SU<2>::vSUnMatrix,
// SU<2>::LatticeMatrix etc...
//////////////////////////////////////////////////////////////////////////////////////////////////
typedef iGroupMatrix<Complex> Matrix;
typedef iGroupMatrix<ComplexF> MatrixF;
typedef iGroupMatrix<ComplexD> MatrixD;
typedef iGroupMatrix<vComplex> vMatrix;
typedef iGroupMatrix<vComplexF> vMatrixF;
typedef iGroupMatrix<vComplexD> vMatrixD;
// For the projectors to the algebra
// these should be real...
// keeping complex for consistency with the SIMD vector types
typedef iAlgebraVector<Complex> AlgebraVector;
typedef iAlgebraVector<ComplexF> AlgebraVectorF;
typedef iAlgebraVector<ComplexD> AlgebraVectorD;
typedef iAlgebraVector<vComplex> vAlgebraVector;
typedef iAlgebraVector<vComplexF> vAlgebraVectorF;
typedef iAlgebraVector<vComplexD> vAlgebraVectorD;
typedef Lattice<vMatrix> LatticeMatrix;
typedef Lattice<vMatrixF> LatticeMatrixF;
typedef Lattice<vMatrixD> LatticeMatrixD;
typedef Lattice<vAlgebraVector> LatticeAlgebraVector;
typedef Lattice<vAlgebraVectorF> LatticeAlgebraVectorF;
typedef Lattice<vAlgebraVectorD> LatticeAlgebraVectorD;
typedef iSUnAlgebraMatrix<vComplex> vAlgebraMatrix;
typedef iSUnAlgebraMatrix<vComplexF> vAlgebraMatrixF;
typedef iSUnAlgebraMatrix<vComplexD> vAlgebraMatrixD;
typedef Lattice<vAlgebraMatrix> LatticeAlgebraMatrix;
typedef Lattice<vAlgebraMatrixF> LatticeAlgebraMatrixF;
typedef Lattice<vAlgebraMatrixD> LatticeAlgebraMatrixD;
typedef iSU2Matrix<Complex> SU2Matrix;
typedef iSU2Matrix<ComplexF> SU2MatrixF;
typedef iSU2Matrix<ComplexD> SU2MatrixD;
typedef iSU2Matrix<vComplex> vSU2Matrix;
typedef iSU2Matrix<vComplexF> vSU2MatrixF;
typedef iSU2Matrix<vComplexD> vSU2MatrixD;
typedef Lattice<vSU2Matrix> LatticeSU2Matrix;
typedef Lattice<vSU2MatrixF> LatticeSU2MatrixF;
typedef Lattice<vSU2MatrixD> LatticeSU2MatrixD;
// Private implementation details are specified in the following files:
// Grid/qcd/utils/SUn.impl.h
// Grid/qcd/utils/Sp2n.impl.h
// The public part of the interface follows below and refers to these
// private member functions.
#include <Grid/qcd/utils/SUn.impl.h>
#include <Grid/qcd/utils/Sp2n.impl.h>
public:
template <class cplx>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta) {
return generator(lieIndex, ta, group_name());
}
static accelerator_inline void su2SubGroupIndex(int &i1, int &i2, int su2_index) {
return su2SubGroupIndex(i1, i2, su2_index, group_name());
}
static void testGenerators(void) { testGenerators(group_name()); }
static void printGenerators(void) {
for (int gen = 0; gen < AlgebraDimension; gen++) {
Matrix ta;
generator(gen, ta);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << ta << std::endl;
}
}
template <typename LatticeMatrixType>
static void LieRandomize(GridParallelRNG &pRNG, LatticeMatrixType &out,
double scale = 1.0) {
GridBase *grid = out.Grid();
typedef typename LatticeMatrixType::vector_type vector_type;
typedef iSinglet<vector_type> vTComplexType;
typedef Lattice<vTComplexType> LatticeComplexType;
typedef typename GridTypeMapper<
typename LatticeMatrixType::vector_object>::scalar_object MatrixType;
LatticeComplexType ca(grid);
LatticeMatrixType lie(grid);
LatticeMatrixType la(grid);
ComplexD ci(0.0, scale);
MatrixType ta;
lie = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
random(pRNG, ca);
ca = (ca + conjugate(ca)) * 0.5;
ca = ca - 0.5;
generator(a, ta);
la = ci * ca * ta;
lie = lie + la; // e^{i la ta}
}
taExp(lie, out);
}
static void GaussianFundamentalLieAlgebraMatrix(GridParallelRNG &pRNG,
LatticeMatrix &out,
Real scale = 1.0) {
GridBase *grid = out.Grid();
LatticeReal ca(grid);
LatticeMatrix la(grid);
Complex ci(0.0, scale);
Matrix ta;
out = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
gaussian(pRNG, ca);
generator(a, ta);
la = toComplex(ca) * ta;
out += la;
}
out *= ci;
}
static void FundamentalLieAlgebraMatrix(const LatticeAlgebraVector &h,
LatticeMatrix &out,
Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeMatrix la(grid);
Matrix ta;
out = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
la = peekColour(h, a) * timesI(ta) * scale;
out += la;
}
}
// Projects out the algebra components of a lattice matrix
// (of dimension ncol*ncol - 1); inverse operation of FundamentalLieAlgebraMatrix.
static void projectOnAlgebra(LatticeAlgebraVector &h_out,
const LatticeMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
Matrix Ta;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, Ta);
pokeColour(h_out, -2.0 * (trace(timesI(Ta) * in)) * scale, a);
}
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r) {
return ProjectOnGeneralGroup(r, group_name());
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r) {
return ProjectOnGeneralGroup(r, group_name());
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg) {
return ProjectOnGeneralGroup(arg, group_name());
}
template <int N,class vComplex_t> // Projects on the general groups U(N), Sp(2N)xZ2 i.e. determinant is allowed a complex phase.
static void ProjectOnGeneralGroup(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >, Nd> > &U) {
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
Umu = ProjectOnGeneralGroup(Umu);
}
}
template <int N,class vComplex_t>
static Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu) {
return ProjectOnGeneralGroup(Umu, group_name());
}
template <int N,class vComplex_t> // Projects on SU(N), Sp(2N), with unit determinant, by first projecting on general group and then enforcing unit determinant
static void ProjectOnSpecialGroup(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu) {
Umu = ProjectOnGeneralGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for (int i = 0; i < N; i++) {
auto element = PeekIndex<ColourIndex>(Umu, N - 1, i);
element = element * det;
PokeIndex<ColourIndex>(Umu, element, N - 1, i);  // N - 1, not Nc - 1: match the peek above
}
}
template <int N,class vComplex_t> // reunitarise, resymplectify... previously ProjectSUn
static void ProjectOnSpecialGroup(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >, Nd> > &U) {
// Reunitarise
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
ProjectOnSpecialGroup(Umu);
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
LatticeMatrixType tmp(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
// LieRandomize(pRNG, Umu, 1.0);
// PokeIndex<LorentzIndex>(out, Umu, mu);
gaussian(pRNG,Umu);
tmp = Ta(Umu);
taExp(tmp,Umu);
ProjectOnSpecialGroup(Umu);
// ProjectSUn(Umu);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void TepidConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
LieRandomize(pRNG, Umu, 0.01);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void ColdConfiguration(GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
Umu = 1.0;
for (int mu = 0; mu < Nd; mu++) {
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
ColdConfiguration(out);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out) {
taProj(in, out, group_name());
}
template <typename LatticeMatrixType>
static void taExp(const LatticeMatrixType &x, LatticeMatrixType &ex) {
typedef typename LatticeMatrixType::scalar_type ComplexType;
LatticeMatrixType xn(x.Grid());
RealD nfac = 1.0;
xn = x;
ex = xn + ComplexType(1.0); // 1+x
// Do a 12th order exponentiation
for (int i = 2; i <= 12; ++i) {
nfac = nfac / RealD(i); // 1/2, 1/2.3 ...
xn = xn * x; // x2, x3,x4....
ex = ex + xn * nfac; // x2/2!, x3/3!....
}
}
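// Accuracy note: the 12th-order truncation above should suffice for the
// small anti-hermitian arguments used in this file. Usage sketch (grid and
// pRNG assumed to exist):
//   LatticeMatrix g(grid), expg(grid);
//   gaussian(pRNG, g);
//   g = Ta(g);        // traceless anti-hermitian input
//   taExp(g, expg);   // expg ~ exp(g)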
// Grid's generators Ta are Hermitian; the anti-hermitian basis is i Ta.
static void LieAlgebraProject(LatticeAlgebraMatrix &out,const LatticeMatrix &in, int b)
{
conformable(in, out);
GridBase *grid = out.Grid();
LatticeComplex tmp(grid);
Matrix ta;
// Using Luchang's projection convention
// 2 Tr{Ta Tb} A_b= 2/2 delta ab A_b = A_a
autoView(out_v,out,AcceleratorWrite);
autoView(in_v,in,AcceleratorRead);
int N = ncolour;
int NNm1 = N * (N - 1);
int hNNm1= NNm1/2;
RealD sqrt_2 = sqrt(2.0);
Complex ci(0.0,1.0);
for(int su2Index=0;su2Index<hNNm1;su2Index++){
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
int ax = su2Index*2;
int ay = su2Index*2+1;
accelerator_for(ss,grid->oSites(),1,{
// in is traceless ANTI-hermitian whereas Grid generators are Hermitian.
// trace( Ta x i in ): multiplying by -i moves the wanted component onto
// the real/imaginary parts extracted below.
out_v[ss]()()(ax,b) = 0.5*(real(in_v[ss]()()(i2,i1)) - real(in_v[ss]()()(i1,i2)));
out_v[ss]()()(ay,b) = 0.5*(imag(in_v[ss]()()(i1,i2)) + imag(in_v[ss]()()(i2,i1)));
});
}
for(int diagIndex=0;diagIndex<N-1;diagIndex++){
int k = diagIndex + 1; // diagIndex starts from 0
int a = NNm1+diagIndex;
RealD scale = 1.0/sqrt(2.0*k*(k+1));
accelerator_for(ss,grid->oSites(),vComplex::Nsimd(),{
auto tmp = in_v[ss]()()(0,0);
for(int i=1;i<k;i++){
tmp=tmp+in_v[ss]()()(i,i);
}
tmp = tmp - in_v[ss]()()(k,k)*k;
out_v[ss]()()(a,b) =imag(tmp) * scale;
});
}
}
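// Worked check of the diagonal block above for SU(3), diagIndex = 1 (k = 2):
// the generator is diag(1,1,-2)/sqrt(12), so with scale = 1/sqrt(2*2*3)
//   out(a,b) = imag( in_00 + in_11 - 2*in_22 ) / sqrt(12),
// i.e. the lambda_8-like component of the anti-hermitian input.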
};
template <int ncolour>
using SU = GaugeGroup<ncolour, GroupName::SU>;
template <int ncolour>
using Sp = GaugeGroup<ncolour, GroupName::Sp>;
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
typedef SU<5> SU5;
typedef SU<Nc> FundamentalMatrices;
typedef Sp<2> Sp2;
typedef Sp<4> Sp4;
typedef Sp<6> Sp6;
typedef Sp<8> Sp8;
template <int N,class vComplex_t>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu)
{
GaugeGroup<N,GroupName::SU>::ProjectOnSpecialGroup(Umu);
}
template <int N,class vComplex_t>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >,Nd> > &U)
{
GaugeGroup<N,GroupName::SU>::ProjectOnSpecialGroup(U);
}
template <int N,class vComplex_t>
static void ProjectSpn(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu)
{
GaugeGroup<N,GroupName::Sp>::ProjectOnSpecialGroup(Umu);
}
template <int N,class vComplex_t>
static void ProjectSpn(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >,Nd> > &U)
{
GaugeGroup<N,GroupName::Sp>::ProjectOnSpecialGroup(U);
}
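// Usage sketch for the thin wrappers above (U an existing gauge field):
//   ProjectSUn(U);   // SU(N): reunitarise every link and force det = 1
//   ProjectSpn(U);   // Sp(2N): the symplectic analogue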
// Explicit specialisation for SU(3).
static void ProjectSU3(Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
GridBase *grid = Umu.Grid();
const int x = 0;
const int y = 1;
const int z = 2;
// Reunitarise
Umu = ProjectOnGroup(Umu);
autoView(Umu_v, Umu, CpuWrite);
thread_for(ss, grid->oSites(), {
auto cm = Umu_v[ss];
cm()()(2, x) = adj(cm()()(0, y) * cm()()(1, z) -
cm()()(0, z) * cm()()(1, y)); // x= yz-zy
cm()()(2, y) = adj(cm()()(0, z) * cm()()(1, x) -
cm()()(0, x) * cm()()(1, z)); // y= zx-xz
cm()()(2, z) = adj(cm()()(0, x) * cm()()(1, y) -
cm()()(0, y) * cm()()(1, x)); // z= xy-yx
Umu_v[ss] = cm;
});
}
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >, Nd> > &U)
{
GridBase *grid = U.Grid();
// Reunitarise
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
Umu = ProjectOnGroup(Umu);
ProjectSU3(Umu);
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
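// Why the cross-product trick above works: for U in SU(3) the rows are
// orthonormal and det(U) = 1, so the third row must equal the complex
// conjugate of the cross product of the first two; rebuilding it that way
// enforces unitarity of row 2 and unit determinant simultaneously.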
NAMESPACE_END(Grid);
#endif


@ -0,0 +1,371 @@
////////////////////////////////////////////////////////////////////////
//
// * Two index representation generators
//
// * Normalisation for the fundamental generators:
// trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
//
// base for NxN two index (anti-)symmetric matrices,
// normalised to 1 (d_ij is the Kronecker delta)
//
// e^(ij)_{kl} = 1/sqrt(2) (d_ik d_jl +/- d_jk d_il)
//
// Then the generators are written as
//
// (iT_a)^(ij)(lk) = i * ( tr[e^(ij)^dag e^(lk) T^transp_a] +
// tr[e^(lk) e^(ij)^dag T_a] )
//
//
////////////////////////////////////////////////////////////////////////
// Authors: David Preti, Guido Cossu
#ifndef QCD_UTIL_GAUGEGROUPTWOINDEX_H
#define QCD_UTIL_GAUGEGROUPTWOINDEX_H
NAMESPACE_BEGIN(Grid);
enum TwoIndexSymmetry { Symmetric = 1, AntiSymmetric = -1 };
constexpr inline Real delta(int a, int b) { return (a == b) ? 1.0 : 0.0; }
namespace detail {
template <class cplx, int nc, TwoIndexSymmetry S>
struct baseOffDiagonalSpHelper;
template <class cplx, int nc>
struct baseOffDiagonalSpHelper<cplx, nc, AntiSymmetric> {
static const int ngroup = nc / 2;
static void baseOffDiagonalSp(int i, int j, iScalar<iScalar<iMatrix<cplx, nc> > > &eij) {
eij = Zero();
RealD tmp;
if ((i == ngroup + j) && (1 <= j) && (j < ngroup)) {
for (int k = 0; k < j+1; k++) {
if (k < j) {
tmp = 1 / sqrt(j * (j + 1));
eij()()(k, k + ngroup) = tmp;
eij()()(k + ngroup, k) = -tmp;
}
if (k == j) {
tmp = -j / sqrt(j * (j + 1));
eij()()(k, k + ngroup) = tmp;
eij()()(k + ngroup, k) = -tmp;
}
}
}
else if (i != ngroup + j) {
for (int k = 0; k < nc; k++)
for (int l = 0; l < nc; l++) {
eij()()(l, k) =
delta(i, k) * delta(j, l) - delta(j, k) * delta(i, l);
}
}
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
};
template <class cplx, int nc>
struct baseOffDiagonalSpHelper<cplx, nc, Symmetric> {
static void baseOffDiagonalSp(int i, int j, iScalar<iScalar<iMatrix<cplx, nc> > > &eij) {
eij = Zero();
for (int k = 0; k < nc; k++)
for (int l = 0; l < nc; l++)
eij()()(l, k) =
delta(i, k) * delta(j, l) + delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
};
} // closing detail namespace
template <int ncolour, TwoIndexSymmetry S, class group_name>
class GaugeGroupTwoIndex : public GaugeGroup<ncolour, group_name> {
public:
// The chosen convention is that we are taking ncolour to be N in SU<N> but 2N
// in Sp(2N). ngroup is equal to N for SU but 2N/2 = N for Sp(2N).
static_assert(std::is_same<group_name, GroupName::SU>::value or
std::is_same<group_name, GroupName::Sp>::value,
"ngroup is only implemented for SU and Sp currently.");
static const int ngroup =
std::is_same<group_name, GroupName::SU>::value ? ncolour : ncolour / 2;
static const int Dimension =
(ncolour * (ncolour + S) / 2) + (std::is_same<group_name, GroupName::Sp>::value ? (S - 1) / 2 : 0);
static const int DimensionAS =
(ncolour * (ncolour - 1) / 2) + (std::is_same<group_name, GroupName::Sp>::value ? (- 1) : 0);
static const int DimensionS =
ncolour * (ncolour + 1) / 2;
static const int NumGenerators =
GaugeGroup<ncolour, group_name>::AlgebraDimension;
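// Worked dimension check: SU(3) symmetric -> 3*4/2 = 6; SU(3) antisymmetric
// -> 3*2/2 = 3; Sp(4) antisymmetric -> 4*3/2 - 1 = 5, the subtracted singlet
// being the invariant antisymmetric form of the symplectic group.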
template <typename vtype>
using iGroupTwoIndexMatrix = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
typedef iGroupTwoIndexMatrix<Complex> TIMatrix;
typedef iGroupTwoIndexMatrix<ComplexF> TIMatrixF;
typedef iGroupTwoIndexMatrix<ComplexD> TIMatrixD;
typedef iGroupTwoIndexMatrix<vComplex> vTIMatrix;
typedef iGroupTwoIndexMatrix<vComplexF> vTIMatrixF;
typedef iGroupTwoIndexMatrix<vComplexD> vTIMatrixD;
typedef Lattice<vTIMatrix> LatticeTwoIndexMatrix;
typedef Lattice<vTIMatrixF> LatticeTwoIndexMatrixF;
typedef Lattice<vTIMatrixD> LatticeTwoIndexMatrixD;
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
LatticeTwoIndexField;
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
LatticeTwoIndexFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
LatticeTwoIndexFieldD;
template <typename vtype>
using iGroupMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef iGroupMatrix<Complex> Matrix;
typedef iGroupMatrix<ComplexF> MatrixF;
typedef iGroupMatrix<ComplexD> MatrixD;
private:
template <class cplx>
static void baseDiagonal(int Index, iGroupMatrix<cplx> &eij) {
eij = Zero();
eij()()(Index - ncolour * (ncolour - 1) / 2,
Index - ncolour * (ncolour - 1) / 2) = 1.0;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iGroupMatrix<cplx> &eij, GroupName::SU) {
eij = Zero();
for (int k = 0; k < ncolour; k++)
for (int l = 0; l < ncolour; l++)
eij()()(l, k) =
delta(i, k) * delta(j, l) + S * delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iGroupMatrix<cplx> &eij, GroupName::Sp) {
detail::baseOffDiagonalSpHelper<cplx, ncolour, S>::baseOffDiagonalSp(i, j, eij);
}
public:
template <class cplx>
static void base(int Index, iGroupMatrix<cplx> &eij) {
// returns (e)^(ij)_{kl} necessary for change of base U_F -> U_R
assert(Index < Dimension);
eij = Zero();
// for the linearisation of the two indices
static int a[ncolour * (ncolour - 1) / 2][2]; // store the a <-> i,j
static bool filled = false;
if (!filled) {
int counter = 0;
for (int i = 1; i < ncolour; i++) {
for (int j = 0; j < i; j++) {
if (std::is_same<group_name, GroupName::Sp>::value)
{
if (j==0 && i==ngroup+j && S==-1) {
//std::cout << "skipping" << std::endl; // for Sp2n this vanishes identically.
j = j+1;
}
}
a[counter][0] = i;
a[counter][1] = j;
counter++;
}
}
filled = true;
}
if (Index < ncolour*ncolour - DimensionS)
{
baseOffDiagonal(a[Index][0], a[Index][1], eij, group_name());
} else {
baseDiagonal(Index, eij);
}
}
static void printBase(void) {
for (int gen = 0; gen < Dimension; gen++) {
Matrix tmp;
base(gen, tmp);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << tmp << std::endl;
}
}
template <class cplx>
static void generator(int Index, iGroupTwoIndexMatrix<cplx> &i2indTa) {
Vector<iGroupMatrix<cplx> > ta(NumGenerators);
Vector<iGroupMatrix<cplx> > eij(Dimension);
iGroupMatrix<cplx> tmp;
for (int a = 0; a < NumGenerators; a++)
GaugeGroup<ncolour, group_name>::generator(a, ta[a]);
for (int a = 0; a < Dimension; a++) base(a, eij[a]);
for (int a = 0; a < Dimension; a++) {
tmp = transpose(eij[a]*ta[Index]) + transpose(eij[a]) * ta[Index];
for (int b = 0; b < Dimension; b++) {
Complex iTr = TensorRemove(timesI(trace(tmp * eij[b])));
i2indTa()()(a, b) = iTr;
}
}
}
static void printGenerators(void) {
for (int gen = 0; gen < NumGenerators; gen++) {
TIMatrix i2indTa;
generator(gen, i2indTa);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << i2indTa << std::endl;
}
}
static void testGenerators(void) {
TIMatrix i2indTa, i2indTb;
std::cout << GridLogMessage << "2IndexRep - Checking if traceless"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(trace(i2indTa)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "2IndexRep - Checking if antihermitean"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(adj(i2indTa) + i2indTa) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage
<< "2IndexRep - Checking Tr[Ta*Tb]=delta(a,b)*(N +- 2)/2"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
for (int b = 0; b < NumGenerators; b++) {
generator(a, i2indTa);
generator(b, i2indTb);
// generator returns iTa, so we need a minus sign here
Complex Tr = -TensorRemove(trace(i2indTa * i2indTb));
std::cout << GridLogMessage << "a=" << a << "b=" << b << "Tr=" << Tr
<< std::endl;
if (a == b) {
assert(std::abs(real(Tr) - ((ncolour + S * 2) * 0.5)) < 1e-8); // abs: a one-sided check would pass vacuously
} else {
assert(std::abs(real(Tr)) < 1e-8);
}
assert(std::abs(imag(Tr)) < 1e-8);
}
}
std::cout << GridLogMessage << std::endl;
}
static void TwoIndexLieAlgebraMatrix(
const typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h,
LatticeTwoIndexMatrix &out, Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeTwoIndexMatrix la(grid);
TIMatrix i2indTa;
out = Zero();
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
la = peekColour(h, a) * i2indTa;
out += la;
}
out *= scale;
}
// Projects out the algebra components (NumGenerators of them)
// of a two-index lattice matrix
static void projectOnAlgebra(
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
TIMatrix i2indTa;
Real coefficient = -2.0 / (ncolour + 2 * S) * scale;
// 2/(Nc +/- 2) for the normalization of the trace in the two index rep
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
pokeColour(h_out, real(trace(i2indTa * in)) * coefficient, a);
}
}
// a projector that keeps the generators stored to avoid the overhead of
// recomputing them
static void projector(
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
// to store the generators
static std::vector<TIMatrix> i2indTa(NumGenerators);
h_out = Zero();
static bool precalculated = false;
if (!precalculated) {
precalculated = true;
for (int a = 0; a < NumGenerators; a++) generator(a, i2indTa[a]);
}
Real coefficient =
-2.0 / (ncolour + 2 * S) * scale; // 2/(Nc +/- 2) for the normalization
// of the trace in the two index rep
for (int a = 0; a < NumGenerators; a++) {
auto tmp = real(trace(i2indTa[a] * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
};
template <int ncolour, TwoIndexSymmetry S>
using SU_TwoIndex = GaugeGroupTwoIndex<ncolour, S, GroupName::SU>;
// Some useful type names
typedef SU_TwoIndex<Nc, Symmetric> TwoIndexSymmMatrices;
typedef SU_TwoIndex<Nc, AntiSymmetric> TwoIndexAntiSymmMatrices;
typedef SU_TwoIndex<2, Symmetric> SU2TwoIndexSymm;
typedef SU_TwoIndex<3, Symmetric> SU3TwoIndexSymm;
typedef SU_TwoIndex<4, Symmetric> SU4TwoIndexSymm;
typedef SU_TwoIndex<5, Symmetric> SU5TwoIndexSymm;
typedef SU_TwoIndex<2, AntiSymmetric> SU2TwoIndexAntiSymm;
typedef SU_TwoIndex<3, AntiSymmetric> SU3TwoIndexAntiSymm;
typedef SU_TwoIndex<4, AntiSymmetric> SU4TwoIndexAntiSymm;
typedef SU_TwoIndex<5, AntiSymmetric> SU5TwoIndexAntiSymm;
template <int ncolour, TwoIndexSymmetry S>
using Sp_TwoIndex = GaugeGroupTwoIndex<ncolour, S, GroupName::Sp>;
typedef Sp_TwoIndex<Nc, Symmetric> SpTwoIndexSymmMatrices;
typedef Sp_TwoIndex<Nc, AntiSymmetric> SpTwoIndexAntiSymmMatrices;
typedef Sp_TwoIndex<2, Symmetric> Sp2TwoIndexSymm;
typedef Sp_TwoIndex<4, Symmetric> Sp4TwoIndexSymm;
typedef Sp_TwoIndex<4, AntiSymmetric> Sp4TwoIndexAntiSymm;
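// Usage sketch (illustrative driver code, not part of this header):
//   SU3TwoIndexAntiSymm::printBase();       // the e^(ij) basis matrices
//   SU3TwoIndexAntiSymm::printGenerators(); // the induced iT_a
//   SU3TwoIndexAntiSymm::testGenerators();  // tracelessness/normalisation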
NAMESPACE_END(Grid);
#endif


@ -1,932 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/utils/SUn.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_UTIL_SUN_H
#define QCD_UTIL_SUN_H
NAMESPACE_BEGIN(Grid);
template<int N, class Vec>
Lattice<iScalar<iScalar<iScalar<Vec> > > > Determinant(const Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iScalar<Vec> > > > ret(grid);
typedef typename Vec::scalar_type scalar;
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<scalar, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
scalar tmp= Us()()(i,j);
ComplexD ztmp(real(tmp),imag(tmp));
EigenU(i,j)=ztmp;
}}
ComplexD detD = EigenU.determinant();
typename Vec::scalar_type det(detD.real(),detD.imag());
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
template<int N, class Vec>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
Umu = ProjectOnGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for(int i=0;i<N;i++){
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
element = element * det;
PokeIndex<ColourIndex>(Umu,element,N-1,i); // N-1, not Nc-1: consistent with the peek above
}
}
template<int N,class Vec>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<Vec, N> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
template <int ncolour>
class SU {
public:
static const int Dimension = ncolour;
static const int AdjointDimension = ncolour * ncolour - 1;
static int su2subgroups(void) { return (ncolour * (ncolour - 1)) / 2; }
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
template <typename vtype>
using iSU2Matrix = iScalar<iScalar<iMatrix<vtype, 2> > >;
template <typename vtype>
using iSUnAlgebraVector =
iScalar<iScalar<iVector<vtype, AdjointDimension> > >;
//////////////////////////////////////////////////////////////////////////////////////////////////
// Types can be accessed as SU<2>::Matrix , SU<2>::vSUnMatrix,
// SU<2>::LatticeMatrix etc...
//////////////////////////////////////////////////////////////////////////////////////////////////
typedef iSUnMatrix<Complex> Matrix;
typedef iSUnMatrix<ComplexF> MatrixF;
typedef iSUnMatrix<ComplexD> MatrixD;
typedef iSUnMatrix<vComplex> vMatrix;
typedef iSUnMatrix<vComplexF> vMatrixF;
typedef iSUnMatrix<vComplexD> vMatrixD;
// For the projectors to the algebra
// these should be real...
// keeping complex for consistency with the SIMD vector types
typedef iSUnAlgebraVector<Complex> AlgebraVector;
typedef iSUnAlgebraVector<ComplexF> AlgebraVectorF;
typedef iSUnAlgebraVector<ComplexD> AlgebraVectorD;
typedef iSUnAlgebraVector<vComplex> vAlgebraVector;
typedef iSUnAlgebraVector<vComplexF> vAlgebraVectorF;
typedef iSUnAlgebraVector<vComplexD> vAlgebraVectorD;
typedef Lattice<vMatrix> LatticeMatrix;
typedef Lattice<vMatrixF> LatticeMatrixF;
typedef Lattice<vMatrixD> LatticeMatrixD;
typedef Lattice<vAlgebraVector> LatticeAlgebraVector;
typedef Lattice<vAlgebraVectorF> LatticeAlgebraVectorF;
typedef Lattice<vAlgebraVectorD> LatticeAlgebraVectorD;
typedef iSU2Matrix<Complex> SU2Matrix;
typedef iSU2Matrix<ComplexF> SU2MatrixF;
typedef iSU2Matrix<ComplexD> SU2MatrixD;
typedef iSU2Matrix<vComplex> vSU2Matrix;
typedef iSU2Matrix<vComplexF> vSU2MatrixF;
typedef iSU2Matrix<vComplexD> vSU2MatrixD;
typedef Lattice<vSU2Matrix> LatticeSU2Matrix;
typedef Lattice<vSU2MatrixF> LatticeSU2MatrixF;
typedef Lattice<vSU2MatrixD> LatticeSU2MatrixD;
////////////////////////////////////////////////////////////////////////
// There are N^2-1 generators for SU(N).
//
// We take a traceless hermitian generator basis as follows
//
// * Normalisation: trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
// * Off diagonal
// - pairs of rows i1,i2 behaving like pauli matrices sigma_x, sigma_y
//
// - there are (Nc-1-i1) slots for i2 on each row [ x 0 x ]
// direct count off each row
//
// - Sum of all pairs is Nc(Nc-1)/2: proof arithmetic series
//
// (Nc-1) + (Nc-2)+... 1 ==> Nc*(Nc-1)/2
// 1+ 2+ + + Nc-1
//
// - There are 2 x Nc (Nc-1)/ 2 of these = Nc^2 - Nc
//
// - We enumerate the row-col pairs.
// - for each row col pair there is a (sigma_x) and a (sigma_y) like
// generator
//
//
// t^a_ij = { in 0.. Nc(Nc-1)/2 -1} => 1/2(delta_{i,i1} delta_{j,i2} +
// delta_{i,i1} delta_{j,i2})
// t^a_ij = { in Nc(Nc-1)/2 ... Nc(Nc-1) - 1} => i/2( delta_{i,i1}
// delta_{j,i2} - i delta_{i,i1} delta_{j,i2})
//
// * Diagonal; must be traceless and normalised
// - Sequence is
// N (1,-1,0,0...)
// N (1, 1,-2,0...)
// N (1, 1, 1,-3,0...)
// N (1, 1, 1, 1,-4,0...)
//
// where the normalisation N is fixed by trace ta ta = 1/2, i.e.
// N^2 (m + m^2) = 1/2 for the m-th diagonal generator
// NB this gives the famous SU(3) result for the eighth generator, lambda_8
//
// N= sqrt(1/2 . 1/6 ) = 1/2 . 1/sqrt(3)
//
// ( 1 )
// ( 1 ) / sqrt(3) /2 = 1/2 lambda_8
// ( -2)
//
////////////////////////////////////////////////////////////////////////
template <class cplx>
static void generator(int lieIndex, iSUnMatrix<cplx> &ta) {
// map lie index to which type of generator
int diagIndex;
int su2Index;
int sigxy;
int NNm1 = ncolour * (ncolour - 1);
if (lieIndex >= NNm1) {
diagIndex = lieIndex - NNm1;
generatorDiagonal(diagIndex, ta);
return;
}
sigxy = lieIndex & 0x1; // even or odd
su2Index = lieIndex >> 1;
if (sigxy)
generatorSigmaY(su2Index, ta);
else
generatorSigmaX(su2Index, ta);
}
template <class cplx>
static void generatorSigmaY(int su2Index, iSUnMatrix<cplx> &ta) {
ta = Zero();
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = 1.0;
ta()()(i2, i1) = 1.0;
ta = ta * 0.5;
}
template <class cplx>
static void generatorSigmaX(int su2Index, iSUnMatrix<cplx> &ta) {
ta = Zero();
cplx i(0.0, 1.0);
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta = ta * 0.5;
}
template <class cplx>
static void generatorDiagonal(int diagIndex, iSUnMatrix<cplx> &ta) {
// diag ({1, 1, ..., 1}(k-times), -k, 0, 0, ...)
ta = Zero();
int k = diagIndex + 1; // diagIndex starts from 0
for (int i = 0; i <= diagIndex; i++) { // k iterations
ta()()(i, i) = 1.0;
}
ta()()(k, k) = -k; // indexing starts from 0
RealD nrm = 1.0 / std::sqrt(2.0 * k * (k + 1));
ta = ta * nrm;
}
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
static void su2SubGroupIndex(int &i1, int &i2, int su2_index) {
assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {
spare = spare - (ncolour - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Pull out a subgroup and project on to real coeffs x pauli basis
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx>
static void su2Extract(Lattice<iSinglet<vcplx> > &Determinant,
Lattice<iSU2Matrix<vcplx> > &subgroup,
const Lattice<iSUnMatrix<vcplx> > &source,
int su2_index) {
GridBase *grid(source.Grid());
conformable(subgroup, source);
conformable(subgroup, Determinant);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
autoView( subgroup_v , subgroup,AcceleratorWrite);
autoView( source_v , source,AcceleratorRead);
autoView( Determinant_v , Determinant,AcceleratorWrite);
accelerator_for(ss, grid->oSites(), 1, {
subgroup_v[ss]()()(0, 0) = source_v[ss]()()(i0, i0);
subgroup_v[ss]()()(0, 1) = source_v[ss]()()(i0, i1);
subgroup_v[ss]()()(1, 0) = source_v[ss]()()(i1, i0);
subgroup_v[ss]()()(1, 1) = source_v[ss]()()(i1, i1);
iSU2Matrix<vcplx> Sigma = subgroup_v[ss];
Sigma = Sigma - adj(Sigma) + trace(adj(Sigma));
subgroup_v[ss] = Sigma;
// this should be purely real
Determinant_v[ss] =
Sigma()()(0, 0) * Sigma()()(1, 1) - Sigma()()(0, 1) * Sigma()()(1, 0);
});
}
//////////////////////////////////////////////////////////////////////////////////////////
// Set matrix to one and insert a pauli subgroup
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx>
static void su2Insert(const Lattice<iSU2Matrix<vcplx> > &subgroup,
Lattice<iSUnMatrix<vcplx> > &dest, int su2_index) {
GridBase *grid(dest.Grid());
conformable(subgroup, dest);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
dest = 1.0; // start out with identity
autoView( dest_v , dest, AcceleratorWrite);
autoView( subgroup_v, subgroup, AcceleratorRead);
accelerator_for(ss, grid->oSites(),1,
{
dest_v[ss]()()(i0, i0) = subgroup_v[ss]()()(0, 0);
dest_v[ss]()()(i0, i1) = subgroup_v[ss]()()(0, 1);
dest_v[ss]()()(i1, i0) = subgroup_v[ss]()()(1, 0);
dest_v[ss]()()(i1, i1) = subgroup_v[ss]()()(1, 1);
});
}
///////////////////////////////////////////////
// Generate e^{ Re Tr Staple Link} dlink
//
// *** Note Staple should be the appropriate linear combination over all
// staples.
// *** If already multiplied by beta, pass coefficient 1.0.
// *** This routine applies the additional 1/Nc factor that comes after trace
// in action.
//
///////////////////////////////////////////////
static void SubGroupHeatBath(GridSerialRNG &sRNG, GridParallelRNG &pRNG,
RealD beta, // coeff multiplying staple in action (with no 1/Nc)
LatticeMatrix &link,
const LatticeMatrix &barestaple, // multiplied by action coeffs so th
int su2_subgroup, int nheatbath, LatticeInteger &wheremask)
{
GridBase *grid = link.Grid();
const RealD twopi = 2.0 * M_PI;
LatticeMatrix staple(grid);
staple = barestaple * (beta / ncolour);
LatticeMatrix V(grid);
V = link * staple;
// Subgroup manipulation in the lie algebra space
LatticeSU2Matrix u(grid); // Kennedy pendleton "u" real projected normalised Sigma
LatticeSU2Matrix uinv(grid);
LatticeSU2Matrix ua(grid); // a in pauli form
LatticeSU2Matrix b(grid); // rotated matrix after hb
// Some handy constant fields
LatticeComplex ones(grid);
ones = 1.0;
LatticeComplex zeros(grid);
zeros = Zero();
LatticeReal rones(grid);
rones = 1.0;
LatticeReal rzeros(grid);
rzeros = Zero();
LatticeComplex udet(grid); // determinant of real(staple)
LatticeInteger mask_true(grid);
mask_true = 1;
LatticeInteger mask_false(grid);
mask_false = 0;
/*
PLB 156 P393 (1985) (Kennedy and Pendleton)
Note: absorb "beta" into the def of sigma compared to KP paper; staple
passed to this routine has "beta" already multiplied in
Action linear in links h and of form:
beta S = beta Sum_p (1 - 1/Nc Re Tr Plaq )
Writing Sigma = 1/Nc (beta Sigma') where sum over staples is "Sigma' "
beta S = const - beta/Nc Re Tr h Sigma'
= const - Re Tr h Sigma
Decompose h and Sigma into (1, sigma_j) ; h_i real, h^2=1, Sigma_i complex
arbitrary.
Tr h Sigma = h_i Sigma_j Tr (sigma_i sigma_j) = h_i Sigma_j 2 delta_ij
Re Tr h Sigma = 2 h_j Re Sigma_j
Normalised re Sigma_j = xi u_j
With u_j a unit vector and U can be in SU(2);
Re Tr h Sigma = 2 h_j Re Sigma_j = 2 xi (h.u)
4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
xi = sqrt(Det)/2;
Write a= u h in SU(2); a has pauli decomp a_j;
Note: Product b' xi is invariant because scaling Sigma leaves
normalised vector "u" fixed; Can rescale Sigma so b' = 1.
*/
////////////////////////////////////////////////////////
// Real part of Pauli decomposition
// Note a subgroup can project to zero in cold start
////////////////////////////////////////////////////////
su2Extract(udet, u, V, su2_subgroup);
//////////////////////////////////////////////////////
// Normalising this vector if possible; else identity
//////////////////////////////////////////////////////
LatticeComplex xi(grid);
LatticeSU2Matrix lident(grid);
SU2Matrix ident = Complex(1.0);
SU2Matrix pauli1;
SU<2>::generator(0, pauli1);
SU2Matrix pauli2;
SU<2>::generator(1, pauli2);
SU2Matrix pauli3;
SU<2>::generator(2, pauli3);
pauli1 = timesI(pauli1) * 2.0;
pauli2 = timesI(pauli2) * 2.0;
pauli3 = timesI(pauli3) * 2.0;
LatticeComplex cone(grid);
LatticeReal adet(grid);
adet = abs(toReal(udet));
lident = Complex(1.0);
cone = Complex(1.0);
Real machine_epsilon = 1.0e-7;
u = where(adet > machine_epsilon, u, lident);
udet = where(adet > machine_epsilon, udet, cone);
xi = 0.5 * sqrt(udet); // 4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 0.5 * u *
pow(xi, -1.0); // u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
// Debug test for sanity
uinv = adj(u);
b = u * uinv - 1.0;
assert(norm2(b) < 1.0e-4);
/*
Measure: Haar measure dh has d^4a delta(1-|a^2|)
In polars:
da = da0 r^2 sin theta dr dtheta dphi delta( 1 - r^2 -a0^2)
= da0 r^2 sin theta dr dtheta dphi delta( (sqrt(1-a0^2) - r)(sqrt(1-a0^2) + r) )
= da0 r/2 sin theta dr dtheta dphi delta( sqrt(1-a0^2) - r )
Action factor Q(h) dh = e^-S[h] dh = e^{ xi Tr uh} dh // beta enters
through xi
= e^{2 xi (h.u)} dh
= e^{2 xi h0u0}.e^{2 xi h1u1}.e^{2 xi
h2u2}.e^{2 xi h3u3} dh
Therefore for each site, take xi for that site
i) generate |a0|<1 with dist
(1-a0^2)^0.5 e^{2 xi a0 } da0
Take alpha = 2 xi [ recall the 2 beta/Nc factor on the unmodified staple
norm; hence the 2.0/Nc factor in Chroma ]
A. Generate four uniformly distributed pseudo-random numbers R, R', R'',
R''' in the unit interval;
B. Set X = -(ln R)/alpha, X' = -(ln R')/alpha;
C. Set C = cos^2(2pi R''), with R'' another uniform random number in [0,1];
D. Set A = XC;
E. Let d = X' + A;
F. If R'''^2 > 1 - 0.5 d, go back to A;
G. Set a0 = 1 - d;
Note that in step D setting B ~ X - A and using B in place of A in step E
will generate a second independent a0 value.
*/
/////////////////////////////////////////////////////////
// count the number of sites by picking "1"'s out of hat
/////////////////////////////////////////////////////////
Integer hit = 0;
LatticeReal rtmp(grid);
rtmp = where(wheremask, rones, rzeros);
RealD numSites = sum(rtmp);
RealD numAccepted;
LatticeInteger Accepted(grid);
Accepted = Zero();
LatticeInteger newlyAccepted(grid);
std::vector<LatticeReal> xr(4, grid);
std::vector<LatticeReal> a(4, grid);
LatticeReal d(grid);
d = Zero();
LatticeReal alpha(grid);
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
xi = 2.0 *xi;
alpha = toReal(xi);
do {
// A. Generate four uniformly distributed pseudo-random numbers R, R',
// R'', R''' in the unit interval;
random(pRNG, xr[0]);
random(pRNG, xr[1]);
random(pRNG, xr[2]);
random(pRNG, xr[3]);
// B. Set X = - ln R/alpha, X' = -ln R'/alpha
xr[1] = -log(xr[1]) / alpha;
xr[2] = -log(xr[2]) / alpha;
// C. Set C = cos^2(2piR'')
xr[3] = cos(xr[3] * twopi);
xr[3] = xr[3] * xr[3];
LatticeReal xrsq(grid);
// D. Set A = XC;
// E. Let d = X'+A;
xrsq = xr[2] + xr[1] * xr[3];
d = where(Accepted, d, xr[2] + xr[1] * xr[3]);
// F. If R'''^2 > 1 - 0.5 d, go back to A;
LatticeReal thresh(grid);
thresh = 1.0 - d * 0.5;
xrsq = xr[0] * xr[0];
LatticeInteger ione(grid);
ione = 1;
LatticeInteger izero(grid);
izero = Zero();
newlyAccepted = where(xrsq < thresh, ione, izero);
Accepted = where(newlyAccepted, newlyAccepted, Accepted);
Accepted = where(wheremask, Accepted, izero);
// FIXME need an iSum for integer to avoid overload on return type??
rtmp = where(Accepted, rones, rzeros);
numAccepted = sum(rtmp);
hit++;
} while ((numAccepted < numSites) && (hit < nheatbath));
// G. Set a0 = 1 - d;
a[0] = Zero();
a[0] = where(wheremask, 1.0 - d, a[0]);
//////////////////////////////////////////
// ii) generate a_i uniform on two sphere radius (1-a0^2)^0.5
//////////////////////////////////////////
LatticeReal a123mag(grid);
a123mag = sqrt(abs(1.0 - a[0] * a[0]));
LatticeReal cos_theta(grid);
LatticeReal sin_theta(grid);
LatticeReal phi(grid);
random(pRNG, phi);
phi = phi * twopi; // uniform in [0,2pi]
random(pRNG, cos_theta);
cos_theta = (cos_theta * 2.0) - 1.0; // uniform in [-1,1]
sin_theta = sqrt(abs(1.0 - cos_theta * cos_theta));
a[1] = a123mag * sin_theta * cos(phi);
a[2] = a123mag * sin_theta * sin(phi);
a[3] = a123mag * cos_theta;
ua = toComplex(a[0]) * ident + toComplex(a[1]) * pauli1 +
toComplex(a[2]) * pauli2 + toComplex(a[3]) * pauli3;
b = 1.0;
b = where(wheremask, uinv * ua, b);
su2Insert(b, V, su2_subgroup);
// mask the assignment back based on acceptance
link = where(Accepted, V * link, link);
//////////////////////////////
// Debug Checks
// SU2 check
LatticeSU2Matrix check(grid); // rotated matrix after hb
u = Zero();
check = ua * adj(ua) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
check = b * adj(b) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
LatticeMatrix Vcheck(grid);
Vcheck = Zero();
Vcheck = where(Accepted, V * adj(V) - 1.0, Vcheck);
// std::cout<<GridLogMessage << "SU3 check " <<norm2(Vcheck)<<std::endl;
assert(norm2(Vcheck) < 1.0e-4);
// Verify the link stays in SU(N)
// std::cout<<GridLogMessage <<"Checking the modified link"<<std::endl;
Vcheck = link * adj(link) - 1.0;
assert(norm2(Vcheck) < 1.0e-4);
/////////////////////////////////
}
static void printGenerators(void) {
for (int gen = 0; gen < AdjointDimension; gen++) {
Matrix ta;
generator(gen, ta);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << ta << std::endl;
}
}
static void testGenerators(void) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
for (int b = 0; b < AdjointDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << " " << std::endl;
assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
// reunitarise??
template <typename LatticeMatrixType>
static void LieRandomize(GridParallelRNG &pRNG, LatticeMatrixType &out, double scale = 1.0)
{
GridBase *grid = out.Grid();
typedef typename LatticeMatrixType::vector_type vector_type;
typedef iSinglet<vector_type> vTComplexType;
typedef Lattice<vTComplexType> LatticeComplexType;
typedef typename GridTypeMapper<typename LatticeMatrixType::vector_object>::scalar_object MatrixType;
LatticeComplexType ca(grid);
LatticeMatrixType lie(grid);
LatticeMatrixType la(grid);
ComplexD ci(0.0, scale);
// ComplexD cone(1.0, 0.0);
MatrixType ta;
lie = Zero();
for (int a = 0; a < AdjointDimension; a++) {
random(pRNG, ca);
ca = (ca + conjugate(ca)) * 0.5;
ca = ca - 0.5;
generator(a, ta);
la = ci * ca * ta;
lie = lie + la; // e^{i la ta}
}
taExp(lie, out);
}
static void GaussianFundamentalLieAlgebraMatrix(GridParallelRNG &pRNG,
LatticeMatrix &out,
Real scale = 1.0) {
GridBase *grid = out.Grid();
LatticeReal ca(grid);
LatticeMatrix la(grid);
Complex ci(0.0, scale);
Matrix ta;
out = Zero();
for (int a = 0; a < AdjointDimension; a++) {
gaussian(pRNG, ca);
generator(a, ta);
la = toComplex(ca) * ta;
out += la;
}
out *= ci;
}
static void FundamentalLieAlgebraMatrix(const LatticeAlgebraVector &h,
LatticeMatrix &out,
Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeMatrix la(grid);
Matrix ta;
out = Zero();
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
la = peekColour(h, a) * timesI(ta) * scale;
out += la;
}
}
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename Gimpl>
static void GaugeTransform(typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
GridBase *grid = Umu.Grid();
conformable(grid,g.Grid());
typename Gimpl::GaugeLinkField U(grid);
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U= PeekIndex<LorentzIndex>(Umu,mu);
U = g*U*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
PokeIndex<LorentzIndex>(Umu,U,mu);
}
}
template<typename Gimpl>
static void GaugeTransform( std::vector<typename Gimpl::GaugeLinkField> &U, typename Gimpl::GaugeLinkField &g){
GridBase *grid = g.Grid();
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U[mu] = g*U[mu]*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
}
}
template<typename Gimpl>
static void RandomGaugeTransform(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
LieRandomize(pRNG,g,1.0);
GaugeTransform<Gimpl>(Umu,g);
}
// Projects out the algebra components of a lattice matrix (of dimension
// ncol*ncol - 1); inverse operation of FundamentalLieAlgebraMatrix
static void projectOnAlgebra(LatticeAlgebraVector &h_out, const LatticeMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
Matrix Ta;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, Ta);
pokeColour(h_out, - 2.0 * (trace(timesI(Ta) * in)) * scale, a);
}
}
template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
LatticeMatrixType tmp(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
// LieRandomize(pRNG, Umu, 1.0);
// PokeIndex<LorentzIndex>(out, Umu, mu);
gaussian(pRNG,Umu);
tmp = Ta(Umu);
taExp(tmp,Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template<typename GaugeField>
static void TepidConfiguration(GridParallelRNG &pRNG,GaugeField &out){
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
for(int mu=0;mu<Nd;mu++){
LieRandomize(pRNG,Umu,0.01);
PokeIndex<LorentzIndex>(out,Umu,mu);
}
}
template<typename GaugeField>
static void ColdConfiguration(GaugeField &out){
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
Umu=1.0;
for(int mu=0;mu<Nd;mu++){
PokeIndex<LorentzIndex>(out,Umu,mu);
}
}
template<typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
ColdConfiguration(out);
}
template<typename LatticeMatrixType>
static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){
out = Ta(in);
}
template <typename LatticeMatrixType>
static void taExp(const LatticeMatrixType &x, LatticeMatrixType &ex) {
typedef typename LatticeMatrixType::scalar_type ComplexType;
LatticeMatrixType xn(x.Grid());
RealD nfac = 1.0;
xn = x;
ex = xn + ComplexType(1.0); // 1+x
// Do a 12th order exponentiation
for (int i = 2; i <= 12; ++i) {
nfac = nfac / RealD(i); // 1/2, 1/2.3 ...
xn = xn * x; // x2, x3,x4....
ex = ex + xn * nfac; // x2/2!, x3/3!....
}
}
};
template<int N>
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > Inverse(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
iScalar<iScalar<iMatrix<ComplexD, N> > > Ui;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
Eigen::MatrixXcd EigenUinv = EigenU.inverse();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
Ui()()(i,j) = EigenUinv(i,j);
}}
pokeLocalSite(Ui,ret_v,lcoor);
});
return ret;
}
// Explicit specialisation for SU(3).
static void
ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
GridBase *grid=Umu.Grid();
const int x=0;
const int y=1;
const int z=2;
// Reunitarise
Umu = ProjectOnGroup(Umu);
autoView(Umu_v,Umu,CpuWrite);
thread_for(ss,grid->oSites(),{
auto cm = Umu_v[ss];
cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
Umu_v[ss]=cm;
});
}
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSU3(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
typedef SU<5> SU5;
typedef SU<Nc> FundamentalMatrices;
NAMESPACE_END(Grid);
#endif

Grid/qcd/utils/SUn.impl.h Normal file

@ -0,0 +1,580 @@
// This file is #included into the body of the class template definition of
// GaugeGroup. So, imagine there to be
//
// template <int ncolour, class group_name>
// class GaugeGroup {
//
// around it.
//
// Please note that the unconventional file extension makes sure that it
// doesn't get found by the scripts/filelist during bootstrapping.
private:
template <ONLY_IF_SU>
static int su2subgroups(GroupName::SU) { return (ncolour * (ncolour - 1)) / 2; }
////////////////////////////////////////////////////////////////////////
// There are N^2-1 generators for SU(N).
//
// We take a traceless hermitian generator basis as follows
//
// * Normalisation: trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
// * Off diagonal
// - pairs of rows i1,i2 behaving like pauli matrices sigma_x, sigma_y
//
// - there are (Nc-1-i1) slots for i2 on each row [ x 0 x ]
// direct count off each row
//
// - Sum of all pairs is Nc(Nc-1)/2: proof arithmetic series
//
// (Nc-1) + (Nc-2)+... 1 ==> Nc*(Nc-1)/2
// 1+ 2+ + + Nc-1
//
// - There are 2 x Nc (Nc-1)/ 2 of these = Nc^2 - Nc
//
// - We enumerate the row-col pairs.
// - for each row col pair there is a (sigma_x) and a (sigma_y) like
// generator
//
//
// t^a_ij = { in 0.. Nc(Nc-1)/2 -1} => 1/2(delta_{i,i1} delta_{j,i2} +
// delta_{i,i1} delta_{j,i2})
// t^a_ij = { in Nc(Nc-1)/2 ... Nc(Nc-1) - 1} => i/2( delta_{i,i1}
// delta_{j,i2} - i delta_{i,i1} delta_{j,i2})
//
// * Diagonal; must be traceless and normalised
// - Sequence is
// N (1,-1,0,0...)
// N (1, 1,-2,0...)
// N (1, 1, 1,-3,0...)
// N (1, 1, 1, 1,-4,0...)
//
// where the normalisation N is fixed by trace ta ta = 1/2, i.e.
// N^2 (m + m^2) = 1/2 for the m-th diagonal generator
// NB this gives the famous SU(3) result for the eighth generator, lambda_8
//
// N= sqrt(1/2 . 1/6 ) = 1/2 . 1/sqrt(3)
//
// ( 1 )
// ( 1 ) / sqrt(3) /2 = 1/2 lambda_8
// ( -2)
//
////////////////////////////////////////////////////////////////////////
template <class cplx, ONLY_IF_SU>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta, GroupName::SU) {
// map lie index to which type of generator
int diagIndex;
int su2Index;
int sigxy;
int NNm1 = ncolour * (ncolour - 1);
if (lieIndex >= NNm1) {
diagIndex = lieIndex - NNm1;
generatorDiagonal(diagIndex, ta);
return;
}
sigxy = lieIndex & 0x1; // even or odd
su2Index = lieIndex >> 1;
if (sigxy)
generatorSigmaY(su2Index, ta);
else
generatorSigmaX(su2Index, ta);
}
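// Index layout example for SU(3): lieIndex 0..5 are the six off-diagonal
// generators; su2Index = lieIndex >> 1 selects the row pair, and the low
// bit dispatches: even -> generatorSigmaX (the +/- i antisymmetric
// pattern), odd -> generatorSigmaY (the real symmetric pattern).
// lieIndex 6,7 are the two diagonal generators.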
template <class cplx, ONLY_IF_SU>
static void generatorSigmaY(int su2Index, iGroupMatrix<cplx> &ta) {
ta = Zero();
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = 1.0;
ta()()(i2, i1) = 1.0;
ta = ta * 0.5;
}
template <class cplx, ONLY_IF_SU>
static void generatorSigmaX(int su2Index, iGroupMatrix<cplx> &ta) {
ta = Zero();
cplx i(0.0, 1.0);
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta = ta * 0.5;
}
template <class cplx, ONLY_IF_SU>
static void generatorDiagonal(int diagIndex, iGroupMatrix<cplx> &ta) {
// diag ({1, 1, ..., 1}(k-times), -k, 0, 0, ...)
ta = Zero();
int k = diagIndex + 1; // diagIndex starts from 0
for (int i = 0; i <= diagIndex; i++) { // k iterations
ta()()(i, i) = 1.0;
}
ta()()(k, k) = -k; // indexing starts from 0
RealD nrm = 1.0 / std::sqrt(2.0 * k * (k + 1));
ta = ta * nrm;
}
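// Worked example for SU(3): diagIndex = 0 gives diag(1,-1,0)/2 (lambda_3/2);
// diagIndex = 1 gives diag(1,1,-2)/(2 sqrt(3)) (lambda_8/2); both satisfy
// trace(ta ta) = 1/2 as required by the normalisation above.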
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
static void su2SubGroupIndex(int &i1, int &i2, int su2_index, GroupName::SU) {
assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {
spare = spare - (ncolour - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
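// Enumeration example for ncolour = 3: su2_index 0 -> (i1,i2) = (0,1),
// 1 -> (0,2), 2 -> (1,2); the ncolour(ncolour-1)/2 upper-triangular pairs
// are walked row by row.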
public:
//////////////////////////////////////////////////////////////////////////////////////////
// Pull out a subgroup and project on to real coeffs x pauli basis
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx, ONLY_IF_SU>
static void su2Extract(Lattice<iSinglet<vcplx> > &Determinant,
Lattice<iSU2Matrix<vcplx> > &subgroup,
const Lattice<iGroupMatrix<vcplx> > &source,
int su2_index) {
GridBase *grid(source.Grid());
conformable(subgroup, source);
conformable(subgroup, Determinant);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
autoView(subgroup_v, subgroup, AcceleratorWrite);
autoView(source_v, source, AcceleratorRead);
autoView(Determinant_v, Determinant, AcceleratorWrite);
accelerator_for(ss, grid->oSites(), 1, {
subgroup_v[ss]()()(0, 0) = source_v[ss]()()(i0, i0);
subgroup_v[ss]()()(0, 1) = source_v[ss]()()(i0, i1);
subgroup_v[ss]()()(1, 0) = source_v[ss]()()(i1, i0);
subgroup_v[ss]()()(1, 1) = source_v[ss]()()(i1, i1);
iSU2Matrix<vcplx> Sigma = subgroup_v[ss];
Sigma = Sigma - adj(Sigma) + trace(adj(Sigma));
subgroup_v[ss] = Sigma;
// this should be purely real
Determinant_v[ss] =
Sigma()()(0, 0) * Sigma()()(1, 1) - Sigma()()(0, 1) * Sigma()()(1, 0);
});
}
//////////////////////////////////////////////////////////////////////////////////////////
// Set matrix to one and insert a pauli subgroup
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx, ONLY_IF_SU>
static void su2Insert(const Lattice<iSU2Matrix<vcplx> > &subgroup,
Lattice<iGroupMatrix<vcplx> > &dest, int su2_index) {
GridBase *grid(dest.Grid());
conformable(subgroup, dest);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
dest = 1.0; // start out with identity
autoView(dest_v, dest, AcceleratorWrite);
autoView(subgroup_v, subgroup, AcceleratorRead);
accelerator_for(ss, grid->oSites(), 1, {
dest_v[ss]()()(i0, i0) = subgroup_v[ss]()()(0, 0);
dest_v[ss]()()(i0, i1) = subgroup_v[ss]()()(0, 1);
dest_v[ss]()()(i1, i0) = subgroup_v[ss]()()(1, 0);
dest_v[ss]()()(i1, i1) = subgroup_v[ss]()()(1, 1);
});
}
///////////////////////////////////////////////
// Generate e^{ Re Tr Staple Link} dlink
//
// *** Note Staple should be the appropriate linear combination over all
// staples.
// *** If already multiplied by beta, pass coefficient 1.0.
// *** This routine applies the additional 1/Nc factor that comes after trace
// in action.
//
///////////////////////////////////////////////
template <ONLY_IF_SU>
static void SubGroupHeatBath(
GridSerialRNG &sRNG, GridParallelRNG &pRNG,
RealD beta, // coeff multiplying staple in action (with no 1/Nc)
LatticeMatrix &link,
const LatticeMatrix &barestaple, // multiplied by action coeffs so th
int su2_subgroup, int nheatbath, LatticeInteger &wheremask) {
GridBase *grid = link.Grid();
const RealD twopi = 2.0 * M_PI;
LatticeMatrix staple(grid);
staple = barestaple * (beta / ncolour);
LatticeMatrix V(grid);
V = link * staple;
// Subgroup manipulation in the lie algebra space
LatticeSU2Matrix u(
grid); // Kennedy pendleton "u" real projected normalised Sigma
LatticeSU2Matrix uinv(grid);
LatticeSU2Matrix ua(grid); // a in pauli form
LatticeSU2Matrix b(grid); // rotated matrix after hb
// Some handy constant fields
LatticeComplex ones(grid);
ones = 1.0;
LatticeComplex zeros(grid);
zeros = Zero();
LatticeReal rones(grid);
rones = 1.0;
LatticeReal rzeros(grid);
rzeros = Zero();
LatticeComplex udet(grid); // determinant of real(staple)
LatticeInteger mask_true(grid);
mask_true = 1;
LatticeInteger mask_false(grid);
mask_false = 0;
/*
PLB 156 P393 (1985) (Kennedy and Pendleton)
Note: absorb "beta" into the def of sigma compared to KP paper; staple
passed to this routine has "beta" already multiplied in
Action linear in links h and of form:
beta S = beta Sum_p (1 - 1/Nc Re Tr Plaq )
Writing Sigma = 1/Nc (beta Sigma') where sum over staples is "Sigma' "
beta S = const - beta/Nc Re Tr h Sigma'
= const - Re Tr h Sigma
Decompose h and Sigma into (1, sigma_j) ; h_i real, h^2=1, Sigma_i complex
arbitrary.
Tr h Sigma = h_i Sigma_j Tr (sigma_i sigma_j) = h_i Sigma_j 2 delta_ij
Re Tr h Sigma = 2 h_j Re Sigma_j
Normalised re Sigma_j = xi u_j
With u_j a unit vector and U can be in SU(2);
Re Tr h Sigma = 2 h_j Re Sigma_j = 2 xi (h.u)
4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
xi = sqrt(Det)/2;
Write a= u h in SU(2); a has pauli decomp a_j;
Note: Product b' xi is invariant because scaling Sigma leaves
normalised vector "u" fixed; Can rescale Sigma so b' = 1.
*/
////////////////////////////////////////////////////////
// Real part of Pauli decomposition
// Note a subgroup can project to zero in cold start
////////////////////////////////////////////////////////
su2Extract(udet, u, V, su2_subgroup);
//////////////////////////////////////////////////////
// Normalising this vector if possible; else identity
//////////////////////////////////////////////////////
LatticeComplex xi(grid);
LatticeSU2Matrix lident(grid);
SU2Matrix ident = Complex(1.0);
SU2Matrix pauli1;
GaugeGroup<2, GroupName::SU>::generator(0, pauli1);
SU2Matrix pauli2;
GaugeGroup<2, GroupName::SU>::generator(1, pauli2);
SU2Matrix pauli3;
GaugeGroup<2, GroupName::SU>::generator(2, pauli3);
pauli1 = timesI(pauli1) * 2.0;
pauli2 = timesI(pauli2) * 2.0;
pauli3 = timesI(pauli3) * 2.0;
LatticeComplex cone(grid);
LatticeReal adet(grid);
adet = abs(toReal(udet));
lident = Complex(1.0);
cone = Complex(1.0);
Real machine_epsilon = 1.0e-7;
u = where(adet > machine_epsilon, u, lident);
udet = where(adet > machine_epsilon, udet, cone);
xi = 0.5 * sqrt(udet); // 4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 0.5 * u * pow(xi, -1.0); // u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
// Debug test for sanity
uinv = adj(u);
b = u * uinv - 1.0;
assert(norm2(b) < 1.0e-4);
/*
Measure: Haar measure dh has d^4a delta(1-|a^2|)
In polars:
da = da0 r^2 sin theta dr dtheta dphi delta( 1 - r^2 -a0^2)
= da0 r^2 sin theta dr dtheta dphi delta( (sqrt(1-a0^2) - r)(sqrt(1-a0^2) + r) )
= da0 r/2 sin theta dr dtheta dphi delta( sqrt(1-a0^2) - r )
Action factor Q(h) dh = e^-S[h] dh = e^{ xi Tr uh} dh  [ beta enters through xi ]
= e^{2 xi (h.u)} dh
= e^{2 xi h0u0}.e^{2 xi h1u1}.e^{2 xi h2u2}.e^{2 xi h3u3} dh
Therefore for each site, take xi for that site
i) generate |a0|<1 with dist
(1-a0^2)^0.5 e^{2 xi a0 } da0
Take alpha = 2 xi [ recall the 2 beta/Nc factor on the unmodified staple
norm; hence the 2.0/Nc factor in Chroma ]
A. Generate four uniformly distributed pseudo-random numbers R, R', R'',
R''' in the unit interval;
B. Set X = -(ln R)/alpha, X' = -(ln R')/alpha;
C. Set C = cos^2(2pi R''), with R'' another uniform random number in [0,1];
D. Set A = XC;
E. Let d = X' + A;
F. If R'''^2 > 1 - 0.5 d, go back to A;
G. Set a0 = 1 - d;
Note that in step D setting B ~ X - A and using B in place of A in step E
will generate a second independent a0 value.
*/
/////////////////////////////////////////////////////////
// count the number of sites by picking "1"'s out of hat
/////////////////////////////////////////////////////////
Integer hit = 0;
LatticeReal rtmp(grid);
rtmp = where(wheremask, rones, rzeros);
RealD numSites = sum(rtmp);
RealD numAccepted;
LatticeInteger Accepted(grid);
Accepted = Zero();
LatticeInteger newlyAccepted(grid);
std::vector<LatticeReal> xr(4, grid);
std::vector<LatticeReal> a(4, grid);
LatticeReal d(grid);
d = Zero();
LatticeReal alpha(grid);
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
xi = 2.0 * xi;
alpha = toReal(xi);
do {
// A. Generate four uniformly distributed pseudo-random numbers R, R',
// R'', R''' in the unit interval;
random(pRNG, xr[0]);
random(pRNG, xr[1]);
random(pRNG, xr[2]);
random(pRNG, xr[3]);
// B. Set X = - ln R/alpha, X' = -ln R'/alpha
xr[1] = -log(xr[1]) / alpha;
xr[2] = -log(xr[2]) / alpha;
// C. Set C = cos^2(2piR'')
xr[3] = cos(xr[3] * twopi);
xr[3] = xr[3] * xr[3];
LatticeReal xrsq(grid);
// D. Set A = XC;
// E. Let d = X'+A;
xrsq = xr[2] + xr[1] * xr[3];
d = where(Accepted, d, xr[2] + xr[1] * xr[3]);
// F. If R'''^2 > 1 - 0.5 d, go back to A;
LatticeReal thresh(grid);
thresh = 1.0 - d * 0.5;
xrsq = xr[0] * xr[0];
LatticeInteger ione(grid);
ione = 1;
LatticeInteger izero(grid);
izero = Zero();
newlyAccepted = where(xrsq < thresh, ione, izero);
Accepted = where(newlyAccepted, newlyAccepted, Accepted);
Accepted = where(wheremask, Accepted, izero);
// FIXME need an iSum for integer to avoid overload on return type??
rtmp = where(Accepted, rones, rzeros);
numAccepted = sum(rtmp);
hit++;
} while ((numAccepted < numSites) && (hit < nheatbath));
// G. Set a0 = 1 - d;
a[0] = Zero();
a[0] = where(wheremask, 1.0 - d, a[0]);
//////////////////////////////////////////
// ii) generate a_i uniform on two sphere radius (1-a0^2)^0.5
//////////////////////////////////////////
LatticeReal a123mag(grid);
a123mag = sqrt(abs(1.0 - a[0] * a[0]));
LatticeReal cos_theta(grid);
LatticeReal sin_theta(grid);
LatticeReal phi(grid);
random(pRNG, phi);
phi = phi * twopi; // uniform in [0,2pi]
random(pRNG, cos_theta);
cos_theta = (cos_theta * 2.0) - 1.0; // uniform in [-1,1]
sin_theta = sqrt(abs(1.0 - cos_theta * cos_theta));
a[1] = a123mag * sin_theta * cos(phi);
a[2] = a123mag * sin_theta * sin(phi);
a[3] = a123mag * cos_theta;
ua = toComplex(a[0]) * ident + toComplex(a[1]) * pauli1 +
toComplex(a[2]) * pauli2 + toComplex(a[3]) * pauli3;
b = 1.0;
b = where(wheremask, uinv * ua, b);
su2Insert(b, V, su2_subgroup);
// mask the assignment back based on acceptance
link = where(Accepted, V * link, link);
//////////////////////////////
// Debug Checks
// SU2 check
LatticeSU2Matrix check(grid); // rotated matrix after hb
u = Zero();
check = ua * adj(ua) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
check = b * adj(b) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
LatticeMatrix Vcheck(grid);
Vcheck = Zero();
Vcheck = where(Accepted, V * adj(V) - 1.0, Vcheck);
// std::cout<<GridLogMessage << "SU3 check " <<norm2(Vcheck)<<std::endl;
assert(norm2(Vcheck) < 1.0e-4);
// Verify the link stays in SU(N)
// std::cout<<GridLogMessage <<"Checking the modified link"<<std::endl;
Vcheck = link * adj(link) - 1.0;
assert(norm2(Vcheck) < 1.0e-4);
/////////////////////////////////
}
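// Usage sketch: one heatbath update of a link, sweeping every SU(2)
// subgroup (sRNG, pRNG, beta, link, a staple already carrying the action
// coefficients, and an all-ones mask are assumed to exist; 20 is an
// illustrative trial count):
//   for (int sg = 0; sg < su2subgroups(GroupName::SU()); sg++)
//     SubGroupHeatBath(sRNG, pRNG, beta, link, staple, sg, 20, mask);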
template <ONLY_IF_SU>
static void testGenerators(GroupName::SU) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
for (int b = 0; b < AdjointDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << " " << std::endl;
assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
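// Standalone check (not part of this file) of the same normalisation for
// SU(2), where the fundamental generators are sigma_a/2; it assumes
// nothing beyond the C++ standard library.
#include <cassert>
#include <complex>
int main() {
  using C = std::complex<double>;
  const C I(0, 1);
  C T[3][2][2] = {{{0, 0.5}, {0.5, 0}},          // sigma_1/2
                  {{0, -0.5 * I}, {0.5 * I, 0}}, // sigma_2/2
                  {{0.5, 0}, {0, -0.5}}};        // sigma_3/2
  for (int a = 0; a < 3; a++)
    for (int b = 0; b < 3; b++) {
      C tr(0);
      for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++) tr += T[a][i][j] * T[b][j][i];
      assert(std::abs(tr - (a == b ? C(0.5) : C(0))) < 1e-12); // tr TaTb = delta_ab/2
    }
  return 0;
}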
template <int N, class vtype>
static Lattice<iScalar<iScalar<iMatrix<vtype, N> > > >
ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vtype, N> > > > &Umu, GroupName::SU) {
return ProjectOnGroup(Umu);
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r, GroupName::SU) {
return ProjectOnGroup(r);
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r, GroupName::SU) {
return ProjectOnGroup(r);
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg, GroupName::SU) {
return ProjectOnGroup(arg);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out, GroupName::SU) {
out = Ta(in);
}
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename Gimpl>
static void GaugeTransform(typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
GridBase *grid = Umu.Grid();
conformable(grid,g.Grid());
typename Gimpl::GaugeLinkField U(grid);
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U= PeekIndex<LorentzIndex>(Umu,mu);
U = g*U*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
PokeIndex<LorentzIndex>(Umu,U,mu);
}
}
template<typename Gimpl>
static void GaugeTransform( std::vector<typename Gimpl::GaugeLinkField> &U, typename Gimpl::GaugeLinkField &g){
GridBase *grid = g.Grid();
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U[mu] = g*U[mu]*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
}
}
template<typename Gimpl>
static void RandomGaugeTransform(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
LieRandomize(pRNG,g,1.0);
GaugeTransform<Gimpl>(Umu,g);
}
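// A hedged sketch (illustration only, not part of this class) of how the
// gauge-transform utilities above are typically exercised: the average
// plaquette must be invariant under RandomGaugeTransform. The function
// name is hypothetical; 'WilsonLoops<Gimpl>::avgPlaquette' is assumed to
// be available as in the public Grid headers.
template<typename Gimpl>
static void checkGaugeInvarianceExample(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu) {
RealD plaq0 = WilsonLoops<Gimpl>::avgPlaquette(Umu);
typename Gimpl::GaugeLinkField g(Umu.Grid());
RandomGaugeTransform<Gimpl>(pRNG, Umu, g);
RealD plaq1 = WilsonLoops<Gimpl>::avgPlaquette(Umu);
assert(fabs(plaq1 - plaq0) < 1.0e-6); // gauge invariant up to rounding
}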


@@ -51,6 +51,10 @@ public:
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> > LatticeAdjFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> > LatticeAdjFieldD;
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef Lattice<iScalar<iScalar<iVector<vComplex, Dimension> > > > LatticeAdjVector;
template <class cplx>
@@ -58,8 +62,8 @@ public:
// returns i(T_Adj)^index necessary for the projectors
// see definitions above
iAdjTa = Zero();
-Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > ta(ncolour * ncolour - 1);
-typename SU<ncolour>::template iSUnMatrix<cplx> tmp;
+Vector<iSUnMatrix<cplx> > ta(ncolour * ncolour - 1);
+iSUnMatrix<cplx> tmp;
// FIXME not very efficient to get all the generators everytime
for (int a = 0; a < Dimension; a++) SU<ncolour>::generator(a, ta[a]);
@@ -67,8 +71,7 @@ public:
for (int a = 0; a < Dimension; a++) {
tmp = ta[a] * ta[Index] - ta[Index] * ta[a];
for (int b = 0; b < (ncolour * ncolour - 1); b++) {
-typename SU<ncolour>::template iSUnMatrix<cplx> tmp1 =
-2.0 * tmp * ta[b]; // 2.0 from the normalization
+iSUnMatrix<cplx> tmp1 = 2.0 * tmp * ta[b]; // 2.0 from the normalization
Complex iTr = TensorRemove(timesI(trace(tmp1)));
//iAdjTa()()(b, a) = iTr;
iAdjTa()()(a, b) = iTr;
@@ -134,8 +137,7 @@ public:
for (int a = 0; a < Dimension; a++) {
generator(a, iTa);
-LatticeComplex tmp = real(trace(iTa * in)) * coefficient;
-pokeColour(h_out, tmp, a);
+pokeColour(h_out, real(trace(iTa * in)) * coefficient, a);
}
}


@@ -1,273 +0,0 @@
////////////////////////////////////////////////////////////////////////
//
// * Two index representation generators
//
// * Normalisation for the fundamental generators:
// trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
//
// base for NxN two index (anti-symmetric) matrices
// normalized to 1 (d_ij is the Kronecker delta)
//
// e^(ij)_{kl} = 1 / sqrt(2) (d_ik d_jl +/- d_jk d_il)
//
// Then the generators are written as
//
// (iT_a)^(ij)(lk) = i * ( tr[e^(ij)^dag e^(lk) T^transp_a] +
// tr[e^(lk) e^(ij)^dag T_a] )
//
//
////////////////////////////////////////////////////////////////////////
// Authors: David Preti, Guido Cossu
#ifndef QCD_UTIL_SUN2INDEX_H
#define QCD_UTIL_SUN2INDEX_H
NAMESPACE_BEGIN(Grid);
enum TwoIndexSymmetry { Symmetric = 1, AntiSymmetric = -1 };
inline Real delta(int a, int b) { return (a == b) ? 1.0 : 0.0; }
template <int ncolour, TwoIndexSymmetry S>
class SU_TwoIndex : public SU<ncolour> {
public:
static const int Dimension = ncolour * (ncolour + S) / 2;
static const int NumGenerators = SU<ncolour>::AdjointDimension;
template <typename vtype>
using iSUnTwoIndexMatrix = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
typedef iSUnTwoIndexMatrix<Complex> TIMatrix;
typedef iSUnTwoIndexMatrix<ComplexF> TIMatrixF;
typedef iSUnTwoIndexMatrix<ComplexD> TIMatrixD;
typedef iSUnTwoIndexMatrix<vComplex> vTIMatrix;
typedef iSUnTwoIndexMatrix<vComplexF> vTIMatrixF;
typedef iSUnTwoIndexMatrix<vComplexD> vTIMatrixD;
typedef Lattice<vTIMatrix> LatticeTwoIndexMatrix;
typedef Lattice<vTIMatrixF> LatticeTwoIndexMatrixF;
typedef Lattice<vTIMatrixD> LatticeTwoIndexMatrixD;
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
LatticeTwoIndexField;
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
LatticeTwoIndexFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
LatticeTwoIndexFieldD;
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef iSUnMatrix<Complex> Matrix;
typedef iSUnMatrix<ComplexF> MatrixF;
typedef iSUnMatrix<ComplexD> MatrixD;
template <class cplx>
static void base(int Index, iSUnMatrix<cplx> &eij) {
// returns (e)^(ij)_{kl} necessary for change of base U_F -> U_R
assert(Index < NumGenerators);
eij = Zero();
// for the linearisation of the 2 indexes
static int a[ncolour * (ncolour - 1) / 2][2]; // store the a <-> i,j
static bool filled = false;
if (!filled) {
int counter = 0;
for (int i = 1; i < ncolour; i++) {
for (int j = 0; j < i; j++) {
a[counter][0] = i;
a[counter][1] = j;
counter++;
}
}
filled = true;
}
if (Index < ncolour * (ncolour - 1) / 2) {
baseOffDiagonal(a[Index][0], a[Index][1], eij);
} else {
baseDiagonal(Index, eij);
}
}
template <class cplx>
static void baseDiagonal(int Index, iSUnMatrix<cplx> &eij) {
eij = Zero();
eij()()(Index - ncolour * (ncolour - 1) / 2,
Index - ncolour * (ncolour - 1) / 2) = 1.0;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iSUnMatrix<cplx> &eij) {
eij = Zero();
for (int k = 0; k < ncolour; k++)
for (int l = 0; l < ncolour; l++)
eij()()(l, k) = delta(i, k) * delta(j, l) +
S * delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
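// Standalone numerical check (not part of this file) that the
// off-diagonal base matrices built above are unit-normalised,
// tr(e^dag e) = 1; antisymmetric case S = -1 with N = 3 for brevity.
#include <cassert>
#include <cmath>
int main() {
  const int N = 3, S = -1;
  auto delta = [](int a, int b) { return a == b ? 1.0 : 0.0; };
  for (int i = 1; i < N; i++)
    for (int j = 0; j < i; j++) {
      double e[3][3], tr = 0;
      for (int k = 0; k < N; k++)
        for (int l = 0; l < N; l++)
          e[l][k] = (delta(i, k) * delta(j, l) + S * delta(j, k) * delta(i, l)) / std::sqrt(2.0);
      for (int k = 0; k < N; k++)
        for (int l = 0; l < N; l++) tr += e[k][l] * e[k][l]; // entries are real
      assert(std::fabs(tr - 1.0) < 1e-12);
    }
  return 0;
}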
static void printBase(void) {
for (int gen = 0; gen < Dimension; gen++) {
Matrix tmp;
base(gen, tmp);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << tmp << std::endl;
}
}
template <class cplx>
static void generator(int Index, iSUnTwoIndexMatrix<cplx> &i2indTa) {
Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > ta(
ncolour * ncolour - 1);
Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > eij(Dimension);
typename SU<ncolour>::template iSUnMatrix<cplx> tmp;
i2indTa = Zero();
for (int a = 0; a < ncolour * ncolour - 1; a++)
SU<ncolour>::generator(a, ta[a]);
for (int a = 0; a < Dimension; a++) base(a, eij[a]);
for (int a = 0; a < Dimension; a++) {
tmp = transpose(ta[Index]) * adj(eij[a]) + adj(eij[a]) * ta[Index];
for (int b = 0; b < Dimension; b++) {
typename SU<ncolour>::template iSUnMatrix<cplx> tmp1 =
tmp * eij[b];
Complex iTr = TensorRemove(timesI(trace(tmp1)));
i2indTa()()(a, b) = iTr;
}
}
}
static void printGenerators(void) {
for (int gen = 0; gen < ncolour * ncolour - 1; gen++) {
TIMatrix i2indTa;
generator(gen, i2indTa);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << i2indTa << std::endl;
}
}
static void testGenerators(void) {
TIMatrix i2indTa, i2indTb;
std::cout << GridLogMessage << "2IndexRep - Checking if traceless"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(trace(i2indTa)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "2IndexRep - Checking if antihermitean"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(adj(i2indTa) + i2indTa) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage
<< "2IndexRep - Checking Tr[Ta*Tb]=delta(a,b)*(N +- 2)/2"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
for (int b = 0; b < ncolour * ncolour - 1; b++) {
generator(a, i2indTa);
generator(b, i2indTb);
// generator returns iTa, so we need a minus sign here
Complex Tr = -TensorRemove(trace(i2indTa * i2indTb));
std::cout << GridLogMessage << "a=" << a << "b=" << b << "Tr=" << Tr
<< std::endl;
}
}
std::cout << GridLogMessage << std::endl;
}
static void TwoIndexLieAlgebraMatrix(
const typename SU<ncolour>::LatticeAlgebraVector &h,
LatticeTwoIndexMatrix &out, Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeTwoIndexMatrix la(grid);
TIMatrix i2indTa;
out = Zero();
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
la = peekColour(h, a) * i2indTa;
out += la;
}
out *= scale;
}
// Projects the algebra components
// of a lattice matrix ( of dimension ncol*ncol -1 )
static void projectOnAlgebra(
typename SU<ncolour>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
TIMatrix i2indTa;
Real coefficient = -2.0 / (ncolour + 2 * S) * scale;
// 2/(Nc +/- 2) for the normalization of the trace in the two index rep
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
auto tmp = real(trace(i2indTa * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
// a projector that keeps the generators stored to avoid the overhead of
// recomputing them
static void projector(typename SU<ncolour>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
// to store the generators
static std::vector<TIMatrix> i2indTa(ncolour * ncolour -1);
h_out = Zero();
static bool precalculated = false;
if (!precalculated) {
precalculated = true;
for (int a = 0; a < ncolour * ncolour - 1; a++) generator(a, i2indTa[a]);
}
Real coefficient =
-2.0 / (ncolour + 2 * S) * scale; // 2/(Nc +/- 2) for the normalization
// of the trace in the two index rep
for (int a = 0; a < ncolour * ncolour - 1; a++) {
auto tmp = real(trace(i2indTa[a] * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
};
// Some useful type names
typedef SU_TwoIndex<Nc, Symmetric> TwoIndexSymmMatrices;
typedef SU_TwoIndex<Nc, AntiSymmetric> TwoIndexAntiSymmMatrices;
typedef SU_TwoIndex<2, Symmetric> SU2TwoIndexSymm;
typedef SU_TwoIndex<3, Symmetric> SU3TwoIndexSymm;
typedef SU_TwoIndex<4, Symmetric> SU4TwoIndexSymm;
typedef SU_TwoIndex<5, Symmetric> SU5TwoIndexSymm;
typedef SU_TwoIndex<2, AntiSymmetric> SU2TwoIndexAntiSymm;
typedef SU_TwoIndex<3, AntiSymmetric> SU3TwoIndexAntiSymm;
typedef SU_TwoIndex<4, AntiSymmetric> SU4TwoIndexAntiSymm;
typedef SU_TwoIndex<5, AntiSymmetric> SU5TwoIndexAntiSymm;
NAMESPACE_END(Grid);
#endif

Grid/qcd/utils/Sp2n.impl.h (new file, 317 lines added)

@@ -0,0 +1,317 @@
// This file is #included into the body of the class template definition of
// GaugeGroup. So, imagine there to be
//
// template <int ncolour, class group_name>
// class GaugeGroup {
//
// around it.
//
// Please note that the unconventional file extension makes sure that it
// doesn't get found by the scripts/filelist during bootstrapping.
private:
template <ONLY_IF_Sp>
static int su2subgroups(GroupName::Sp) { return (ncolour/2 * (ncolour/2 - 1)) / 2; }
// Sp(2N) has N(2N+1) = 2N^2+N generators
//
// normalise the generators such that
// Trace ( Ta Tb) = 1/2 delta_ab
//
// N generators are in the Cartan subalgebra, the other 2N^2 are
// off-diagonal:
// there are 6 types named a,b,c,d and w,z
// abcd are N(N-1)/2 each while wz are N each
template <class cplx, ONLY_IF_Sp>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta, GroupName::Sp) {
// map lie index into type of generators: diagonal, abcd type, wz type
const int nsp = ncolour/2;
int diagIndex;
int aIndex, bIndex, cIndex, dIndex;
int wIndex, zIndex; // a,b,c,d are N(N-1)/2 and w,z are N
const int mod = nsp * (nsp - 1) / 2;
const int offdiag =
2 * nsp * nsp; // number of generators not in the cartan subalgebra
const int wmod = 4 * mod;
const int zmod = wmod + nsp;
if (lieIndex >= offdiag) {
diagIndex = lieIndex - offdiag; // 0, ... ,N-1
// std::cout << GridLogMessage << "diag type " << std::endl;
generatorDiagtype(diagIndex, ta);
return;
}
if ((lieIndex >= wmod) && (lieIndex < zmod)) {
// std::cout << GridLogMessage << "w type " << std::endl;
wIndex = lieIndex - wmod; // 0, ... ,N-1
generatorWtype(wIndex, ta);
return;
}
if ((lieIndex >= zmod) && (lieIndex < offdiag)) {
// std::cout << GridLogMessage << "z type " << std::endl;
// std::cout << GridLogMessage << "lie index " << lieIndex << std::endl;
// std::cout << GridLogMessage << "z mod " << zmod << std::endl;
zIndex = lieIndex - zmod; // 0, ... ,N-1
generatorZtype(zIndex, ta);
return;
}
if (lieIndex < mod) { // atype 0, ... , N(N-1)/2=mod
// std::cout << GridLogMessage << "a type " << std::endl;
aIndex = lieIndex;
// std::cout << GridLogMessage << "a indx " << aIndex << std::endl;
generatorAtype(aIndex, ta);
return;
}
if ((lieIndex >= mod) && lieIndex < 2 * mod) { // btype mod, ... , 2mod-1
// std::cout << GridLogMessage << "b type " << std::endl;
bIndex = lieIndex - mod;
generatorBtype(bIndex, ta);
return;
}
if ((lieIndex >= 2 * mod) &&
lieIndex < 3 * mod) { // ctype 2mod, ... , 3mod-1
// std::cout << GridLogMessage << "c type " << std::endl;
cIndex = lieIndex - 2 * mod;
generatorCtype(cIndex, ta);
return;
}
if ((lieIndex >= 3 * mod) &&
lieIndex < wmod) { // dtype 3mod, ... , 4mod-1 = wmod-1
// std::cout << GridLogMessage << "d type " << std::endl;
dIndex = lieIndex - 3 * mod;
generatorDtype(dIndex, ta);
return;
}
} // end of generator
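// Standalone sanity check (not part of this file) of the index
// bookkeeping above: the a,b,c,d families hold N(N-1)/2 generators each,
// w and z hold N each, and together with the N Cartan generators the
// total is the Sp(2N) algebra dimension N(2N+1).
#include <cassert>
int main() {
  for (int nsp = 1; nsp <= 8; nsp++) {
    const int mod = nsp * (nsp - 1) / 2; // size of each of a,b,c,d
    const int wmod = 4 * mod;            // first w index
    const int zmod = wmod + nsp;         // first z index
    const int offdiag = 2 * nsp * nsp;   // all non-Cartan generators
    assert(zmod + nsp == offdiag);       // z family ends where the Cartan begins
    assert(offdiag + nsp == nsp * (2 * nsp + 1)); // full algebra dimension
  }
  return 0;
}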
template <class cplx, ONLY_IF_Sp>
static void generatorDiagtype(int diagIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i) = - ta(i+N,i+N) = 1/2 for each i index of the cartan subalgebra
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2;
ta()()(diagIndex, diagIndex) = nrm;
ta()()(diagIndex + nsp, diagIndex + nsp) = -nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorAtype(int aIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j) = ta(j,i) = -ta(i+N,j+N) = -ta(j+N,i+N) = 1 / 2 sqrt(2)
// with i<j and i=0,...,N-2
// follows that j=i+1, ... , N-1
int i1, i2;
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, aIndex);
ta()()(i1, i2) = 1;
ta()()(i2, i1) = 1;
ta()()(i1 + nsp, i2 + nsp) = -1;
ta()()(i2 + nsp, i1 + nsp) = -1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorBtype(int bIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j) = -ta(j,i) = ta(i+N,j+N) = -ta(j+N,i+N) = i / (2 sqrt(2))
// with i<j and i=0,...,N-2
// follows that j=i+1, ... , N-1
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
cplx i(0.0, 1.0);
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, bIndex);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta()()(i1 + nsp, i2 + nsp) = i;
ta()()(i2 + nsp, i1 + nsp) = -i;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorCtype(int cIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j+N) = ta(j,i+N) = ta(i+N,j) = ta(j+N,i) = 1 / 2 sqrt(2)
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, cIndex);
ta()()(i1, i2 + nsp) = 1;
ta()()(i2, i1 + nsp) = 1;
ta()()(i1 + nsp, i2) = 1;
ta()()(i2 + nsp, i1) = 1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorDtype(int dIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j+N) = ta(j,i+N) = -ta(i+N,j) = -ta(j+N,i) = i / 2 sqrt(2)
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
cplx i(0.0, 1.0);
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, dIndex);
ta()()(i1, i2 + nsp) = i;
ta()()(i2, i1 + nsp) = i;
ta()()(i1 + nsp, i2) = -i;
ta()()(i2 + nsp, i1) = -i;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorWtype(int wIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i+N) = ta(i+N,i) = 1/2
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2; // check
ta()()(wIndex, wIndex + nsp) = 1;
ta()()(wIndex + nsp, wIndex) = 1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorZtype(int zIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i+N) = - ta(i+N,i) = i/2
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2; // check
cplx i(0.0, 1.0);
ta()()(zIndex, zIndex + nsp) = i;
ta()()(zIndex + nsp, zIndex) = -i;
ta = ta * nrm;
}
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
template <ONLY_IF_Sp>
static void su2SubGroupIndex(int &i1, int &i2, int su2_index, GroupName::Sp) {
const int nsp=ncolour/2;
assert((su2_index >= 0) && (su2_index < (nsp * (nsp - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (nsp - 1 - i1); i1++) {
spare = spare - (nsp - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
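// Standalone illustration (not part of this file) of the
// su2_index -> (i1,i2) mapping above: it enumerates the pairs i1 < i2
// in lexicographic order, e.g. (0,1),(0,2),(1,2) for N = nsp = 3.
#include <cassert>
int main() {
  const int nsp = 3;
  const int expect[3][2] = {{0, 1}, {0, 2}, {1, 2}};
  for (int su2_index = 0; su2_index < 3; su2_index++) {
    int spare = su2_index, i1, i2;
    for (i1 = 0; spare >= (nsp - 1 - i1); i1++)
      spare -= (nsp - 1 - i1); // remove the nsp-1-i1 terms
    i2 = i1 + 1 + spare;
    assert(i1 == expect[su2_index][0] && i2 == expect[su2_index][1]);
  }
  return 0;
}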
static void testGenerators(GroupName::Sp) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab "
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
for (int b = 0; b < AlgebraDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << std::endl;
assert(abs(tr) < 1.0e-6);
}
}
template <int N>
static Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > >
ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu, GroupName::Sp) {
return ProjectOnSpGroup(Umu);
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r, GroupName::Sp) {
return ProjectOnSpGroup(r);
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r, GroupName::Sp) {
return ProjectOnSpGroup(r);
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg, GroupName::Sp) {
return ProjectOnSpGroup(arg);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out, GroupName::Sp) {
out = SpTa(in);
}
public:
template <ONLY_IF_Sp>
static void Omega(LatticeColourMatrixD &in) {
const int nsp=ncolour/2;
LatticeColourMatrixD OmegaLatt(in.Grid());
LatticeColourMatrixD identity(in.Grid());
ColourMatrix Omega;
OmegaLatt = Zero();
Omega = Zero();
identity = 1.;
for (int i = 0; i < nsp; i++) {
Omega()()(i, nsp + i) = 1.;
Omega()()(nsp + i, i) = -1;
}
OmegaLatt = OmegaLatt + (identity * Omega);
in = OmegaLatt;
}
template <ONLY_IF_Sp, class vtype, int N>
static void Omega(iScalar<iScalar<iMatrix<vtype, N> > > &in) {
const int nsp=ncolour/2;
iScalar<iScalar<iMatrix<vtype, N> > > Omega;
Omega = Zero();
for (int i = 0; i < nsp; i++) {
Omega()()(i, nsp + i) = 1.;
Omega()()(nsp + i, i) = -1;
}
in = Omega;
}
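// Standalone check (not part of this file) that the symplectic form
// Omega assembled above is antisymmetric and squares to minus the
// identity; shown for nsp = 2, i.e. Sp(4).
#include <cassert>
int main() {
  const int nsp = 2, N = 2 * nsp;
  double Om[4][4] = {};
  for (int i = 0; i < nsp; i++) {
    Om[i][nsp + i] = 1.0;
    Om[nsp + i][i] = -1.0;
  }
  for (int r = 0; r < N; r++)
    for (int c = 0; c < N; c++) {
      double sq = 0;
      for (int k = 0; k < N; k++) sq += Om[r][k] * Om[k][c];
      assert(sq == (r == c ? -1.0 : 0.0)); // Omega^2 = -identity
      assert(Om[r][c] == -Om[c][r]);       // antisymmetry
    }
  return 0;
}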


@@ -8,9 +8,9 @@
#include <Grid/qcd/utils/ScalarObjs.h>
// Include representations
-#include <Grid/qcd/utils/SUn.h>
+#include <Grid/qcd/utils/GaugeGroup.h>
#include <Grid/qcd/utils/SUnAdjoint.h>
-#include <Grid/qcd/utils/SUnTwoIndex.h>
+#include <Grid/qcd/utils/GaugeGroupTwoIndex.h>
// All-to-all contraction kernels that touch the
// internal lattice structure

View File

@@ -290,7 +290,7 @@ public:
}
*/
//////////////////////////////////////////////////
-// the sum over all staples on each site
+// the sum over all nu-oriented staples for nu != mu on each site
//////////////////////////////////////////////////
static void Staple(GaugeMat &staple, const GaugeLorentz &Umu, int mu) {
@@ -300,6 +304,10 @@ public:
for (int d = 0; d < Nd; d++) {
U[d] = PeekIndex<LorentzIndex>(Umu, d);
}
Staple(staple, U, mu);
}
static void Staple(GaugeMat &staple, const std::vector<GaugeMat> &U, int mu) {
staple = Zero();
for (int nu = 0; nu < Nd; nu++) {
@@ -335,6 +339,203 @@ public:
}
}
/////////////
//Staples for each direction mu, summed over nu != mu
//staple: output staples for each mu (Nd)
//U: link array (Nd)
/////////////
static void StapleAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U) {
assert(staple.size() == Nd); assert(U.size() == Nd);
for(int mu=0;mu<Nd;mu++) Staple(staple[mu], U, mu);
}
//A workspace class allowing reuse of the stencil
class WilsonLoopPaddedStencilWorkspace{
std::unique_ptr<GeneralLocalStencil> stencil;
size_t nshift;
void generateStencil(GridBase* padded_grid){
double t0 = usecond();
//Generate shift arrays
std::vector<Coordinate> shifts = this->getShifts();
nshift = shifts.size();
double t1 = usecond();
//Generate local stencil
stencil.reset(new GeneralLocalStencil(padded_grid,shifts));
double t2 = usecond();
std::cout << GridLogPerformance << " WilsonLoopPaddedWorkspace timings: coord:" << (t1-t0)/1000 << "ms, stencil:" << (t2-t1)/1000 << "ms" << std::endl;
}
public:
//Get the stencil. If not already generated, or if generated using a different Grid than in PaddedCell, it will be created on-the-fly
const GeneralLocalStencil & getStencil(const PaddedCell &pcell){
assert(pcell.depth >= this->paddingDepth());
if(!stencil || stencil->Grid() != (GridBase*)pcell.grids.back() ) generateStencil((GridBase*)pcell.grids.back());
return *stencil;
}
size_t Nshift() const{ return nshift; }
virtual std::vector<Coordinate> getShifts() const = 0;
virtual int paddingDepth() const = 0; //padding depth required
virtual ~WilsonLoopPaddedStencilWorkspace(){}
};
//This workspace allows the sharing of a common PaddedCell object between multiple stencil workspaces
class WilsonLoopPaddedWorkspace{
std::vector<WilsonLoopPaddedStencilWorkspace*> stencil_wk;
std::unique_ptr<PaddedCell> pcell;
void generatePcell(GridBase* unpadded_grid){
assert(stencil_wk.size());
int max_depth = 0;
for(auto const &s : stencil_wk) max_depth=std::max(max_depth, s->paddingDepth());
pcell.reset(new PaddedCell(max_depth, dynamic_cast<GridCartesian*>(unpadded_grid)));
}
public:
//Add a stencil definition. This should be done before the first call to retrieve a stencil object.
//Takes ownership of the pointer
void addStencil(WilsonLoopPaddedStencilWorkspace *stencil){
assert(!pcell);
stencil_wk.push_back(stencil);
}
const GeneralLocalStencil & getStencil(const size_t stencil_idx, GridBase* unpadded_grid){
if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
return stencil_wk[stencil_idx]->getStencil(*pcell);
}
const PaddedCell & getPaddedCell(GridBase* unpadded_grid){
if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
return *pcell;
}
~WilsonLoopPaddedWorkspace(){
for(auto &s : stencil_wk) delete s;
}
};
//A workspace class allowing reuse of the stencil
class StaplePaddedAllWorkspace: public WilsonLoopPaddedStencilWorkspace{
public:
std::vector<Coordinate> getShifts() const override{
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
Coordinate shift_mnu(Nd,0); shift_mnu[nu]=-1;
Coordinate shift_mnu_pmu(Nd,0); shift_mnu_pmu[nu]=-1; shift_mnu_pmu[mu]=1;
//U_nu(x+mu)U^dag_mu(x+nu) U^dag_nu(x)
shifts.push_back(shift_0);
shifts.push_back(shift_nu);
shifts.push_back(shift_mu);
//U_nu^dag(x-nu+mu) U_mu^dag(x-nu) U_nu(x-nu)
shifts.push_back(shift_mnu);
shifts.push_back(shift_mnu);
shifts.push_back(shift_mnu_pmu);
}
}
}
return shifts;
}
int paddingDepth() const override{ return 1; }
};
//Padded cell implementation of the staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
StaplePaddedAllWorkspace wk;
StaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
}
//Padded cell implementation of the staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
//gStencil: the precomputed generalized local stencil for the staple
static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil)
{
double t0 = usecond();
assert(U_padded.size() == Nd); assert(staple.size() == Nd);
assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
assert(Cell.depth >= 1);
GridBase *ggrid = U_padded[0].Grid(); //padded cell grid
int shift_mu_off = gStencil._npoints/Nd;
//Open views to padded gauge links and keep open over mu loop
typedef LatticeView<typename GaugeMat::vector_object> GaugeViewType;
size_t vsize = Nd*sizeof(GaugeViewType);
GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);
GaugeMat gStaple(ggrid);
int outer_off = 0;
for(int mu=0;mu<Nd;mu++){
{ //view scope
autoView( gStaple_v , gStaple, AcceleratorWrite);
auto gStencil_v = gStencil.View();
accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), {
decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
stencil_ss = Zero();
int off = outer_off;
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
GeneralStencilEntry const* e = gStencil_v.GetEntry(off++,ss);
auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
auto U2 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U2 * U1 * U0;
e = gStencil_v.GetEntry(off++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(off++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U2 * U1 * U0;
}
}
coalescedWrite(gStaple_v[ss],stencil_ss);
}
);
} //ensure views are all closed!
staple[mu] = Cell.Extract(gStaple);
outer_off += shift_mu_off;
}//mu loop
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
free(Ug_dirs_v_host);
acceleratorFreeDevice(Ug_dirs_v);
double t1=usecond();
std::cout << GridLogPerformance << "StaplePaddedAll timing:" << (t1-t0)/1000 << "ms" << std::endl;
}
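// A hedged usage sketch (illustration only; the function name is
// hypothetical) showing how the padded-cell staple above is driven,
// mirroring StapleAndRectStapleAll further down: pad the links once,
// then hand them to the stencil kernel. Depth 1 suffices for the 1x1
// staple; 'staple' must hold Nd fields on the unpadded grid.
static void StaplePaddedAllExample(const GaugeLorentz &Umu, std::vector<GaugeMat> &staple) {
GridBase *grid = Umu.Grid();
assert(staple.size() == Nd);
std::vector<GaugeMat> U(Nd, grid);
for (int mu = 0; mu < Nd; mu++) U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
PaddedCell Ghost(1, dynamic_cast<GridCartesian *>(grid));
CshiftImplGauge<Gimpl> cshift_impl;
std::vector<GaugeMat> U_pad(Nd, Ghost.grids.back());
for (int mu = 0; mu < Nd; mu++) U_pad[mu] = Ghost.Exchange(U[mu], cshift_impl);
StaplePaddedAll(staple, U_pad, Ghost); // staple[mu] comes back on the unpadded grid
}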
//////////////////////////////////////////////////
// the sum over all staples on each site in direction mu,nu, upper part
//////////////////////////////////////////////////
@@ -707,18 +908,14 @@ public:
// the sum over all staples on each site
//////////////////////////////////////////////////
static void RectStapleDouble(GaugeMat &U2, const GaugeMat &U, int mu) {
-U2 = U * Cshift(U, mu, 1);
+U2 = U * Gimpl::CshiftLink(U, mu, 1);
}
////////////////////////////////////////////////////////////////////////////
-// Hop by two optimisation strategy does not work nicely with Gparity. (could
-// do,
-// but need to track two deep where cross boundary and apply a conjugation).
-// Must differentiate this in Gimpl, and use Gimpl::isPeriodicGaugeField to do
-// so .
+// Hop by two optimisation strategy. Use RectStapleDouble to obtain 'U2'
////////////////////////////////////////////////////////////////////////////
-static void RectStapleOptimised(GaugeMat &Stap, std::vector<GaugeMat> &U2,
-std::vector<GaugeMat> &U, int mu) {
+static void RectStapleOptimised(GaugeMat &Stap, const std::vector<GaugeMat> &U2,
+const std::vector<GaugeMat> &U, int mu) {
Stap = Zero();
@@ -732,9 +929,9 @@ public:
// Up staple ___ ___
// | |
-tmp = Cshift(adj(U[nu]), nu, -1);
+tmp = Gimpl::CshiftLink(adj(U[nu]), nu, -1);
tmp = adj(U2[mu]) * tmp;
-tmp = Cshift(tmp, mu, -2);
+tmp = Gimpl::CshiftLink(tmp, mu, -2);
Staple2x1 = Gimpl::CovShiftForward(U[nu], nu, tmp);
@@ -742,14 +939,14 @@ public:
// |___ ___|
//
tmp = adj(U2[mu]) * U[nu];
-Staple2x1 += Gimpl::CovShiftBackward(U[nu], nu, Cshift(tmp, mu, -2));
+Staple2x1 += Gimpl::CovShiftBackward(U[nu], nu, Gimpl::CshiftLink(tmp, mu, -2));
// ___ ___
// | ___|
// |___ ___|
//
-Stap += Cshift(Gimpl::CovShiftForward(U[mu], mu, Staple2x1), mu, 1);
+Stap += Gimpl::CshiftLink(Gimpl::CovShiftForward(U[mu], mu, Staple2x1), mu, 1);
// ___ ___
// |___ |
@@ -758,7 +955,7 @@ public:
// tmp= Staple2x1* Cshift(U[mu],mu,-2);
// Stap+= Cshift(tmp,mu,1) ;
-Stap += Cshift(Staple2x1, mu, 1) * Cshift(U[mu], mu, -1);
+Stap += Gimpl::CshiftLink(Staple2x1, mu, 1) * Gimpl::CshiftLink(U[mu], mu, -1);
;
// --
@@ -766,10 +963,10 @@ public:
//
// | |
-tmp = Cshift(adj(U2[nu]), nu, -2);
+tmp = Gimpl::CshiftLink(adj(U2[nu]), nu, -2);
tmp = Gimpl::CovShiftBackward(U[mu], mu, tmp);
-tmp = U2[nu] * Cshift(tmp, nu, 2);
-Stap += Cshift(tmp, mu, 1);
+tmp = U2[nu] * Gimpl::CshiftLink(tmp, nu, 2);
+Stap += Gimpl::CshiftLink(tmp, mu, 1);
// | |
//
@@ -778,25 +975,12 @@ public:
tmp = Gimpl::CovShiftBackward(U[mu], mu, U2[nu]);
tmp = adj(U2[nu]) * tmp;
-tmp = Cshift(tmp, nu, -2);
-Stap += Cshift(tmp, mu, 1);
+tmp = Gimpl::CshiftLink(tmp, nu, -2);
+Stap += Gimpl::CshiftLink(tmp, mu, 1);
}
}
}
-static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu) {
-RectStapleUnoptimised(Stap, Umu, mu);
-}
-static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
-std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U,
-int mu) {
-if (Gimpl::isPeriodicGaugeField()) {
-RectStapleOptimised(Stap, U2, U, mu);
-} else {
-RectStapleUnoptimised(Stap, Umu, mu);
-}
-}
static void RectStapleUnoptimised(GaugeMat &Stap, const GaugeLorentz &Umu,
int mu) {
GridBase *grid = Umu.Grid();
@@ -895,6 +1079,288 @@ public:
}
}
static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu) {
RectStapleUnoptimised(Stap, Umu, mu);
}
static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U,
int mu) {
RectStapleOptimised(Stap, U2, U, mu);
}
//////////////////////////////////////////////////////
//Compute the rectangular staples for all orientations
//Stap : Array of staples (Nd)
//U: Gauge links in each direction (Nd)
/////////////////////////////////////////////////////
static void RectStapleAll(std::vector<GaugeMat> &Stap, const std::vector<GaugeMat> &U){
assert(Stap.size() == Nd); assert(U.size() == Nd);
std::vector<GaugeMat> U2(Nd,U[0].Grid());
for(int mu=0;mu<Nd;mu++) RectStapleDouble(U2[mu], U[mu], mu);
for(int mu=0;mu<Nd;mu++) RectStapleOptimised(Stap[mu], U2, U, mu);
}
//A workspace class allowing reuse of the stencil
class RectStaplePaddedAllWorkspace: public WilsonLoopPaddedStencilWorkspace{
public:
std::vector<Coordinate> getShifts() const override{
std::vector<Coordinate> shifts;
for (int mu = 0; mu < Nd; mu++){
for (int nu = 0; nu < Nd; nu++) {
if (nu != mu) {
auto genShift = [&](int mushift,int nushift){
Coordinate out(Nd,0); out[mu]=mushift; out[nu]=nushift; return out;
};
//tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
shifts.push_back(genShift(0,0));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(+1,+1));
shifts.push_back(genShift(+2,0));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(+1,-1));
shifts.push_back(genShift(+2,-1));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,-1));
shifts.push_back(genShift(-1,-1));
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(+1,-1));
//tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,+1));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(+1,0));
//tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
shifts.push_back(genShift(0,0));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(0,+2));
shifts.push_back(genShift(+1,+1));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(0,-2));
shifts.push_back(genShift(0,-2));
shifts.push_back(genShift(+1,-2));
shifts.push_back(genShift(+1,-1));
}
}
}
return shifts;
}
int paddingDepth() const override{ return 2; }
};
//Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
RectStaplePaddedAllWorkspace wk;
RectStaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
}
//Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
//gStencil: the stencil
static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil) {
double t0 = usecond();
assert(U_padded.size() == Nd); assert(staple.size() == Nd);
assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
assert(Cell.depth >= 2);
GridBase *ggrid = U_padded[0].Grid(); //padded cell grid
size_t nshift = gStencil._npoints;
int mu_off_delta = nshift / Nd;
//Open views to padded gauge links and keep open over mu loop
typedef LatticeView<typename GaugeMat::vector_object> GaugeViewType;
size_t vsize = Nd*sizeof(GaugeViewType);
GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);
GaugeMat gStaple(ggrid); //temp staple object on padded grid
int offset = 0;
for(int mu=0; mu<Nd; mu++){
{ //view scope
autoView( gStaple_v , gStaple, AcceleratorWrite);
auto gStencil_v = gStencil.View();
accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), {
decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
stencil_ss = Zero();
int s=offset;
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
//tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
GeneralStencilEntry const* e = gStencil_v.GetEntry(s++,ss);
auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
auto U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
e = gStencil_v.GetEntry(s++,ss);
U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
}
}
coalescedWrite(gStaple_v[ss],stencil_ss);
}
);
offset += mu_off_delta;
}//kernel/view scope
staple[mu] = Cell.Extract(gStaple);
}//mu loop
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
free(Ug_dirs_v_host);
acceleratorFreeDevice(Ug_dirs_v);
double t1 = usecond();
std::cout << GridLogPerformance << "RectStaplePaddedAll timings:" << (t1-t0)/1000 << "ms" << std::endl;
}
//A workspace for reusing the PaddedCell and GeneralLocalStencil objects
class StapleAndRectStapleAllWorkspace: public WilsonLoopPaddedWorkspace{
public:
StapleAndRectStapleAllWorkspace(){
this->addStencil(new StaplePaddedAllWorkspace);
this->addStencil(new RectStaplePaddedAllWorkspace);
}
};
//////////////////////////////////////////////////////
//Compute the 1x1 and 1x2 staples for all orientations
//Stap : Array of staples (Nd)
//RectStap: Array of rectangular staples (Nd)
//U: Gauge links in each direction (Nd)
/////////////////////////////////////////////////////
static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U){
StapleAndRectStapleAllWorkspace wk;
StapleAndRectStapleAll(Stap,RectStap,U,wk);
}
//////////////////////////////////////////////////////
//Compute the 1x1 and 1x2 staples for all orientations
//Stap : Array of staples (Nd)
//RectStap: Array of rectangular staples (Nd)
//U: Gauge links in each direction (Nd)
//wk: a workspace containing stored PaddedCell and GeneralLocalStencil objects to maximize reuse
/////////////////////////////////////////////////////
static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U, StapleAndRectStapleAllWorkspace &wk){
#if 0
StapleAll(Stap, U);
RectStapleAll(RectStap, U);
#else
double t0 = usecond();
GridCartesian* unpadded_grid = dynamic_cast<GridCartesian*>(U[0].Grid());
const PaddedCell &Ghost = wk.getPaddedCell(unpadded_grid);
CshiftImplGauge<Gimpl> cshift_impl;
std::vector<GaugeMat> U_pad(Nd, Ghost.grids.back());
for(int mu=0;mu<Nd;mu++) U_pad[mu] = Ghost.Exchange(U[mu], cshift_impl);
double t1 = usecond();
StaplePaddedAll(Stap, U_pad, Ghost, wk.getStencil(0,unpadded_grid) );
double t2 = usecond();
RectStaplePaddedAll(RectStap, U_pad, Ghost, wk.getStencil(1,unpadded_grid));
double t3 = usecond();
std::cout << GridLogPerformance << "StapleAndRectStapleAll timings: pad:" << (t1-t0)/1000 << "ms, staple:" << (t2-t1)/1000 << "ms, rect-staple:" << (t3-t2)/1000 << "ms" << std::endl;
#endif
}
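// A hedged sketch (illustration only; the function name and 'nsteps'
// are hypothetical) of the workspace path above: build the workspace
// once so the PaddedCell and both stencils are cached across repeated
// calls, e.g. one call per smearing step.
static void StapleAndRectStapleAllExample(const GaugeLorentz &Umu, int nsteps) {
GridBase *grid = Umu.Grid();
static StapleAndRectStapleAllWorkspace wk; // reused across calls
std::vector<GaugeMat> U(Nd, grid), Stap(Nd, grid), RectStap(Nd, grid);
for (int step = 0; step < nsteps; step++) {
for (int mu = 0; mu < Nd; mu++) U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
StapleAndRectStapleAll(Stap, RectStap, U, wk); // pad + stencils cached in wk
// ... use Stap/RectStap and update the links here ...
}
}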
//////////////////////////////////////////////////
// Wilson loop of size (R1, R2), oriented in mu,nu plane
//////////////////////////////////////////////////


@@ -1133,4 +1133,13 @@ static_assert(sizeof(SIMD_Ftype) == sizeof(SIMD_Itype), "SIMD vector lengths inc
NAMESPACE_END(Grid);
#ifdef GRID_SYCL
template<> struct sycl::is_device_copyable<Grid::vComplexF> : public std::true_type {};
template<> struct sycl::is_device_copyable<Grid::vComplexD> : public std::true_type {};
template<> struct sycl::is_device_copyable<Grid::vRealF > : public std::true_type {};
template<> struct sycl::is_device_copyable<Grid::vRealD > : public std::true_type {};
template<> struct sycl::is_device_copyable<Grid::vInteger > : public std::true_type {};
#endif
#endif


@@ -218,6 +218,10 @@ public:
// -------------------------------------------------
// misc
// -------------------------------------------------
void discardhi(uint64_t z) {
_s[3] += z;
encrypt_counter();
}
// req: 26.5.1.4 Random number engine requirements, p.908 table 117, row 9
// Advances the engine's state e_i to e_(i+z) by any means equivalent to z consecutive calls of e()
@@ -387,4 +391,4 @@ private:
#undef MIXK
#undef MIX2
#endif
#endif


@@ -43,7 +43,7 @@ class GeneralLocalStencilView {
int _npoints; // Move to template param?
GeneralStencilEntry* _entries_p;
-accelerator_inline GeneralStencilEntry * GetEntry(int point,int osite) {
+accelerator_inline GeneralStencilEntry * GetEntry(int point,int osite) const {
return & this->_entries_p[point+this->_npoints*osite];
}
@@ -79,63 +79,113 @@ public:
this->_entries.resize(npoints* osites);
this->_entries_p = &_entries[0];
-for(Integer site=0;site<osites;site++){
+thread_for(site, osites, {
Coordinate Coor;
Coordinate NbrCoor;
for(Integer ii=0;ii<npoints;ii++){
Integer lex = site*npoints+ii;
GeneralStencilEntry SE;
////////////////////////////////////////////////
// Outer index of neighbour Offset calculation
////////////////////////////////////////////////
grid->oCoorFromOindex(Coor,site);
for(int d=0;d<Coor.size();d++){
int rd = grid->_rdimensions[d];
NbrCoor[d] = (Coor[d] + shifts[ii][d] + rd )%rd;
}
SE._offset = grid->oIndexReduced(NbrCoor);
////////////////////////////////////////////////
// Inner index permute calculation
// Simpler version using icoor calculation
////////////////////////////////////////////////
SE._permute =0;
for(int d=0;d<Coor.size();d++){
int fd = grid->_fdimensions[d];
int rd = grid->_rdimensions[d];
int ly = grid->_simd_layout[d];
assert((ly==1)||(ly==2));
int shift = (shifts[ii][d]+fd)%fd; // make it strictly positive 0.. L-1
int x = Coor[d]; // x in [0... rd-1] as an oSite
int permute_dim = grid->PermuteDim(d);
int permute_slice=0;
if(permute_dim){
int num = shift%rd; // Slice within dest osite cell of slice zero
int wrap = shift/rd; // Number of osite local volume cells crossed through
// x+num < rd dictates whether we are in same permute state as slice 0
if ( x< rd-num ) permute_slice=wrap;
else permute_slice=(wrap+1)%ly;
}
if ( permute_slice ) {
int ptype =grid->PermuteType(d);
uint8_t mask =0x1<<ptype;
SE._permute |= mask;
}
}
////////////////////////////////////////////////
// Store in look up table
////////////////////////////////////////////////
this->_entries[lex] = SE;
}
-}
+});
}
};
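// Standalone illustration (not part of this file) of the permute_slice
// logic in the constructor above, for one dimension with two SIMD lanes
// (ly = 2): a lane permute is needed exactly when the shifted site lands
// in the other half of the SIMD-split dimension.
#include <cassert>
int permute_slice(int x, int shift, int rd, int ly) {
  int num = shift % rd;  // slice within destination osite cell
  int wrap = shift / rd; // local-volume cells crossed through
  return (x < rd - num) ? wrap : (wrap + 1) % ly;
}
int main() {
  const int rd = 4, ly = 2;                 // fd = rd*ly = 8 sites
  assert(permute_slice(0, 1, rd, ly) == 0); // stays in the same half
  assert(permute_slice(3, 1, rd, ly) == 1); // x+1 crosses into the other half
  assert(permute_slice(0, 4, rd, ly) == 1); // a shift of rd always crosses
  assert(permute_slice(3, 5, rd, ly) == 0); // crossing twice lands back (ly=2)
  return 0;
}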
////////////////////////////////////////////////
// Some machinery to streamline making a stencil
////////////////////////////////////////////////
class shiftSignal {
public:
enum {
BACKWARD_CONST = 16,
NO_SHIFT = -1
};
};
// TODO: put a check somewhere that BACKWARD_CONST > Nd!
/*! @brief signals that you want to go backwards in direction dir */
inline int Back(const int dir) {
// generalShift will use BACKWARD_CONST to determine whether we step forward or
// backward. Trick inspired by SIMULATeQCD.
return dir + shiftSignal::BACKWARD_CONST;
}
/*! @brief shift one unit in direction dir */
template<typename... Args>
void generalShift(Coordinate& shift, int dir) {
if (dir >= shiftSignal::BACKWARD_CONST) {
dir -= shiftSignal::BACKWARD_CONST;
shift[dir]+=-1;
} else if (dir == shiftSignal::NO_SHIFT) {
; // do nothing
} else {
shift[dir]+=1;
}
}
/*! @brief follow a path of directions, shifting one unit in each direction */
template<typename... Args>
void generalShift(Coordinate& shift, int dir, Args... args) {
if (dir >= shiftSignal::BACKWARD_CONST) {
dir -= shiftSignal::BACKWARD_CONST;
shift[dir]+=-1;
} else if (dir == shiftSignal::NO_SHIFT) {
; // do nothing
} else {
shift[dir]+=1;
}
generalShift(shift, args...);
}
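// Standalone illustration (not part of this file) of the path-building
// helpers above, with a minimal stand-in for Grid's Coordinate. Back(mu)
// flags a backward step by offsetting the direction with BACKWARD_CONST;
// the variadic overload walks a whole path of directions.
#include <cassert>
#include <vector>
using Coordinate = std::vector<int>;
enum { BACKWARD_CONST = 16, NO_SHIFT = -1 };
int Back(const int dir) { return dir + BACKWARD_CONST; }
void generalShift(Coordinate &shift, int dir) {
  if (dir >= BACKWARD_CONST) shift[dir - BACKWARD_CONST] -= 1;
  else if (dir != NO_SHIFT) shift[dir] += 1;
}
template <typename... Args>
void generalShift(Coordinate &shift, int dir, Args... args) {
  generalShift(shift, dir);
  generalShift(shift, args...);
}
int main() {
  const int mu = 0, nu = 1;
  Coordinate shift(4, 0);
  generalShift(shift, mu, nu, Back(mu)); // path: +mu, +nu, -mu
  assert(shift[mu] == 0 && shift[nu] == 1);
  return 0;
}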
NAMESPACE_END(Grid);


@@ -32,6 +32,7 @@
#include <Grid/stencil/SimpleCompressor.h> // subdir aggregate
#include <Grid/stencil/Lebesgue.h> // subdir aggregate
#include <Grid/stencil/GeneralLocalStencil.h>
//////////////////////////////////////////////////////////////////////////////////////////
// Must not lose sight that goal is to be able to construct really efficient
@@ -69,57 +70,6 @@ struct DefaultImplParams {
void Gather_plane_table_compute (GridBase *grid,int dimension,int plane,int cbmask,
int off,std::vector<std::pair<int,int> > & table);
/*
template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so) __attribute__((noinline));
template<class vobj,class cobj,class compressor>
void Gather_plane_simple_table (commVector<std::pair<int,int> >& table,const Lattice<vobj> &rhs,cobj *buffer,compressor &compress, int off,int so)
{
int num=table.size();
std::pair<int,int> *table_v = & table[0];
auto rhs_v = rhs.View(AcceleratorRead);
accelerator_forNB( i,num, vobj::Nsimd(), {
compress.Compress(buffer[off+table_v[i].first],rhs_v[so+table_v[i].second]);
});
rhs_v.ViewClose();
}
///////////////////////////////////////////////////////////////////
// Gather for when there *is* need to SIMD split with compression
///////////////////////////////////////////////////////////////////
template<class cobj,class vobj,class compressor>
void Gather_plane_exchange_table(const Lattice<vobj> &rhs,
commVector<cobj *> pointers,
int dimension,int plane,
int cbmask,compressor &compress,int type) __attribute__((noinline));
template<class cobj,class vobj,class compressor>
void Gather_plane_exchange_table(commVector<std::pair<int,int> >& table,
const Lattice<vobj> &rhs,
std::vector<cobj *> &pointers,int dimension,int plane,int cbmask,
compressor &compress,int type)
{
assert( (table.size()&0x1)==0);
int num=table.size()/2;
int so = plane*rhs.Grid()->_ostride[dimension]; // base offset for start of plane
auto rhs_v = rhs.View(AcceleratorRead);
auto rhs_p = &rhs_v[0];
auto p0=&pointers[0][0];
auto p1=&pointers[1][0];
auto tp=&table[0];
accelerator_forNB(j, num, vobj::Nsimd(), {
compress.CompressExchange(p0,p1, rhs_p, j,
so+tp[2*j ].second,
so+tp[2*j+1].second,
type);
});
rhs_v.ViewClose();
}
*/
void DslashResetCounts(void);
void DslashGetCounts(uint64_t &dirichlet,uint64_t &partial,uint64_t &full);
void DslashLogFull(void);
@@ -257,6 +207,10 @@ public:
struct Packet {
void * send_buf;
void * recv_buf;
#ifndef ACCELERATOR_AWARE_MPI
void * host_send_buf; // Allocate this if not MPI_CUDA_AWARE
void * host_recv_buf; // Allocate this if not MPI_CUDA_AWARE
#endif
Integer to_rank;
Integer from_rank;
Integer do_send;
@@ -323,7 +277,7 @@ public:
Vector<int> surface_list;
stencilVector<StencilEntry> _entries; // Resident in managed memory
-commVector<StencilEntry> _entries_device; // Resident in managed memory
+commVector<StencilEntry> _entries_device; // Resident in device memory
std::vector<Packet> Packets;
std::vector<Merge> Mergers;
std::vector<Merge> MergersSHM;
@@ -407,33 +361,16 @@ public:
// Use OpenMP Tasks for cleaner ???
// must be called *inside* parallel region
//////////////////////////////////////////
/*
void CommunicateThreaded()
{
#ifdef GRID_OMP
int mythread = omp_get_thread_num();
int nthreads = CartesianCommunicator::nCommThreads;
#else
int mythread = 0;
int nthreads = 1;
#endif
if (nthreads == -1) nthreads = 1;
if (mythread < nthreads) {
for (int i = mythread; i < Packets.size(); i += nthreads) {
uint64_t bytes = _grid->StencilSendToRecvFrom(Packets[i].send_buf,
Packets[i].to_rank,
Packets[i].recv_buf,
Packets[i].from_rank,
Packets[i].bytes,i);
}
}
}
*/
////////////////////////////////////////////////////////////////////////
// Non blocking send and receive. Necessarily parallel.
////////////////////////////////////////////////////////////////////////
void CommunicateBegin(std::vector<std::vector<CommsRequest_t> > &reqs)
{
// All GPU kernel tasks must complete
// accelerator_barrier(); // All kernels should ALREADY be complete
// _grid->StencilBarrier(); // Everyone is here, so noone running slow and still using receive buffer
// But the HaloGather had a barrier too.
#ifdef ACCELERATOR_AWARE_MPI
for(int i=0;i<Packets.size();i++){
_grid->StencilSendToRecvFromBegin(MpiReqs,
Packets[i].send_buf,
@@ -442,16 +379,54 @@ public:
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].xbytes,Packets[i].rbytes,i);
}
#else
#warning "Using COPY VIA HOST BUFFERS IN STENCIL"
for(int i=0;i<Packets.size();i++){
// Introduce a host buffer with a cheap slab allocator and zero cost wipe all
Packets[i].host_send_buf = _grid->HostBufferMalloc(Packets[i].xbytes);
Packets[i].host_recv_buf = _grid->HostBufferMalloc(Packets[i].rbytes);
if ( Packets[i].do_send ) {
acceleratorCopyFromDevice(Packets[i].send_buf, Packets[i].host_send_buf,Packets[i].xbytes);
}
_grid->StencilSendToRecvFromBegin(MpiReqs,
Packets[i].host_send_buf,
Packets[i].to_rank,Packets[i].do_send,
Packets[i].host_recv_buf,
Packets[i].from_rank,Packets[i].do_recv,
Packets[i].xbytes,Packets[i].rbytes,i);
}
#endif
// Get comms started then run checksums
// Having this PRIOR to the dslash seems to make Sunspot work... (!)
for(int i=0;i<Packets.size();i++){
if ( Packets[i].do_send )
FlightRecorder::xmitLog(Packets[i].send_buf,Packets[i].xbytes);
}
}
void CommunicateComplete(std::vector<std::vector<CommsRequest_t> > &reqs)
{
-_grid->StencilSendToRecvFromComplete(MpiReqs,0);
+_grid->StencilSendToRecvFromComplete(MpiReqs,0); // MPI is done
if ( this->partialDirichlet ) DslashLogPartial();
else if ( this->fullDirichlet ) DslashLogDirichlet();
else DslashLogFull();
-acceleratorCopySynchronise();
+// acceleratorCopySynchronise() is in the StencilSendToRecvFromComplete
+// accelerator_barrier();
_grid->StencilBarrier();
#ifndef ACCELERATOR_AWARE_MPI
#warning "Using COPY VIA HOST BUFFERS IN STENCIL"
for(int i=0;i<Packets.size();i++){
if ( Packets[i].do_recv ) {
acceleratorCopyToDevice(Packets[i].host_recv_buf, Packets[i].recv_buf,Packets[i].rbytes);
}
}
_grid->HostBufferFreeAll();
#endif
// run any checksums
for(int i=0;i<Packets.size();i++){
if ( Packets[i].do_recv )
FlightRecorder::recvLog(Packets[i].recv_buf,Packets[i].rbytes,Packets[i].from_rank);
}
}
////////////////////////////////////////////////////////////////////////
// Blocking send and receive. Either sequential or parallel.
@@ -527,6 +502,7 @@ public:
template<class compressor>
void HaloGather(const Lattice<vobj> &source,compressor &compress)
{
// accelerator_barrier();
_grid->StencilBarrier();// Synch shared memory on a single nodes
assert(source.Grid()==_grid);
@@ -539,10 +515,9 @@ public:
compress.Point(point);
HaloGatherDir(source,compress,point,face_idx);
}
-accelerator_barrier();
+accelerator_barrier(); // All my local gathers are complete
face_table_computed=1;
assert(u_comm_offset==_unified_buffer_size);
}
/////////////////////////
@@ -578,6 +553,7 @@ public:
accelerator_forNB(j, words, cobj::Nsimd(), {
coalescedWrite(to[j] ,coalescedRead(from [j]));
});
acceleratorFenceComputeStream();
}
}
@@ -668,6 +644,7 @@ public:
for(int i=0;i<dd.size();i++){
decompressor::DecompressFace(decompress,dd[i]);
}
acceleratorFenceComputeStream(); // dependent kernels
}
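// ------------------------------------------------------------------------
// Sketch of the fence pattern above: accelerator_forNB launches are
// asynchronous, so a consumer kernel on the same compute stream must be
// ordered behind the producer's writes. The names (to/from/out, words) are
// illustrative only.
template<class cobj>
void fencedProducerConsumer(cobj *to, cobj *from, cobj *out, int words)
{
  accelerator_forNB(j, words, cobj::Nsimd(), {
      coalescedWrite(to[j], coalescedRead(from[j]));   // producer (non-blocking)
    });
  acceleratorFenceComputeStream();                     // order dependent kernel behind the writes
  accelerator_forNB(j, words, cobj::Nsimd(), {
      coalescedWrite(out[j], coalescedRead(to[j]));    // consumer sees completed writes
    });
}
// ------------------------------------------------------------------------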
////////////////////////////////////////
// Set up routines
@@ -705,7 +682,7 @@ public:
}
}
}
//std::cout << "BuildSurfaceList size is "<<surface_list.size()<<std::endl;
}
/// Introduce a block structure and switch off comms on boundaries
void DirichletBlock(const Coordinate &dirichlet_block)
@@ -760,7 +737,8 @@ public:
int checkerboard,
const std::vector<int> &directions,
const std::vector<int> &distances,
Parameters p=Parameters(),
bool preserve_shm=false)
{
face_table_computed=0;
_grid = grid;
@@ -854,7 +832,9 @@ public:
/////////////////////////////////////////////////////////////////////////////////
const int Nsimd = grid->Nsimd();
// Allow for multiple stencils to exist simultaneously
if (!preserve_shm)
_grid->ShmBufferFreeAll();
int maxl=2;
u_simd_send_buf.resize(maxl);
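// ------------------------------------------------------------------------
// Illustrative use of the new preserve_shm flag (the exact stencil
// constructor argument list is assumed here, not confirmed by this diff):
// when two stencils must coexist, only the first construction may wipe the
// shared-memory buffers.
//   CartesianStencil<vobj,cobj,Parameters> even(grid,npoint,Even,directions,distances,p);      // frees shm buffers
//   CartesianStencil<vobj,cobj,Parameters> odd (grid,npoint,Odd ,directions,distances,p,true); // preserve_shm: keeps them
// ------------------------------------------------------------------------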
@@ -1220,7 +1200,6 @@ public:
///////////////////////////////////////////////////////////
int do_send = (comms_send|comms_partial_send) && (!shm_send );
int do_recv = (comms_send|comms_partial_send) && (!shm_recv );
AddPacket((void *)&send_buf[comm_off],
(void *)&recv_buf[comm_off],
xmit_to_rank, do_send,

@@ -73,6 +73,16 @@ vobj coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm,int
return vec;
}
}
// 'perm_mask' acts as a bitmask: bit d set requests a permute in dimension d
template<class vobj> accelerator_inline
vobj coalescedReadGeneralPermute(const vobj & __restrict__ vec,int perm_mask,int nd,int lane=0)
{
auto obj = vec, tmp = vec;
for (int d=0;d<nd;d++)
if (perm_mask & (0x1 << d)) { permute(obj,tmp,d); tmp=obj;}
return obj;
}
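// Example: with nd=2, perm_mask=0x3 applies permute() in dimensions 0 and 1
// in one call. A minimal sketch (the name exampleXYPermute is illustrative):
template<class vobj> accelerator_inline
vobj exampleXYPermute(const vobj &v)
{
  return coalescedReadGeneralPermute(v, 0x3, 2); // bits 0 and 1 set
}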
template<class vobj> accelerator_inline
void coalescedWrite(vobj & __restrict__ vec,const vobj & __restrict__ extracted,int lane=0)
{
@@ -83,7 +93,7 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__
{
vstream(vec, extracted);
}
#else //==GRID_SIMT
//#ifndef GRID_SYCL
@@ -166,6 +176,14 @@ typename vobj::scalar_object coalescedReadPermute(const vobj & __restrict__ vec,
return extractLane(plane,vec);
}
template<class vobj> accelerator_inline
typename vobj::scalar_object coalescedReadGeneralPermute(const vobj & __restrict__ vec,int perm_mask,int nd,int lane=acceleratorSIMTlane(vobj::Nsimd()))
{
int plane = lane;
for (int d=0;d<nd;d++)
plane = (perm_mask & (0x1 << d)) ? plane ^ (vobj::Nsimd() >> (d + 1)) : plane;
return extractLane(plane,vec);
}
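// Worked example of the lane arithmetic above, for Nsimd=8 and nd=3:
//   d=0 : plane ^= Nsimd >> 1 = 4
//   d=1 : plane ^= Nsimd >> 2 = 2
//   d=2 : plane ^= Nsimd >> 3 = 1
// so perm_mask=0b101 maps lane 0 -> 0^4^1 = 5, mirroring the element
// exchange performed by permute(obj,tmp,d) in the non-SIMT path above.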
template<class vobj> accelerator_inline
void coalescedWrite(vobj & __restrict__ vec,const typename vobj::scalar_object & __restrict__ extracted,int lane=acceleratorSIMTlane(vobj::Nsimd()))
{
insertLane(lane,vec,extracted);
@@ -66,13 +66,61 @@ template<class vtype,int N> accelerator_inline iMatrix<vtype,N> Ta(const iMatrix
return ret;
}
template<class vtype> accelerator_inline iScalar<vtype> SpTa(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
ret._internal = SpTa(r._internal);
return ret;
}
template<class vtype,int N> accelerator_inline iVector<vtype,N> SpTa(const iVector<vtype,N>&r)
{
iVector<vtype,N> ret;
for(int i=0;i<N;i++){
ret._internal[i] = SpTa(r._internal[i]);
}
return ret;
}
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> SpTa(const iMatrix<vtype,N> &arg)
{
// Generalises Ta to Sp2n
// Applies the following projections
// P_{antihermitian} P_{antihermitian-Sp-algebra} P_{traceless}
// where the ordering matters
// P_{traceless} subtracts the trace
// P_{antihermitian-Sp-algebra} provides the block structure of the algebra based on U = exp(T) i.e. anti-hermitian generators
// P_{antihermitian} applies (in - adj(in)) / 2
iMatrix<vtype,N> ret(arg);
double factor = (1.0/(double)N);
vtype nrm;
nrm = 0.5;
ret = arg - (trace(arg)*factor);
for(int c1=0;c1<N/2;c1++)
{
for(int c2=0;c2<N/2;c2++)
{
ret._internal[c1][c2] = nrm*(conjugate(ret._internal[c1+N/2][c2+N/2]) + ret._internal[c1][c2]); // new[up-left] = old[up-left]+old*[down-right]
ret._internal[c1][c2+N/2] = nrm*(ret._internal[c1][c2+N/2] - conjugate(ret._internal[c1+N/2][c2])); // new[up-right] = old[up-right]-old*[down-left]
}
for(int c2=N/2;c2<N;c2++)
{
ret._internal[c1+N/2][c2-N/2] = -conjugate(ret._internal[c1][c2]); // reconstructs lower blocks
ret._internal[c1+N/2][c2] = conjugate(ret._internal[c1][c2-N/2]); // from upper blocks
}
}
ret = (ret - adj(ret))*0.5;
return ret;
}
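// ------------------------------------------------------------------------
// Minimal check sketch for SpTa above (standalone; the input values are
// illustrative): the result should be traceless and anti-hermitian, with the
// Sp(2n) block structure constructed by the projection.
#include <Grid/Grid.h>
using namespace Grid;
int main(int argc, char **argv)
{
  iMatrix<ComplexD,2> X;                    // Sp(2): N=2
  X(0,0)=ComplexD(0.3,0.1);  X(0,1)=ComplexD(-0.2,0.7);
  X(1,0)=ComplexD(0.5,-0.4); X(1,1)=ComplexD(0.1,0.2);
  auto T = SpTa(X);
  std::cout << trace(T)   << std::endl;     // expect ~0 (traceless)
  std::cout << T + adj(T) << std::endl;     // expect ~0 (anti-hermitian)
  return 0;
}
// ------------------------------------------------------------------------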
///////////////////////////////////////////////
// ProjectOnGroup function for scalar, vector, matrix
// Projects on orthogonal, unitary group
///////////////////////////////////////////////
template<class vtype> accelerator_inline iScalar<vtype> ProjectOnGroup(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
@@ -90,10 +138,12 @@ template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnGroup(c
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
{
typedef typename iMatrix<vtype,N>::scalar_type scalar;
// need a check for the group type?
iMatrix<vtype,N> ret(arg);
vtype nrm;
vtype inner;
scalar one(1.0);
for(int c1=0;c1<N;c1++){
// Normalises row c1
@@ -102,7 +152,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
@@ -127,7 +177,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
}
@@ -135,6 +185,85 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
return ret;
}
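// ------------------------------------------------------------------------
// Usage sketch for ProjectOnGroup (standalone; values illustrative):
// reunitarise a matrix that has drifted off the group under rounding error,
// e.g. after many accumulated link updates.
#include <Grid/Grid.h>
using namespace Grid;
int main(int argc, char **argv)
{
  iMatrix<ComplexD,3> U;
  for(int i=0;i<3;i++) for(int j=0;j<3;j++)
    U(i,j) = ComplexD(i==j ? 1.0 : 0.0, 0.01*(i+j)); // identity + small drift
  U = ProjectOnGroup(U);
  std::cout << U*adj(U) << std::endl;                // expect ~identity
  return 0;
}
// ------------------------------------------------------------------------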
// re-do for sp2n
// Ta cannot be defined here for Sp2n because I need the generators from the Sp class
// It is defined in gauge impl types
template<class vtype> accelerator_inline iScalar<vtype> ProjectOnSpGroup(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
ret._internal = ProjectOnSpGroup(r._internal);
return ret;
}
template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnSpGroup(const iVector<vtype,N>&r)
{
iVector<vtype,N> ret;
for(int i=0;i<N;i++){
ret._internal[i] = ProjectOnSpGroup(r._internal[i]);
}
return ret;
}
// int N is 2n in Sp(2n)
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> ProjectOnSpGroup(const iMatrix<vtype,N> &arg)
{
// need a check for the group type?
iMatrix<vtype,N> ret(arg);
vtype nrm;
vtype inner;
for(int c1=0;c1<N/2;c1++)
{
for (int b=0; b<c1; b++) // project the components along rows b and b+N/2 out of row c1
{
decltype(ret._internal[b][b]*ret._internal[b][b]) pr;
decltype(ret._internal[b][b]*ret._internal[b][b]) prn;
zeroit(pr);
zeroit(prn);
for(int c=0; c<N; c++)
{
pr += conjugate(ret._internal[c1][c])*ret._internal[b][c]; // <U_c1 | U_b >
prn += conjugate(ret._internal[c1][c])*ret._internal[b+N/2][c]; // <U_c1 | U_{b+N} >
}
for(int c=0; c<N; c++)
{
ret._internal[c1][c] -= (conjugate(pr) * ret._internal[b][c] + conjugate(prn) * ret._internal[b+N/2][c] ); // U_c1 -= ( <U_c1 | U_b > U_b + <U_c1 | U_{b+N} > U_{b+N} )
}
}
zeroit(inner);
for(int c2=0;c2<N;c2++)
{
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
}
nrm = sqrt(inner);
nrm = 1.0/nrm;
for(int c2=0;c2<N;c2++)
{
ret._internal[c1][c2]*= nrm;
}
for(int c2=0;c2<N/2;c2++)
{
ret._internal[c1+N/2][c2+N/2] = conjugate(ret._internal[c1][c2]); // down right in the new matrix = (up-left)* of the old matrix
}
for(int c2=N/2;c2<N;c2++)
{
ret._internal[c1+N/2][c2-N/2] = -conjugate(ret._internal[c1][c2]); // down left in the new matrix = -(up-right)* of the old
}
}
return ret;
}
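// ------------------------------------------------------------------------
// Check sketch for ProjectOnSpGroup (standalone; values illustrative): the
// projected matrix should satisfy the symplectic condition
//   transpose(U) * Omega * U = Omega,  with Omega = [[0,1],[-1,0]] in blocks.
#include <Grid/Grid.h>
using namespace Grid;
int main(int argc, char **argv)
{
  iMatrix<ComplexD,2> U, Omega;
  U(0,0)=ComplexD(0.9,0.1);  U(0,1)=ComplexD(0.2,-0.3);
  U(1,0)=ComplexD(-0.1,0.4); U(1,1)=ComplexD(0.8,-0.2);
  Omega(0,0)=ComplexD(0.0);  Omega(0,1)=ComplexD(1.0);
  Omega(1,0)=ComplexD(-1.0); Omega(1,1)=ComplexD(0.0);
  U = ProjectOnSpGroup(U);
  std::cout << transpose(U)*Omega*U << std::endl;    // expect ~Omega
  return 0;
}
// ------------------------------------------------------------------------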
NAMESPACE_END(Grid);
#endif

@@ -53,7 +53,6 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
}
// Specialisation: Cayley-Hamilton exponential for SU(3)
#if 0
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>

@@ -69,6 +69,35 @@ accelerator_inline auto trace(const iVector<vtype,N> &arg) -> iVector<decltype(t
}
return ret;
}
////////////////////////////
// Fast path traceProduct
////////////////////////////
template<class S1 , class S2, IfNotGridTensor<S1> = 0, IfNotGridTensor<S2> = 0>
accelerator_inline auto traceProduct( const S1 &arg1,const S2 &arg2)
-> decltype(arg1*arg2)
{
return arg1*arg2;
}
template<class vtype,class rtype,int N >
accelerator_inline auto traceProduct(const iMatrix<vtype,N> &arg1,const iMatrix<rtype,N> &arg2) -> iScalar<decltype(trace(arg1._internal[0][0]*arg2._internal[0][0]))>
{
iScalar<decltype( trace(arg1._internal[0][0]*arg2._internal[0][0] )) > ret;
zeroit(ret._internal);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
ret._internal=ret._internal+traceProduct(arg1._internal[i][j],arg2._internal[j][i]);
}}
return ret;
}
template<class vtype,class rtype >
accelerator_inline auto traceProduct(const iScalar<vtype> &arg1,const iScalar<rtype> &arg2) -> iScalar<decltype(trace(arg1._internal*arg2._internal))>
{
iScalar<decltype(trace(arg1._internal*arg2._internal))> ret;
ret._internal=traceProduct(arg1._internal,arg2._internal);
return ret;
}
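// traceProduct(A,B) evaluates tr(A*B) = sum_{i,j} tr(A_ij * B_ji) without
// materialising the N x N product: O(N^2) sub-traces instead of an O(N^3)
// multiply. Standalone sketch (values illustrative):
#include <Grid/Grid.h>
using namespace Grid;
int main(int argc, char **argv)
{
  iMatrix<ComplexD,3> A, B;
  for(int i=0;i<3;i++) for(int j=0;j<3;j++){
    A(i,j)=ComplexD(i+0.5,j); B(i,j)=ComplexD(j,0.25*i);
  }
  std::cout << trace(A*B)        << std::endl; // multiply then trace
  std::cout << traceProduct(A,B) << std::endl; // fast path, same value
  return 0;
}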
NAMESPACE_END(Grid);
