mirror of https://github.com/paboyle/Grid.git synced 2025-06-14 13:57:07 +01:00

Compare commits


333 Commits

SHA1 Message Date
070b61f08f Simplifying the MultiRHS solver to make it do SRHS *and* MRHS 2024-03-06 14:04:33 -05:00
ee3b3c4c56 relocate deflation support 2024-02-27 11:52:23 -05:00
462d706a63 Move to a blas directory 2024-02-27 11:51:04 -05:00
ee0d460c8e Blas based block project & deflate for multiRHS 2024-02-27 11:41:44 -05:00
cd15abe9d1 Mrhs prep 2024-02-27 11:41:13 -05:00
9f40467e24 Warning squash 2024-02-27 11:40:36 -05:00
d0b6593823 More verbose on checksum 2024-02-27 11:40:14 -05:00
79fc821d8d reorg headers 2024-02-27 11:39:37 -05:00
d7fdb9a7e6 Reorg headers 2024-02-27 11:39:06 -05:00
b74de51c18 Reorder headers 2024-02-27 11:38:52 -05:00
44b466e072 Make InsertSliceFast the default at some point in future.
Should I do this now?
2024-02-21 14:51:24 -05:00
5e5b471bb2 Put/Get and DeviceToDevice 2024-02-21 14:47:06 -05:00
9c2565f64e Working and faster version 2024-02-21 14:46:43 -05:00
e1d0a7cec3 Batched blas 2024-02-21 14:38:20 -05:00
b19ae8f465 Nbasis method for convenience 2024-02-21 14:36:19 -05:00
cdff2c8e18 Updated mrhs adef 2024-02-21 14:27:19 -05:00
eb702f581b Running on 12 rhs on 18 nodes of frontier 2024-01-22 17:44:15 -05:00
3d13fd56c5 Precompute phases, save memory in hermitian 2024-01-22 17:43:35 -05:00
6f51b49ef8 Use stderr 2024-01-22 17:41:09 -05:00
addc638856 Fast localCopyRegion, blockProjectFast 2024-01-22 17:40:38 -05:00
42ae36bc28 Working 2024-01-17 16:39:14 -05:00
c69f73ff9f Working 2024-01-17 16:38:46 -05:00
ca5ae8a2e6 Revert to working. 2024-01-17 16:32:05 -05:00
d967eb53de Working for first time 2024-01-17 16:31:12 -05:00
839f9f1bbe Don't log memory by default 2024-01-17 16:25:50 -05:00
b754a152c6 Flag guard correctly 2024-01-17 16:25:28 -05:00
e07cb2b9de Accelerator memory 2024-01-17 16:24:31 -05:00
a1f8bbb078 accelerator memory print 2024-01-17 16:24:09 -05:00
7909683f3b MultiRHS 2024-01-17 16:21:07 -05:00
25f71913b7 MultiRHS coarse 2024-01-04 12:01:17 -05:00
34ddd2b7b1 MultiRHS coarse space 2024-01-04 12:00:53 -05:00
d5fd90b2f3 Add 48^3 rtest 2024-01-04 12:00:01 -05:00
b7c7000d0d Don't need the numerical rounding tolerance in multigrid 2023-12-22 18:10:23 -05:00
551f6c4edd Synchronise changes 2023-12-22 18:09:11 -05:00
defd814750 Speed up the coarsened matrix matrix evaluation.
It is block project limited.
Could be sped up with calls to Batched GEMM and a data layout change.
2023-12-22 18:07:03 -05:00
3d517bbd2a Synchronise decouple from the launch
Speeds up multileg stencils
2023-12-22 18:06:13 -05:00
78ab955fec Better padded cell exchange 2023-12-22 18:05:41 -05:00
dd13937bb6 Better opt face gather scatter 2023-12-22 18:03:38 -05:00
66a1b63aa9 Faster grid/blas layout change.
Halo exchange is now the only slow part.
Revisit
2023-12-21 20:50:18 -05:00
22c611bd1a Delete temp file 2023-12-21 18:32:31 -05:00
c9bb1bf8ea Passing new BLAS based 2023-12-21 18:31:17 -05:00
9e489887cf General coarse multiRHS move to BLAS implementation 2023-12-21 15:24:48 -05:00
9feb801bb9 Much simpler GPU implementation 2023-12-21 15:24:06 -05:00
c00b495933 Multigrid 2023-12-21 15:23:31 -05:00
d22eebe553 BLAS options 2023-12-21 15:23:03 -05:00
8bcbd82680 BLAS based layout and implementation 2023-12-21 15:21:24 -05:00
dfa617c439 Batched SGEMM/DGEMM/ZGEMM/CGEMM
Hip, Cuda version and vanilla CPU
One MKL stub in comments, to be tested as different.
2023-12-21 14:01:18 -05:00
48d1f0df89 Optimised partially, working 2023-12-21 12:33:47 -05:00
b75cb7a12c Blas batched partial implementation on Frontier only for now 2023-12-21 12:31:33 -05:00
332563e037 Debugged, reducing verbose 2023-12-21 12:30:57 -05:00
0cce97a4fe verbosity only 2023-12-20 21:30:10 -05:00
95a8e4be64 rocblas 2023-12-20 21:27:59 -05:00
abcd6b8cb6 Faster version 2023-12-19 15:17:46 -05:00
e8f21c9b6d Memory verbose control improvement 2023-12-19 15:16:58 -05:00
e054078b11 Verbose 2023-12-05 16:15:17 -05:00
6835a7f208 Better logging, test on 81 point stencil 2023-11-29 19:20:47 -05:00
f59993b979 Nbasis 2023-11-29 09:47:36 -05:00
2290b8f680 Verbose 2023-11-29 09:47:04 -05:00
2c54be651c Further updates 2023-11-29 09:43:29 -05:00
e859a199df Reduce volume to interior for coarse stencil -- worth up to 4x gain 2023-11-28 10:23:16 -05:00
0a3682ad0b MultiRHS work 2023-11-28 07:43:37 -05:00
59abaeb5cd Time stamp 2023-11-24 12:56:45 -05:00
3e448435d3 Restrict to interior 2023-11-23 18:23:29 -05:00
a294bc3c5b Relax constraints for multiRHS 2023-11-23 18:20:42 -05:00
b302ad3d49 multiRHS test in place, passes Yay! 2023-11-23 18:20:15 -05:00
82fc4b1e94 Finalise 2023-11-23 18:19:41 -05:00
b4f1740380 Finalise message 2023-11-23 18:19:16 -05:00
031f85247c multiRHS initial support -- needs optimisation for multi project/promote.
Bug fix in freeing intermediate grids to stop double free
2023-11-23 18:18:35 -05:00
639cc6f73a better support for multiRHS coarse space
Still to add restriction of domain of last loop to interior of padded cell (expect about 4.5x on test volume on Crusher)
2023-11-23 18:16:26 -05:00
09946cf1ba Improved, works on 48^3 moving to multiRHS optimisations 2023-11-15 18:03:05 -05:00
f4fa95e7cb Use 5.3.0 2023-11-15 18:01:38 -05:00
100e29e35e Allow expression as argument to norm2 2023-11-15 18:00:44 -05:00
4cbe471a83 devVector 2023-11-15 18:00:07 -05:00
8bece1f861 Faster to transpose the matrix and apply with column major order 2023-11-15 17:58:38 -05:00
a3ca71ec01 Lots more setup options, still working on them 2023-11-15 17:58:04 -05:00
e0543e8af5 Implement flexible preconditioned CG 2023-11-15 17:57:39 -05:00
c1eb80d01a Print which have converged 2023-11-15 17:57:08 -05:00
a26121d97b Better printing 2023-11-15 17:56:45 -05:00
043031a757 Report resid on failed convergence 2023-11-15 17:56:22 -05:00
807aeebe4c Resize tol in constructor 2023-11-15 17:55:57 -05:00
8aa1a37aad For Mirs preconditioner solver 2023-11-15 17:55:32 -05:00
4efa042f50 C++17 change 2023-10-24 10:57:50 -04:00
c7cb37e970 c++17 accepted 2023-10-24 10:57:24 -04:00
d34b207eab Avoid HIP warnings 2023-10-24 10:57:04 -04:00
0e6fa6f6b8 Don't need the Cshift for the period optimisation 2023-10-24 10:56:31 -04:00
38b87de53f This works around a stacksize limit on AMD GPU 2023-10-24 10:56:07 -04:00
aa5047a9e4 Faster blockProject blockPromote 2023-10-24 10:49:55 -04:00
24b6ee0df9 M4 file 2023-10-24 10:36:48 -04:00
1e79cc9cbe Avoid compiler error 2023-10-24 10:36:09 -04:00
b3925df9c3 Verbose on CPU-GPU xfer, remove performance by default 2023-10-24 10:25:01 -04:00
351795ac3a Better messaging 2023-10-20 19:33:04 -04:00
9c9c42d0df Tests on frontier with real speed up . 3.5x on 16^3 at mq=0.01 2023-10-20 19:27:13 -04:00
b6ad1bafc7 Normal memory SendToRecvFrom asynchronous for use in general stencil
code
2023-10-20 19:27:13 -04:00
a5ca40f446 Better verbose -- track CPU GPU motion under --log Memory, others go to
debug output stream
2023-10-20 19:27:13 -04:00
9ab54c5565 Overlap comms & data copy/buffer assembly in Ghost zone exchange 2023-10-20 19:27:13 -04:00
4341d96bde Massively sped up coarse grid mult, comms
Save 3ms spend (60% of time !) on cudaMalloc !!
2023-10-20 19:27:13 -04:00
5fac47a26d Faster halo exchange 2023-10-20 19:27:13 -04:00
e064f17346 Faster halo exchange 2023-10-20 19:27:13 -04:00
afe10ba2a2 More digits 2023-10-20 19:27:13 -04:00
7cc3435ba8 Improved General coarsened matrix 2023-10-20 19:27:13 -04:00
541772313c Verbosity 2023-10-20 19:27:13 -04:00
3747494a09 Notify delet public 2023-10-20 19:27:13 -04:00
f2b98d0dcc Const safety 2023-10-20 19:27:13 -04:00
80471bf762 Alternate implementation involving face operations 2023-10-20 19:27:13 -04:00
a06f63c110 Improved I/O and non-lexico option exposed to SciDAC format 2023-10-20 19:27:13 -04:00
0ae4478cd9 Checkpoint the subspace and ldop 2023-10-20 19:27:13 -04:00
ae4e705e09 Use random vec as easier for debug 2023-10-20 19:27:13 -04:00
f5dcea9dbf Updates for Frontier 2023-10-20 19:27:12 -04:00
2207309f8a Spack rules 2023-10-16 18:38:24 -04:00
2111e7ab5f Run at physical mass 2023-10-06 21:20:21 -04:00
d29abfdcaf Transfer code to Frontier now 2023-10-06 21:03:34 -04:00
a751c42cc5 Checkpoint restore the setup 2023-10-06 21:03:08 -04:00
6a3bc9865e Verbose change 2023-10-06 21:02:04 -04:00
4d5f7e4377 Verbose change 2023-10-06 21:01:37 -04:00
78b117fb78 Comment fix 2023-10-06 21:01:15 -04:00
ded63a1319 Verbose change/pretty print 2023-10-06 21:00:53 -04:00
df3e4d1e9c Return fix 2023-10-06 21:00:21 -04:00
b58fd80379 I/O for coarse op and reorganise multigrid headers 2023-10-06 13:43:46 -04:00
7f6e0f57d0 No IO in file 2023-10-06 13:39:53 -04:00
cae27678d8 gpermute 2023-10-06 13:39:19 -04:00
48ff655bad Slightly less verbose 2023-10-06 10:47:52 -04:00
2525ad4623 Slight clean up 2023-10-06 10:47:32 -04:00
e7020017c5 Reorganise multigrid 2023-10-06 10:47:12 -04:00
eacebfad74 Reorganise multigrid into multiple headers 2023-10-06 10:46:21 -04:00
3bc2da5321 Merge branch 'feature/scidac-wp1' of https://github.com/paboyle/Grid into feature/scidac-wp1 2023-10-05 16:57:59 -04:00
2d710d6bfd Optimised parameters for 16^3 2023-10-05 16:56:55 -04:00
6532b7f32b Eliminate older inefficient coarsening implementation 2023-10-05 16:56:15 -04:00
7b41b92d99 Only need to pad non-local dimensions 2023-10-05 16:55:48 -04:00
dd557af84b ADEF1 and ADEF2 2 level CG 2023-10-05 16:55:19 -04:00
59b9d0e030 coalesceRead the blockSum 2023-10-05 16:54:48 -04:00
b82eee4733 Hermitian dealing with 2023-10-05 16:54:14 -04:00
6a87487544 Running on Frontier, fix RNG big volume y2k, affecting 5D RNG 2023-10-05 16:50:59 -04:00
fcf5023845 Running on Frontier 2023-10-05 16:50:59 -04:00
c8adad6d8b First runs on Summit. PopulateAdag needs work 2023-10-05 16:50:54 -04:00
737d3ffb98 ADEF1 and 1 hop projection 2023-10-03 14:22:18 -04:00
b01e67bab1 coalescedReadGeneralPermute now working 2023-10-02 17:46:57 -04:00
8a70314f54 Merge branch 'develop' into feature/scidac-wp1 2023-10-02 17:24:55 -04:00
afc316f501 Rename headers 2023-10-02 16:25:11 -04:00
f14bfd5c1b Relocate sub includes 2023-10-02 16:23:38 -04:00
c5f1420dea Merge remote-tracking branch 'LupoA/develop' into LupoA-develop 2023-10-02 16:22:35 -04:00
018e6da872 Merge pull request #440 from giltirn/feature/paddedcellgauge
Feature/paddedcellgauge
2023-10-02 10:00:42 -04:00
b77bccfac2 Merge pull request #444 from mmphys/feature/docX
Update doc complete list of Macports needed to build Grid on a fresh Mac
2023-10-02 09:57:11 -04:00
36ae6e5aba Fastest GPU version.
Need to work on the PaddedCell now to make much faster
2023-09-29 18:26:51 -04:00
9db585cfeb Temporary commit while optimisation is carried out 2023-09-29 17:11:35 -04:00
c564611ba7 Annoying hack that is useful to preserve for profiling 2023-09-29 17:11:12 -04:00
e187bcb85c Updating 2023-09-29 17:10:17 -04:00
be18ffe3b4 Further tuning and lanczos 2023-09-27 16:21:58 -04:00
0d63dce4e2 Timing info 2023-09-27 16:21:14 -04:00
26b30e1551 Flop count and projection to nearest neighbour (keeps redundant flops) 2023-09-27 16:20:11 -04:00
7fc58ac293 Verbose subspace init 2023-09-27 16:19:45 -04:00
3a86cce8c1 Compile 2023-09-27 16:19:18 -04:00
80359e0d49 Bland SYCL compile 2023-09-26 13:20:27 -07:00
3d437c5cc4 Making SYCL happy 2023-09-26 13:19:42 -07:00
37884d369f Coarse space is expensive, but gives a speed up in fine matrix multiplies now.
Down to optimisation
2023-09-25 17:24:19 -04:00
9246e653cd Basic non-local coarsening of operator test 2023-09-25 17:20:58 -04:00
64283c8673 Normal equations becomes linear function for easy base class pass around 2023-09-25 17:19:39 -04:00
755002da9c Comparison convenience 2023-09-25 17:16:33 -04:00
31b8e8b437 Better messaging 2023-09-25 17:16:14 -04:00
0ec0de97e6 Adef2 implemented and working in an HDCG like context 2023-09-25 17:15:03 -04:00
6c3ade5d89 Improved the coarsening 2023-09-25 17:14:40 -04:00
980c5f9a34 Update chebyshev setup 2023-09-25 17:12:22 -04:00
471ca5f281 Power method more iterations 2023-09-07 10:55:05 -04:00
e82ddcff5d Working getting closer to HDCG but some low level engineering work still needed
+ MUCH work on optimisation
2023-09-07 10:53:51 -04:00
b9dcad89e8 Test cases for coarsening with non-local stencil 2023-09-07 10:53:22 -04:00
993f43ef4a Even odd use case 2023-09-07 10:53:06 -04:00
2b43308208 First cut non-local coarsening 2023-08-25 17:38:07 -04:00
04a1ac3a76 First cut for non-local coarsening 2023-08-25 17:37:38 -04:00
990b8798bd Merge remote-tracking branch 'refs/remotes/origin/develop' into develop 2023-08-25 17:36:45 -04:00
b334a73a44 Stencil improvement 2023-08-25 17:35:10 -04:00
5d113d1c70 Odd address sanitizer complain 2023-08-25 17:34:18 -04:00
c14977aeab Random vector option for test purposes 2023-08-25 17:33:31 -04:00
3e94838204 Spread out improvement 2023-08-25 17:31:28 -04:00
c0a0b8ca62 NEON and address sanitiser 2023-08-25 17:30:30 -04:00
bd56c95a6f Update documentation with complete list of Macports needed to build Grid on a fresh Mac 2023-07-14 13:50:06 +01:00
dbd8bb49dc Merge pull request #32 from LupoA/sp2n/develop
Sp2n/develop
2023-07-04 15:23:43 +00:00
3a29af0ce4 Fixed linker error 2023-07-04 16:08:44 +01:00
f7b79cdd45 Added test for ProjectSpn 2023-07-03 18:00:32 +01:00
075b9d22d0 adjoint rep implemented as 2indx symmetric 2023-07-02 13:58:31 +01:00
b92428f05f better test 2023-07-02 13:34:03 +01:00
34b11864b6 prettiest tests 2023-07-02 13:25:57 +01:00
1dfaa08afb The stencils for the staple and rect-staple padded cell implementations are now created and stored by workspace classes that allow for reuse providing the grids remain consistent
The workspaces are now used by the plaq+rectangle gauge action resulting in a further 2x performance improvement as measured on a 16^4 local volume for 2 nodes (16 ranks) of Crusher
2023-06-28 15:11:24 -04:00
f44dce390f Implemented accelerator-optimized versions of localCopyRegion and insertSliceLocal to speed up padding
Fixed const correctness on PaddedCell methods
Fixed compile issues on Crusher
Added timing breakdowns for PaddedCell::Expand and the padded implementations of the staples, visible under --log Performance
Optimized kernel for StaplePadded
Test_iwasaki_action_newstaple now repeats the calculation 10 times and reports average timings
2023-06-27 14:58:10 -04:00
bb71e9a96a Added PaddedCell and GeneralisedLocalStencil header includes to standard base headers
Moved versions of the padded-cell implementations of staple and rect-staple from test code to WilsonLoops header
Added StapleAndRectStapleAll which is now called by the plaq+rectangle action class. Under the hood it uses the padded cell implementations with maximal reuse of the padded gauge links
2023-06-27 11:23:30 -04:00
6f6844ccf1 Added new StapleAll and RectStapleAll functions that return the staples for all mu as an array
Modified plaq+rectangle gauge actions to use the above
Added a test code to confirm the above changes
2023-06-26 15:48:47 -04:00
4c6613d72c Modified RectStapleDouble and RectStapleOptimised to use Gauge-BC respecting CshiftLink
Added test code tests/debug/Test_optimized_staple_gaugebc demonstrating equivalence of above to RectStapleUnoptimised for cconj gauge BCs
Removed optimized staple only being used for periodic gauge BCs; it is now always used
2023-06-26 10:20:23 -04:00
559257bbe9 better documentation and filelist names 2023-06-23 16:16:48 +01:00
cff1f8d3b8 rm unused variables and formatting 2023-06-23 16:04:18 +01:00
f27d2083cd adjustments in SUn and Sp2n impl 2023-06-23 15:34:08 +01:00
36cc9c524f Threaded the constructor of GeneralLocalStencil 2023-06-23 09:57:38 -04:00
2822487450 rm unnecessary line 2023-06-23 14:55:23 +01:00
e07fafe46a minor adjustments to twoindex 2023-06-23 12:18:04 +01:00
063d290bd8 missing function 2023-06-23 11:11:20 +01:00
4e6194d92a Avoid code duplication in ProjectSUn 2023-06-23 11:03:50 +01:00
de30c4e22a minor improvements 2023-06-23 10:49:41 +01:00
4241c7d4a3 Imported coalescedReadGeneralPermute GPU implementation from Christoph
Fixed bug in padded staple code where extract was being called on the result before the GPU view was closed
Fixed compile issue with pointer cast in padded staple code
Added timing summaries of padded staple code and timing breakdown of staple implementation to Test_padded_cell_staple
2023-06-21 16:01:01 -04:00
7b11075102 The user can now specify the implementation of Cshift used by the PaddedCell class through a virtual base class API. Implementations for default (regular Cshift) and for gauge links (which respects the gauge BCs)
Fixed const-correctness for PaddedCell and ConjugateGimpl::setDirections
Modified test code for padded-cell implementation of staple, rect-staple to use cconj BCs
2023-06-20 17:09:56 -04:00
abc658dca5 Added coalescedReadGeneralPermute CPU implementation based on Christoph's GPT code
In a test code, implemented a padded-cell version of the staple and rectangular-staple calculation
2023-06-20 16:14:25 -04:00
2372275b2c Merge pull request #36 from LupoA/sp2n/gpu-bugfix
Sp2n/gpu bugfix [close #30]
2023-06-20 13:46:00 +01:00
ef736e8aa4 Merge pull request #35 from LupoA/sp2n/enableSp
consistent enable sp config flag
2023-06-20 10:41:09 +00:00
5e539e2d54 Forgot some follow-ups on changed signature 2023-06-18 12:37:51 +01:00
96773f5254 Apparently forgot to remove one Lattice version 2023-06-18 12:21:39 +01:00
d80df09f3b consistent enable sp config flag 2023-06-16 19:16:46 +01:00
621e612c30 Fix non-zero ret on device bug 2023-06-16 16:27:49 +01:00
8c3792721b ClangFormat 2023-06-16 15:58:23 +01:00
c95bbd3948 Remove accelerated lattice version 2023-06-16 15:50:26 +01:00
e28ab7a732 Re-included instantiations for symmetric 2Index AS Sp 2023-06-16 14:20:37 +01:00
c797cbe737 deal with post-merge trauma 2023-06-16 14:20:37 +01:00
e09dfbf1c2 definitely the right merge upstream/develop 2023-06-16 14:19:46 +01:00
116d90b0ee First attempt on #30 2023-06-15 15:09:37 +01:00
b0646ca187 Remove some unused variables 2023-06-15 15:09:09 +01:00
4895ff260e Merge pull request #28 from LupoA/sp2n/config
compile sp2n fermion impl only if declared at config time
2023-06-09 13:07:48 +00:00
470d93006a compile sp2n fermion impl only if declared at config time 2023-06-07 12:53:33 +01:00
2f3d03f188 Merge pull request #27 from LupoA/sp2n/documentation
documentation for gaugegroup and sp2n
2023-06-01 16:42:27 +00:00
8db7c23bee improve documentation 2023-06-01 17:39:10 +01:00
69dc5172dc Merge pull request #26 from LupoA/sp2n/irreps
Sp2n/irreps
2023-06-01 16:28:15 +00:00
fd72eb6546 Merge branch 'sp2n/algorithm' into sp2n/irreps 2023-06-01 17:24:01 +01:00
b405767569 make private methods private 2023-05-26 17:02:16 +01:00
fe88a0c12f cleaner twoindex class, cleaner tests 2023-05-26 16:55:30 +01:00
e61a9ed2b4 partial revert 2023-05-26 13:54:26 +01:00
de8daa3824 group is SUn by default 2023-05-26 13:44:41 +01:00
3a50fb29cb directly call sp helper 2023-05-26 13:28:47 +01:00
6647d2656f rm unnecessary specialisation 2023-05-26 12:27:22 +01:00
a6f4dbeb6d remove redundant template parameter 2023-05-26 12:13:40 +01:00
92a282f2d8 Merge pull request #24 from LupoA/sp2n/fix_static_assert_symmetric
Move static_assert inside of function
2023-05-26 11:13:50 +01:00
ca2fd9fc7b documentation for gaugegroup and sp2n 2023-05-25 18:40:54 +01:00
be1a4f5860 implement TwoIndexSymm for sp2n 2023-05-22 17:21:03 +01:00
5897b93dd4 debug tests, fix dimension 2023-05-22 13:42:21 +01:00
af091e0881 DimensionHelper for 2index irreps 2023-05-21 16:56:06 +01:00
3c1e5e9517 Merge pull request #25 from LupoA/sp2n/unify_representations
Sp2n/unify representations [close #3]
2023-05-21 14:55:27 +01:00
85b2cb7a8a changing some hardcoded SUn lines 2023-05-21 14:50:28 +01:00
b8bdc2eefb Unified two index representations 2023-05-18 18:36:29 +01:00
0078826ff1 Move static_assert inside of function 2023-05-18 18:14:53 +01:00
e855c41772 Unified spfundamental.h with fundamental.h 2023-05-18 18:11:20 +01:00
d169c275b6 Merge pull request #22 from LupoA/sp2n/unify_twoindex
Unify TwoIndex
2023-05-18 14:55:02 +00:00
a5125e23f4 Typo 2023-05-18 15:41:35 +01:00
7b83c80757 Merge branch 'sp2n/unify_twoindex' of github.com:LupoA/Grid into sp2n/unify_twoindex 2023-05-18 15:36:14 +01:00
e41821e206 Disable two index symmetric 2023-05-18 15:29:55 +01:00
5a75ab15a2 typo in 2S dim 2023-05-17 20:47:57 +01:00
932c783fbf 2AS for every Nc! 2023-05-17 20:22:05 +01:00
55f9cce577 Revert "Added automated HMC test for Nc=4"
This reverts commit eee27b8b30.
2023-05-17 09:17:48 +01:00
b3533ca847 correct tests (failing) 2023-05-16 17:43:52 +01:00
fd2a637010 test 2index 2023-05-16 14:10:39 +01:00
eee27b8b30 Added automated HMC test for Nc=4 2023-05-15 18:37:33 +01:00
8522352aa3 ClangFormat 2023-05-15 18:36:05 +01:00
3beb8f4091 fixing typo, getting pre-changes physics 2023-05-15 16:00:15 +01:00
12a706e9b1 de-hardcode the number of generators 2023-05-15 15:48:21 +01:00
170aa7df01 fix (dimension to be improved) 2023-05-15 15:20:18 +01:00
e8ad1fef53 Unify TwoIndex 2023-05-12 14:35:50 +01:00
aa9df63a05 rename group projections based on determinants 2023-05-10 14:50:52 +01:00
3953312a93 Merge pull request #20 from LupoA/sp2n/unify_gaugeimpltypes
Sp2n/unify gaugeimpltypes
2023-05-03 15:17:10 +00:00
6e62f4f616 ClangFormat 2023-05-03 16:15:12 +01:00
6a7bdca53b Take over additional algebra tests from Alessandro 2023-05-03 16:02:02 +01:00
c7fba9aace Take over additional group tests from Alessandro 2023-05-03 16:01:48 +01:00
ac6c7cb8d6 Merge in Alessandro's changes [test fails] 2023-05-03 02:53:03 +01:00
c5924833a1 ClangFormat 2023-05-03 02:39:36 +01:00
ac0a74be0d Taken care of algebra tests 2023-05-03 02:32:42 +01:00
42b0e1125d Naming and argument types 2023-05-03 01:51:46 +01:00
339c4fda79 Extracted is_element_of Sp2n 2023-05-02 15:44:34 +01:00
9b85bf9402 better projection test 2023-05-02 15:42:20 +01:00
86b02c3cd8 cleaning up requested by Julian 2023-05-02 13:31:17 +01:00
7b3b7093fa cleaning up requested by Ed 2023-05-02 12:50:57 +01:00
881b08a465 Correct implementation of SpTa 2023-04-27 18:17:06 +01:00
3ee5444c69 Remove commented out stuff 2023-04-21 08:08:18 +01:00
5e28fe56d2 Remove code duplication: Iterating through vectors 2023-04-21 08:08:06 +01:00
5aabe074fe Rename Sympl* to Sp* 2023-04-18 11:50:20 +01:00
dace904c10 fix typo 2023-04-14 18:06:18 +01:00
be98d26610 small change I missed in previous commit 2023-04-13 17:48:43 +01:00
178376f24b minor stylistic changes 2023-04-06 12:08:17 +01:00
6a0eb466ee Merge pull request #19 from LupoA/refactoring_sp2n
refactoring sp2n
2023-04-05 10:50:58 +00:00
4ea29b8f0f Template group into GaugeImplTypes. Closing #2 2023-04-04 17:49:28 +01:00
778291230a expand ProjectOnGaugeGroup, change ProjectOnSp2nAlgebra into SpTa, fixing some of its issues 2023-04-04 17:48:13 +01:00
026e736dfa Projection on algebra can now be templated. Fix #12 2023-04-03 16:31:19 +01:00
4275b3f431 Fix typo and remove unnecessary lines 2023-04-03 12:01:52 +01:00
1b8176e2c0 fix code duplication 2023-03-17 14:58:00 +00:00
cbc053c3db Revert "projection on Sp2n algebra, to be used instead of Ta"
This reverts commit ba7f9d7b70.
2023-03-17 11:36:58 +00:00
cdf3f6ef6e Merge branch 'refactoring_sp2n' of https://github.com/LupoA/Grid into refactoring_sp2n 2023-03-15 15:59:50 +00:00
ba7f9d7b70 projection on Sp2n algebra, to be used instead of Ta 2023-03-15 15:55:12 +00:00
371fd123fb consequence of iSUnMatrix being no longer a member of the SU class 2023-03-14 10:47:07 +00:00
d6ff644aab Towards the day all tests compile 2023-03-14 10:43:25 +00:00
29586f6b5e Deactivate some tests for Nc!=3 2023-03-13 08:17:14 +00:00
fd057c838f add ProjectOnGaugeGroup and ProjectGn to allow future templating in GaugeImplTypes 2023-03-10 12:10:46 +00:00
f51222086c Move functions from GaugeGroup to group specific implementations 2023-03-09 16:22:20 +00:00
f73691ec47 Merge pull request #18 from nickforce989/sp2n/newbranch
Sp2n/newbranch
2023-02-13 10:22:27 +01:00
7ebda3e9ec Merge commit 'b10e1b7bc8bec809f874e9e48a3ccc7b2619c9d1' into sp2n/newbranch 2023-01-19 12:10:18 +00:00
b10e1b7bc8 Fixed files giving zero force computation on GPU, issue #8 2023-01-18 18:04:47 +00:00
d7dea44ce7 Merge pull request #17 from chillenzer/unify_gauge_groups
Fix compilation error in nvcc (closes #15)
2022-12-19 16:24:03 +00:00
37b6b82869 Fix file extensions 2022-12-18 16:12:56 +00:00
92ad5b8f74 Compiler error fix: NVCC requires names for templ. par. 2022-12-18 15:50:19 +00:00
8c80f1c168 Merge pull request #14 from chillenzer/unify_gauge_groups
Unify gauge groups (closes #5)
2022-12-01 17:35:46 +00:00
0af7d5a793 Rename Grid/qcd/utils/<Group>_impl.h -> Grid/qcd/utils/<Group>.h 2022-11-30 17:12:00 +00:00
505fa49983 Renamed SUn.h -> GaugeGroup.h 2022-11-30 17:09:48 +00:00
7bcf33def9 Removed Sp2n.h 2022-11-30 16:59:46 +00:00
a13820656a Removed iSUnMatrix, etc. 2022-11-30 15:09:03 +00:00
fa71b46a41 Hide nsp 2022-11-30 14:44:23 +00:00
b8b3ae6ac1 Make helper functions private 2022-11-30 13:29:14 +00:00
55c008da21 Removed forward declaration 2022-11-30 13:12:21 +00:00
2507606bd0 With function overloading (still dirty). 2022-11-30 12:54:36 +00:00
7c2ad4f8c8 Attempt with SFINAE (failed) 2022-11-30 11:57:39 +00:00
54c8025aad Remove unnecessary pwd in scripts/filelist 2022-11-28 17:50:38 +00:00
921e23e83c Separated out everything SU specific 2022-11-28 17:47:50 +00:00
6e750ecb0e Remove apparently forgotten file 2022-11-28 16:33:46 +00:00
b8f1f5d2a3 Introduce GaugeGroup 2022-11-25 17:45:32 +00:00
9273f2937c Autoformat google style 2022-11-25 17:44:08 +00:00
1aa28b47ae Add existing test to check 2022-11-25 17:40:40 +00:00
629cb2987a Fix typo in Makefile.am 2022-11-25 17:40:21 +00:00
03235d6368 Fixed typo in configure.ac 2022-11-25 16:57:40 +00:00
22064c7e4c Fixing #11 2022-11-25 13:10:29 +00:00
2de03e5172 Revert "Revert "Fixing issue #11: consistent use of ncolour and nsp""
This reverts commit 3af4929dda.
2022-11-23 19:40:28 +00:00
3af4929dda Revert "Fixing issue #11: consistent use of ncolour and nsp"
This reverts commit 1ba429345b.
2022-11-23 19:34:59 +00:00
1ba429345b Fixing issue #11: consistent use of ncolour and nsp 2022-11-23 18:45:01 +00:00
88bdd4344b 2indx antisymm representation of sp2n 2021-11-04 18:27:35 +00:00
4044536eea add projection on sp2n algebra 2021-10-26 10:20:44 +01:00
4d8ae6221c fix projection 2021-10-22 10:44:54 +01:00
4e31e4e094 Better tests 2021-10-13 15:07:23 +01:00
0d6674e489 hot start for sp2n 2021-10-12 18:53:54 +01:00
b145fd4f5b necessary to merge 2021-10-12 17:08:46 +01:00
8a5b794f25 necessary change to merge with upstream 2021-10-12 16:04:03 +01:00
291e80f88a sp2n as config option 2021-10-12 16:00:32 +01:00
1ace5850ae first hmc 2021-10-12 16:00:32 +01:00
283f14b7c1 fix sp2n projection 2021-10-12 16:00:32 +01:00
1d6e708083 tests! 2021-10-12 16:00:32 +01:00
89457e25e3 sp fermion instantiation 2021-10-12 16:00:32 +01:00
7e3b298d3d project on sp2n 2021-10-12 16:00:32 +01:00
7ff3e5eed4 gauge and fermion implementation for sp2n 2021-10-12 16:00:32 +01:00
19eb51cf41 sp2n generators 2021-10-12 15:53:33 +01:00
470d4dcc6d sp2n as config option 2021-10-12 15:47:56 +01:00
ed03bfd555 first hmc 2021-10-12 12:16:47 +01:00
8c0fbcccae fix sp2n projection 2021-10-12 12:12:16 +01:00
d4866157fe tests! 2021-10-12 09:06:15 +01:00
b6496b6cb5 sp fermion instantiation 2021-10-11 16:32:10 +01:00
4f5fe57920 project on sp2n 2021-10-11 16:28:15 +01:00
11fb943b1e gauge and fermion implementation for sp2n 2021-10-11 16:21:25 +01:00
046a23121e sp2n generators 2021-10-05 15:51:22 +01:00
144 changed files with 13301 additions and 2324 deletions

View File

@@ -59,6 +59,7 @@ Author: paboyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice.h>
#include <Grid/cshift/Cshift.h>
#include <Grid/stencil/Stencil.h>
#include <Grid/stencil/GeneralLocalStencil.h>
#include <Grid/parallelIO/BinaryIO.h>
#include <Grid/algorithms/Algorithms.h>
NAMESPACE_CHECK(GridCore)

View File

@@ -66,6 +66,10 @@ if BUILD_FERMION_REPS
extra_sources+=$(ADJ_FERMION_FILES)
extra_sources+=$(TWOIND_FERMION_FILES)
endif
if BUILD_SP
extra_sources+=$(SP_FERMION_FILES)
extra_sources+=$(SP_TWOIND_FERMION_FILES)
endif
lib_LIBRARIES = libGrid.a

View File

@@ -29,6 +29,9 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#ifndef GRID_ALGORITHMS_H
#define GRID_ALGORITHMS_H
NAMESPACE_CHECK(blas);
#include <Grid/algorithms/blas/BatchedBlas.h>
NAMESPACE_CHECK(algorithms);
#include <Grid/algorithms/SparseMatrix.h>
#include <Grid/algorithms/LinearOperator.h>
@@ -44,7 +47,10 @@ NAMESPACE_CHECK(SparseMatrix);
#include <Grid/algorithms/approx/RemezGeneral.h>
#include <Grid/algorithms/approx/ZMobius.h>
NAMESPACE_CHECK(approx);
#include <Grid/algorithms/iterative/Deflation.h>
#include <Grid/algorithms/deflation/Deflation.h>
#include <Grid/algorithms/deflation/MultiRHSBlockProject.h>
#include <Grid/algorithms/deflation/MultiRHSDeflation.h>
NAMESPACE_CHECK(deflation);
#include <Grid/algorithms/iterative/ConjugateGradient.h>
NAMESPACE_CHECK(ConjGrad);
#include <Grid/algorithms/iterative/BiCGSTAB.h>
@@ -67,10 +73,10 @@ NAMESPACE_CHECK(BiCGSTAB);
#include <Grid/algorithms/iterative/MixedPrecisionFlexibleGeneralisedMinimalResidual.h>
#include <Grid/algorithms/iterative/ImplicitlyRestartedLanczos.h>
#include <Grid/algorithms/iterative/PowerMethod.h>
#include <Grid/algorithms/iterative/AdefGeneric.h>
NAMESPACE_CHECK(PowerMethod);
#include <Grid/algorithms/CoarsenedMatrix.h>
NAMESPACE_CHECK(CoarsendMatrix);
#include <Grid/algorithms/multigrid/MultiGrid.h>
NAMESPACE_CHECK(multigrid);
#include <Grid/algorithms/FFT.h>
#endif

View File

@@ -145,6 +145,44 @@ public:
}
};
////////////////////////////////////////////////////////////////////
// Create a shifted HermOp
////////////////////////////////////////////////////////////////////
template<class Field>
class ShiftedHermOpLinearOperator : public LinearOperatorBase<Field> {
LinearOperatorBase<Field> &_Mat;
RealD _shift;
public:
ShiftedHermOpLinearOperator(LinearOperatorBase<Field> &Mat,RealD shift): _Mat(Mat), _shift(shift){};
// Support for coarsening to a multigrid
void OpDiag (const Field &in, Field &out) {
assert(0);
}
void OpDir (const Field &in, Field &out,int dir,int disp) {
assert(0);
}
void OpDirAll (const Field &in, std::vector<Field> &out){
assert(0);
};
void Op (const Field &in, Field &out){
assert(0);
}
void AdjOp (const Field &in, Field &out){
assert(0);
}
void HermOpAndNorm(const Field &in, Field &out,RealD &n1,RealD &n2){
HermOp(in,out);
ComplexD dot = innerProduct(in,out);
n1=real(dot);
n2=norm2(out);
}
void HermOp(const Field &in, Field &out){
_Mat.HermOp(in,out);
out = out + _shift*in;
}
};
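// Illustrative usage (a sketch only, not part of this header): wrap an existing Hermitian
// operator to apply M + shift without modifying M itself, e.g.
//   ShiftedHermOpLinearOperator<Field> Shifted(HermOp,0.1); // hypothetical shift value
//   Shifted.HermOp(in,out);                                 // out = M in + 0.1 in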
////////////////////////////////////////////////////////////////////
// Wrap an already herm matrix
////////////////////////////////////////////////////////////////////

View File

@@ -90,9 +90,8 @@ public:
order=_order;
if(order < 2) exit(-1);
Coeffs.resize(order);
Coeffs.assign(0.,order);
Coeffs[order-1] = 1.;
Coeffs.resize(order,0.0);
Coeffs[order-1] = 1.0;
};
// PB - more efficient low pass drops high modes above the low as 1/x uses all Chebyshev's.

View File

@@ -40,7 +40,7 @@ public:
RealD norm;
RealD lo,hi;
MultiShiftFunction(int n,RealD _lo,RealD _hi): poles(n), residues(n), lo(_lo), hi(_hi) {;};
MultiShiftFunction(int n,RealD _lo,RealD _hi): poles(n), residues(n), tolerances(n), lo(_lo), hi(_hi) {;};
RealD approx(RealD x);
void csv(std::ostream &out);
void gnuplot(std::ostream &out);

View File

@@ -0,0 +1,685 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: BatchedBlas.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#ifdef GRID_HIP
#include <hipblas/hipblas.h>
#endif
#ifdef GRID_CUDA
#include <cublas_v2.h>
#endif
#ifdef GRID_SYCL
#error // need oneMKL version
#endif
///////////////////////////////////////////////////////////////////////
// Need to rearrange lattice data to be in the right format for a
// batched multiply. Might as well make these static, dense packed
///////////////////////////////////////////////////////////////////////
NAMESPACE_BEGIN(Grid);
#ifdef GRID_HIP
typedef hipblasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_CUDA
typedef cublasHandle_t gridblasHandle_t;
#endif
#ifdef GRID_SYCL
typedef int32_t gridblasHandle_t;
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
typedef int32_t gridblasHandle_t;
#endif
enum GridBLASOperation_t { GridBLAS_OP_N, GridBLAS_OP_T, GridBLAS_OP_C } ;
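// Maps onto the standard BLAS operation flags: 'N' (no transpose), 'T' (transpose), 'C' (conjugate transpose)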
class GridBLAS {
public:
static gridblasHandle_t gridblasHandle;
static int gridblasInit;
static void Init(void)
{
if ( ! gridblasInit ) {
#ifdef GRID_CUDA
std::cout << "cublasCreate"<<std::endl;
cublasCreate(&gridblasHandle);
#endif
#ifdef GRID_HIP
std::cout << "hipblasCreate"<<std::endl;
hipblasCreate(&gridblasHandle);
#endif
#ifdef GRID_SYCL
#endif
gridblasInit=1;
}
}
// Force construct once
GridBLAS() { Init(); };
~GridBLAS() { };
/////////////////////////////////////////////////////////////////////////////////////
// BLAS GEMM conventions:
/////////////////////////////////////////////////////////////////////////////////////
// - C = alpha A * B + beta C
// Dimensions:
// - C_m.n
// - A_m.k
// - B_k.n
// - Flops = 8 M N K
// - Bytes = 2*sizeof(word) * (MN+MK+KN)
// Example: M=60, N=12, K=60
// Flop/Byte = 8*60*60*12 / (16*(60*12 + 60*60 + 60*12)) ≈ 4, so expect about 4 TF/s on a GCD
/////////////////////////////////////////////////////////////////////////////////////
void synchronise(void)
{
#ifdef GRID_HIP
auto err = hipDeviceSynchronize();
assert(err==hipSuccess);
#endif
#ifdef GRID_CUDA
auto err = cudaDeviceSynchronize();
assert(err==cudaSuccess);
#endif
#ifdef GRID_SYCL
accelerator_barrier();
#endif
}
void gemmBatched(int m,int n, int k,
ComplexD alpha,
deviceVector<ComplexD*> &Amk, // pointer list to matrices
deviceVector<ComplexD*> &Bkn,
ComplexD beta,
deviceVector<ComplexD*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
ComplexF alpha,
deviceVector<ComplexF*> &Amk, // pointer list to matrices
deviceVector<ComplexF*> &Bkn,
ComplexF beta,
deviceVector<ComplexF*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
RealD alpha,
deviceVector<RealD*> &Amk, // pointer list to matrices
deviceVector<RealD*> &Bkn,
RealD beta,
deviceVector<RealD*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(int m,int n, int k,
RealF alpha,
deviceVector<RealF*> &Amk, // pointer list to matrices
deviceVector<RealF*> &Bkn,
RealF beta,
deviceVector<RealF*> &Cmn)
{
gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
m,n,k,
alpha,
Amk,
Bkn,
beta,
Cmn);
}
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
ComplexD alpha,
deviceVector<ComplexD*> &Amk, // pointer list to matrices
deviceVector<ComplexD*> &Bkn,
ComplexD beta,
deviceVector<ComplexD*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<ComplexD> alpha_p(1);
static deviceVector<ComplexD> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
RealD t0=usecond();
// std::cout << "ZgemmBatched mnk "<<m<<","<<n<<","<<k<<" count "<<batchCount<<std::endl;
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasZgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(hipblasDoubleComplex *) &alpha_p[0],
(hipblasDoubleComplex **)&Amk[0], lda,
(hipblasDoubleComplex **)&Bkn[0], ldb,
(hipblasDoubleComplex *) &beta_p[0],
(hipblasDoubleComplex **)&Cmn[0], ldc,
batchCount);
// std::cout << " hipblas return code " <<(int)err<<std::endl;
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasZgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(cuDoubleComplex *) &alpha_p[0],
(cuDoubleComplex **)&Amk[0], lda,
(cuDoubleComplex **)&Bkn[0], ldb,
(cuDoubleComplex *) &beta_p[0],
(cuDoubleComplex **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Reference host implementation; assumes GridBLAS_OP_N for both operands
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = alpha*c_mn + beta*Cmn[p][mm + nn*ldc];
}
}
}
#endif
// synchronise();
RealD t1=usecond();
RealD flops = 8.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(ComplexD)*(m*k+k*n+m*n)*batchCount;
// std::cout <<GridLogMessage<< " batched Blas copy "<<(t0-t2)/1.e3 <<" ms "<<std::endl;
// std::cout <<GridLogMessage<< " batched Blas zGemm call "<<m<<","<<n<<","<<k<<" "<< flops/(t1-t0)/1.e3 <<" GF/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
// std::cout <<GridLogMessage<< " batched Blas zGemm call "<<m<<","<<n<<","<<k<<" "<< bytes/(t1-t0)/1.e3 <<" GB/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
}
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
ComplexF alpha,
deviceVector<ComplexF*> &Amk, // pointer list to matrices
deviceVector<ComplexF*> &Bkn,
ComplexF beta,
deviceVector<ComplexF*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<ComplexF> alpha_p(1);
static deviceVector<ComplexF> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexF));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexF));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasCgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(hipblasComplex *) &alpha_p[0],
(hipblasComplex **)&Amk[0], lda,
(hipblasComplex **)&Bkn[0], ldb,
(hipblasComplex *) &beta_p[0],
(hipblasComplex **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasCgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(cuComplex *) &alpha_p[0],
(cuComplex **)&Amk[0], lda,
(cuComplex **)&Bkn[0], ldb,
(cuComplex *) &beta_p[0],
(cuComplex **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Reference host implementation; assumes GridBLAS_OP_N for both operands
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexF c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = alpha*c_mn + beta*Cmn[p][mm + nn*ldc];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 8.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(ComplexF)*(m*k+k*n+m*n)*batchCount;
}
///////////////////////////////////////////////////////////////////////////
// Single precision real GEMM
///////////////////////////////////////////////////////////////////////////
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
RealF alpha,
deviceVector<RealF*> &Amk, // pointer list to matrices
deviceVector<RealF*> &Bkn,
RealF beta,
deviceVector<RealF*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<RealF> alpha_p(1);
static deviceVector<RealF> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(RealF));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(RealF));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasSgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(float *) &alpha_p[0],
(float **)&Amk[0], lda,
(float **)&Bkn[0], ldb,
(float *) &beta_p[0],
(float **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasSgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(float *) &alpha_p[0],
(float **)&Amk[0], lda,
(float **)&Bkn[0], ldb,
(float *) &beta_p[0],
(float **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Reference host implementation; assumes GridBLAS_OP_N for both operands
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
RealF c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = alpha*c_mn + beta*Cmn[p][mm + nn*ldc];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 2.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(RealF)*(m*k+k*n+m*n)*batchCount;
}
///////////////////////////////////////////////////////////////////////////
// Double precision real GEMM
///////////////////////////////////////////////////////////////////////////
void gemmBatched(GridBLASOperation_t OpA,
GridBLASOperation_t OpB,
int m,int n, int k,
RealD alpha,
deviceVector<RealD*> &Amk, // pointer list to matrices
deviceVector<RealD*> &Bkn,
RealD beta,
deviceVector<RealD*> &Cmn)
{
RealD t2=usecond();
int32_t batchCount = Amk.size();
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
if(OpA!=GridBLAS_OP_N)
lda = k;
if(OpB!=GridBLAS_OP_N)
ldb = n;
static deviceVector<RealD> alpha_p(1);
static deviceVector<RealD> beta_p(1);
// can prestore the 1 and the zero on device
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(RealD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(RealD));
RealD t0=usecond();
assert(Bkn.size()==batchCount);
assert(Cmn.size()==batchCount);
#ifdef GRID_HIP
hipblasOperation_t hOpA;
hipblasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = HIPBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = HIPBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = HIPBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = HIPBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = HIPBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = HIPBLAS_OP_C;
auto err = hipblasDgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasOperation_t hOpA;
cublasOperation_t hOpB;
if ( OpA == GridBLAS_OP_N ) hOpA = CUBLAS_OP_N;
if ( OpA == GridBLAS_OP_T ) hOpA = CUBLAS_OP_T;
if ( OpA == GridBLAS_OP_C ) hOpA = CUBLAS_OP_C;
if ( OpB == GridBLAS_OP_N ) hOpB = CUBLAS_OP_N;
if ( OpB == GridBLAS_OP_T ) hOpB = CUBLAS_OP_T;
if ( OpB == GridBLAS_OP_C ) hOpB = CUBLAS_OP_C;
auto err = cublasDgemmBatched(gridblasHandle,
hOpA,
hOpB,
m,n,k,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
batchCount);
assert(err==CUBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_SYCL
/*
int64_t m64=m;
int64_t n64=n;
int64_t k64=k;
int64_t batchCount64=batchCount;
oneapi::mkl::blas::column_major::gemm_batch(*theGridAccelerator,
onemkl::transpose::N,
onemkl::transpose::N,
&m64,&n64,&k64,
(double *) &alpha_p[0],
(double **)&Amk[0], lda,
(double **)&Bkn[0], ldb,
(double *) &beta_p[0],
(double **)&Cmn[0], ldc,
1,&batchCount64);
*/
//MKLs cblas_<T>gemm_batch & OneAPI
#warning "oneMKL implementation not built "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Reference host implementation; assumes GridBLAS_OP_N for both operands
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
RealD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[p][mm + kk*lda] * Bkn[p][kk + nn*ldb];
Cmn[p][mm + nn*ldc] = alpha*c_mn + beta*Cmn[p][mm + nn*ldc];
}
}
}
#endif
RealD t1=usecond();
RealD flops = 2.0*m*n*k*batchCount;
RealD bytes = 1.0*sizeof(RealD)*(m*k+k*n+m*n)*batchCount;
}
////////////////////////////////////////////////////////////////////////////////////////////////
// Strided case used by benchmark, but generally unused in Grid
// Keep a code example in double complex, but don't generate the single and real variants for now
////////////////////////////////////////////////////////////////////////////////////////////////
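// In the strided layout all batches of an operand occupy one contiguous allocation:
// matrix p of A starts at Amk + p*sda, of B at Bkn + p*sdb, of C at Cmn + p*sdc,
// with sda = m*k, sdb = k*n, sdc = m*n as set below.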
void gemmStridedBatched(int m,int n, int k,
ComplexD alpha,
ComplexD* Amk, // pointer list to matrices
ComplexD* Bkn,
ComplexD beta,
ComplexD* Cmn,
int batchCount)
{
// Use C-row major storage, so transpose calls
int lda = m; // m x k column major
int ldb = k; // k x n column major
int ldc = m; // m x n column major
int sda = m*k;
int sdb = k*n;
int sdc = m*n;
deviceVector<ComplexD> alpha_p(1);
deviceVector<ComplexD> beta_p(1);
acceleratorCopyToDevice((void *)&alpha,(void *)&alpha_p[0],sizeof(ComplexD));
acceleratorCopyToDevice((void *)&beta ,(void *)&beta_p[0],sizeof(ComplexD));
std::cout << "blasZgemmStridedBatched mnk "<<m<<","<<n<<","<<k<<" count "<<batchCount<<std::endl;
std::cout << "blasZgemmStridedBatched ld "<<lda<<","<<ldb<<","<<ldc<<std::endl;
std::cout << "blasZgemmStridedBatched sd "<<sda<<","<<sdb<<","<<sdc<<std::endl;
#ifdef GRID_HIP
auto err = hipblasZgemmStridedBatched(gridblasHandle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
m,n,k,
(hipblasDoubleComplex *) &alpha_p[0],
(hipblasDoubleComplex *) Amk, lda, sda,
(hipblasDoubleComplex *) Bkn, ldb, sdb,
(hipblasDoubleComplex *) &beta_p[0],
(hipblasDoubleComplex *) Cmn, ldc, sdc,
batchCount);
assert(err==HIPBLAS_STATUS_SUCCESS);
#endif
#ifdef GRID_CUDA
cublasZgemmStridedBatched(gridblasHandle,
CUBLAS_OP_N,
CUBLAS_OP_N,
m,n,k,
(cuDoubleComplex *) &alpha_p[0],
(cuDoubleComplex *) Amk, lda, sda,
(cuDoubleComplex *) Bkn, ldb, sdb,
(cuDoubleComplex *) &beta_p[0],
(cuDoubleComplex *) Cmn, ldc, sdc,
batchCount);
#endif
#ifdef GRID_SYCL
#warning "oneMKL implementation not made "
#endif
#if !defined(GRID_SYCL) && !defined(GRID_CUDA) && !defined(GRID_HIP)
// Reference host implementation (OP_N only, matching the hard-coded ops above)
for (int p = 0; p < batchCount; ++p) {
for (int mm = 0; mm < m; ++mm) {
for (int nn = 0; nn < n; ++nn) {
ComplexD c_mn(0.0);
for (int kk = 0; kk < k; ++kk)
c_mn += Amk[mm + kk*lda + p*sda] * Bkn[kk + nn*ldb + p*sdb];
Cmn[mm + nn*ldc + p*sdc] = alpha*c_mn + beta*Cmn[mm + nn*ldc + p*sdc];
}
}
}
#endif
}
void benchmark(int nbasis, int nrhs, int coarseVol, int nstencil)
{
int32_t N_A = nbasis*nbasis*coarseVol*nstencil;
int32_t N_B = nbasis*nrhs*coarseVol*nstencil; // One leg of stencil at a time
int32_t N_C = nbasis*nrhs*coarseVol*nstencil;
deviceVector<ComplexD> A(N_A); acceleratorMemSet(&A[0],0,N_A*sizeof(ComplexD));
deviceVector<ComplexD> B(N_B); acceleratorMemSet(&B[0],0,N_B*sizeof(ComplexD));
deviceVector<ComplexD> C(N_C); acceleratorMemSet(&C[0],0,N_C*sizeof(ComplexD));
ComplexD alpha(1.0);
ComplexD beta (1.0);
for(int i=0;i<10;i++){
RealD t0 = usecond();
for(int s=0;s<nstencil;s++){
gemmStridedBatched(nbasis,nrhs,nbasis,
alpha,
&A[0], // m x k
&B[0], // k x n
beta,
&C[0], // m x n
coarseVol);
}
synchronise();
RealD t1 = usecond();
RealD flops = 8.0*nbasis*nbasis*nrhs*coarseVol*nstencil;
RealD bytes = 1.0*sizeof(ComplexD)*(nbasis*nbasis+nbasis*nrhs*3)*coarseVol*nstencil;
std::cout << " batched Blas call "<<i<<" "<< flops/(t1-t0)/1.e3 <<" GF/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
std::cout << " batched Blas call "<<i<<" "<< bytes/(t1-t0)/1.e3 <<" GB/s "<<(t1-t0)/1.e3<<" ms "<<std::endl;
}
}
};
NAMESPACE_END(Grid);
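A minimal usage sketch of the batched interface above (illustrative only; the sizes and the helper name are hypothetical, and it assumes a host-accessible deviceVector as in the reference build):

#include <Grid/algorithms/blas/BatchedBlas.h>
using namespace Grid;
void gemm_batched_example(void)
{
  const int m=8, n=4, k=8, batch=16;
  // one contiguous allocation per operand; per-batch pointer lists index into it
  deviceVector<ComplexD>  A(m*k*batch), B(k*n*batch), C(m*n*batch);
  deviceVector<ComplexD*> Ap(batch), Bp(batch), Cp(batch);
  for(int p=0;p<batch;p++){
    Ap[p] = &A[p*m*k];
    Bp[p] = &B[p*k*n];
    Cp[p] = &C[p*m*n];
  }
  GridBLAS blas;                                  // constructor runs Init() once
  ComplexD one(1.0), zero(0.0);
  blas.gemmBatched(m,n,k, one, Ap, Bp, zero, Cp); // C_p = A_p * B_p for each batch entry
  blas.synchronise();                             // wait for completion
}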

View File

@@ -0,0 +1,512 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: MultiRHSDeflation.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
/*
MultiRHS block projection
Import basis -> nblock x nbasis x (block x internal)
Import vector of fine lattice objects -> nblock x nrhs x (block x internal)
=> coarse_(nrhs x nbasis )^block = via batched GEMM
//template<class vobj,class CComplex,int nbasis,class VLattice>
//inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,
// const VLattice &fineData,
// const VLattice &Basis)
*/
template<class Field>
class MultiRHSBlockProject
{
public:
typedef typename Field::scalar_type scalar;
typedef typename Field::scalar_object scalar_object;
typedef Field Fermion;
int nbasis;
GridBase *coarse_grid;
GridBase *fine_grid;
uint64_t block_vol;
uint64_t fine_vol;
uint64_t coarse_vol;
uint64_t words;
// Row major layout "C" order:
// BLAS_V[coarse_vol][nbasis][block_vol][words]
// BLAS_F[coarse_vol][nrhs][block_vol][words]
// BLAS_C[coarse_vol][nrhs][nbasis]
/*
* in Fortran column major notation (cuBlas order)
*
* Vxb = [v1(x)][..][vn(x)] ... x coarse vol
*
* Fxr = [r1(x)][..][rm(x)] ... x coarse vol
*
* Block project:
* C_br = V^dag F x coarse vol
*
* Block promote:
* F_xr = Vxb Cbr x coarse_vol
*/
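/*
* Worked example of the shapes (hypothetical sizes, for orientation only):
* with nbasis=40, nrhs=12, a 4^4 block (block_vol=256) and words=12 complex
* components per fine site, each coarse site carries
*   V : (block_vol*words) x nbasis = 3072 x 40
*   F : (block_vol*words) x nrhs   = 3072 x 12
* so the block project C = V^dag F is a 40 x 12 GEMM per coarse site,
* issued as one batched GEMM of batch size coarse_vol.
*/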
deviceVector<scalar> BLAS_V; // words * block_vol * nbasis x coarse_vol
deviceVector<scalar> BLAS_F; // nrhs x fine_vol * words -- the sources
deviceVector<scalar> BLAS_C; // nrhs x coarse_vol * nbasis -- the coarse coeffs
RealD blasNorm2(deviceVector<scalar> &blas)
{
scalar ss(0.0);
std::vector<scalar> tmp(blas.size());
acceleratorCopyFromDevice(&blas[0],&tmp[0],blas.size()*sizeof(scalar));
for(int64_t s=0;s<blas.size();s++){
ss=ss+tmp[s]*adj(tmp[s]);
}
coarse_grid->GlobalSum(ss);
return real(ss);
}
MultiRHSBlockProject(){};
~MultiRHSBlockProject(){ Deallocate(); };
void Deallocate(void)
{
nbasis=0;
coarse_grid=nullptr;
fine_grid=nullptr;
fine_vol=0;
block_vol=0;
coarse_vol=0;
words=0;
BLAS_V.resize(0);
BLAS_F.resize(0);
BLAS_C.resize(0);
}
void Allocate(int _nbasis,GridBase *_fgrid,GridBase *_cgrid)
{
nbasis=_nbasis;
fine_grid=_fgrid;
coarse_grid=_cgrid;
fine_vol = fine_grid->lSites();
coarse_vol = coarse_grid->lSites();
block_vol = fine_vol/coarse_vol;
words = sizeof(scalar_object)/sizeof(scalar);
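// words = number of scalar components held per fine-grid site (scalar_object unpacked into scalars)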
BLAS_V.resize (fine_vol * words * nbasis );
}
void ImportFineGridVectors(std::vector <Field > &vecs, deviceVector<scalar> &blas)
{
int nvec = vecs.size();
typedef typename Field::vector_object vobj;
std::cout << " BlockProjector importing "<<nvec<< " vectors" <<std::endl;
assert(vecs[0].Grid()==fine_grid);
subdivides(coarse_grid,fine_grid); // require they map
int _ndimension = coarse_grid->_ndimension;
assert(block_vol == fine_grid->oSites() / coarse_grid->oSites());
Coordinate block_r (_ndimension);
for(int d=0 ; d<_ndimension;d++){
block_r[d] = fine_grid->_rdimensions[d] / coarse_grid->_rdimensions[d];
}
uint64_t sz = blas.size();
acceleratorMemSet(&blas[0],0,blas.size()*sizeof(scalar));
Coordinate fine_rdimensions = fine_grid->_rdimensions;
Coordinate coarse_rdimensions = coarse_grid->_rdimensions;
int64_t bv= block_vol;
for(int v=0;v<vecs.size();v++){
// std::cout << " BlockProjector importing vector"<<v<<" "<<norm2(vecs[v])<<std::endl;
autoView( fineData , vecs[v], AcceleratorRead);
auto blasData_p = &blas[0];
auto fineData_p = &fineData[0];
int64_t osites = fine_grid->oSites();
// loop over fine sites
const int Nsimd = vobj::Nsimd();
// std::cout << "sz "<<sz<<std::endl;
// std::cout << "prod "<<Nsimd * coarse_grid->oSites() * block_vol * nvec * words<<std::endl;
assert(sz == Nsimd * coarse_grid->oSites() * block_vol * nvec * words);
uint64_t lwords= words; // local variable for copy in to GPU
accelerator_for(sf,osites,Nsimd,{
#ifdef GRID_SIMT
{
int lane=acceleratorSIMTlane(Nsimd); // buffer lane
#else
for(int lane=0;lane<Nsimd;lane++) {
#endif
// One thread per fine site
Coordinate coor_f(_ndimension);
Coordinate coor_b(_ndimension);
Coordinate coor_c(_ndimension);
// Fine site to fine coor
Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions);
for(int d=0;d<_ndimension;d++) coor_b[d] = coor_f[d]%block_r[d];
for(int d=0;d<_ndimension;d++) coor_c[d] = coor_f[d]/block_r[d];
int sc;// coarse site
int sb;// block site
Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions);
Lexicographic::IndexFromCoor(coor_b,sb,block_r);
scalar_object data = extractLane(lane,fineData[sf]);
// BLAS layout address calculation
// words * block_vol * nbasis x coarse_vol
// coarse oSite x block vole x lanes
int64_t site = (lane*osites + sc*bv)*nvec
+ v*bv
+ sb;
// assert(site*lwords<sz);
scalar_object * ptr = (scalar_object *)&blasData_p[site*lwords];
*ptr = data;
#ifdef GRID_SIMT
}
#else
}
#endif
});
// std::cout << " import fine Blas norm "<<blasNorm2(blas)<<std::endl;
// std::cout << " BlockProjector imported vector"<<v<<std::endl;
}
}
void ExportFineGridVectors(std::vector <Field> &vecs, deviceVector<scalar> &blas)
{
typedef typename Field::vector_object vobj;
int nvec = vecs.size();
assert(vecs[0].Grid()==fine_grid);
subdivides(coarse_grid,fine_grid); // require they map
int _ndimension = coarse_grid->_ndimension;
assert(block_vol == fine_grid->oSites() / coarse_grid->oSites());
Coordinate block_r (_ndimension);
for(int d=0 ; d<_ndimension;d++){
block_r[d] = fine_grid->_rdimensions[d] / coarse_grid->_rdimensions[d];
}
Coordinate fine_rdimensions = fine_grid->_rdimensions;
Coordinate coarse_rdimensions = coarse_grid->_rdimensions;
// std::cout << " export fine Blas norm "<<blasNorm2(blas)<<std::endl;
int64_t bv= block_vol;
for(int v=0;v<vecs.size();v++){
autoView( fineData , vecs[v], AcceleratorWrite);
auto blasData_p = &blas[0];
auto fineData_p = &fineData[0];
int64_t osites = fine_grid->oSites();
uint64_t lwords = words;
// std::cout << " Nsimd is "<<vobj::Nsimd() << std::endl;
// std::cout << " lwords is "<<lwords << std::endl;
// std::cout << " sizeof(scalar_object) is "<<sizeof(scalar_object) << std::endl;
// loop over fine sites
accelerator_for(sf,osites,vobj::Nsimd(),{
#ifdef GRID_SIMT
{
int lane=acceleratorSIMTlane(vobj::Nsimd()); // buffer lane
#else
for(int lane=0;lane<vobj::Nsimd();lane++) {
#endif
// One thread per fine site
Coordinate coor_f(_ndimension);
Coordinate coor_b(_ndimension);
Coordinate coor_c(_ndimension);
Lexicographic::CoorFromIndex(coor_f,sf,fine_rdimensions);
for(int d=0;d<_ndimension;d++) coor_b[d] = coor_f[d]%block_r[d];
for(int d=0;d<_ndimension;d++) coor_c[d] = coor_f[d]/block_r[d];
int sc;
int sb;
Lexicographic::IndexFromCoor(coor_c,sc,coarse_rdimensions);
Lexicographic::IndexFromCoor(coor_b,sb,block_r);
// BLAS layout address calculation
// words * block_vol * nbasis x coarse_vol
int64_t site = (lane*osites + sc*bv)*nvec
+ v*bv
+ sb;
scalar_object * ptr = (scalar_object *)&blasData_p[site*lwords];
scalar_object data = *ptr;
insertLane(lane,fineData[sf],data);
#ifdef GRID_SIMT
}
#else
}
#endif
});
}
}
template<class vobj>
void ImportCoarseGridVectors(std::vector <Lattice<vobj> > &vecs, deviceVector<scalar> &blas)
{
int nvec = vecs.size();
typedef typename vobj::scalar_object coarse_scalar_object;
std::cout << " BlockProjector importing coarse grid "<<nvec<< " vectors" <<std::endl;
assert(vecs[0].Grid()==coarse_grid);
int _ndimension = coarse_grid->_ndimension;
uint64_t sz = blas.size();
Coordinate coarse_rdimensions = coarse_grid->_rdimensions;
for(int v=0;v<vecs.size();v++){
// std::cout << " BlockProjector importing coarse vector"<<v<<" "<<norm2(vecs[v])<<std::endl;
autoView( coarseData , vecs[v], AcceleratorRead);
auto blasData_p = &blas[0];
auto coarseData_p = &coarseData[0];
int64_t osites = coarse_grid->oSites();
// loop over coarse sites
const int Nsimd = vobj::Nsimd();
uint64_t cwords=sizeof(typename vobj::scalar_object)/sizeof(scalar);
assert(cwords==nbasis);
accelerator_for(sc,osites,Nsimd,{
#ifdef GRID_SIMT
{
int lane=acceleratorSIMTlane(Nsimd); // buffer lane
#else
for(int lane=0;lane<Nsimd;lane++) {
#endif
// C_br per site
int64_t blas_site = (lane*osites + sc)*nvec*cwords + v*cwords;
coarse_scalar_object data = extractLane(lane,coarseData[sc]);
coarse_scalar_object * ptr = (coarse_scalar_object *)&blasData_p[blas_site];
*ptr = data;
#ifdef GRID_SIMT
}
#else
}
#endif
});
// std::cout << " import coarsee Blas norm "<<blasNorm2(blas)<<std::endl;
}
}
template<class vobj>
void ExportCoarseGridVectors(std::vector <Lattice<vobj> > &vecs, deviceVector<scalar> &blas)
{
int nvec = vecs.size();
typedef typename vobj::scalar_object coarse_scalar_object;
std::cout << " BlockProjector importing coarse grid "<<nvec<< " vectors" <<std::endl;
assert(vecs[0].Grid()==coarse_grid);
int _ndimension = coarse_grid->_ndimension;
uint64_t sz = blas.size();
Coordinate coarse_rdimensions = coarse_grid->_rdimensions;
// std::cout << " export coarsee Blas norm "<<blasNorm2(blas)<<std::endl;
for(int v=0;v<vecs.size();v++){
// std::cout << " BlockProjector exporting coarse vector"<<v<<std::endl;
autoView( coarseData , vecs[v], AcceleratorWrite);
auto blasData_p = &blas[0];
auto coarseData_p = &coarseData[0];
int64_t osites = coarse_grid->oSites();
// loop over coarse sites
const int Nsimd = vobj::Nsimd();
uint64_t cwords=sizeof(typename vobj::scalar_object)/sizeof(scalar);
assert(cwords==nbasis);
accelerator_for(sc,osites,Nsimd,{
// Wrap in a macro "FOR_ALL_LANES(lane,{ ... });
#ifdef GRID_SIMT
{
int lane=acceleratorSIMTlane(Nsimd); // buffer lane
#else
for(int lane=0;lane<Nsimd;lane++) {
#endif
int64_t blas_site = (lane*osites + sc)*nvec*cwords + v*cwords;
coarse_scalar_object * ptr = (coarse_scalar_object *)&blasData_p[blas_site];
coarse_scalar_object data = *ptr;
insertLane(lane,coarseData[sc],data);
#ifdef GRID_SIMT
}
#else
}
#endif
});
}
}
void ImportBasis(std::vector < Field > &vecs)
{
// std::cout << " BlockProjector Import basis size "<<vecs.size()<<std::endl;
ImportFineGridVectors(vecs,BLAS_V);
}
template<class cobj>
void blockProject(std::vector<Field> &fine,std::vector< Lattice<cobj> > & coarse)
{
int nrhs=fine.size();
int _nbasis = sizeof(typename cobj::scalar_object)/sizeof(scalar);
assert(nbasis==_nbasis);
BLAS_F.resize (fine_vol * words * nrhs );
BLAS_C.resize (coarse_vol * nbasis * nrhs );
/////////////////////////////////////////////
// Copy in the multi-rhs sources to same data layout
/////////////////////////////////////////////
// std::cout << "BlockProject import fine"<<std::endl;
ImportFineGridVectors(fine,BLAS_F);
deviceVector<scalar *> Vd(coarse_vol);
deviceVector<scalar *> Fd(coarse_vol);
deviceVector<scalar *> Cd(coarse_vol);
// std::cout << "BlockProject pointers"<<std::endl;
for(int c=0;c<coarse_vol;c++){
// BLAS_V[coarse_vol][nbasis][block_vol][words]
// BLAS_F[coarse_vol][nrhs][block_vol][words]
// BLAS_C[coarse_vol][nrhs][nbasis]
scalar * Vh = & BLAS_V[c*nbasis*block_vol*words];
scalar * Fh = & BLAS_F[c*nrhs*block_vol*words];
scalar * Ch = & BLAS_C[c*nrhs*nbasis];
acceleratorPut(Vd[c],Vh);
acceleratorPut(Fd[c],Fh);
acceleratorPut(Cd[c],Ch);
}
GridBLAS BLAS;
// std::cout << "BlockProject BLAS"<<std::endl;
int64_t vw = block_vol * words;
/////////////////////////////////////////
// C_br = V^dag R
/////////////////////////////////////////
BLAS.gemmBatched(GridBLAS_OP_C,GridBLAS_OP_N,
nbasis,nrhs,vw,
ComplexD(1.0),
Vd,
Fd,
ComplexD(0.0), // wipe out C
Cd);
BLAS.synchronise();
// std::cout << "BlockProject done"<<std::endl;
ExportCoarseGridVectors(coarse, BLAS_C);
// std::cout << "BlockProject done"<<std::endl;
}
template<class cobj>
void blockPromote(std::vector<Field> &fine,std::vector<Lattice<cobj> > & coarse)
{
int nrhs=fine.size();
int _nbasis = sizeof(typename cobj::scalar_object)/sizeof(scalar);
assert(nbasis==_nbasis);
BLAS_F.resize (fine_vol * words * nrhs );
BLAS_C.resize (coarse_vol * nbasis * nrhs );
ImportCoarseGridVectors(coarse, BLAS_C);
GridBLAS BLAS;
deviceVector<scalar *> Vd(coarse_vol);
deviceVector<scalar *> Fd(coarse_vol);
deviceVector<scalar *> Cd(coarse_vol);
for(int c=0;c<coarse_vol;c++){
// BLAS_V[coarse_vol][nbasis][block_vol][words]
// BLAS_F[coarse_vol][nrhs][block_vol][words]
// BLAS_C[coarse_vol][nrhs][nbasis]
scalar * Vh = & BLAS_V[c*nbasis*block_vol*words];
scalar * Fh = & BLAS_F[c*nrhs*block_vol*words];
scalar * Ch = & BLAS_C[c*nrhs*nbasis];
acceleratorPut(Vd[c],Vh);
acceleratorPut(Fd[c],Fh);
acceleratorPut(Cd[c],Ch);
}
/////////////////////////////////////////
// Block promote:
// F_xr = Vxb Cbr (x coarse_vol)
/////////////////////////////////////////
int64_t vw = block_vol * words;
BLAS.gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
vw,nrhs,nbasis,
ComplexD(1.0),
Vd,
Cd,
ComplexD(0.0), // wipe out C
Fd);
BLAS.synchronise();
// std::cout << " blas call done"<<std::endl;
ExportFineGridVectors(fine, BLAS_F);
// std::cout << " exported "<<std::endl;
}
};
NAMESPACE_END(Grid);
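A minimal driving sketch for this projector, assuming FineGrid, CoarseGrid, nrhs and a basis of nbasis fine vectors already exist (the names and sizes below are illustrative, not part of this change); the coarse site object must pack exactly nbasis scalars, as the asserts in blockProject and blockPromote require:
const int nbasis = 32;                                   // illustrative compile-time basis size
typedef LatticeFermion FineField;
typedef Lattice<iVector<vTComplex,nbasis> > CoarseField; // nbasis coefficients per coarse site
MultiRHSBlockProject<FineField> mrhs_proj;
mrhs_proj.Allocate(nbasis,FineGrid,CoarseGrid);          // sizes BLAS_V = fine_vol * words * nbasis
mrhs_proj.ImportBasis(basis);                            // basis: std::vector<FineField> of size nbasis
std::vector<FineField>   fine  (nrhs,FineGrid);
std::vector<CoarseField> coarse(nrhs,CoarseGrid);
mrhs_proj.blockProject(fine,coarse);                     // per coarse site: C_br = V^dag R   (one batched GEMM)
mrhs_proj.blockPromote(fine,coarse);                     // per coarse site: F_xr = V_xb C_br (one batched GEMM)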

View File

@ -0,0 +1,234 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: MultiRHSDeflation.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
/* Need helper object for BLAS accelerated mrhs projection
i) MultiRHS Deflation
Import Evecs -> nev x vol x internal
Import vector of Lattice objects -> nrhs x vol x internal
=> Cij (nrhs x Nev) via GEMM.
=> Guess (nrhs x vol x internal) = C x evecs (via GEMM)
Export
ii) MultiRHS block projection
Import basis -> nblock x nbasis x (block x internal)
Import vector of fine lattice objects -> nblock x nrhs x (block x internal)
=> coarse_(nrhs x nbasis )^block = via batched GEMM
iii) Alternate interface:
Import higher dim Lattice object-> vol x nrhs layout
*/
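/* Shape summary for case (i), reading off DeflateSources below (illustrative only):
     E : (vol*words) x nev     column major, one column per eigenvector
     R : (vol*words) x nrhs    one column per source
     C = E^dag R : nev x nrhs         -- one GEMM, then row e is scaled by 1/lambda_e
     G = E C     : (vol*words) x nrhs -- one GEMM, giving the deflated guesses
   so the whole deflation costs two GEMM calls plus a small (nev x nrhs) global sum.
*/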
template<class Field>
class MultiRHSDeflation
{
public:
typedef typename Field::scalar_type scalar;
typedef typename Field::scalar_object scalar_object;
int nev;
std::vector<RealD> eval;
GridBase *grid;
uint64_t vol;
uint64_t words;
deviceVector<scalar> BLAS_E; // nev x vol -- the eigenbasis (up to a 1/sqrt(lambda))
deviceVector<scalar> BLAS_R; // nrhs x vol -- the sources
deviceVector<scalar> BLAS_G; // nrhs x vol -- the guess
deviceVector<scalar> BLAS_C; // nrhs x nev -- the coefficients
MultiRHSDeflation(){};
~MultiRHSDeflation(){ Deallocate(); };
void Deallocate(void)
{
nev=0;
grid=nullptr;
vol=0;
words=0;
BLAS_E.resize(0);
BLAS_R.resize(0);
BLAS_C.resize(0);
BLAS_G.resize(0);
}
void Allocate(int _nev,GridBase *_grid)
{
nev=_nev;
grid=_grid;
vol = grid->lSites();
words = sizeof(scalar_object)/sizeof(scalar);
eval.resize(nev);
BLAS_E.resize (vol * words * nev );
std::cout << GridLogMessage << " Allocate for "<<nev<<" eigenvectors and volume "<<vol<<std::endl;
}
void ImportEigenVector(Field &evec,RealD &_eval, int ev)
{
assert(ev<eval.size());
std::cout << " ev " <<ev<<" eval "<<_eval<< std::endl;
eval[ev] = _eval;
int64_t offset = ev*vol*words;
autoView(v,evec,AcceleratorRead);
acceleratorCopyDeviceToDevice(&v[0],&BLAS_E[offset],sizeof(scalar_object)*vol);
}
void ImportEigenBasis(std::vector<Field> &evec,std::vector<RealD> &_eval)
{
ImportEigenBasis(evec,_eval,0,evec.size());
}
// Could use to import a batch of eigenvectors
void ImportEigenBasis(std::vector<Field> &evec,std::vector<RealD> &_eval, int _ev0, int _nev)
{
assert(_ev0+_nev<=evec.size());
Allocate(_nev,evec[0].Grid());
// Imports a sub-batch of eigenvectors, _ev0, ..., _ev0+_nev-1
for(int e=0;e<nev;e++){
std::cout << "Importing eigenvector "<<e<<" evalue "<<_eval[_ev0+e]<<std::endl;
ImportEigenVector(evec[_ev0+e],_eval[_ev0+e],e);
}
}
void DeflateSources(std::vector<Field> &source,std::vector<Field> & guess)
{
int nrhs = source.size();
assert(source.size()==guess.size());
assert(grid == guess[0].Grid());
conformable(guess[0],source[0]);
int64_t vw = vol * words;
std::cout << GridLogMessage << "MultiRHSDelation for "<<nrhs<<" sources with "<<nev<<" eigenvectors "<<std::endl;
RealD t0 = usecond();
BLAS_R.resize(nrhs * vw); // cost free if size doesn't change
BLAS_G.resize(nrhs * vw); // cost free if size doesn't change
BLAS_C.resize(nev * nrhs);// cost free if size doesn't change
/////////////////////////////////////////////
// Copy in the multi-rhs sources
/////////////////////////////////////////////
// for(int r=0;r<nrhs;r++){
// std::cout << " source["<<r<<"] = "<<norm2(source[r])<<std::endl;
// }
for(int r=0;r<nrhs;r++){
int64_t offset = r*vw;
autoView(v,source[r],AcceleratorRead);
acceleratorCopyDeviceToDevice(&v[0],&BLAS_R[offset],sizeof(scalar_object)*vol);
}
/*
* in Fortran column major notation (cuBlas order)
*
* Exe = [e1(x)][..][en(x)]
*
* Rxr = [r1(x)][..][rm(x)]
*
* C_er = E^dag R
* C_er = C_er / lambda_e
* G_xr = Exe Cer
*/
deviceVector<scalar *> Ed(1);
deviceVector<scalar *> Rd(1);
deviceVector<scalar *> Cd(1);
deviceVector<scalar *> Gd(1);
scalar * Eh = & BLAS_E[0];
scalar * Rh = & BLAS_R[0];
scalar * Ch = & BLAS_C[0];
scalar * Gh = & BLAS_G[0];
acceleratorPut(Ed[0],Eh);
acceleratorPut(Rd[0],Rh);
acceleratorPut(Cd[0],Ch);
acceleratorPut(Gd[0],Gh);
GridBLAS BLAS;
/////////////////////////////////////////
// C_er = E^dag R
/////////////////////////////////////////
BLAS.gemmBatched(GridBLAS_OP_C,GridBLAS_OP_N,
nev,nrhs,vw,
ComplexD(1.0),
Ed,
Rd,
ComplexD(0.0), // wipe out C
Cd);
BLAS.synchronise();
assert(BLAS_C.size()==nev*nrhs);
std::vector<scalar> HOST_C(BLAS_C.size()); // nrhs . nev -- the coefficients
acceleratorCopyFromDevice(&BLAS_C[0],&HOST_C[0],BLAS_C.size()*sizeof(scalar));
grid->GlobalSumVector(&HOST_C[0],nev*nrhs);
for(int e=0;e<nev;e++){
RealD lam(1.0/eval[e]);
for(int r=0;r<nrhs;r++){
int off = e+nev*r;
HOST_C[off]=HOST_C[off] * lam;
// std::cout << "C["<<e<<"]["<<r<<"] ="<<HOST_C[off]<< " eval[e] "<<eval[e] <<std::endl;
}
}
acceleratorCopyToDevice(&HOST_C[0],&BLAS_C[0],BLAS_C.size()*sizeof(scalar));
/////////////////////////////////////////
// Guess G_xr = Exe Cer
/////////////////////////////////////////
BLAS.gemmBatched(GridBLAS_OP_N,GridBLAS_OP_N,
vw,nrhs,nev,
ComplexD(1.0),
Ed, // x . nev
Cd, // nev . nrhs
ComplexD(0.0),
Gd);
BLAS.synchronise();
///////////////////////////////////////
// Copy out the multirhs
///////////////////////////////////////
for(int r=0;r<nrhs;r++){
int64_t offset = r*vw;
autoView(v,guess[r],AcceleratorWrite);
acceleratorCopyDeviceToDevice(&BLAS_G[offset],&v[0],sizeof(scalar_object)*vol);
}
RealD t1 = usecond();
std::cout << GridLogMessage << "MultiRHSDelation for "<<nrhs<<" sources with "<<nev<<" eigenvectors took " << (t1-t0)/1e3 <<" ms"<<std::endl;
}
};
NAMESPACE_END(Grid);
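A minimal calling sketch, assuming evec and eval hold a converged Lanczos basis, RNG5 is an initialised GridParallelRNG, and nrhs is chosen by the caller; none of these names come from this file:
MultiRHSDeflation<LatticeFermion> deflate;
deflate.ImportEigenBasis(evec,eval);                 // copies the nev eigenvectors into BLAS_E
int nrhs = 12;
std::vector<LatticeFermion> src  (nrhs,evec[0].Grid());
std::vector<LatticeFermion> guess(nrhs,evec[0].Grid());
for(int r=0;r<nrhs;r++) gaussian(RNG5,src[r]);       // placeholder sources
deflate.DeflateSources(src,guess);                   // guess_r = sum_e e_e <e_e|src_r> / lambda_e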

View File

@ -33,109 +33,111 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
* Script A = SolverMatrix
* Script P = Preconditioner
*
* Deflation methods considered
* -- Solve P A x = P b [ like Luscher ]
* DEF-1 M P A x = M P b [i.e. left precon]
* DEF-2 P^T M A x = P^T M b
* ADEF-1 Preconditioner = M P + Q [ Q + M + M A Q]
* ADEF-2 Preconditioner = P^T M + Q
* BNN Preconditioner = P^T M P + Q
* BNN2 Preconditioner = M P + P^TM +Q - M P A M
*
* Implement ADEF-2
*
* Vstart = P^Tx + Qb
* M1 = P^TM + Q
* M2=M3=1
* Vout = x
*/
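/*
 * Expanding the ADEF-2 choice above (a reading of the code below, with
 * Q = Prom Ass^{-1} Proj and P^T = 1 - Q A):
 *
 *   z = M1 r = (P^T M + Q) r = M r + Q ( r - A M r )
 *
 * i.e. smooth the residual, then add a coarse-space correction for what the
 * smoother left behind; PcgM1 in TwoLevelADEF2 computes exactly this, and
 * Vstart = P^T x + Q b seeds x0 with the coarse solve of the source.
 */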
NAMESPACE_BEGIN(Grid);
// abstract base
template<class Field, class CoarseField>
class TwoLevelFlexiblePcg : public LinearFunction<Field>
template<class Field>
class TwoLevelCG : public LinearFunction<Field>
{
public:
int verbose;
RealD Tolerance;
Integer MaxIterations;
const int mmax = 5;
GridBase *grid;
GridBase *coarsegrid;
LinearOperatorBase<Field> *_Linop
OperatorFunction<Field> *_Smoother,
LinearFunction<CoarseField> *_CoarseSolver;
// Need something that knows how to get from Coarse to fine and back again
// Fine operator, Smoother, CoarseSolver
LinearOperatorBase<Field> &_FineLinop;
LinearFunction<Field> &_Smoother;
// more most operator functions
TwoLevelFlexiblePcg(RealD tol,
Integer maxit,
LinearOperatorBase<Field> *Linop,
LinearOperatorBase<Field> *SmootherLinop,
OperatorFunction<Field> *Smoother,
OperatorFunction<CoarseField> CoarseLinop
) :
TwoLevelCG(RealD tol,
Integer maxit,
LinearOperatorBase<Field> &FineLinop,
LinearFunction<Field> &Smoother,
GridBase *fine) :
Tolerance(tol),
MaxIterations(maxit),
_Linop(Linop),
_PreconditionerLinop(PrecLinop),
_Preconditioner(Preconditioner)
{
verbose=0;
_FineLinop(FineLinop),
_Smoother(Smoother)
{
grid = fine;
};
// The Pcg routine is common to all, but the various matrices differ from derived
// implementation to derived implementation
void operator() (const Field &src, Field &psi){
void operator() (const Field &src, Field &psi){
psi.Checkerboard() = src.Checkerboard();
grid = src.Grid();
virtual void operator() (const Field &src, Field &x)
{
std::cout << GridLogMessage<<"HDCG: fPcg starting single RHS"<<std::endl;
RealD f;
RealD rtzp,rtz,a,d,b;
RealD rptzp;
RealD tn;
RealD guess = norm2(psi);
RealD ssq = norm2(src);
RealD rsq = ssq*Tolerance*Tolerance;
/////////////////////////////
// Set up history vectors
/////////////////////////////
std::vector<Field> p (mmax,grid);
int mmax = 5;
std::cout << GridLogMessage<<"HDCG: fPcg allocating"<<std::endl;
std::vector<Field> p(mmax,grid);
std::vector<Field> mmp(mmax,grid);
std::vector<RealD> pAp(mmax);
Field x (grid); x = psi;
Field z (grid);
Field z(grid);
Field tmp(grid);
Field r (grid);
Field mu (grid);
Field mp (grid);
Field r (grid);
Field mu (grid);
std::cout << GridLogMessage<<"HDCG: fPcg allocated"<<std::endl;
//Initial residual computation & set up
RealD guess = norm2(x);
std::cout << GridLogMessage<<"HDCG: fPcg guess nrm "<<guess<<std::endl;
RealD src_nrm = norm2(src);
std::cout << GridLogMessage<<"HDCG: fPcg src nrm "<<src_nrm<<std::endl;
if ( src_nrm == 0.0 ) {
std::cout << GridLogMessage<<"HDCG: fPcg given trivial source norm "<<src_nrm<<std::endl;
x=Zero();
}
RealD tn;
GridStopWatch HDCGTimer;
HDCGTimer.Start();
//////////////////////////
// x0 = Vstart -- possibly modify guess
//////////////////////////
x=src;
Vstart(x,src);
// r0 = b -A x0
HermOp(x,mmp); // Shouldn't this be something else?
_FineLinop.HermOp(x,mmp[0]);
axpy (r, -1.0,mmp[0], src); // Recomputes r=src-Ax0
{
double n1 = norm2(x);
double n2 = norm2(mmp[0]);
double n3 = norm2(r);
std::cout<<GridLogMessage<<"x,vstart,r = "<<n1<<" "<<n2<<" "<<n3<<std::endl;
}
//////////////////////////////////
// Compute z = M1 x
//////////////////////////////////
M1(r,z,tmp,mp,SmootherMirs);
PcgM1(r,z);
rtzp =real(innerProduct(r,z));
///////////////////////////////////////
// Solve for Mss mu = P A z and set p = z-mu
// Def2: p = 1 - Q Az = Pright z
// Def2 p = 1 - Q Az = Pright z
// Other algos M2 is trivial
///////////////////////////////////////
M2(z,p[0]);
PcgM2(z,p[0]);
RealD ssq = norm2(src);
RealD rsq = ssq*Tolerance*Tolerance;
std::cout << GridLogMessage<<"HDCG: k=0 residual "<<rtzp<<" rsq "<<rsq<<"\n";
Field pp(grid);
for (int k=0;k<=MaxIterations;k++){
@ -143,31 +145,46 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
int peri_kp = (k+1) % mmax;
rtz=rtzp;
d= M3(p[peri_k],mp,mmp[peri_k],tmp);
d= PcgM3(p[peri_k],mmp[peri_k]);
a = rtz/d;
// Memorise this
pAp[peri_k] = d;
axpy(x,a,p[peri_k],x);
RealD rn = axpy_norm(r,-a,mmp[peri_k],r);
// Compute z = M x
M1(r,z,tmp,mp);
PcgM1(r,z);
{
RealD n1,n2;
n1=norm2(r);
n2=norm2(z);
std::cout << GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : vector r,z "<<n1<<" "<<n2<<"\n";
}
rtzp =real(innerProduct(r,z));
std::cout << GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : inner rtzp "<<rtzp<<"\n";
M2(z,mu); // ADEF-2 this is identity. Axpy possible to eliminate
// PcgM2(z,p[0]);
PcgM2(z,mu); // ADEF-2 this is identity. Axpy possible to eliminate
p[peri_kp]=mu;
p[peri_kp]=p[peri_k];
// Standard search direction p -> z + b p ; b =
// Standard search direction p -> z + b p
b = (rtzp)/rtz;
int northog;
// k=zero <=> peri_kp=1; northog = 1
// k=1 <=> peri_kp=2; northog = 2
// ... ... ...
// k=mmax-2<=> peri_kp=mmax-1; northog = mmax-1
// k=mmax-1<=> peri_kp=0; northog = 1
// northog = (peri_kp==0)?1:peri_kp; // This is the fCG(mmax) algorithm
northog = (k>mmax-1)?(mmax-1):k; // This is the fCG-Tr(mmax-1) algorithm
std::cout<<GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : orthogonalising to last "<<northog<<" vectors\n";
for(int back=0; back < northog; back++){
int peri_back = (k-back)%mmax;
RealD pbApk= real(innerProduct(mmp[peri_back],p[peri_kp]));
@ -176,75 +193,324 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
}
RealD rrn=sqrt(rn/ssq);
std::cout<<GridLogMessage<<"TwoLevelfPcg: k= "<<k<<" residual = "<<rrn<<std::endl;
RealD rtn=sqrt(rtz/ssq);
RealD rtnp=sqrt(rtzp/ssq);
std::cout<<GridLogMessage<<"HDCG: fPcg k= "<<k<<" residual = "<<rrn<<"\n";
// Stopping condition
if ( rn <= rsq ) {
HermOp(x,mmp); // Shouldn't this be something else?
HDCGTimer.Stop();
std::cout<<GridLogMessage<<"HDCG: fPcg converged in "<<k<<" iterations and "<<HDCGTimer.Elapsed()<<std::endl;;
_FineLinop.HermOp(x,mmp[0]);
axpy(tmp,-1.0,src,mmp[0]);
RealD psinorm = sqrt(norm2(x));
RealD srcnorm = sqrt(norm2(src));
RealD tmpnorm = sqrt(norm2(tmp));
RealD true_residual = tmpnorm/srcnorm;
std::cout<<GridLogMessage<<"TwoLevelfPcg: true residual is "<<true_residual<<std::endl;
std::cout<<GridLogMessage<<"TwoLevelfPcg: target residual was"<<Tolerance<<std::endl;
return k;
RealD mmpnorm = sqrt(norm2(mmp[0]));
RealD xnorm = sqrt(norm2(x));
RealD srcnorm = sqrt(norm2(src));
RealD tmpnorm = sqrt(norm2(tmp));
RealD true_residual = tmpnorm/srcnorm;
std::cout<<GridLogMessage
<<"HDCG: true residual is "<<true_residual
<<" solution "<<xnorm
<<" source "<<srcnorm
<<" mmp "<<mmpnorm
<<std::endl;
return;
}
}
// Non-convergence
assert(0);
HDCGTimer.Stop();
std::cout<<GridLogMessage<<"HDCG: not converged "<<HDCGTimer.Elapsed()<<std::endl;
RealD xnorm = sqrt(norm2(x));
RealD srcnorm = sqrt(norm2(src));
std::cout<<GridLogMessage<<"HDCG: non-converged solution "<<xnorm<<" source "<<srcnorm<<std::endl;
}
virtual void operator() (std::vector<Field> &src, std::vector<Field> &x)
{
std::cout << GridLogMessage<<"HDCG: mrhs fPcg starting"<<std::endl;
src[0].Grid()->Barrier();
int nrhs = src.size();
std::vector<RealD> f(nrhs);
std::vector<RealD> rtzp(nrhs);
std::vector<RealD> rtz(nrhs);
std::vector<RealD> a(nrhs);
std::vector<RealD> d(nrhs);
std::vector<RealD> b(nrhs);
std::vector<RealD> rptzp(nrhs);
/////////////////////////////
// Set up history vectors
/////////////////////////////
int mmax = 3;
std::cout << GridLogMessage<<"HDCG: fPcg allocating"<<std::endl;
src[0].Grid()->Barrier();
std::vector<std::vector<Field> > p(nrhs); for(int r=0;r<nrhs;r++) p[r].resize(mmax,grid);
std::cout << GridLogMessage<<"HDCG: fPcg allocated p"<<std::endl;
src[0].Grid()->Barrier();
std::vector<std::vector<Field> > mmp(nrhs); for(int r=0;r<nrhs;r++) mmp[r].resize(mmax,grid);
std::cout << GridLogMessage<<"HDCG: fPcg allocated mmp"<<std::endl;
src[0].Grid()->Barrier();
std::vector<std::vector<RealD> > pAp(nrhs); for(int r=0;r<nrhs;r++) pAp[r].resize(mmax);
std::cout << GridLogMessage<<"HDCG: fPcg allocated pAp"<<std::endl;
src[0].Grid()->Barrier();
std::vector<Field> z(nrhs,grid);
std::vector<Field> mp (nrhs,grid);
std::vector<Field> r (nrhs,grid);
std::vector<Field> mu (nrhs,grid);
std::cout << GridLogMessage<<"HDCG: fPcg allocated z,mp,r,mu"<<std::endl;
src[0].Grid()->Barrier();
//Initial residual computation & set up
std::vector<RealD> src_nrm(nrhs);
for(int rhs=0;rhs<nrhs;rhs++) {
src_nrm[rhs]=norm2(src[rhs]);
assert(src_nrm[rhs]!=0.0);
}
std::vector<RealD> tn(nrhs);
GridStopWatch HDCGTimer;
HDCGTimer.Start();
//////////////////////////
// x0 = Vstart -- possibly modify guess
//////////////////////////
Vstart(x,src);
for(int rhs=0;rhs<nrhs;rhs++){
// r0 = b -A x0
_FineLinop.HermOp(x[rhs],mmp[rhs][0]);
axpy (r[rhs], -1.0,mmp[rhs][0], src[rhs]); // Recomputes r=src-Ax0
}
//////////////////////////////////
// Compute z = M1 x
//////////////////////////////////
// This needs a multiRHS version for acceleration
PcgM1(r,z);
std::vector<RealD> ssq(nrhs);
std::vector<RealD> rsq(nrhs);
std::vector<Field> pp(nrhs,grid);
for(int rhs=0;rhs<nrhs;rhs++){
rtzp[rhs] =real(innerProduct(r[rhs],z[rhs]));
p[rhs][0]=z[rhs];
ssq[rhs]=norm2(src[rhs]);
rsq[rhs]= ssq[rhs]*Tolerance*Tolerance;
std::cout << GridLogMessage<<"mrhs HDCG: "<<rhs<<" k=0 residual "<<rtzp[rhs]<<" rsq "<<rsq[rhs]<<"\n";
}
std::vector<RealD> rn(nrhs);
for (int k=0;k<=MaxIterations;k++){
int peri_k = k % mmax;
int peri_kp = (k+1) % mmax;
for(int rhs=0;rhs<nrhs;rhs++){
rtz[rhs]=rtzp[rhs];
d[rhs]= PcgM3(p[rhs][peri_k],mmp[rhs][peri_k]);
a[rhs] = rtz[rhs]/d[rhs];
// Memorise this
pAp[rhs][peri_k] = d[rhs];
axpy(x[rhs],a[rhs],p[rhs][peri_k],x[rhs]);
rn[rhs] = axpy_norm(r[rhs],-a[rhs],mmp[rhs][peri_k],r[rhs]);
}
// Compute z = M x (for *all* RHS)
PcgM1(r,z);
std::cout << GridLogMessage<<"HDCG::fPcg M1 complete"<<std::endl;
grid->Barrier();
RealD max_rn=0.0;
for(int rhs=0;rhs<nrhs;rhs++){
rtzp[rhs] =real(innerProduct(r[rhs],z[rhs]));
std::cout << GridLogMessage<<"HDCG::fPcg rhs"<<rhs<<" iteration "<<k<<" : inner rtzp "<<rtzp[rhs]<<"\n";
mu[rhs]=z[rhs];
p[rhs][peri_kp]=mu[rhs];
// Standard search direction p == z + b p
b[rhs] = (rtzp[rhs])/rtz[rhs];
int northog = (k>mmax-1)?(mmax-1):k; // This is the fCG-Tr(mmax-1) algorithm
std::cout<<GridLogMessage<<"HDCG::fPcg iteration "<<k<<" : orthogonalising to last "<<northog<<" vectors\n";
for(int back=0; back < northog; back++){
int peri_back = (k-back)%mmax;
RealD pbApk= real(innerProduct(mmp[rhs][peri_back],p[rhs][peri_kp]));
RealD beta = -pbApk/pAp[rhs][peri_back];
axpy(p[rhs][peri_kp],beta,p[rhs][peri_back],p[rhs][peri_kp]);
}
RealD rrn=sqrt(rn[rhs]/ssq[rhs]);
RealD rtn=sqrt(rtz[rhs]/ssq[rhs]);
RealD rtnp=sqrt(rtzp[rhs]/ssq[rhs]);
std::cout<<GridLogMessage<<"HDCG: rhs "<<rhs<<"fPcg k= "<<k<<" residual = "<<rrn<<"\n";
if ( rrn > max_rn ) max_rn = rrn;
}
// Stopping condition based on worst case
if ( max_rn <= Tolerance ) {
HDCGTimer.Stop();
std::cout<<GridLogMessage<<"HDCG: mrhs fPcg converged in "<<k<<" iterations and "<<HDCGTimer.Elapsed()<<std::endl;;
for(int rhs=0;rhs<nrhs;rhs++){
_FineLinop.HermOp(x[rhs],mmp[rhs][0]);
Field tmp(grid);
axpy(tmp,-1.0,src[rhs],mmp[rhs][0]);
RealD mmpnorm = sqrt(norm2(mmp[rhs][0]));
RealD xnorm = sqrt(norm2(x[rhs]));
RealD srcnorm = sqrt(norm2(src[rhs]));
RealD tmpnorm = sqrt(norm2(tmp));
RealD true_residual = tmpnorm/srcnorm;
std::cout<<GridLogMessage
<<"HDCG: true residual ["<<rhs<<"] is "<<true_residual
<<" solution "<<xnorm
<<" source "<<srcnorm
<<" mmp "<<mmpnorm
<<std::endl;
}
return;
}
}
HDCGTimer.Stop();
std::cout<<GridLogMessage<<"HDCG: not converged "<<HDCGTimer.Elapsed()<<std::endl;
for(int rhs=0;rhs<nrhs;rhs++){
RealD xnorm = sqrt(norm2(x[rhs]));
RealD srcnorm = sqrt(norm2(src[rhs]));
std::cout<<GridLogMessage<<"HDCG: non-converged solution "<<xnorm<<" source "<<srcnorm<<std::endl;
}
}
public:
virtual void M(Field & in,Field & out,Field & tmp) {
virtual void PcgM1(std::vector<Field> & in,std::vector<Field> & out)
{
std::cout << "PcgM1 default (cheat) mrhs version"<<std::endl;
for(int rhs=0;rhs<in.size();rhs++){
this->PcgM1(in[rhs],out[rhs]);
}
}
virtual void PcgM1(Field & in, Field & out) =0;
virtual void Vstart(std::vector<Field> & x,std::vector<Field> & src)
{
std::cout << "Vstart default (cheat) mrhs version"<<std::endl;
for(int rhs=0;rhs<x.size();rhs++){
this->Vstart(x[rhs],src[rhs]);
}
}
virtual void Vstart(Field & x,const Field & src)=0;
virtual void PcgM2(const Field & in, Field & out) {
out=in;
}
virtual void M1(Field & in, Field & out) {// the smoother
virtual RealD PcgM3(const Field & p, Field & mmp){
RealD dd;
_FineLinop.HermOp(p,mmp);
ComplexD dot = innerProduct(p,mmp);
dd=real(dot);
return dd;
}
/////////////////////////////////////////////////////////////////////
// Only Def1 has non-trivial Vout.
/////////////////////////////////////////////////////////////////////
};
template<class Field, class CoarseField, class Aggregation>
class TwoLevelADEF2 : public TwoLevelCG<Field>
{
public:
///////////////////////////////////////////////////////////////////////////////////
// Need something that knows how to get from Coarse to fine and back again
// void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){
// void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){
///////////////////////////////////////////////////////////////////////////////////
GridBase *coarsegrid;
Aggregation &_Aggregates;
LinearFunction<CoarseField> &_CoarseSolver;
LinearFunction<CoarseField> &_CoarseSolverPrecise;
///////////////////////////////////////////////////////////////////////////////////
// more most operator functions
TwoLevelADEF2(RealD tol,
Integer maxit,
LinearOperatorBase<Field> &FineLinop,
LinearFunction<Field> &Smoother,
LinearFunction<CoarseField> &CoarseSolver,
LinearFunction<CoarseField> &CoarseSolverPrecise,
Aggregation &Aggregates
) :
TwoLevelCG<Field>(tol,maxit,FineLinop,Smoother,Aggregates.FineGrid),
_CoarseSolver(CoarseSolver),
_CoarseSolverPrecise(CoarseSolverPrecise),
_Aggregates(Aggregates)
{
coarsegrid = Aggregates.CoarseGrid;
};
virtual void PcgM1(Field & in, Field & out)
{
GRID_TRACE("MultiGridPreconditioner ");
// [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min]
Field tmp(grid);
Field Min(grid);
PcgM(in,Min); // Smoother call
Field tmp(this->grid);
Field Min(this->grid);
CoarseField PleftProj(this->coarsegrid);
CoarseField PleftMss_proj(this->coarsegrid);
HermOp(Min,out);
GridStopWatch SmootherTimer;
GridStopWatch MatrixTimer;
SmootherTimer.Start();
this->_Smoother(in,Min);
SmootherTimer.Stop();
MatrixTimer.Start();
this->_FineLinop.HermOp(Min,out);
MatrixTimer.Stop();
axpy(tmp,-1.0,out,in); // tmp = in - A Min
ProjectToSubspace(tmp,PleftProj);
ApplyInverse(PleftProj,PleftMss_proj); // Ass^{-1} [in - A Min]_s
PromoteFromSubspace(PleftMss_proj,tmp);// tmp = Q[in - A Min]
GridStopWatch ProjTimer;
GridStopWatch CoarseTimer;
GridStopWatch PromTimer;
ProjTimer.Start();
this->_Aggregates.ProjectToSubspace(PleftProj,tmp);
ProjTimer.Stop();
CoarseTimer.Start();
this->_CoarseSolver(PleftProj,PleftMss_proj); // Ass^{-1} [in - A Min]_s
CoarseTimer.Stop();
PromTimer.Start();
this->_Aggregates.PromoteFromSubspace(PleftMss_proj,tmp);// tmp = Q[in - A Min]
PromTimer.Stop();
std::cout << GridLogPerformance << "PcgM1 breakdown "<<std::endl;
std::cout << GridLogPerformance << "\tSmoother " << SmootherTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tProj " << ProjTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tCoarse " << CoarseTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tProm " << PromTimer.Elapsed() <<std::endl;
axpy(out,1.0,Min,tmp); // Min+tmp
}
virtual void M2(const Field & in, Field & out) {
out=in;
// Must override for Def2 only
// case PcgDef2:
// Pright(in,out);
// break;
}
virtual RealD M3(const Field & p, Field & mmp){
double d,dd;
HermOpAndNorm(p,mmp,d,dd);
return dd;
// Must override for Def1 only
// case PcgDef1:
// d=linop_d->Mprec(p,mmp,tmp,0,1);// Dag no
// linop_d->Mprec(mmp,mp,tmp,1);// Dag yes
// Pleft(mp,mmp);
// d=real(linop_d->inner(p,mmp));
}
virtual void VstartDef2(Field & x,const Field & src){
//case PcgDef2:
//case PcgAdef2:
//case PcgAdef2f:
//case PcgV11f:
virtual void Vstart(Field & x,const Field & src)
{
std::cout << GridLogMessage<<"HDCG: fPcg Vstart "<<std::endl;
///////////////////////////////////
// Choose x_0 such that
// x_0 = guess + (A_ss^inv) r_s = guess + Ass_inv [src -Aguess]
@ -256,142 +522,211 @@ class TwoLevelFlexiblePcg : public LinearFunction<Field>
// = src_s - (A guess)_s - src_s + (A guess)_s
// = 0
///////////////////////////////////
Field r(grid);
Field mmp(grid);
Field r(this->grid);
Field mmp(this->grid);
CoarseField PleftProj(this->coarsegrid);
CoarseField PleftMss_proj(this->coarsegrid);
std::cout << GridLogMessage<<"HDCG: fPcg Vstart projecting "<<std::endl;
this->_Aggregates.ProjectToSubspace(PleftProj,src);
std::cout << GridLogMessage<<"HDCG: fPcg Vstart coarse solve "<<std::endl;
this->_CoarseSolverPrecise(PleftProj,PleftMss_proj); // Ass^{-1} r_s
std::cout << GridLogMessage<<"HDCG: fPcg Vstart promote "<<std::endl;
this->_Aggregates.PromoteFromSubspace(PleftMss_proj,x);
}
};
template<class Field, class CoarseField, class Aggregation>
class TwoLevelADEF2mrhs : public TwoLevelADEF2<Field,CoarseField,Aggregation>
{
public:
GridBase *coarsegridmrhs;
LinearFunction<CoarseField> &_CoarseSolverMrhs;
LinearFunction<CoarseField> &_CoarseSolverPreciseMrhs;
LinearFunction<CoarseField> &_CoarseGuesser;
TwoLevelADEF2mrhs(RealD tol,
Integer maxit,
LinearOperatorBase<Field> &FineLinop,
LinearFunction<Field> &Smoother,
// LinearFunction<CoarseField> &CoarseSolver,
// LinearFunction<CoarseField> &CoarseSolverPrecise,
LinearFunction<CoarseField> &CoarseSolverMrhs,
LinearFunction<CoarseField> &CoarseSolverPreciseMrhs,
LinearFunction<CoarseField> &CoarseGuesser,
GridBase *rhsgrid,
Aggregation &Aggregates) :
TwoLevelADEF2<Field,CoarseField,Aggregation>(tol, maxit,FineLinop,Smoother,CoarseSolverMrhs,CoarseSolverPreciseMrhs,Aggregates),
_CoarseSolverMrhs(CoarseSolverMrhs),
_CoarseSolverPreciseMrhs(CoarseSolverPreciseMrhs),
_CoarseGuesser(CoarseGuesser)
{
coarsegridmrhs = rhsgrid;
};
virtual void Vstart(std::vector<Field> & x,std::vector<Field> & src)
{
int nrhs=x.size();
std::cout << GridLogMessage<<"HDCG: fPcg Vstart for "<<nrhs<<" right hand sides" <<std::endl;
///////////////////////////////////
// Choose x_0 such that
// x_0 = guess + (A_ss^inv) r_s = guess + Ass_inv [src -Aguess]
// = [1 - Ass_inv A] Guess + Assinv src
// = P^T guess + Assinv src
// = Vstart [Tang notation]
// This gives:
// W^T (src - A x_0) = src_s - A guess_s - r_s
// = src_s - (A guess)_s - src_s + (A guess)_s
// = 0
///////////////////////////////////
CoarseField PleftProj(this->coarsegrid);
CoarseField PleftMss_proj(this->coarsegrid);
CoarseField PleftProjMrhs(this->coarsegridmrhs);
CoarseField PleftMss_projMrhs(this->coarsegridmrhs);
std::cout << GridLogMessage<<"HDCG: fPcg Vstart Mrhs projecting "<<std::endl;
for(int rhs=0;rhs<nrhs;rhs++) {
this->_Aggregates.ProjectToSubspace(PleftProj,src[rhs]); // can optimise later
InsertSliceFast(PleftProj,PleftProjMrhs,rhs,0);
this->_CoarseGuesser(PleftProj,PleftMss_proj);
InsertSliceFast(PleftMss_proj,PleftMss_projMrhs,rhs,0);
}
HermOp(x,mmp);
axpy (r, -1.0, mmp, src); // r_{-1} = src - A x
ProjectToSubspace(r,PleftProj);
ApplyInverseCG(PleftProj,PleftMss_proj); // Ass^{-1} r_s
PromoteFromSubspace(PleftMss_proj,mmp);
x=x+mmp;
std::cout << GridLogMessage<<"HDCG: fPcg Vstart Mrhs coarse solve "<<std::endl;
this->_CoarseSolverPreciseMrhs(PleftProjMrhs,PleftMss_projMrhs); // Ass^{-1} r_s
std::cout << GridLogMessage<<"HDCG: fPcg Vstart promote "<<std::endl;
for(int rhs=0;rhs<nrhs;rhs++) {
ExtractSliceFast(PleftMss_proj,PleftMss_projMrhs,rhs,0);
this->_Aggregates.PromoteFromSubspace(PleftMss_proj,x[rhs]);
}
}
virtual void PcgM1(std::vector<Field> & in,std::vector<Field> & out){
int nrhs=in.size();
std::cout << " mrhs PcgM1 for "<<nrhs<<" right hand sides"<<std::endl;
MemoryManager::Print();
// [PTM+Q] in = [1 - Q A] M in + Q in = Min + Q [ in -A Min]
Field tmp(this->grid);
std::vector<Field> Min(nrhs,this->grid);
std::cout << " mrhs PcgM1 Min "<<std::endl;
CoarseField PleftProj(this->coarsegrid);
CoarseField PleftMss_proj(this->coarsegrid);
CoarseField PleftProjMrhs(this->coarsegridmrhs);
CoarseField PleftMss_projMrhs(this->coarsegridmrhs);
std::cout << " mrhs Coarse ops "<<std::endl;
// Really want the coarse solver
// to do the guessing itself, knowing the eigenvectors.
// The projection to coarse space is in aggregates
// If the Aggregates have a layout change option
// they could formulate as a BLAS routine.
// Put the routines in this object
for(int rhs=0;rhs<nrhs;rhs++) {
std::cout << GridLogMessage<<" Smoother for "<<rhs<<std::endl;
this->_Smoother(in[rhs],Min[rhs]);
std::cout << GridLogMessage<<" HermOp for "<<rhs<<std::endl;
this->_FineLinop.HermOp(Min[rhs],out[rhs]);
axpy(tmp,-1.0,out[rhs],in[rhs]); // tmp = in - A Min
// Was
// this->_Aggregates.ProjectToSubspace(PleftProj,tmp); // can optimise later
// Now:
std::cout << GridLogMessage<<" blockProject for "<<rhs<<std::endl;
blockProjectFast(PleftProj,tmp,this->_Aggregates.subspace);
std::cout << GridLogMessage<<" InsertSlice for "<<rhs<<std::endl;
InsertSlice(PleftProj,PleftProjMrhs,rhs,0);
std::cout << GridLogMessage<<" CoarseGuesser for "<<rhs<<std::endl;
this->_CoarseGuesser(PleftProj,PleftMss_proj);
std::cout << GridLogMessage<<" InsertSlice for "<<rhs<<std::endl;
InsertSlice(PleftMss_proj,PleftMss_projMrhs,rhs,0);
}
MemoryManager::Print();
std::cout << " Coarse solve "<<std::endl;
this->_CoarseSolverMrhs(PleftProjMrhs,PleftMss_projMrhs); // Ass^{-1} [in - A Min]_s
std::cout << " Coarse solve done"<<std::endl;
MemoryManager::Print();
for(int rhs=0;rhs<nrhs;rhs++) {
std::cout << GridLogMessage<<" Extract for "<<rhs<<std::endl;
ExtractSlice(PleftMss_proj,PleftMss_projMrhs,rhs,0);
std::cout << GridLogMessage<<" Promote for "<<rhs<<std::endl;
this->_Aggregates.PromoteFromSubspace(PleftMss_proj,tmp);// tmp = Q[in - A Min]
// std::cout << " add for "<<rhs<<std::endl;
axpy(out[rhs],1.0,Min[rhs],tmp); // Min+tmp
}
MemoryManager::Print();
std::cout << " Extracted "<<std::endl;
}
};
template<class Field>
class TwoLevelADEF1defl : public TwoLevelCG<Field>
{
public:
const std::vector<Field> &evec;
const std::vector<RealD> &eval;
TwoLevelADEF1defl(RealD tol,
Integer maxit,
LinearOperatorBase<Field> &FineLinop,
LinearFunction<Field> &Smoother,
std::vector<Field> &_evec,
std::vector<RealD> &_eval) :
TwoLevelCG<Field>(tol,maxit,FineLinop,Smoother,_evec[0].Grid()),
evec(_evec),
eval(_eval)
{};
// Can just inherit existing M2
// Can just inherit existing M3
// Simple vstart - do nothing
virtual void Vstart(Field & x,const Field & src){
return;
x=src; // Could apply Q
};
// Override PcgM1
virtual void PcgM1(Field & in, Field & out)
{
GRID_TRACE("EvecPreconditioner ");
int N=evec.size();
Field Pin(this->grid);
Field Qin(this->grid);
//MP + Q = M(1-AQ) + Q = M
// // If we are eigenvector deflating in coarse space
// // Q = Sum_i |phi_i> 1/lambda_i <phi_i|
// // A Q = Sum_i |phi_i> <phi_i|
// // M(1-AQ) + Q = M(1-proj) + Q
Qin.Checkerboard()=in.Checkerboard();
Qin = Zero();
Pin = in;
for (int i=0;i<N;i++) {
const Field& tmp = evec[i];
auto ip = TensorRemove(innerProduct(tmp,in));
axpy(Qin, ip / eval[i],tmp,Qin);
axpy(Pin, -ip ,tmp,Pin);
}
this->_Smoother(Pin,out);
out = out + Qin;
}
};
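// Minimal usage sketch (FineHermOp, Smoother, evec, eval, src and sol are assumed
// to exist elsewhere; they are not defined in this header):
//
//   TwoLevelADEF1defl<LatticeFermion> HDCGdefl(1.0e-8,500,FineHermOp,Smoother,evec,eval);
//   HDCGdefl(src,sol);   // single right hand side solve with eigenvector-deflated smoothing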
/////////////////////////////////////////////////////////////////////
// Only Def1 has non-trivial Vout. Override in Def1
/////////////////////////////////////////////////////////////////////
virtual void Vout (Field & in, Field & out,Field & src){
out = in;
//case PcgDef1:
// //Qb + PT x
// ProjectToSubspace(src,PleftProj);
// ApplyInverse(PleftProj,PleftMss_proj); // Ass^{-1} r_s
// PromoteFromSubspace(PleftMss_proj,tmp);
//
// Pright(in,out);
//
// linop_d->axpy(out,tmp,out,1.0);
// break;
}
NAMESPACE_END(Grid);
////////////////////////////////////////////////////////////////////////////////////////////////
// Pright and Pleft are common to all implementations
////////////////////////////////////////////////////////////////////////////////////////////////
virtual void Pright(Field & in,Field & out){
// P_R = [ 1 0 ]
// [ -Mss^-1 Msb 0 ]
Field in_sbar(grid);
ProjectToSubspace(in,PleftProj);
PromoteFromSubspace(PleftProj,out);
axpy(in_sbar,-1.0,out,in); // in_sbar = in - in_s
HermOp(in_sbar,out);
ProjectToSubspace(out,PleftProj); // Mssbar in_sbar (project)
ApplyInverse (PleftProj,PleftMss_proj); // Mss^{-1} Mssbar
PromoteFromSubspace(PleftMss_proj,out); //
axpy(out,-1.0,out,in_sbar); // in_sbar - Mss^{-1} Mssbar in_sbar
}
virtual void Pleft (Field & in,Field & out){
// P_L = [ 1 -Mbs Mss^-1]
// [ 0 0 ]
Field in_sbar(grid);
Field tmp2(grid);
Field Mtmp(grid);
ProjectToSubspace(in,PleftProj);
PromoteFromSubspace(PleftProj,out);
axpy(in_sbar,-1.0,out,in); // in_sbar = in - in_s
ApplyInverse(PleftProj,PleftMss_proj); // Mss^{-1} in_s
PromoteFromSubspace(PleftMss_proj,out);
HermOp(out,Mtmp);
ProjectToSubspace(Mtmp,PleftProj); // Msbar s Mss^{-1}
PromoteFromSubspace(PleftProj,tmp2);
axpy(out,-1.0,tmp2,Mtmp);
axpy(out,-1.0,out,in_sbar); // in_sbar - Msbars Mss^{-1} in_s
}
}
template<class Field>
class TwoLevelFlexiblePcgADef2 : public TwoLevelFlexiblePcg<Field> {
public:
virtual void M(Field & in,Field & out,Field & tmp){
}
virtual void M1(Field & in, Field & out,Field & tmp,Field & mp){
}
virtual void M2(Field & in, Field & out){
}
virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp){
}
virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp){
}
}
/*
template<class Field>
class TwoLevelFlexiblePcgAD : public TwoLevelFlexiblePcg<Field> {
public:
virtual void M(Field & in,Field & out,Field & tmp);
virtual void M1(Field & in, Field & out,Field & tmp,Field & mp);
virtual void M2(Field & in, Field & out);
virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp);
virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp);
}
template<class Field>
class TwoLevelFlexiblePcgDef1 : public TwoLevelFlexiblePcg<Field> {
public:
virtual void M(Field & in,Field & out,Field & tmp);
virtual void M1(Field & in, Field & out,Field & tmp,Field & mp);
virtual void M2(Field & in, Field & out);
virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp);
virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp);
virtual void Vout (Field & in, Field & out,Field & src,Field & tmp);
}
template<class Field>
class TwoLevelFlexiblePcgDef2 : public TwoLevelFlexiblePcg<Field> {
public:
virtual void M(Field & in,Field & out,Field & tmp);
virtual void M1(Field & in, Field & out,Field & tmp,Field & mp);
virtual void M2(Field & in, Field & out);
virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp);
virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp);
}
template<class Field>
class TwoLevelFlexiblePcgV11: public TwoLevelFlexiblePcg<Field> {
public:
virtual void M(Field & in,Field & out,Field & tmp);
virtual void M1(Field & in, Field & out,Field & tmp,Field & mp);
virtual void M2(Field & in, Field & out);
virtual RealD M3(Field & p, Field & mp,Field & mmp, Field & tmp);
virtual void Vstart(Field & in, Field & src, Field & r, Field & mp, Field & mmp, Field & tmp);
}
*/
#endif

View File

@ -183,13 +183,13 @@ public:
<< "\tTrue residual " << true_residual
<< "\tTarget " << Tolerance << std::endl;
std::cout << GridLogMessage << "Time breakdown "<<std::endl;
std::cout << GridLogMessage << "\tElapsed " << SolverTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tInner " << InnerTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tAxpyNorm " << AxpyNormTimer.Elapsed() <<std::endl;
std::cout << GridLogMessage << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "Time breakdown "<<std::endl;
std::cout << GridLogPerformance << "\tMatrix " << MatrixTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tLinalg " << LinalgTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tInner " << InnerTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tAxpyNorm " << AxpyNormTimer.Elapsed() <<std::endl;
std::cout << GridLogPerformance << "\tLinearComb " << LinearCombTimer.Elapsed() <<std::endl;
std::cout << GridLogDebug << "\tMobius flop rate " << DwfFlops/ usecs<< " Gflops " <<std::endl;
@ -207,7 +207,8 @@ public:
TrueResidual = sqrt(norm2(p)/ssq);
std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations<< std::endl;
std::cout << GridLogMessage << "ConjugateGradient did NOT converge "<<k<<" / "<< MaxIterations
<<" residual "<< TrueResidual<< std::endl;
if (ErrorOnNoConverge) assert(0);
IterationsToComplete = k;

View File

@ -144,7 +144,7 @@ public:
for(int s=0;s<nshift;s++){
rsq[s] = cp * mresidual[s] * mresidual[s];
std::cout<<GridLogMessage<<"ConjugateGradientMultiShift: shift "<<s
<<" target resid "<<rsq[s]<<std::endl;
<<" target resid^2 "<<rsq[s]<<std::endl;
ps[s] = src;
}
// r and p for primary

View File

@ -79,14 +79,16 @@ template<class Field> class ImplicitlyRestartedLanczosHermOpTester : public Imp
RealD vv = norm2(v) / ::pow(evalMaxApprox,2.0);
std::cout.precision(13);
std::cout<<GridLogIRL << "[" << std::setw(3)<<j<<"] "
<<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
<<std::endl;
int conv=0;
if( (vv<eresid*eresid) ) conv = 1;
std::cout<<GridLogIRL << "[" << std::setw(3)<<j<<"] "
<<"eval = "<<std::setw(25)<< eval << " (" << eval_poly << ")"
<<" |H B[i] - eval[i]B[i]|^2 / evalMaxApprox^2 " << std::setw(25) << vv
<<" target " << eresid*eresid << " conv " <<conv
<<std::endl;
return conv;
}
};
@ -457,7 +459,7 @@ until convergence
std::vector<Field>& evec,
Field& w,int Nm,int k)
{
std::cout<<GridLogIRL << "Lanczos step " <<k<<std::endl;
std::cout<<GridLogDebug << "Lanczos step " <<k<<std::endl;
const RealD tiny = 1.0e-20;
assert( k< Nm );
@ -465,7 +467,7 @@ until convergence
Field& evec_k = evec[k];
_PolyOp(evec_k,w); std::cout<<GridLogIRL << "PolyOp" <<std::endl;
_PolyOp(evec_k,w); std::cout<<GridLogDebug << "PolyOp" <<std::endl;
if(k>0) w -= lme[k-1] * evec[k-1];
@ -480,18 +482,18 @@ until convergence
lme[k] = beta;
if ( (k>0) && ( (k % orth_period) == 0 )) {
std::cout<<GridLogIRL << "Orthogonalising " <<k<<std::endl;
std::cout<<GridLogDebug << "Orthogonalising " <<k<<std::endl;
orthogonalize(w,evec,k); // orthonormalise
std::cout<<GridLogIRL << "Orthogonalised " <<k<<std::endl;
std::cout<<GridLogDebug << "Orthogonalised " <<k<<std::endl;
}
if(k < Nm-1) evec[k+1] = w;
std::cout<<GridLogIRL << "alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
std::cout<<GridLogIRL << "Lanczos step alpha[" << k << "] = " << zalph << " beta[" << k << "] = "<<beta<<std::endl;
if ( beta < tiny )
std::cout<<GridLogIRL << " beta is tiny "<<beta<<std::endl;
std::cout<<GridLogIRL << "Lanczos step complete " <<k<<std::endl;
std::cout<<GridLogDebug << "Lanczos step complete " <<k<<std::endl;
}
void diagonalize_Eigen(std::vector<RealD>& lmd, std::vector<RealD>& lme,

View File

@ -33,7 +33,7 @@ NAMESPACE_BEGIN(Grid);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Take a matrix and form an NE solver calling a Herm solver
///////////////////////////////////////////////////////////////////////////////////////////////////////
template<class Field> class NormalEquations {
template<class Field> class NormalEquations : public LinearFunction<Field>{
private:
SparseMatrixBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;
@ -60,7 +60,7 @@ public:
}
};
template<class Field> class HPDSolver {
template<class Field> class HPDSolver : public LinearFunction<Field> {
private:
LinearOperatorBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;
@ -78,13 +78,13 @@ public:
void operator() (const Field &in, Field &out){
_Guess(in,out);
_HermitianSolver(_Matrix,in,out); // Mdag M out = Mdag in
_HermitianSolver(_Matrix,in,out); //M out = in
}
};
template<class Field> class MdagMSolver {
template<class Field> class MdagMSolver : public LinearFunction<Field> {
private:
SparseMatrixBase<Field> & _Matrix;
OperatorFunction<Field> & _HermitianSolver;

View File

@ -20,7 +20,7 @@ template<class Field> class PowerMethod
RealD evalMaxApprox = 0.0;
auto src_n = src;
auto tmp = src;
const int _MAX_ITER_EST_ = 50;
const int _MAX_ITER_EST_ = 100;
for (int i=0;i<_MAX_ITER_EST_;i++) {

View File

@ -0,0 +1,383 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/Aggregates.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: Peter Boyle <peterboyle@Peters-MacBook-Pro-2.local>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
inline RealD AggregatePowerLaw(RealD x)
{
// return std::pow(x,-4);
// return std::pow(x,-3);
return std::pow(x,-5);
}
template<class Fobj,class CComplex,int nbasis>
class Aggregation {
public:
constexpr int Nbasis(void) { return nbasis; };
typedef iVector<CComplex,nbasis > siteVector;
typedef Lattice<siteVector> CoarseVector;
typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix;
typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj > FineField;
GridBase *CoarseGrid;
GridBase *FineGrid;
std::vector<Lattice<Fobj> > subspace;
int checkerboard;
int Checkerboard(void){return checkerboard;}
Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) :
CoarseGrid(_CoarseGrid),
FineGrid(_FineGrid),
subspace(nbasis,_FineGrid),
checkerboard(_checkerboard)
{
};
void Orthogonalise(void){
CoarseScalar InnerProd(CoarseGrid);
// std::cout << GridLogMessage <<" Block Gramm-Schmidt pass 1"<<std::endl;
blockOrthogonalise(InnerProd,subspace);
}
void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){
blockProject(CoarseVec,FineVec,subspace);
}
void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){
FineVec.Checkerboard() = subspace[0].Checkerboard();
blockPromote(CoarseVec,FineVec,subspace);
}
virtual void CreateSubspaceRandom(GridParallelRNG &RNG) {
int nn=nbasis;
RealD scale;
FineField noise(FineGrid);
for(int b=0;b<nn;b++){
subspace[b] = Zero();
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
subspace[b] = noise;
}
}
virtual void CreateSubspace(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis)
{
RealD scale;
ConjugateGradient<FineField> CG(1.0e-2,100,false);
FineField noise(FineGrid);
FineField Mn(FineGrid);
for(int b=0;b<nn;b++){
subspace[b] = Zero();
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl;
for(int i=0;i<1;i++){
CG(hermop,noise,subspace[b]);
noise = subspace[b];
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
}
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl;
subspace[b] = noise;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////
// World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit)
// and this is the best I found
////////////////////////////////////////////////////////////////////////////////////////////////
virtual void CreateSubspaceChebyshev(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,
int nn,
double hi,
double lo,
int orderfilter,
int ordermin,
int orderstep,
double filterlo
) {
RealD scale;
FineField noise(FineGrid);
FineField Mn(FineGrid);
FineField tmp(FineGrid);
// New normalised noise
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
std::cout << GridLogMessage<<" Chebyshev subspace pass-1 : ord "<<orderfilter<<" ["<<lo<<","<<hi<<"]"<<std::endl;
std::cout << GridLogMessage<<" Chebyshev subspace pass-2 : nbasis"<<nn<<" min "
<<ordermin<<" step "<<orderstep
<<" lo"<<filterlo<<std::endl;
// Initial matrix element
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl;
int b =0;
{
// Filter
Chebyshev<FineField> Cheb(lo,hi,orderfilter);
Cheb(hermop,noise,Mn);
// normalise
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
b++;
}
// Generate a full sequence of Chebyshevs
{
lo=filterlo;
noise=Mn;
FineField T0(FineGrid); T0 = noise;
FineField T1(FineGrid);
FineField T2(FineGrid);
FineField y(FineGrid);
FineField *Tnm = &T0;
FineField *Tn = &T1;
FineField *Tnp = &T2;
// Tn=T1 = (xscale M + mscale)in
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
hermop.HermOp(T0,y);
T1=y*xscale+noise*mscale;
for(int n=2;n<=ordermin+orderstep*(nn-2);n++){
hermop.HermOp(*Tn,y);
autoView( y_v , y, AcceleratorWrite);
autoView( Tn_v , (*Tn), AcceleratorWrite);
autoView( Tnp_v , (*Tnp), AcceleratorWrite);
autoView( Tnm_v , (*Tnm), AcceleratorWrite);
const int Nsimd = CComplex::Nsimd();
accelerator_for(ss, FineGrid->oSites(), Nsimd, {
coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
});
// Possibly more fine-grained control is needed than a linear sweep,
// but there is a huge productivity gain if this is a simple algorithm and not a tunable
int m =1;
if ( n>=ordermin ) m=n-ordermin;
if ( (m%orderstep)==0 ) {
Mn=*Tnp;
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << n<<" filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
b++;
}
// Cycle pointers to avoid copies
FineField *swizzle = Tnm;
Tnm =Tn;
Tn =Tnp;
Tnp =swizzle;
}
}
assert(b==nn);
}
virtual void CreateSubspaceChebyshev(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,
int nn,
double hi,
double lo,
int orderfilter
) {
RealD scale;
FineField noise(FineGrid);
FineField Mn(FineGrid);
FineField tmp(FineGrid);
// New normalised noise
std::cout << GridLogMessage<<" Chebyshev subspace pure noise : ord "<<orderfilter<<" ["<<lo<<","<<hi<<"]"<<std::endl;
std::cout << GridLogMessage<<" Chebyshev subspace pure noise : nbasis "<<nn<<std::endl;
for(int b =0;b<nbasis;b++)
{
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
// Initial matrix element
hermop.Op(noise,Mn);
if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl;
// Filter
Chebyshev<FineField> Cheb(lo,hi,orderfilter);
Cheb(hermop,noise,Mn);
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
// Refine
Chebyshev<FineField> PowerLaw(lo,hi,1000,AggregatePowerLaw);
noise = Mn;
PowerLaw(hermop,noise,Mn);
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
// normalise
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
}
}
virtual void CreateSubspaceChebyshevPowerLaw(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,
int nn,
double hi,
int orderfilter
) {
RealD scale;
FineField noise(FineGrid);
FineField Mn(FineGrid);
FineField tmp(FineGrid);
// New normalised noise
std::cout << GridLogMessage<<" Chebyshev subspace pure noise : ord "<<orderfilter<<" [0,"<<hi<<"]"<<std::endl;
std::cout << GridLogMessage<<" Chebyshev subspace pure noise : nbasis "<<nn<<std::endl;
for(int b =0;b<nbasis;b++)
{
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
// Initial matrix element
hermop.Op(noise,Mn);
if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl;
// Filter
Chebyshev<FineField> Cheb(0.0,hi,orderfilter,AggregatePowerLaw);
Cheb(hermop,noise,Mn);
// normalise
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
}
}
virtual void CreateSubspaceMultishift(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,
double Lo,double tol,int maxit)
{
RealD scale;
FineField noise(FineGrid);
FineField Mn(FineGrid);
FineField tmp(FineGrid);
// New normalised noise
std::cout << GridLogMessage<<" Multishift subspace : Lo "<<Lo<<std::endl;
// Filter
// (1/6)/(x+Lo) - (1/2)/(x+2Lo) + (1/2)/(x+3Lo) - (1/6)/(x+4Lo) = Lo^3 / [ (x+Lo)(x+2Lo)(x+3Lo)(x+4Lo) ]
//
// 1/(x+Lo) - 1/(x+2 Lo)
double epsilon = Lo/3;
std::vector<RealD> alpha({1.0/6.0,-1.0/2.0,1.0/2.0,-1.0/6.0});
std::vector<RealD> shifts({Lo,Lo+epsilon,Lo+2*epsilon,Lo+3*epsilon});
std::vector<RealD> tols({tol,tol,tol,tol});
std::cout << "sizes "<<alpha.size()<<" "<<shifts.size()<<" "<<tols.size()<<std::endl;
MultiShiftFunction msf(4,0.0,95.0);
std::cout << "msf constructed "<<std::endl;
msf.poles=shifts;
msf.residues=alpha;
msf.tolerances=tols;
msf.norm=0.0;
msf.order=alpha.size();
ConjugateGradientMultiShift<FineField> MSCG(maxit,msf);
for(int b =0;b<nbasis;b++)
{
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
// Initial matrix element
hermop.Op(noise,Mn);
if(b==0) std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl;
MSCG(hermop,noise,Mn);
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
}
}
virtual void RefineSubspace(LinearOperatorBase<FineField> &hermop,
double Lo,double tol,int maxit)
{
FineField tmp(FineGrid);
for(int b =0;b<nbasis;b++)
{
RealD MirsShift = Lo;
ConjugateGradient<FineField> CGsloppy(tol,maxit,false);
ShiftedHermOpLinearOperator<FineField> ShiftedFineHermOp(hermop,MirsShift);
tmp=Zero();
CGsloppy(ShiftedFineHermOp,subspace[b],tmp); // refine against the Mirs-shifted operator
subspace[b]=tmp;
}
}
};
NAMESPACE_END(Grid);
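A minimal usage sketch of the subspace-setup entry points above (not part of this patch); the operator wrapper HermOp, the grids, the RNG seeding and all numerical parameters are illustrative assumptions only:
const int nbasis = 40;                                    // compile-time basis size (illustrative)
typedef Aggregation<vSpinColourVector,vTComplex,nbasis> Subspace;
GridParallelRNG RNG(FineGrid); RNG.SeedFixedIntegers(std::vector<int>({1,2,3,4}));
Subspace Aggregates(CoarseGrid,FineGrid,0);               // last argument: checkerboard
Aggregates.CreateSubspaceChebyshev(RNG,HermOp,nbasis,
                                   95.0,                  // hi: upper edge of the spectrum (assumed)
                                   0.1,                   // lo: filter cut (assumed)
                                   64);                   // orderfilter (assumed)
Aggregates.RefineSubspace(HermOp,0.005,1.0e-3,300);       // Mirs shift Lo, CG tolerance, maxit
// block orthogonalisation of the vectors is applied later, inside CoarsenOperator()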

View File

@ -56,243 +56,6 @@ inline void blockMaskedInnerProduct(Lattice<CComplex> &CoarseInner,
blockSum(CoarseInner,fine_inner_msk);
}
class Geometry {
public:
int npoint;
int base;
std::vector<int> directions ;
std::vector<int> displacements;
std::vector<int> points_dagger;
Geometry(int _d) {
base = (_d==5) ? 1:0;
// make coarse grid stencil for 4d , not 5d
if ( _d==5 ) _d=4;
npoint = 2*_d+1;
directions.resize(npoint);
displacements.resize(npoint);
points_dagger.resize(npoint);
for(int d=0;d<_d;d++){
directions[d ] = d+base;
directions[d+_d] = d+base;
displacements[d ] = +1;
displacements[d+_d]= -1;
points_dagger[d ] = d+_d;
points_dagger[d+_d] = d;
}
directions [2*_d]=0;
displacements[2*_d]=0;
points_dagger[2*_d]=2*_d;
}
int point(int dir, int disp) {
assert(disp == -1 || disp == 0 || disp == 1);
assert(base+0 <= dir && dir < base+4);
// directions faster index = new indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 1 2 3 0 1 2 3 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 2 3 4 1 2 3 4 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// displacements faster index = old indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 0 1 1 2 2 3 3 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 1 2 2 3 3 4 4 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
if(dir == 0 and disp == 0)
return 8;
else // New indexing
return (1 - disp) / 2 * 4 + dir - base;
// else // Old indexing
// return (4 * (dir - base) + 1 - disp) / 2;
}
};
template<class Fobj,class CComplex,int nbasis>
class Aggregation {
public:
typedef iVector<CComplex,nbasis > siteVector;
typedef Lattice<siteVector> CoarseVector;
typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix;
typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj > FineField;
GridBase *CoarseGrid;
GridBase *FineGrid;
std::vector<Lattice<Fobj> > subspace;
int checkerboard;
int Checkerboard(void){return checkerboard;}
Aggregation(GridBase *_CoarseGrid,GridBase *_FineGrid,int _checkerboard) :
CoarseGrid(_CoarseGrid),
FineGrid(_FineGrid),
subspace(nbasis,_FineGrid),
checkerboard(_checkerboard)
{
};
void Orthogonalise(void){
CoarseScalar InnerProd(CoarseGrid);
std::cout << GridLogMessage <<" Block Gram-Schmidt pass 1"<<std::endl;
blockOrthogonalise(InnerProd,subspace);
}
void ProjectToSubspace(CoarseVector &CoarseVec,const FineField &FineVec){
blockProject(CoarseVec,FineVec,subspace);
}
void PromoteFromSubspace(const CoarseVector &CoarseVec,FineField &FineVec){
FineVec.Checkerboard() = subspace[0].Checkerboard();
blockPromote(CoarseVec,FineVec,subspace);
}
virtual void CreateSubspace(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,int nn=nbasis) {
RealD scale;
ConjugateGradient<FineField> CG(1.0e-2,100,false);
FineField noise(FineGrid);
FineField Mn(FineGrid);
for(int b=0;b<nn;b++){
subspace[b] = Zero();
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise ["<<b<<"] <n|MdagM|n> "<<norm2(Mn)<<std::endl;
for(int i=0;i<1;i++){
CG(hermop,noise,subspace[b]);
noise = subspace[b];
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
}
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "filtered["<<b<<"] <f|MdagM|f> "<<norm2(Mn)<<std::endl;
subspace[b] = noise;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////
// World of possibilities here. But have tried quite a lot of experiments (250+ jobs run on Summit)
// and this is the best I found
////////////////////////////////////////////////////////////////////////////////////////////////
virtual void CreateSubspaceChebyshev(GridParallelRNG &RNG,LinearOperatorBase<FineField> &hermop,
int nn,
double hi,
double lo,
int orderfilter,
int ordermin,
int orderstep,
double filterlo
) {
RealD scale;
FineField noise(FineGrid);
FineField Mn(FineGrid);
FineField tmp(FineGrid);
// New normalised noise
gaussian(RNG,noise);
scale = std::pow(norm2(noise),-0.5);
noise=noise*scale;
// Initial matrix element
hermop.Op(noise,Mn); std::cout<<GridLogMessage << "noise <n|MdagM|n> "<<norm2(Mn)<<std::endl;
int b =0;
{
// Filter
Chebyshev<FineField> Cheb(lo,hi,orderfilter);
Cheb(hermop,noise,Mn);
// normalise
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << "filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
b++;
}
// Generate a full sequence of Chebyshevs
{
lo=filterlo;
noise=Mn;
FineField T0(FineGrid); T0 = noise;
FineField T1(FineGrid);
FineField T2(FineGrid);
FineField y(FineGrid);
FineField *Tnm = &T0;
FineField *Tn = &T1;
FineField *Tnp = &T2;
// Tn=T1 = (xscale M + mscale)in
RealD xscale = 2.0/(hi-lo);
RealD mscale = -(hi+lo)/(hi-lo);
hermop.HermOp(T0,y);
T1=y*xscale+noise*mscale;
for(int n=2;n<=ordermin+orderstep*(nn-2);n++){
hermop.HermOp(*Tn,y);
autoView( y_v , y, AcceleratorWrite);
autoView( Tn_v , (*Tn), AcceleratorWrite);
autoView( Tnp_v , (*Tnp), AcceleratorWrite);
autoView( Tnm_v , (*Tnm), AcceleratorWrite);
const int Nsimd = CComplex::Nsimd();
accelerator_for(ss, FineGrid->oSites(), Nsimd, {
coalescedWrite(y_v[ss],xscale*y_v(ss)+mscale*Tn_v(ss));
coalescedWrite(Tnp_v[ss],2.0*y_v(ss)-Tnm_v(ss));
});
// Possible more fine grained control is needed than a linear sweep,
// but huge productivity gain if this is simple algorithm and not a tunable
int m =1;
if ( n>=ordermin ) m=n-ordermin;
if ( (m%orderstep)==0 ) {
Mn=*Tnp;
scale = std::pow(norm2(Mn),-0.5); Mn=Mn*scale;
subspace[b] = Mn;
hermop.Op(Mn,tmp);
std::cout<<GridLogMessage << n<<" filt ["<<b<<"] <n|MdagM|n> "<<norm2(tmp)<<std::endl;
b++;
}
// Cycle pointers to avoid copies
FineField *swizzle = Tnm;
Tnm =Tn;
Tn =Tnp;
Tnp =swizzle;
}
}
assert(b==nn);
}
};
// Fine Object == (per site) type of fine field
// nbasis == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>

View File

@ -0,0 +1,621 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/GeneralCoarsenedMatrix.h
Copyright (C) 2015
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/qcd/QCD.h> // needed for Dagger(Yes|No), Inverse(Yes|No)
#include <Grid/lattice/PaddedCell.h>
#include <Grid/stencil/GeneralLocalStencil.h>
NAMESPACE_BEGIN(Grid);
// Fine Object == (per site) type of fine field
// nbasis == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>
class GeneralCoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
public:
typedef GeneralCoarsenedMatrix<Fobj,CComplex,nbasis> GeneralCoarseOp;
typedef iVector<CComplex,nbasis > siteVector;
typedef iMatrix<CComplex,nbasis > siteMatrix;
typedef Lattice<iScalar<CComplex> > CoarseComplexField;
typedef Lattice<siteVector> CoarseVector;
typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix;
typedef iMatrix<CComplex,nbasis > Cobj;
typedef iVector<CComplex,nbasis > Cvec;
typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj > FineField;
typedef Lattice<CComplex > FineComplexField;
typedef CoarseVector Field;
////////////////////
// Data members
////////////////////
int hermitian;
GridBase * _FineGrid;
GridCartesian * _CoarseGrid;
NonLocalStencilGeometry &geom;
PaddedCell Cell;
GeneralLocalStencil Stencil;
std::vector<CoarseMatrix> _A;
std::vector<CoarseMatrix> _Adag;
std::vector<CoarseVector> MultTemporaries;
///////////////////////
// Interface
///////////////////////
GridBase * Grid(void) { return _CoarseGrid; }; // this is all the linalg routines need to know
GridBase * FineGrid(void) { return _FineGrid; }; // this is all the linalg routines need to know
GridCartesian * CoarseGrid(void) { return _CoarseGrid; }; // this is all the linalg routines need to know
/* void ShiftMatrix(RealD shift)
{
int Nd=_FineGrid->Nd();
Coordinate zero_shift(Nd,0);
for(int p=0;p<geom.npoint;p++){
if ( zero_shift==geom.shifts[p] ) {
_A[p] = _A[p]+shift;
// _Adag[p] = _Adag[p]+shift;
}
}
}
void ProjectNearestNeighbour(RealD shift, GeneralCoarseOp &CopyMe)
{
int nfound=0;
std::cout << GridLogMessage <<"GeneralCoarsenedMatrix::ProjectNearestNeighbour "<< CopyMe._A[0].Grid()<<std::endl;
for(int p=0;p<geom.npoint;p++){
for(int pp=0;pp<CopyMe.geom.npoint;pp++){
// Search for the same relative shift
// Avoids brutal handling of Grid pointers
if ( CopyMe.geom.shifts[pp]==geom.shifts[p] ) {
_A[p] = CopyMe.Cell.Extract(CopyMe._A[pp]);
// _Adag[p] = CopyMe.Cell.Extract(CopyMe._Adag[pp]);
nfound++;
}
}
}
assert(nfound==geom.npoint);
ExchangeCoarseLinks();
}
*/
GeneralCoarsenedMatrix(NonLocalStencilGeometry &_geom,GridBase *FineGrid, GridCartesian * CoarseGrid)
: geom(_geom),
_FineGrid(FineGrid),
_CoarseGrid(CoarseGrid),
hermitian(1),
Cell(_geom.Depth(),_CoarseGrid),
Stencil(Cell.grids.back(),geom.shifts)
{
{
int npoint = _geom.npoint;
}
_A.resize(geom.npoint,CoarseGrid);
// _Adag.resize(geom.npoint,CoarseGrid);
}
void M (const CoarseVector &in, CoarseVector &out)
{
Mult(_A,in,out);
}
void Mdag (const CoarseVector &in, CoarseVector &out)
{
assert(hermitian);
Mult(_A,in,out);
// if ( hermitian ) M(in,out);
// else Mult(_Adag,in,out);
}
void Mult (std::vector<CoarseMatrix> &A,const CoarseVector &in, CoarseVector &out)
{
RealD tviews=0; RealD ttot=0; RealD tmult=0; RealD texch=0; RealD text=0; RealD ttemps=0; RealD tcopy=0;
RealD tmult2=0;
ttot=-usecond();
conformable(CoarseGrid(),in.Grid());
conformable(in.Grid(),out.Grid());
out.Checkerboard() = in.Checkerboard();
CoarseVector tin=in;
texch-=usecond();
CoarseVector pin = Cell.ExchangePeriodic(tin);
texch+=usecond();
CoarseVector pout(pin.Grid());
int npoint = geom.npoint;
typedef LatticeView<Cobj> Aview;
typedef LatticeView<Cvec> Vview;
const int Nsimd = CComplex::Nsimd();
int64_t osites=pin.Grid()->oSites();
RealD flops = 1.0* npoint * nbasis * nbasis * 8.0 * osites * CComplex::Nsimd();
RealD bytes = 1.0*osites*sizeof(siteMatrix)*npoint
+ 2.0*osites*sizeof(siteVector)*npoint;
{
tviews-=usecond();
autoView( in_v , pin, AcceleratorRead);
autoView( out_v , pout, AcceleratorWriteDiscard);
autoView( Stencil_v , Stencil, AcceleratorRead);
tviews+=usecond();
// Static and prereserve to keep UVM region live and not resized across multiple calls
ttemps-=usecond();
MultTemporaries.resize(npoint,pin.Grid());
ttemps+=usecond();
std::vector<Aview> AcceleratorViewContainer_h;
std::vector<Vview> AcceleratorVecViewContainer_h;
tviews-=usecond();
for(int p=0;p<npoint;p++) {
AcceleratorViewContainer_h.push_back( A[p].View(AcceleratorRead));
AcceleratorVecViewContainer_h.push_back(MultTemporaries[p].View(AcceleratorWrite));
}
tviews+=usecond();
static deviceVector<Aview> AcceleratorViewContainer; AcceleratorViewContainer.resize(npoint);
static deviceVector<Vview> AcceleratorVecViewContainer; AcceleratorVecViewContainer.resize(npoint);
auto Aview_p = &AcceleratorViewContainer[0];
auto Vview_p = &AcceleratorVecViewContainer[0];
tcopy-=usecond();
acceleratorCopyToDevice(&AcceleratorViewContainer_h[0],&AcceleratorViewContainer[0],npoint *sizeof(Aview));
acceleratorCopyToDevice(&AcceleratorVecViewContainer_h[0],&AcceleratorVecViewContainer[0],npoint *sizeof(Vview));
tcopy+=usecond();
tmult-=usecond();
accelerator_for(spb, osites*nbasis*npoint, Nsimd, {
typedef decltype(coalescedRead(in_v[0](0))) calcComplex;
int32_t ss = spb/(nbasis*npoint);
int32_t bp = spb%(nbasis*npoint);
int32_t point= bp/nbasis;
int32_t b = bp%nbasis;
auto SE = Stencil_v.GetEntry(point,ss);
auto nbr = coalescedReadGeneralPermute(in_v[SE->_offset],SE->_permute,Nd);
auto res = coalescedRead(Aview_p[point][ss](0,b))*nbr(0);
for(int bb=1;bb<nbasis;bb++) {
res = res + coalescedRead(Aview_p[point][ss](bb,b))*nbr(bb);
}
coalescedWrite(Vview_p[point][ss](b),res);
});
tmult2-=usecond();
accelerator_for(sb, osites*nbasis, Nsimd, {
int ss = sb/nbasis;
int b = sb%nbasis;
auto res = coalescedRead(Vview_p[0][ss](b));
for(int point=1;point<npoint;point++){
res = res + coalescedRead(Vview_p[point][ss](b));
}
coalescedWrite(out_v[ss](b),res);
});
tmult2+=usecond();
tmult+=usecond();
for(int p=0;p<npoint;p++) {
AcceleratorViewContainer_h[p].ViewClose();
AcceleratorVecViewContainer_h[p].ViewClose();
}
}
text-=usecond();
out = Cell.Extract(pout);
text+=usecond();
ttot+=usecond();
std::cout << GridLogPerformance<<"Coarse 1rhs Mult Aviews "<<tviews<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult exch "<<texch<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult mult "<<tmult<<" us"<<std::endl;
std::cout << GridLogPerformance<<" of which mult2 "<<tmult2<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult ext "<<text<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult temps "<<ttemps<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult copy "<<tcopy<<" us"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Mult tot "<<ttot<<" us"<<std::endl;
// std::cout << GridLogPerformance<<std::endl;
std::cout << GridLogPerformance<<"Coarse Kernel flops "<< flops<<std::endl;
std::cout << GridLogPerformance<<"Coarse Kernel flop/s "<< flops/tmult<<" mflop/s"<<std::endl;
std::cout << GridLogPerformance<<"Coarse Kernel bytes/s "<< bytes/tmult<<" MB/s"<<std::endl;
std::cout << GridLogPerformance<<"Coarse overall flops/s "<< flops/ttot<<" mflop/s"<<std::endl;
std::cout << GridLogPerformance<<"Coarse total bytes "<< bytes/1e6<<" MB"<<std::endl;
};
void PopulateAdag(void)
{
for(int64_t bidx=0;bidx<CoarseGrid()->gSites() ;bidx++){
Coordinate bcoor;
CoarseGrid()->GlobalIndexToGlobalCoor(bidx,bcoor);
for(int p=0;p<geom.npoint;p++){
Coordinate scoor = bcoor;
for(int mu=0;mu<bcoor.size();mu++){
int L = CoarseGrid()->GlobalDimensions()[mu];
scoor[mu] = (bcoor[mu] - geom.shifts[p][mu] + L) % L; // Modulo arithmetic
}
// Flip to poke/peekLocalSite and not too bad
auto link = peekSite(_A[p],scoor);
int pp = geom.Reverse(p);
pokeSite(adj(link),_Adag[pp],bcoor);
}
}
}
/////////////////////////////////////////////////////////////
//
// A) Only reduced flops option is to use a padded cell of depth 4
// and apply MpcDagMpc in the padded cell.
//
// Makes for ONE application of MpcDagMpc per vector instead of 30 or 80.
// With the effective cell size around (B+8)^4 perhaps 12^4/4^4 ratio
// Cost is 81x more, same as stencil size.
//
// But: can eliminate comms and do as local dirichlet.
//
// Local exchange gauge field once.
// Apply to all vectors, local only computation.
// Must exchange ghost subcells in reverse process of PaddedCell to take inner products
//
// B) Can reduce cost: pad by 1, apply Deo (4^4+6^4+8^4+8^4 )/ (4x 4^4)
// pad by 2, apply Doe
// pad by 3, apply Deo
// then break out 8x directions; cost is ~10x MpcDagMpc per vector
//
// => almost factor of 10 in setup cost, excluding data rearrangement
//
// Intermediates -- ignore the corner terms, leave approximate and force Hermitian
// Intermediates -- pad by 2 and apply 1+8+24 = 33 times.
/////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////
// BFM HDCG style approach: Solve a system of equations to get Aij
//////////////////////////////////////////////////////////
/*
* Here, k,l index which possible shift within the 3^Nd "ball" connected by MdagM.
*
* conj(phases[block]) proj[k][ block*Nvec+j ] = \sum_ball e^{i q_k . delta} < phi_{block,j} | MdagM | phi_{(block+delta),i} >
* = \sum_ball e^{iqk.delta} A_ji
*
* Must invert matrix M_k,l = e^[i q_k . delta_l]
*
* Where q_k = delta_k . (2*M_PI/global_nb[mu])
*/
#if 0
void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop,
Aggregation<Fobj,CComplex,nbasis> & Subspace)
{
std::cout << GridLogMessage<< "GeneralCoarsenMatrix "<< std::endl;
GridBase *grid = FineGrid();
RealD tproj=0.0;
RealD teigen=0.0;
RealD tmat=0.0;
RealD tphase=0.0;
RealD tinv=0.0;
/////////////////////////////////////////////////////////////
// Orthogonalise the subblocks over the basis
/////////////////////////////////////////////////////////////
CoarseScalar InnerProd(CoarseGrid());
blockOrthogonalise(InnerProd,Subspace.subspace);
const int npoint = geom.npoint;
Coordinate clatt = CoarseGrid()->GlobalDimensions();
int Nd = CoarseGrid()->Nd();
/*
* Here, k,l index which possible momentum/shift within the N-points connected by MdagM.
* Matrix index i is mapped to this shift via
* geom.shifts[i]
*
* conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]
* = \sum_{l in ball} e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >
* = \sum_{l in ball} e^{iqk.delta_l} A_ji^{b.b+l}
* = M_{kl} A_ji^{b.b+l}
*
* Must assemble and invert matrix M_k,l = e^[i q_k . delta_l]
*
* Where q_k = delta_k . (2*M_PI/global_nb[mu])
*
* Then A{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j}
*/
teigen-=usecond();
Eigen::MatrixXcd Mkl = Eigen::MatrixXcd::Zero(npoint,npoint);
Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint);
ComplexD ci(0.0,1.0);
for(int k=0;k<npoint;k++){ // Loop over momenta
for(int l=0;l<npoint;l++){ // Loop over nbr relative
ComplexD phase(0.0,0.0);
for(int mu=0;mu<Nd;mu++){
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
phase=phase+TwoPiL*geom.shifts[k][mu]*geom.shifts[l][mu];
}
phase=exp(phase*ci);
Mkl(k,l) = phase;
}
}
invMkl = Mkl.inverse();
teigen+=usecond();
///////////////////////////////////////////////////////////////////////
// Now compute the matrix elements of linop between the orthonormal
// set of vectors.
///////////////////////////////////////////////////////////////////////
FineField phaV(grid); // Phased block basis vector
FineField MphaV(grid);// Matrix applied
CoarseVector coarseInner(CoarseGrid());
std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid());
std::vector<CoarseVector> FT(npoint,CoarseGrid());
for(int i=0;i<nbasis;i++){// Loop over basis vectors
std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl;
for(int p=0;p<npoint;p++){ // Loop over momenta in npoint
/////////////////////////////////////////////////////
// Stick a phase on every block
/////////////////////////////////////////////////////
tphase-=usecond();
CoarseComplexField coor(CoarseGrid());
CoarseComplexField pha(CoarseGrid()); pha=Zero();
for(int mu=0;mu<Nd;mu++){
LatticeCoordinate(coor,mu);
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
pha = pha + (TwoPiL * geom.shifts[p][mu]) * coor;
}
pha =exp(pha*ci);
phaV=Zero();
blockZAXPY(phaV,pha,Subspace.subspace[i],phaV);
tphase+=usecond();
/////////////////////////////////////////////////////////////////////
// Multiply phased subspace vector by matrix and project to subspace
// Remove local bulk phase to leave relative phases
/////////////////////////////////////////////////////////////////////
tmat-=usecond();
linop.Op(phaV,MphaV);
tmat+=usecond();
tproj-=usecond();
blockProject(coarseInner,MphaV,Subspace.subspace);
coarseInner = conjugate(pha) * coarseInner;
ComputeProj[p] = coarseInner;
tproj+=usecond();
}
tinv-=usecond();
for(int k=0;k<npoint;k++){
FT[k] = Zero();
for(int l=0;l<npoint;l++){
FT[k]= FT[k]+ invMkl(l,k)*ComputeProj[l];
}
int osites=CoarseGrid()->oSites();
autoView( A_v , _A[k], AcceleratorWrite);
autoView( FT_v , FT[k], AcceleratorRead);
accelerator_for(sss, osites, 1, {
for(int j=0;j<nbasis;j++){
A_v[sss](i,j) = FT_v[sss](j);
}
});
}
tinv+=usecond();
}
// Only needed if nonhermitian
if ( ! hermitian ) {
// std::cout << GridLogMessage<<"PopulateAdag "<<std::endl;
// PopulateAdag();
}
// Need to write something to populate Adag from A
ExchangeCoarseLinks();
std::cout << GridLogMessage<<"CoarsenOperator eigen "<<teigen<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator phase "<<tphase<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator mat "<<tmat <<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator proj "<<tproj<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator inv "<<tinv<<" us"<<std::endl;
}
#else
void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop,
Aggregation<Fobj,CComplex,nbasis> & Subspace)
{
std::cout << GridLogMessage<< "GeneralCoarsenMatrix "<< std::endl;
GridBase *grid = FineGrid();
RealD tproj=0.0;
RealD teigen=0.0;
RealD tmat=0.0;
RealD tphase=0.0;
RealD tphaseBZ=0.0;
RealD tinv=0.0;
/////////////////////////////////////////////////////////////
// Orthogonalise the subblocks over the basis
/////////////////////////////////////////////////////////////
CoarseScalar InnerProd(CoarseGrid());
blockOrthogonalise(InnerProd,Subspace.subspace);
for(int s=0;s<Subspace.subspace.size();s++){
std::cout << " subspace norm "<<norm2(Subspace.subspace[s])<<std::endl;
}
const int npoint = geom.npoint;
Coordinate clatt = CoarseGrid()->GlobalDimensions();
int Nd = CoarseGrid()->Nd();
/*
* Here, k,l index which possible momentum/shift within the N-points connected by MdagM.
* Matrix index i is mapped to this shift via
* geom.shifts[i]
*
* conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]
* = \sum_{l in ball} e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >
* = \sum_{l in ball} e^{iqk.delta_l} A_ji^{b.b+l}
* = M_{kl} A_ji^{b.b+l}
*
* Must assemble and invert matrix M_k,l = e^[i q_k . delta_l]
*
* Where q_k = delta_k . (2*M_PI/global_nb[mu])
*
* Then A{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j}
*/
teigen-=usecond();
Eigen::MatrixXcd Mkl = Eigen::MatrixXcd::Zero(npoint,npoint);
Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint);
ComplexD ci(0.0,1.0);
for(int k=0;k<npoint;k++){ // Loop over momenta
for(int l=0;l<npoint;l++){ // Loop over nbr relative
ComplexD phase(0.0,0.0);
for(int mu=0;mu<Nd;mu++){
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
phase=phase+TwoPiL*geom.shifts[k][mu]*geom.shifts[l][mu];
}
phase=exp(phase*ci);
Mkl(k,l) = phase;
std::cout<<" Mkl "<<k<<" "<<l<<" "<<phase<<std::endl;
}
}
invMkl = Mkl.inverse();
teigen+=usecond();
///////////////////////////////////////////////////////////////////////
// Now compute the matrix elements of linop between the orthonormal
// set of vectors.
///////////////////////////////////////////////////////////////////////
FineField phaV(grid); // Phased block basis vector
FineField MphaV(grid);// Matrix applied
std::vector<FineComplexField> phaF(npoint,grid);
std::vector<CoarseComplexField> pha(npoint,CoarseGrid());
CoarseVector coarseInner(CoarseGrid());
typedef typename CComplex::scalar_type SComplex;
FineComplexField one(grid); one=SComplex(1.0);
FineComplexField zz(grid); zz = Zero();
tphase=-usecond();
for(int p=0;p<npoint;p++){ // Loop over momenta in npoint
/////////////////////////////////////////////////////
// Stick a phase on every block
/////////////////////////////////////////////////////
CoarseComplexField coor(CoarseGrid());
pha[p]=Zero();
for(int mu=0;mu<Nd;mu++){
LatticeCoordinate(coor,mu);
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
pha[p] = pha[p] + (TwoPiL * geom.shifts[p][mu]) * coor;
}
pha[p] =exp(pha[p]*ci);
blockZAXPY(phaF[p],pha[p],one,zz);
}
tphase+=usecond();
std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid());
std::vector<CoarseVector> FT(npoint,CoarseGrid());
for(int i=0;i<nbasis;i++){// Loop over basis vectors
std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl;
for(int p=0;p<npoint;p++){ // Loop over momenta in npoint
tphaseBZ-=usecond();
phaV = phaF[p]*Subspace.subspace[i];
tphaseBZ+=usecond();
/////////////////////////////////////////////////////////////////////
// Multiply phased subspace vector by matrix and project to subspace
// Remove local bulk phase to leave relative phases
/////////////////////////////////////////////////////////////////////
tmat-=usecond();
linop.Op(phaV,MphaV);
tmat+=usecond();
std::cout << i << " " <<p << " MphaV "<<norm2(MphaV)<<" "<<norm2(phaV)<<std::endl;
tproj-=usecond();
blockProject(coarseInner,MphaV,Subspace.subspace);
coarseInner = conjugate(pha[p]) * coarseInner;
ComputeProj[p] = coarseInner;
tproj+=usecond();
std::cout << i << " " <<p << " ComputeProj "<<norm2(ComputeProj[p])<<std::endl;
}
tinv-=usecond();
for(int k=0;k<npoint;k++){
FT[k] = Zero();
for(int l=0;l<npoint;l++){
FT[k]= FT[k]+ invMkl(l,k)*ComputeProj[l];
std::cout << i << " " <<k <<" "<<l<< " FT "<<norm2(FT[k])<<" "<<invMkl(l,k)<<std::endl;
}
int osites=CoarseGrid()->oSites();
autoView( A_v , _A[k], AcceleratorWrite);
autoView( FT_v , FT[k], AcceleratorRead);
accelerator_for(sss, osites, 1, {
for(int j=0;j<nbasis;j++){
A_v[sss](i,j) = FT_v[sss](j);
}
});
}
tinv+=usecond();
}
// Only needed if nonhermitian
if ( ! hermitian ) {
// std::cout << GridLogMessage<<"PopulateAdag "<<std::endl;
// PopulateAdag();
}
for(int p=0;p<geom.npoint;p++){
std::cout << " _A["<<p<<"] "<<norm2(_A[p])<<std::endl;
}
// Need to write something to populate Adag from A
ExchangeCoarseLinks();
std::cout << GridLogMessage<<"CoarsenOperator eigen "<<teigen<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator phase "<<tphase<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator phaseBZ "<<tphaseBZ<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator mat "<<tmat <<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator proj "<<tproj<<" us"<<std::endl;
std::cout << GridLogMessage<<"CoarsenOperator inv "<<tinv<<" us"<<std::endl;
}
#endif
void ExchangeCoarseLinks(void){
for(int p=0;p<geom.npoint;p++){
_A[p] = Cell.ExchangePeriodic(_A[p]);
// _Adag[p]= Cell.ExchangePeriodic(_Adag[p]);
}
}
virtual void Mdiag (const Field &in, Field &out){ assert(0);};
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);};
virtual void MdirAll (const Field &in, std::vector<Field> &out){assert(0);};
};
NAMESPACE_END(Grid);
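A hedged usage sketch of the single-right-hand-side coarse operator defined above; the geometry choice, the coarse RNG and the HermOp/Aggregates objects are assumptions carried over from the sketch after the Aggregates header:
typedef GeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> LittleDiracOperator;
NextToNearestStencilGeometry4D geom(CoarseGrid);          // 2-hop coarse stencil (see Geometry.h below)
LittleDiracOperator LittleDiracOp(geom,FineGrid,CoarseGrid);
LittleDiracOp.CoarsenOperator(HermOp,Aggregates);         // assembles _A[p] via the momentum-phase inversion
LittleDiracOperator::CoarseVector c_src(CoarseGrid), c_res(CoarseGrid);
gaussian(CRNG,c_src);                                     // CRNG: a GridParallelRNG on CoarseGrid (assumed)
LittleDiracOp.M(c_src,c_res);                             // pad, halo exchange, stencil contraction, extract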

View File

@ -0,0 +1,519 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/GeneralCoarsenedMatrixMultiRHS.h
Copyright (C) 2015
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
// Fine Object == (per site) type of fine field
// nbasis == number of deflation vectors
template<class Fobj,class CComplex,int nbasis>
class MultiGeneralCoarsenedMatrix : public SparseMatrixBase<Lattice<iVector<CComplex,nbasis > > > {
public:
typedef typename CComplex::scalar_object SComplex;
typedef GeneralCoarsenedMatrix<Fobj,CComplex,nbasis> GeneralCoarseOp;
typedef MultiGeneralCoarsenedMatrix<Fobj,CComplex,nbasis> MultiGeneralCoarseOp;
typedef iVector<CComplex,nbasis > siteVector;
typedef iMatrix<CComplex,nbasis > siteMatrix;
typedef iVector<SComplex,nbasis > calcVector;
typedef iMatrix<SComplex,nbasis > calcMatrix;
typedef Lattice<iScalar<CComplex> > CoarseComplexField;
typedef Lattice<siteVector> CoarseVector;
typedef Lattice<iMatrix<CComplex,nbasis > > CoarseMatrix;
typedef iMatrix<CComplex,nbasis > Cobj;
typedef iVector<CComplex,nbasis > Cvec;
typedef Lattice< CComplex > CoarseScalar; // used for inner products on fine field
typedef Lattice<Fobj > FineField;
typedef Lattice<CComplex > FineComplexField;
typedef CoarseVector Field;
////////////////////
// Data members
////////////////////
GridCartesian * _CoarseGridMulti;
NonLocalStencilGeometry geom;
NonLocalStencilGeometry geom_srhs;
PaddedCell Cell;
GeneralLocalStencil Stencil;
deviceVector<calcVector> BLAS_B;
deviceVector<calcVector> BLAS_C;
std::vector<deviceVector<calcMatrix> > BLAS_A;
std::vector<deviceVector<ComplexD *> > BLAS_AP;
std::vector<deviceVector<ComplexD *> > BLAS_BP;
deviceVector<ComplexD *> BLAS_CP;
///////////////////////
// Interface
///////////////////////
GridBase * Grid(void) { return _CoarseGridMulti; }; // this is all the linalg routines need to know
GridCartesian * CoarseGrid(void) { return _CoarseGridMulti; }; // this is all the linalg routines need to know
// Can be used to do I/O on the operator matrices externally
void SetMatrix (int p,CoarseMatrix & A)
{
assert(A.size()==geom_srhs.npoint);
GridtoBLAS(A[p],BLAS_A[p]);
}
void GetMatrix (int p,CoarseMatrix & A)
{
assert(A.size()==geom_srhs.npoint);
BLAStoGrid(A[p],BLAS_A[p]);
}
/*
void CopyMatrix (GeneralCoarseOp &_Op)
{
for(int p=0;p<geom.npoint;p++){
auto Aup = _Op.Cell.Extract(_Op._A[p]);
//Unpadded
GridtoBLAS(Aup,BLAS_A[p]);
}
}
void CheckMatrix (GeneralCoarseOp &_Op)
{
std::cout <<"************* Checking the little direc operator mRHS"<<std::endl;
for(int p=0;p<geom.npoint;p++){
//Unpadded
auto Aup = _Op.Cell.Extract(_Op._A[p]);
auto Ack = Aup;
BLAStoGrid(Ack,BLAS_A[p]);
std::cout << p<<" Ack "<<norm2(Ack)<<std::endl;
std::cout << p<<" Aup "<<norm2(Aup)<<std::endl;
}
std::cout <<"************* "<<std::endl;
}
*/
MultiGeneralCoarsenedMatrix(NonLocalStencilGeometry &_geom,GridCartesian *CoarseGridMulti) :
_CoarseGridMulti(CoarseGridMulti),
geom_srhs(_geom),
geom(_CoarseGridMulti,_geom.hops,_geom.skip+1),
Cell(geom.Depth(),_CoarseGridMulti),
Stencil(Cell.grids.back(),geom.shifts) // padded cell stencil
{
int32_t padded_sites = Cell.grids.back()->lSites();
int32_t unpadded_sites = CoarseGridMulti->lSites();
int32_t nrhs = CoarseGridMulti->FullDimensions()[0]; // # RHS
int32_t orhs = nrhs/CComplex::Nsimd();
padded_sites = padded_sites/nrhs;
unpadded_sites = unpadded_sites/nrhs;
/////////////////////////////////////////////////
// Device data vector storage
/////////////////////////////////////////////////
BLAS_A.resize(geom.npoint);
for(int p=0;p<geom.npoint;p++){
BLAS_A[p].resize (unpadded_sites); // no ghost zone, npoint elements
}
BLAS_B.resize(nrhs *padded_sites); // includes ghost zone
BLAS_C.resize(nrhs *unpadded_sites); // no ghost zone
BLAS_AP.resize(geom.npoint);
BLAS_BP.resize(geom.npoint);
for(int p=0;p<geom.npoint;p++){
BLAS_AP[p].resize(unpadded_sites);
BLAS_BP[p].resize(unpadded_sites);
}
BLAS_CP.resize(unpadded_sites);
/////////////////////////////////////////////////
// Pointers to data
/////////////////////////////////////////////////
// Site identity mapping for A
for(int p=0;p<geom.npoint;p++){
for(int ss=0;ss<unpadded_sites;ss++){
ComplexD *ptr = (ComplexD *)&BLAS_A[p][ss];
acceleratorPut(BLAS_AP[p][ss],ptr);
}
}
// Site identity mapping for C
for(int ss=0;ss<unpadded_sites;ss++){
ComplexD *ptr = (ComplexD *)&BLAS_C[ss*nrhs];
acceleratorPut(BLAS_CP[ss],ptr);
}
// Neighbour table is more complicated
int32_t j=0; // Interior point counter (unpadded)
for(int32_t s=0;s<padded_sites;s++){ // 4 volume, padded
int ghost_zone=0;
for(int32_t point = 0 ; point < geom.npoint; point++){
int i=s*orhs*geom.npoint+point;
if( Stencil._entries[i]._wrap ) { // stencil is indexed by the oSite of the CoarseGridMulti, hence orhs factor
ghost_zone=1; // If general stencil wrapped in any direction, wrap=1
}
}
if( ghost_zone==0) {
for(int32_t point = 0 ; point < geom.npoint; point++){
int i=s*orhs*geom.npoint+point;
int32_t nbr = Stencil._entries[i]._offset*CComplex::Nsimd(); // oSite -> lSite
assert(nbr<BLAS_B.size());
ComplexD * ptr = (ComplexD *)&BLAS_B[nbr];
acceleratorPut(BLAS_BP[point][j],ptr); // neighbour indexing in ghost zone volume
}
j++;
}
}
assert(j==unpadded_sites);
}
template<class vobj> void GridtoBLAS(const Lattice<vobj> &from,deviceVector<typename vobj::scalar_object> &to)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
GridBase *Fg = from.Grid();
assert(!Fg->_isCheckerBoarded);
int nd = Fg->_ndimension;
to.resize(Fg->lSites());
Coordinate LocalLatt = Fg->LocalDimensions();
size_t nsite = 1;
for(int i=0;i<nd;i++) nsite *= LocalLatt[i];
////////////////////////////////////////////////////////////////////////////////////////////////
// do the index calc on the GPU
////////////////////////////////////////////////////////////////////////////////////////////////
Coordinate f_ostride = Fg->_ostride;
Coordinate f_istride = Fg->_istride;
Coordinate f_rdimensions = Fg->_rdimensions;
autoView(from_v,from,AcceleratorRead);
auto to_v = &to[0];
const int words=sizeof(vobj)/sizeof(vector_type);
accelerator_for(idx,nsite,1,{
Coordinate from_coor, base;
Lexicographic::CoorFromIndex(base,idx,LocalLatt);
for(int i=0;i<nd;i++){
from_coor[i] = base[i];
}
int from_oidx = 0; for(int d=0;d<nd;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]);
int from_lane = 0; for(int d=0;d<nd;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]);
const vector_type* from = (const vector_type *)&from_v[from_oidx];
scalar_type* to = (scalar_type *)&to_v[idx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], from_lane);
to[w] = stmp;
}
});
}
template<class vobj> void BLAStoGrid(Lattice<vobj> &grid,deviceVector<typename vobj::scalar_object> &in)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
GridBase *Tg = grid.Grid();
assert(!Tg->_isCheckerBoarded);
int nd = Tg->_ndimension;
assert(in.size()==Tg->lSites());
Coordinate LocalLatt = Tg->LocalDimensions();
size_t nsite = 1;
for(int i=0;i<nd;i++) nsite *= LocalLatt[i];
////////////////////////////////////////////////////////////////////////////////////////////////
// do the index calc on the GPU
////////////////////////////////////////////////////////////////////////////////////////////////
Coordinate t_ostride = Tg->_ostride;
Coordinate t_istride = Tg->_istride;
Coordinate t_rdimensions = Tg->_rdimensions;
autoView(to_v,grid,AcceleratorWrite);
auto from_v = &in[0];
const int words=sizeof(vobj)/sizeof(vector_type);
accelerator_for(idx,nsite,1,{
Coordinate to_coor, base;
Lexicographic::CoorFromIndex(base,idx,LocalLatt);
for(int i=0;i<nd;i++){
to_coor[i] = base[i];
}
int to_oidx = 0; for(int d=0;d<nd;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]);
int to_lane = 0; for(int d=0;d<nd;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]);
vector_type* to = (vector_type *)&to_v[to_oidx];
scalar_type* from = (scalar_type *)&from_v[idx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp=from[w];
putlane(to[w], stmp, to_lane);
}
});
}
void CoarsenOperator(LinearOperatorBase<Lattice<Fobj> > &linop,
Aggregation<Fobj,CComplex,nbasis> & Subspace,
GridBase *CoarseGrid)
{
std::cout << GridLogMessage<< "GeneralCoarsenMatrixMrhs "<< std::endl;
GridBase *grid = Subspace.FineGrid;
/////////////////////////////////////////////////////////////
// Orthogonalise the subblocks over the basis
/////////////////////////////////////////////////////////////
CoarseScalar InnerProd(CoarseGrid);
blockOrthogonalise(InnerProd,Subspace.subspace);
const int npoint = geom_srhs.npoint;
Coordinate clatt = CoarseGrid->GlobalDimensions();
int Nd = CoarseGrid->Nd();
/*
* Here, k,l index which possible momentum/shift within the N-points connected by MdagM.
* Matrix index i is mapped to this shift via
* geom.shifts[i]
*
* conj(pha[block]) proj[k (which mom)][j (basis vec cpt)][block]
* = \sum_{l in ball} e^{i q_k . delta_l} < phi_{block,j} | MdagM | phi_{(block+delta_l),i} >
* = \sum_{l in ball} e^{iqk.delta_l} A_ji^{b.b+l}
* = M_{kl} A_ji^{b.b+l}
*
* Must assemble and invert matrix M_k,l = e^[i q_k . delta_l]
*
* Where q_k = delta_k . (2*M_PI/global_nb[mu])
*
* Then A{ji}^{b,b+l} = M^{-1}_{lm} ComputeProj_{m,b,i,j}
*/
Eigen::MatrixXcd Mkl = Eigen::MatrixXcd::Zero(npoint,npoint);
Eigen::MatrixXcd invMkl = Eigen::MatrixXcd::Zero(npoint,npoint);
ComplexD ci(0.0,1.0);
for(int k=0;k<npoint;k++){ // Loop over momenta
for(int l=0;l<npoint;l++){ // Loop over nbr relative
ComplexD phase(0.0,0.0);
for(int mu=0;mu<Nd;mu++){
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
phase=phase+TwoPiL*geom_srhs.shifts[k][mu]*geom_srhs.shifts[l][mu];
}
phase=exp(phase*ci);
Mkl(k,l) = phase;
}
}
invMkl = Mkl.inverse();
///////////////////////////////////////////////////////////////////////
// Now compute the matrix elements of linop between the orthonormal
// set of vectors.
///////////////////////////////////////////////////////////////////////
FineField phaV(grid); // Phased block basis vector
FineField MphaV(grid);// Matrix applied
std::vector<FineComplexField> phaF(npoint,grid);
std::vector<CoarseComplexField> pha(npoint,CoarseGrid);
CoarseVector coarseInner(CoarseGrid);
typedef typename CComplex::scalar_type SComplex;
FineComplexField one(grid); one=SComplex(1.0);
FineComplexField zz(grid); zz = Zero();
for(int p=0;p<npoint;p++){ // Loop over momenta in npoint
/////////////////////////////////////////////////////
// Stick a phase on every block
/////////////////////////////////////////////////////
CoarseComplexField coor(CoarseGrid);
pha[p]=Zero();
for(int mu=0;mu<Nd;mu++){
LatticeCoordinate(coor,mu);
RealD TwoPiL = M_PI * 2.0/ clatt[mu];
pha[p] = pha[p] + (TwoPiL * geom_srhs.shifts[p][mu]) * coor;
}
pha[p] =exp(pha[p]*ci);
blockZAXPY(phaF[p],pha[p],one,zz);
}
// Could save on storage here
std::vector<CoarseMatrix> _A;
_A.resize(geom_srhs.npoint,CoarseGrid);
std::vector<CoarseVector> ComputeProj(npoint,CoarseGrid);
CoarseVector FT(CoarseGrid);
for(int i=0;i<nbasis;i++){// Loop over basis vectors
std::cout << GridLogMessage<< "CoarsenMatrixColoured vec "<<i<<"/"<<nbasis<< std::endl;
for(int p=0;p<npoint;p++){ // Loop over momenta in npoint
phaV = phaF[p]*Subspace.subspace[i];
/////////////////////////////////////////////////////////////////////
// Multiply phased subspace vector by matrix and project to subspace
// Remove local bulk phase to leave relative phases
/////////////////////////////////////////////////////////////////////
linop.Op(phaV,MphaV);
// Fixme, could use batched block projector here
blockProject(coarseInner,MphaV,Subspace.subspace);
coarseInner = conjugate(pha[p]) * coarseInner;
ComputeProj[p] = coarseInner;
}
for(int k=0;k<npoint;k++){
FT = Zero();
for(int l=0;l<npoint;l++){
FT= FT+ invMkl(l,k)*ComputeProj[l];
}
int osites=CoarseGrid->oSites();
autoView( A_v , _A[k], AcceleratorWrite);
autoView( FT_v , FT, AcceleratorRead);
accelerator_for(sss, osites, 1, {
for(int j=0;j<nbasis;j++){
A_v[sss](i,j) = FT_v[sss](j);
}
});
}
}
// Only needed if nonhermitian
// if ( ! hermitian ) {
// std::cout << GridLogMessage<<"PopulateAdag "<<std::endl;
// PopulateAdag();
// }
// Need to write something to populate Adag from A
for(int p=0;p<geom_srhs.npoint;p++){
GridtoBLAS(_A[p],BLAS_A[p]);
}
/*
Grid : Message : 11698.730546 s : CoarsenOperator eigen 1334 us
Grid : Message : 11698.730563 s : CoarsenOperator phase 34729 us
Grid : Message : 11698.730565 s : CoarsenOperator phaseBZ 2423814 us
Grid : Message : 11698.730566 s : CoarsenOperator mat 127890998 us
Grid : Message : 11698.730567 s : CoarsenOperator proj 515840840 us
Grid : Message : 11698.730568 s : CoarsenOperator inv 103948313 us
Takes 600s to compute matrix elements, DOMINATED by the block project.
Easy to speed up with the batched block project.
Store npoint vectors, get npoint x Nbasis block projection, and 81 fold faster.
*/
}
void Mdag(const CoarseVector &in, CoarseVector &out)
{
this->M(in,out);
}
void M (const CoarseVector &in, CoarseVector &out)
{
// std::cout << GridLogMessage << "New Mrhs coarse"<<std::endl;
conformable(CoarseGrid(),in.Grid());
conformable(in.Grid(),out.Grid());
out.Checkerboard() = in.Checkerboard();
RealD t_tot;
RealD t_exch;
RealD t_GtoB;
RealD t_BtoG;
RealD t_mult;
t_tot=-usecond();
CoarseVector tin=in;
t_exch=-usecond();
CoarseVector pin = Cell.ExchangePeriodic(tin); //padded input
t_exch+=usecond();
CoarseVector pout(pin.Grid());
int npoint = geom.npoint;
typedef calcMatrix* Aview;
typedef LatticeView<Cvec> Vview;
const int Nsimd = CComplex::Nsimd();
int64_t nrhs =pin.Grid()->GlobalDimensions()[0];
assert(nrhs>=1);
RealD flops,bytes;
int64_t osites=in.Grid()->oSites(); // unpadded
int64_t unpadded_vol = CoarseGrid()->lSites()/nrhs;
flops = 1.0* npoint * nbasis * nbasis * 8.0 * osites * CComplex::Nsimd();
bytes = 1.0*osites*sizeof(siteMatrix)*npoint/pin.Grid()->GlobalDimensions()[0]
+ 2.0*osites*sizeof(siteVector)*npoint;
t_GtoB=-usecond();
GridtoBLAS(pin,BLAS_B);
t_GtoB+=usecond();
GridBLAS BLAS;
t_mult=-usecond();
for(int p=0;p<geom.npoint;p++){
RealD c = 1.0;
if (p==0) c = 0.0;
ComplexD beta(c);
BLAS.gemmBatched(nbasis,nrhs,nbasis,
ComplexD(1.0),
BLAS_AP[p],
BLAS_BP[p],
ComplexD(c),
BLAS_CP);
}
BLAS.synchronise();
t_mult+=usecond();
t_BtoG=-usecond();
BLAStoGrid(out,BLAS_C);
t_BtoG+=usecond();
t_tot+=usecond();
/*
std::cout << GridLogMessage << "New Mrhs coarse DONE "<<std::endl;
std::cout << GridLogMessage<<"Coarse Mult exch "<<t_exch<<" us"<<std::endl;
std::cout << GridLogMessage<<"Coarse Mult mult "<<t_mult<<" us"<<std::endl;
std::cout << GridLogMessage<<"Coarse Mult GtoB "<<t_GtoB<<" us"<<std::endl;
std::cout << GridLogMessage<<"Coarse Mult BtoG "<<t_BtoG<<" us"<<std::endl;
std::cout << GridLogMessage<<"Coarse Mult tot "<<t_tot<<" us"<<std::endl;
std::cout << GridLogMessage<<std::endl;
std::cout << GridLogMessage<<"Coarse Kernel flops "<< flops<<std::endl;
std::cout << GridLogMessage<<"Coarse Kernel flop/s "<< flops/t_mult<<" mflop/s"<<std::endl;
std::cout << GridLogMessage<<"Coarse Kernel bytes/s "<< bytes/t_mult/1000<<" GB/s"<<std::endl;
std::cout << GridLogMessage<<"Coarse overall flops/s "<< flops/t_tot<<" mflop/s"<<std::endl;
*/
// std::cout << GridLogMessage<<"Coarse total bytes "<< bytes/1e6<<" MB"<<std::endl;
};
virtual void Mdiag (const Field &in, Field &out){ assert(0);};
virtual void Mdir (const Field &in, Field &out,int dir, int disp){assert(0);};
virtual void MdirAll (const Field &in, std::vector<Field> &out){assert(0);};
};
NAMESPACE_END(Grid);
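A hedged sketch of driving the multi-RHS variant above; the five-dimensional coarse-grid helper and the parameter values are assumptions — the class only requires that the fastest dimension of the multi-RHS grid be the number of right-hand sides:
int nrhs = 12;                                            // illustrative
GridCartesian *CoarseMrhs = SpaceTimeGrid::makeFiveDimGrid(nrhs,CoarseGrid); // assumed helper for the mRHS grid
typedef MultiGeneralCoarsenedMatrix<vSpinColourVector,vTComplex,nbasis> MrhsCoarseOp;
MrhsCoarseOp mrhsOp(geom,CoarseMrhs);                     // geom: the single-RHS geometry built on CoarseGrid
mrhsOp.CoarsenOperator(HermOp,Aggregates,CoarseGrid);     // fills BLAS_A via GridtoBLAS
MrhsCoarseOp::CoarseVector src(CoarseMrhs), res(CoarseMrhs);
gaussian(CRNGmrhs,src);                                   // RNG on the mRHS grid (assumed)
mrhsOp.M(src,res);                                        // GridtoBLAS -> batched GEMM per stencil point -> BLAStoGrid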

View File

@ -0,0 +1,238 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/algorithms/GeneralCoarsenedMatrix.h
Copyright (C) 2015
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
NAMESPACE_BEGIN(Grid);
/////////////////////////////////////////////////////////////////
// Geometry class in cartesian case
/////////////////////////////////////////////////////////////////
class Geometry {
public:
int npoint;
int base;
std::vector<int> directions ;
std::vector<int> displacements;
std::vector<int> points_dagger;
Geometry(int _d) {
base = (_d==5) ? 1:0;
// make coarse grid stencil for 4d , not 5d
if ( _d==5 ) _d=4;
npoint = 2*_d+1;
directions.resize(npoint);
displacements.resize(npoint);
points_dagger.resize(npoint);
for(int d=0;d<_d;d++){
directions[d ] = d+base;
directions[d+_d] = d+base;
displacements[d ] = +1;
displacements[d+_d]= -1;
points_dagger[d ] = d+_d;
points_dagger[d+_d] = d;
}
directions [2*_d]=0;
displacements[2*_d]=0;
points_dagger[2*_d]=2*_d;
}
int point(int dir, int disp) {
assert(disp == -1 || disp == 0 || disp == 1);
assert(base+0 <= dir && dir < base+4);
// directions faster index = new indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 1 2 3 0 1 2 3 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 2 3 4 1 2 3 4 0
// disp +1 +1 +1 +1 -1 -1 -1 -1 0
// displacements faster index = old indexing
// 4d (base = 0):
// point 0 1 2 3 4 5 6 7 8
// dir 0 0 1 1 2 2 3 3 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
// 5d (base = 1):
// point 0 1 2 3 4 5 6 7 8
// dir 1 1 2 2 3 3 4 4 0
// disp +1 -1 +1 -1 +1 -1 +1 -1 0
if(dir == 0 and disp == 0)
return 8;
else // New indexing
return (1 - disp) / 2 * 4 + dir - base;
// else // Old indexing
// return (4 * (dir - base) + 1 - disp) / 2;
}
};
/////////////////////////////////////////////////////////////////
// Less local equivalent of Geometry class in cartesian case
/////////////////////////////////////////////////////////////////
class NonLocalStencilGeometry {
public:
// int depth;
int skip;
int hops;
int npoint;
std::vector<Coordinate> shifts;
Coordinate stencil_size;
Coordinate stencil_lo;
Coordinate stencil_hi;
GridCartesian *grid;
GridCartesian *Grid() {return grid;};
int Depth(void){return 1;}; // Ghost zone depth
int Hops(void){return hops;}; // # of hops=> level of corner fill in in stencil
int DimSkip(void){return skip;};
virtual ~NonLocalStencilGeometry() {};
int Reverse(int point)
{
int Nd = Grid()->Nd();
Coordinate shft = shifts[point];
Coordinate rev(Nd);
for(int mu=0;mu<Nd;mu++) rev[mu]= -shft[mu];
for(int p=0;p<npoint;p++){
if(rev==shifts[p]){
return p;
}
}
assert(0);
return -1;
}
void BuildShifts(void)
{
this->shifts.resize(0);
int Nd = this->grid->Nd();
int dd = this->DimSkip();
for(int s0=this->stencil_lo[dd+0];s0<=this->stencil_hi[dd+0];s0++){
for(int s1=this->stencil_lo[dd+1];s1<=this->stencil_hi[dd+1];s1++){
for(int s2=this->stencil_lo[dd+2];s2<=this->stencil_hi[dd+2];s2++){
for(int s3=this->stencil_lo[dd+3];s3<=this->stencil_hi[dd+3];s3++){
Coordinate sft(Nd,0);
sft[dd+0] = s0;
sft[dd+1] = s1;
sft[dd+2] = s2;
sft[dd+3] = s3;
int nhops = abs(s0)+abs(s1)+abs(s2)+abs(s3);
if(nhops<=this->hops) this->shifts.push_back(sft);
}}}}
this->npoint = this->shifts.size();
std::cout << GridLogMessage << "NonLocalStencilGeometry has "<< this->npoint << " terms in stencil "<<std::endl;
}
NonLocalStencilGeometry(GridCartesian *_coarse_grid,int _hops,int _skip) : grid(_coarse_grid), hops(_hops), skip(_skip)
{
Coordinate latt = grid->GlobalDimensions();
stencil_size.resize(grid->Nd());
stencil_lo.resize(grid->Nd());
stencil_hi.resize(grid->Nd());
for(int d=0;d<grid->Nd();d++){
if ( latt[d] == 1 ) {
stencil_lo[d] = 0;
stencil_hi[d] = 0;
stencil_size[d]= 1;
} else if ( latt[d] == 2 ) {
stencil_lo[d] = -1;
stencil_hi[d] = 0;
stencil_size[d]= 2;
} else if ( latt[d] > 2 ) {
stencil_lo[d] = -1;
stencil_hi[d] = 1;
stencil_size[d]= 3;
}
}
this->BuildShifts();
};
};
// Need to worry about red-black now
class NonLocalStencilGeometry4D : public NonLocalStencilGeometry {
public:
virtual int DerivedDimSkip(void) { return 0;};
NonLocalStencilGeometry4D(GridCartesian *Coarse,int _hops) : NonLocalStencilGeometry(Coarse,_hops,0) { };
virtual ~NonLocalStencilGeometry4D() {};
};
class NonLocalStencilGeometry5D : public NonLocalStencilGeometry {
public:
virtual int DerivedDimSkip(void) { return 1; };
NonLocalStencilGeometry5D(GridCartesian *Coarse,int _hops) : NonLocalStencilGeometry(Coarse,_hops,1) { };
virtual ~NonLocalStencilGeometry5D() {};
};
/*
* Bunch of different options classes
*/
class NextToNextToNextToNearestStencilGeometry4D : public NonLocalStencilGeometry4D {
public:
NextToNextToNextToNearestStencilGeometry4D(GridCartesian *Coarse) : NonLocalStencilGeometry4D(Coarse,4)
{
};
};
class NextToNextToNextToNearestStencilGeometry5D : public NonLocalStencilGeometry5D {
public:
NextToNextToNextToNearestStencilGeometry5D(GridCartesian *Coarse) : NonLocalStencilGeometry5D(Coarse,4)
{
};
};
class NextToNearestStencilGeometry4D : public NonLocalStencilGeometry4D {
public:
NextToNearestStencilGeometry4D(GridCartesian *Coarse) : NonLocalStencilGeometry4D(Coarse,2)
{
};
};
class NextToNearestStencilGeometry5D : public NonLocalStencilGeometry5D {
public:
NextToNearestStencilGeometry5D(GridCartesian *Coarse) : NonLocalStencilGeometry5D(Coarse,2)
{
};
};
class NearestStencilGeometry4D : public NonLocalStencilGeometry4D {
public:
NearestStencilGeometry4D(GridCartesian *Coarse) : NonLocalStencilGeometry4D(Coarse,1)
{
};
};
class NearestStencilGeometry5D : public NonLocalStencilGeometry5D {
public:
NearestStencilGeometry5D(GridCartesian *Coarse) : NonLocalStencilGeometry5D(Coarse,1)
{
};
};
NAMESPACE_END(Grid);
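A brief worked example of the index conventions above (illustrative only); CoarseGrid is assumed to be a GridCartesian with every dimension larger than two:
Geometry g(4);                                   // 4d: base=0, npoint=9
int p = g.point(2,-1);                           // (1-(-1))/2*4 + 2 - 0 = 6, matching the table above
// NonLocalStencilGeometry keeps every shift with entries in {-1,0,+1} and L1 norm <= hops:
//   hops=1 -> 1+8 = 9 points, hops=2 -> 1+8+24 = 33 points, hops=4 -> the full 3^4 = 81 point ball
NearestStencilGeometry4D nearest(CoarseGrid);                 // 9-point coarse stencil
NextToNextToNextToNearestStencilGeometry4D ball(CoarseGrid);  // 81-point ball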

View File

@ -0,0 +1,34 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: Grid/algorithms/multigrid/MultiGrid.h
Copyright (C) 2023
Author: Peter Boyle <pboyle@bnl.gov>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution directory
*************************************************************************************/
/* END LEGAL */
#pragma once
#include <Grid/algorithms/multigrid/Aggregates.h>
#include <Grid/algorithms/multigrid/Geometry.h>
#include <Grid/algorithms/multigrid/CoarsenedMatrix.h>
#include <Grid/algorithms/multigrid/GeneralCoarsenedMatrix.h>
#include <Grid/algorithms/multigrid/GeneralCoarsenedMatrixMultiRHS.h>

View File

@ -175,8 +175,56 @@ template<class T> using cshiftAllocator = std::allocator<T>;
template<class T> using Vector = std::vector<T,uvmAllocator<T> >;
template<class T> using stencilVector = std::vector<T,alignedAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;
template<class T> using commVector = std::vector<T,devAllocator<T> >;
template<class T> using deviceVector = std::vector<T,devAllocator<T> >;
template<class T> using cshiftVector = std::vector<T,cshiftAllocator<T> >;
/*
template<class T> class vecView
{
protected:
T * data;
uint64_t size;
ViewMode mode;
void * cpu_ptr;
public:
accelerator_inline T & operator[](size_t i) const { return this->data[i]; };
vecView(std::vector<T> &refer_to_me,ViewMode _mode)
{
cpu_ptr = &refer_to_me[0];
size = refer_to_me.size();
mode = _mode;
data =(T *) MemoryManager::ViewOpen(cpu_ptr,
size*sizeof(T),
mode,
AdviseDefault);
}
void ViewClose(void)
{ // Inform the manager
MemoryManager::ViewClose(this->cpu_ptr,this->mode);
}
};
template<class T> vecView<T> VectorView(std::vector<T> &vec,ViewMode _mode)
{
vecView<T> ret(vec,_mode); // does the open
return ret; // must be closed
}
// Little autoscope assister
template<class View>
class VectorViewCloser
{
View v; // Take a copy of view and call view close when I go out of scope automatically
public:
VectorViewCloser(View &_v) : v(_v) {};
~VectorViewCloser() { auto ptr = v.cpu_ptr; v.ViewClose(); MemoryManager::NotifyDeletion(ptr);}
};
#define autoVecView(v_v,v,mode) \
auto v_v = VectorView(v,mode); \
ViewCloser<decltype(v_v)> _autoView##v_v(v_v);
*/
NAMESPACE_END(Grid);
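A hedged note on the deviceVector alias introduced above: its storage is device-resident, so in this sketch it is filled either element-wise with acceleratorPut or in bulk with acceleratorCopyToDevice (names and sizes illustrative):
int N = 1024;
deviceVector<ComplexD> devbuf(N);                          // device-side storage
std::vector<ComplexD>  hostbuf(N,ComplexD(1.0,0.0));       // ordinary host storage
acceleratorCopyToDevice(&hostbuf[0],&devbuf[0],N*sizeof(ComplexD)); // host -> device bulk copy
acceleratorPut(devbuf[0],ComplexD(2.0,0.0));               // poke a single device element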

View File

@ -209,9 +209,9 @@ private:
static void CpuViewClose(uint64_t Ptr);
static uint64_t CpuViewOpen(uint64_t CpuPtr,size_t bytes,ViewMode mode,ViewAdvise hint);
#endif
static void NotifyDeletion(void * CpuPtr);
public:
static void NotifyDeletion(void * CpuPtr);
static void Print(void);
static void PrintAll(void);
static void PrintState( void* CpuPtr);

View File

@ -8,7 +8,7 @@ NAMESPACE_BEGIN(Grid);
static char print_buffer [ MAXLINE ];
#define mprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogMemory << print_buffer;
#define dprintf(...) snprintf (print_buffer,MAXLINE, __VA_ARGS__ ); std::cout << GridLogDebug << print_buffer;
//#define dprintf(...)
@ -111,7 +111,7 @@ void MemoryManager::AccDiscard(AcceleratorViewEntry &AccCache)
///////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
mprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
dprintf("MemoryManager: Discard(%lx) %lx\n",(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr);
assert(AccCache.accLock==0);
assert(AccCache.cpuLock==0);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
@ -141,7 +141,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
///////////////////////////////////////////////////////////////////////////
assert(AccCache.state!=Empty);
mprintf("MemoryManager: Evict cpu %lx acc %lx cpuLock %ld accLock %ld\n",
mprintf("MemoryManager: Evict CpuPtr %lx AccPtr %lx cpuLock %ld accLock %ld\n",
(uint64_t)AccCache.CpuPtr,(uint64_t)AccCache.AccPtr,
(uint64_t)AccCache.cpuLock,(uint64_t)AccCache.accLock);
if (AccCache.accLock!=0) return;
@ -155,7 +155,7 @@ void MemoryManager::Evict(AcceleratorViewEntry &AccCache)
AccCache.AccPtr=(uint64_t)NULL;
AccCache.state=CpuDirty; // CPU primary now
DeviceBytes -=AccCache.bytes;
dprintf("MemoryManager: Free(%lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
dprintf("MemoryManager: Free(AccPtr %lx) footprint now %ld \n",(uint64_t)AccCache.AccPtr,DeviceBytes);
}
// uint64_t CpuPtr = AccCache.CpuPtr;
DeviceEvictions++;
@ -169,7 +169,7 @@ void MemoryManager::Flush(AcceleratorViewEntry &AccCache)
assert(AccCache.AccPtr!=(uint64_t)NULL);
assert(AccCache.CpuPtr!=(uint64_t)NULL);
acceleratorCopyFromDevice((void *)AccCache.AccPtr,(void *)AccCache.CpuPtr,AccCache.bytes);
mprintf("MemoryManager: Flush %lx -> %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
mprintf("MemoryManager: acceleratorCopyFromDevice Flush AccPtr %lx -> CpuPtr %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
DeviceToHostBytes+=AccCache.bytes;
DeviceToHostXfer++;
AccCache.state=Consistent;
@ -184,7 +184,7 @@ void MemoryManager::Clone(AcceleratorViewEntry &AccCache)
AccCache.AccPtr=(uint64_t)AcceleratorAllocate(AccCache.bytes);
DeviceBytes+=AccCache.bytes;
}
mprintf("MemoryManager: Clone %lx <- %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
mprintf("MemoryManager: acceleratorCopyToDevice Clone AccPtr %lx <- CpuPtr %lx\n",(uint64_t)AccCache.AccPtr,(uint64_t)AccCache.CpuPtr); fflush(stdout);
acceleratorCopyToDevice((void *)AccCache.CpuPtr,(void *)AccCache.AccPtr,AccCache.bytes);
HostToDeviceBytes+=AccCache.bytes;
HostToDeviceXfer++;
@ -474,6 +474,7 @@ void MemoryManager::Print(void)
std::cout << GridLogMessage << DeviceEvictions << " Evictions from device " << std::endl;
std::cout << GridLogMessage << DeviceDestroy << " Destroyed vectors on device " << std::endl;
std::cout << GridLogMessage << AccViewTable.size()<< " vectors " << LRU.size()<<" evictable"<< std::endl;
acceleratorMem();
std::cout << GridLogMessage << "--------------------------------------------" << std::endl;
}
void MemoryManager::PrintAll(void)

View File

@ -70,8 +70,8 @@ public:
Coordinate _istride; // Inner stride i.e. within simd lane
int _osites; // _isites*_osites = product(dimensions).
int _isites;
int _fsites; // _isites*_osites = product(dimensions).
int _gsites;
int64_t _fsites; // _isites*_osites = product(dimensions).
int64_t _gsites;
Coordinate _slice_block;// subslice information
Coordinate _slice_stride;
Coordinate _slice_nblock;
@ -183,7 +183,7 @@ public:
inline int Nsimd(void) const { return _isites; };// Synonymous with iSites
inline int oSites(void) const { return _osites; };
inline int lSites(void) const { return _isites*_osites; };
inline int gSites(void) const { return _isites*_osites*_Nprocessors; };
inline int64_t gSites(void) const { return (int64_t)_isites*(int64_t)_osites*(int64_t)_Nprocessors; };
inline int Nd (void) const { return _ndimension;};
inline const Coordinate LocalStarts(void) { return _lstart; };
@ -214,7 +214,7 @@ public:
////////////////////////////////////////////////////////////////
// Global addressing
////////////////////////////////////////////////////////////////
void GlobalIndexToGlobalCoor(int gidx,Coordinate &gcoor){
void GlobalIndexToGlobalCoor(int64_t gidx,Coordinate &gcoor){
assert(gidx< gSites());
Lexicographic::CoorFromIndex(gcoor,gidx,_gdimensions);
}
@ -222,7 +222,7 @@ public:
assert(lidx<lSites());
Lexicographic::CoorFromIndex(lcoor,lidx,_ldimensions);
}
void GlobalCoorToGlobalIndex(const Coordinate & gcoor,int & gidx){
void GlobalCoorToGlobalIndex(const Coordinate & gcoor,int64_t & gidx){
gidx=0;
int mult=1;
for(int mu=0;mu<_ndimension;mu++) {

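The point of widening _fsites/_gsites and the index helpers to int64_t is that the product _isites*_osites*_Nprocessors no longer fits in a 32-bit int once the global volume passes about 2.1e9 sites (for example 256^3 x 512 is roughly 8.6e9 sites). The following is a minimal standalone sketch of the same lexicographic arithmetic done with 64-bit accumulation; the function names are illustrative, not Grid's.

// Illustrative sketch only: 64-bit lexicographic indexing to avoid overflow.
#include <cstdint>
#include <vector>

int64_t globalCoorToGlobalIndex(const std::vector<int> &gcoor,
                                const std::vector<int> &gdims)
{
  int64_t gidx = 0;
  int64_t mult = 1;                    // 64-bit multiplier; a 32-bit int would overflow
  for (int mu = 0; mu < (int)gdims.size(); mu++) {
    gidx += mult * gcoor[mu];
    mult *= gdims[mu];
  }
  return gidx;
}

void globalIndexToGlobalCoor(int64_t gidx, std::vector<int> &gcoor,
                             const std::vector<int> &gdims)
{
  gcoor.resize(gdims.size());
  for (int mu = 0; mu < (int)gdims.size(); mu++) {
    gcoor[mu] = (int)(gidx % gdims[mu]);  // fastest-running dimension first
    gidx      = gidx / gdims[mu];
  }
}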
View File

@ -138,6 +138,14 @@ public:
////////////////////////////////////////////////////////////
// Face exchange, buffer swap in translational invariant way
////////////////////////////////////////////////////////////
void CommsComplete(std::vector<CommsRequest_t> &list);
void SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir);
void SendToRecvFrom(void *xmit,
int xmit_to_rank,
void *recv,

View File

@ -306,6 +306,44 @@ void CartesianCommunicator::GlobalSumVector(double *d,int N)
int ierr = MPI_Allreduce(MPI_IN_PLACE,d,N,MPI_DOUBLE,MPI_SUM,communicator);
assert(ierr==0);
}
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
MPI_Request xrq;
MPI_Request rrq;
assert(dest != _processor);
assert(from != _processor);
int tag;
tag= dir+from*32;
int ierr=MPI_Irecv(recv, bytes, MPI_CHAR,from,tag,communicator,&rrq);
assert(ierr==0);
list.push_back(rrq);
tag= dir+_processor*32;
ierr =MPI_Isend(xmit, bytes, MPI_CHAR,dest,tag,communicator,&xrq);
assert(ierr==0);
list.push_back(xrq);
}
void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list)
{
int nreq=list.size();
if (nreq==0) return;
std::vector<MPI_Status> status(nreq);
int ierr = MPI_Waitall(nreq,&list[0],&status[0]);
assert(ierr==0);
list.resize(0);
}
// Basic Halo comms primitive
void CartesianCommunicator::SendToRecvFrom(void *xmit,
int dest,

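These two primitives queue non-blocking Irecv/Isend pairs into a caller-owned request list and drain them later with a single Waitall; the Face_exchange code further down relies on exactly this to overlap the interior copy with the face communication. Below is a hedged usage sketch assuming only the interface declared in this hunk; the helper and buffer names are illustrative, not part of the patch.

// Illustrative helper: post both directions, do independent work, then wait.
void exchangeWithOverlap(Grid::CartesianCommunicator &grid,
                         void *send_buf, int xmit_to_rank,
                         void *recv_buf, int recv_from_rank,
                         int bytes, int dir)
{
  std::vector<Grid::CommsRequest_t> requests;

  // queues MPI_Irecv then MPI_Isend; both requests are appended to the list
  grid.SendToRecvFromBegin(requests, send_buf, xmit_to_rank,
                           recv_buf, recv_from_rank, bytes, dir);

  // ... independent work here (e.g. localCopyRegion of the interior) ...

  // MPI_Waitall over everything queued; the list is then cleared
  grid.CommsComplete(requests);
}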
View File

@ -91,6 +91,17 @@ void CartesianCommunicator::SendToRecvFrom(void *xmit,
{
assert(0);
}
void CartesianCommunicator::CommsComplete(std::vector<CommsRequest_t> &list){ assert(0);}
void CartesianCommunicator::SendToRecvFromBegin(std::vector<CommsRequest_t> &list,
void *xmit,
int dest,
void *recv,
int from,
int bytes,int dir)
{
assert(0);
}
void CartesianCommunicator::AllToAll(int dim,void *in,void *out,uint64_t words,uint64_t bytes)
{
bcopy(in,out,bytes*words);

View File

@ -604,8 +604,8 @@ void GlobalSharedMemory::SharedMemoryAllocate(uint64_t bytes, int flags)
#ifdef GRID_SYCL_LEVEL_ZERO_IPC
typedef struct { int fd; pid_t pid ; ze_ipc_mem_handle_t ze; } clone_mem_t;
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::level_zero>(theGridAccelerator->get_context());
auto zeDevice = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_device());
auto zeContext = cl::sycl::get_native<cl::sycl::backend::ext_oneapi_level_zero>(theGridAccelerator->get_context());
ze_ipc_mem_handle_t ihandle;
clone_mem_t handle;

View File

@ -47,3 +47,4 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
#include <Grid/lattice/Lattice_transfer.h>
#include <Grid/lattice/Lattice_basis.h>
#include <Grid/lattice/Lattice_crc.h>
#include <Grid/lattice/PaddedCell.h>

View File

@ -345,7 +345,9 @@ GridUnopClass(UnaryNot, Not(a));
GridUnopClass(UnaryTrace, trace(a));
GridUnopClass(UnaryTranspose, transpose(a));
GridUnopClass(UnaryTa, Ta(a));
GridUnopClass(UnarySpTa, SpTa(a));
GridUnopClass(UnaryProjectOnGroup, ProjectOnGroup(a));
GridUnopClass(UnaryProjectOnSpGroup, ProjectOnSpGroup(a));
GridUnopClass(UnaryTimesI, timesI(a));
GridUnopClass(UnaryTimesMinusI, timesMinusI(a));
GridUnopClass(UnaryAbs, abs(a));
@ -456,7 +458,9 @@ GRID_DEF_UNOP(operator!, UnaryNot);
GRID_DEF_UNOP(trace, UnaryTrace);
GRID_DEF_UNOP(transpose, UnaryTranspose);
GRID_DEF_UNOP(Ta, UnaryTa);
GRID_DEF_UNOP(SpTa, UnarySpTa);
GRID_DEF_UNOP(ProjectOnGroup, UnaryProjectOnGroup);
GRID_DEF_UNOP(ProjectOnSpGroup, UnaryProjectOnSpGroup);
GRID_DEF_UNOP(timesI, UnaryTimesI);
GRID_DEF_UNOP(timesMinusI, UnaryTimesMinusI);
GRID_DEF_UNOP(abs, UnaryAbs); // abs overloaded in cmath C++98; DON'T do the

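Registering SpTa and ProjectOnSpGroup through GridUnopClass/GRID_DEF_UNOP makes the new Sp(2N) projections usable inside lattice expression templates, exactly like the existing Ta and ProjectOnGroup. A hedged usage sketch follows; the grid pointer and field names are illustrative.

// Hedged sketch (not from the patch): the new unops compose with other
// expression-template operations before any evaluation happens.
void exampleSpProjections(GridCartesian *grid)
{
  LatticeColourMatrixD U(grid), V(grid);
  U = Zero();  V = Zero();
  LatticeColourMatrixD A = SpTa(U * adj(V));     // algebra projection for Sp(2N)
  LatticeColourMatrixD W = ProjectOnSpGroup(U);  // project links back onto Sp(2N)
}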
View File

@ -360,7 +360,7 @@ public:
template<class vobj> std::ostream& operator<< (std::ostream& stream, const Lattice<vobj> &o){
typedef typename vobj::scalar_object sobj;
for(int g=0;g<o.Grid()->_gsites;g++){
for(int64_t g=0;g<o.Grid()->_gsites;g++){
Coordinate gcoor;
o.Grid()->GlobalIndexToGlobalCoor(g,gcoor);

View File

@ -29,7 +29,7 @@ Author: Peter Boyle <paboyle@ph.ed.ac.uk>
NAMESPACE_BEGIN(Grid);
template<class vobj> void DumpSliceNorm(std::string s,Lattice<vobj> &f,int mu=-1)
template<class vobj> void DumpSliceNorm(std::string s,const Lattice<vobj> &f,int mu=-1)
{
auto ff = localNorm2(f);
if ( mu==-1 ) mu = f.Grid()->Nd()-1;

View File

@ -203,6 +203,27 @@ template<class vobj> inline RealD norm2(const Lattice<vobj> &arg){
return real(nrm);
}
template<class Op,class T1>
inline auto norm2(const LatticeUnaryExpression<Op,T1> & expr) ->RealD
{
return norm2(closure(expr));
}
template<class Op,class T1,class T2>
inline auto norm2(const LatticeBinaryExpression<Op,T1,T2> & expr) ->RealD
{
return norm2(closure(expr));
}
template<class Op,class T1,class T2,class T3>
inline auto norm2(const LatticeTrinaryExpression<Op,T1,T2,T3> & expr) ->RealD
{
return norm2(closure(expr));
}
//The global maximum of the site norm2
template<class vobj> inline RealD maxLocalNorm2(const Lattice<vobj> &arg)
{

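The three overloads let norm2 accept an unevaluated unary, binary or trinary expression; each simply materialises the expression with closure() and defers to the lattice norm2 above. A hedged usage sketch with illustrative field names:

// Hedged sketch: the residual norm can now be taken without naming a temporary.
void exampleResidualNorm(const LatticeFermionD &x, const LatticeFermionD &y)
{
  // previously: LatticeFermionD tmp = x - y;  RealD r = norm2(tmp);
  RealD r = norm2(x - y);   // closure() is applied internally
  std::cout << "|| x - y ||^2 = " << r << std::endl;
}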
View File

@ -30,7 +30,7 @@ int getNumBlocksAndThreads(const Iterator n, const size_t sizeofsobj, Iterator &
cudaGetDevice(&device);
#endif
#ifdef GRID_HIP
hipGetDevice(&device);
auto discard=hipGetDevice(&device);
#endif
Iterator warpSize = gpu_props[device].warpSize;

View File

@ -361,9 +361,14 @@ public:
_bernoulli.resize(_vol,std::discrete_distribution<int32_t>{1,1});
_uid.resize(_vol,std::uniform_int_distribution<uint32_t>() );
}
template <class vobj,class distribution> inline void fill(Lattice<vobj> &l,std::vector<distribution> &dist){
template <class vobj,class distribution> inline void fill(Lattice<vobj> &l,std::vector<distribution> &dist)
{
if ( l.Grid()->_isCheckerBoarded ) {
Lattice<vobj> tmp(_grid);
fill(tmp,dist);
pickCheckerboard(l.Checkerboard(),l,tmp);
return;
}
typedef typename vobj::scalar_object scalar_object;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
@ -427,7 +432,7 @@ public:
#if 1
thread_for( lidx, _grid->lSites(), {
int gidx;
int64_t gidx;
int o_idx;
int i_idx;
int rank;

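With this guard, fill() called on a red-black (checkerboarded) lattice fills a temporary on the RNG's full grid and then picks out the requested parity, so gaussian()/random() can be applied directly to half-grid fields. A hedged usage sketch; the grid and field names are illustrative and the RNG is assumed to live on the full grid.

// Hedged sketch: filling an odd-parity field directly.
void exampleCheckerboardFill(GridRedBlackCartesian *FrbGrid,
                             GridParallelRNG &FullGridRNG)
{
  LatticeFermionD src_o(FrbGrid);
  src_o.Checkerboard() = Odd;
  // internally: fill a full-grid temporary, then pickCheckerboard(Odd, src_o, tmp)
  gaussian(FullGridRNG, src_o);
}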
View File

@ -66,6 +66,65 @@ inline auto TraceIndex(const Lattice<vobj> &lhs) -> Lattice<decltype(traceIndex<
return ret;
};
template<int N, class Vec>
Lattice<iScalar<iScalar<iScalar<Vec> > > > Determinant(const Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iScalar<Vec> > > > ret(grid);
typedef typename Vec::scalar_type scalar;
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<scalar, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
scalar tmp= Us()()(i,j);
ComplexD ztmp(real(tmp),imag(tmp));
EigenU(i,j)=ztmp;
}}
ComplexD detD = EigenU.determinant();
typename Vec::scalar_type det(detD.real(),detD.imag());
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
template<int N>
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > Inverse(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
iScalar<iScalar<iMatrix<ComplexD, N> > > Ui;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
Eigen::MatrixXcd EigenUinv = EigenU.inverse();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
Ui()()(i,j) = EigenUinv(i,j);
}}
pokeLocalSite(Ui,ret_v,lcoor);
});
return ret;
}
NAMESPACE_END(Grid);
#endif

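Both helpers copy each local site into an Eigen matrix on the host (thread_for over lSites with CpuRead/CpuWrite views), call Eigen's determinant or inverse, and poke the result back, so they are host-side utilities rather than accelerator kernels. A hedged usage sketch with illustrative field names:

// Hedged sketch: site-wise determinant and inverse of a colour-matrix field.
void exampleDetInv(const LatticeColourMatrixD &Umu)
{
  LatticeComplexD      det  = Determinant(Umu);  // det(U) on every site
  LatticeColourMatrixD Uinv = Inverse(Umu);      // U^{-1} on every site
  std::cout << "sum_x |det U(x)|^2 = " << norm2(det) << std::endl;
}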
View File

@ -276,18 +276,33 @@ inline void blockProject(Lattice<iVector<CComplex,nbasis > > &coarseData,
autoView( coarseData_ , coarseData, AcceleratorWrite);
autoView( ip_ , ip, AcceleratorWrite);
RealD t_IP=0;
RealD t_co=0;
RealD t_za=0;
for(int v=0;v<nbasis;v++) {
t_IP-=usecond();
blockInnerProductD(ip,Basis[v],fineDataRed); // ip = <basis|fine>
t_IP+=usecond();
t_co-=usecond();
accelerator_for( sc, coarse->oSites(), vobj::Nsimd(), {
convertType(coarseData_[sc](v),ip_[sc]);
});
t_co+=usecond();
// improve numerical stability of projection
// |fine> = |fine> - <basis|fine> |basis>
ip=-ip;
t_za-=usecond();
blockZAXPY(fineDataRed,ip,Basis[v],fineDataRed);
t_za+=usecond();
}
// std::cout << GridLogPerformance << " blockProject : blockInnerProduct : "<<t_IP<<" us"<<std::endl;
// std::cout << GridLogPerformance << " blockProject : conv : "<<t_co<<" us"<<std::endl;
// std::cout << GridLogPerformance << " blockProject : blockZaxpy : "<<t_za<<" us"<<std::endl;
}
// This only minimises data motion from CPU to GPU
// there is a chance of a better implementation that does a v x k loop of inner products to share data
// at the GPU thread level
template<class vobj,class CComplex,int nbasis,class VLattice>
inline void batchBlockProject(std::vector<Lattice<iVector<CComplex,nbasis>>> &coarseData,
const std::vector<Lattice<vobj>> &fineData,
@ -393,8 +408,15 @@ template<class vobj,class CComplex>
Lattice<dotp> coarse_inner(coarse);
// Precision promotion
RealD t;
t=-usecond();
fine_inner = localInnerProductD<vobj>(fineX,fineY);
// t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : localInnerProductD "<<t<<" us"<<std::endl;
t=-usecond();
blockSum(coarse_inner,fine_inner);
// t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : blockSum "<<t<<" us"<<std::endl;
t=-usecond();
{
autoView( CoarseInner_ , CoarseInner,AcceleratorWrite);
autoView( coarse_inner_ , coarse_inner,AcceleratorRead);
@ -402,6 +424,7 @@ template<class vobj,class CComplex>
convertType(CoarseInner_[ss], TensorRemove(coarse_inner_[ss]));
});
}
// t+=usecond(); std::cout << GridLogPerformance << " blockInnerProduct : convertType "<<t<<" us"<<std::endl;
}
@ -444,6 +467,9 @@ inline void blockNormalise(Lattice<CComplex> &ip,Lattice<vobj> &fineX)
template<class vobj>
inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
{
const int maxsubsec=256;
typedef iVector<vobj,maxsubsec> vSubsec;
GridBase * fine = fineData.Grid();
GridBase * coarse= coarseData.Grid();
@ -463,37 +489,62 @@ inline void blockSum(Lattice<vobj> &coarseData,const Lattice<vobj> &fineData)
autoView( coarseData_ , coarseData, AcceleratorWrite);
autoView( fineData_ , fineData, AcceleratorRead);
auto coarseData_p = &coarseData_[0];
auto fineData_p = &fineData_[0];
auto coarseData_p = &coarseData_[0];
auto fineData_p = &fineData_[0];
Coordinate fine_rdimensions = fine->_rdimensions;
Coordinate coarse_rdimensions = coarse->_rdimensions;
vobj zz = Zero();
accelerator_for(sc,coarse->oSites(),1,{
// Somewhat lazy calculation
// Find the biggest power of two subsection divisor less than or equal to maxsubsec
int subsec=maxsubsec;
int subvol;
subvol=blockVol/subsec;
while(subvol*subsec!=blockVol){
subsec = subsec/2;
subvol=blockVol/subsec;
};
Lattice<vSubsec> coarseTmp(coarse);
autoView( coarseTmp_, coarseTmp, AcceleratorWriteDiscard);
auto coarseTmp_p= &coarseTmp_[0];
// Sum within subsecs in a first kernel
accelerator_for(sce,subsec*coarse->oSites(),vobj::Nsimd(),{
int sc=sce/subsec;
int e=sce%subsec;
// One thread per sub block
Coordinate coor_c(_ndimension);
Lexicographic::CoorFromIndex(coor_c,sc,coarse_rdimensions); // Block coordinate
vobj cd = zz;
for(int sb=0;sb<blockVol;sb++){
auto cd = coalescedRead(zz);
for(int sb=e*subvol;sb<MIN((e+1)*subvol,blockVol);sb++){
int sf;
Coordinate coor_b(_ndimension);
Coordinate coor_f(_ndimension);
Lexicographic::CoorFromIndex(coor_b,sb,block_r); // Block sub coordinate
for(int d=0;d<_ndimension;d++) coor_f[d]=coor_c[d]*block_r[d] + coor_b[d];
Lexicographic::IndexFromCoor(coor_f,sf,fine_rdimensions);
cd=cd+fineData_p[sf];
cd=cd+coalescedRead(fineData_p[sf]);
}
coarseData_p[sc] = cd;
coalescedWrite(coarseTmp_[sc](e),cd);
});
// Sum across subsecs in a second kernel
accelerator_for(sc,coarse->oSites(),vobj::Nsimd(),{
auto cd = coalescedRead(coarseTmp_p[sc](0));
for(int e=1;e<subsec;e++){
cd=cd+coalescedRead(coarseTmp_p[sc](e));
}
coalescedWrite(coarseData_p[sc],cd);
});
return;
}
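The rewritten blockSum first sums within subsec equal sub-sections of every block (giving subsec x oSites work items and better accelerator occupancy) and then reduces across the sub-sections in a second, much smaller kernel. The divisor is the largest power of two at most maxsubsec that divides blockVol exactly; a standalone sketch of that search, with a worked value, is below (the function name is illustrative).

// Hedged sketch of the divisor search used above.
int chooseSubsec(int blockVol, int maxsubsec = 256)
{
  int subsec = maxsubsec;
  int subvol = blockVol / subsec;
  while (subvol * subsec != blockVol) {   // not yet an exact divisor
    subsec = subsec / 2;
    subvol = blockVol / subsec;
  }
  return subsec;  // e.g. blockVol=24 -> subsec=8 (subvol=3); odd blockVol -> subsec=1
}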
@ -550,7 +601,7 @@ inline void blockOrthogonalise(Lattice<CComplex> &ip,std::vector<Lattice<vobj> >
blockOrthonormalize(ip,Basis);
}
#if 0
#ifdef GRID_ACCELERATED
// TODO: CPU optimized version here
template<class vobj,class CComplex,int nbasis>
inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
@ -576,26 +627,37 @@ inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
autoView( fineData_ , fineData, AcceleratorWrite);
autoView( coarseData_ , coarseData, AcceleratorRead);
typedef LatticeView<vobj> Vview;
std::vector<Vview> AcceleratorVecViewContainer_h;
for(int v=0;v<nbasis;v++) {
AcceleratorVecViewContainer_h.push_back(Basis[v].View(AcceleratorRead));
}
static deviceVector<Vview> AcceleratorVecViewContainer; AcceleratorVecViewContainer.resize(nbasis);
acceleratorCopyToDevice(&AcceleratorVecViewContainer_h[0],&AcceleratorVecViewContainer[0],nbasis *sizeof(Vview));
auto Basis_p = &AcceleratorVecViewContainer[0];
// Loop with a cache friendly loop ordering
accelerator_for(sf,fine->oSites(),1,{
Coordinate frdimensions=fine->_rdimensions;
Coordinate crdimensions=coarse->_rdimensions;
accelerator_for(sf,fine->oSites(),vobj::Nsimd(),{
int sc;
Coordinate coor_c(_ndimension);
Coordinate coor_f(_ndimension);
Lexicographic::CoorFromIndex(coor_f,sf,fine->_rdimensions);
Lexicographic::CoorFromIndex(coor_f,sf,frdimensions);
for(int d=0;d<_ndimension;d++) coor_c[d]=coor_f[d]/block_r[d];
Lexicographic::IndexFromCoor(coor_c,sc,coarse->_rdimensions);
Lexicographic::IndexFromCoor(coor_c,sc,crdimensions);
for(int i=0;i<nbasis;i++) {
/* auto basis_ = Basis[i], );*/
if(i==0) fineData_[sf]=coarseData_[sc](i) *basis_[sf]);
else fineData_[sf]=fineData_[sf]+coarseData_[sc](i)*basis_[sf]);
}
auto sum= coarseData_(sc)(0) *Basis_p[0](sf);
for(int i=1;i<nbasis;i++) sum = sum + coarseData_(sc)(i)*Basis_p[i](sf);
coalescedWrite(fineData_[sf],sum);
});
for(int v=0;v<nbasis;v++) {
AcceleratorVecViewContainer_h[v].ViewClose();
}
return;
}
#else
// CPU version
template<class vobj,class CComplex,int nbasis,class VLattice>
inline void blockPromote(const Lattice<iVector<CComplex,nbasis > > &coarseData,
Lattice<vobj> &fineData,
@ -682,7 +744,11 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
static const int words=sizeof(vobj)/sizeof(vector_type);
const int words=sizeof(vobj)/sizeof(vector_type);
//////////////////////////////////////////////////////////////////////////////////////////
// checks should guarantee that the operations are local
//////////////////////////////////////////////////////////////////////////////////////////
GridBase *Fg = From.Grid();
GridBase *Tg = To.Grid();
@ -698,48 +764,186 @@ void localCopyRegion(const Lattice<vobj> &From,Lattice<vobj> & To,Coordinate Fro
assert(Fg->_processors[d] == Tg->_processors[d]);
}
// the above should guarantee that the operations are local
Coordinate ldf = Fg->_ldimensions;
Coordinate rdf = Fg->_rdimensions;
Coordinate isf = Fg->_istride;
Coordinate osf = Fg->_ostride;
Coordinate rdt = Tg->_rdimensions;
Coordinate ist = Tg->_istride;
Coordinate ost = Tg->_ostride;
///////////////////////////////////////////////////////////
// do the index calc on the GPU
///////////////////////////////////////////////////////////
Coordinate f_ostride = Fg->_ostride;
Coordinate f_istride = Fg->_istride;
Coordinate f_rdimensions = Fg->_rdimensions;
Coordinate t_ostride = Tg->_ostride;
Coordinate t_istride = Tg->_istride;
Coordinate t_rdimensions = Tg->_rdimensions;
autoView( t_v , To, CpuWrite);
autoView( f_v , From, CpuRead);
thread_for(idx,Fg->lSites(),{
sobj s;
Coordinate Fcoor(nd);
Coordinate Tcoor(nd);
Lexicographic::CoorFromIndex(Fcoor,idx,ldf);
int in_region=1;
for(int d=0;d<nd;d++){
if ( (Fcoor[d] < FromLowerLeft[d]) || (Fcoor[d]>=FromLowerLeft[d]+RegionSize[d]) ){
in_region=0;
size_t nsite = 1;
for(int i=0;i<nd;i++) nsite *= RegionSize[i];
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
autoView(from_v,From,AcceleratorRead);
autoView(to_v,To,AcceleratorWrite);
accelerator_for(idx,nsite,1,{
Coordinate from_coor, to_coor, base;
Lexicographic::CoorFromIndex(base,idx,RegionSize);
for(int i=0;i<nd;i++){
from_coor[i] = base[i] + FromLowerLeft[i];
to_coor[i] = base[i] + ToLowerLeft[i];
}
Tcoor[d] = ToLowerLeft[d]+ Fcoor[d]-FromLowerLeft[d];
}
if (in_region) {
#if 0
Integer idx_f = 0; for(int d=0;d<nd;d++) idx_f+=isf[d]*(Fcoor[d]/rdf[d]); // inner index from
Integer idx_t = 0; for(int d=0;d<nd;d++) idx_t+=ist[d]*(Tcoor[d]/rdt[d]); // inner index to
Integer odx_f = 0; for(int d=0;d<nd;d++) odx_f+=osf[d]*(Fcoor[d]%rdf[d]); // outer index from
Integer odx_t = 0; for(int d=0;d<nd;d++) odx_t+=ost[d]*(Tcoor[d]%rdt[d]); // outer index to
scalar_type * fp = (scalar_type *)&f_v[odx_f];
scalar_type * tp = (scalar_type *)&t_v[odx_t];
int from_oidx = 0; for(int d=0;d<nd;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]);
int from_lane = 0; for(int d=0;d<nd;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]);
int to_oidx = 0; for(int d=0;d<nd;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]);
int to_lane = 0; for(int d=0;d<nd;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]);
const vector_type* from = (const vector_type *)&from_v[from_oidx];
vector_type* to = (vector_type *)&to_v[to_oidx];
scalar_type stmp;
for(int w=0;w<words;w++){
tp[w].putlane(fp[w].getlane(idx_f),idx_t);
stmp = getlane(from[w], from_lane);
putlane(to[w], stmp, to_lane);
}
#else
peekLocalSite(s,f_v,Fcoor);
pokeLocalSite(s,t_v,Tcoor);
#endif
}
});
}
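The GPU path of localCopyRegion does the index arithmetic on the device: each coordinate inside the region is split into an outer (vectorised) site via _ostride/_rdimensions and a SIMD lane via _istride, and individual words are moved with getlane/putlane instead of peek/pokeLocalSite. Below is a standalone sketch of that decomposition, with illustrative argument names.

// Hedged sketch of the coordinate -> (outer site, SIMD lane) split used above.
void coorToOsiteLane(const std::vector<int> &coor,
                     const std::vector<int> &ostride,
                     const std::vector<int> &istride,
                     const std::vector<int> &rdimensions,
                     int &osite, int &lane)
{
  osite = 0; lane = 0;
  for (int d = 0; d < (int)coor.size(); d++) {
    osite += ostride[d] * (coor[d] % rdimensions[d]); // outer (vectorised) site index
    lane  += istride[d] * (coor[d] / rdimensions[d]); // which SIMD lane holds the value
  }
}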
template<class vobj>
void InsertSliceFast(const Lattice<vobj> &From,Lattice<vobj> & To,int slice, int orthog)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
const int words=sizeof(vobj)/sizeof(vector_type);
//////////////////////////////////////////////////////////////////////////////////////////
// checks should guarantee that the operations are local
//////////////////////////////////////////////////////////////////////////////////////////
GridBase *Fg = From.Grid();
GridBase *Tg = To.Grid();
assert(!Fg->_isCheckerBoarded);
assert(!Tg->_isCheckerBoarded);
int Nsimd = Fg->Nsimd();
int nF = Fg->_ndimension;
int nT = Tg->_ndimension;
assert(nF+1 == nT);
///////////////////////////////////////////////////////////
// do the index calc on the GPU
///////////////////////////////////////////////////////////
Coordinate f_ostride = Fg->_ostride;
Coordinate f_istride = Fg->_istride;
Coordinate f_rdimensions = Fg->_rdimensions;
Coordinate t_ostride = Tg->_ostride;
Coordinate t_istride = Tg->_istride;
Coordinate t_rdimensions = Tg->_rdimensions;
Coordinate RegionSize = Fg->_ldimensions;
size_t nsite = 1;
for(int i=0;i<nF;i++) nsite *= RegionSize[i]; // whole volume of lower dim grid
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
autoView(from_v,From,AcceleratorRead);
autoView(to_v,To,AcceleratorWrite);
accelerator_for(idx,nsite,1,{
Coordinate from_coor(nF), to_coor(nT);
Lexicographic::CoorFromIndex(from_coor,idx,RegionSize);
int j=0;
for(int i=0;i<nT;i++){
if ( i!=orthog ) {
to_coor[i] = from_coor[j];
j++;
} else {
to_coor[i] = slice;
}
}
int from_oidx = 0; for(int d=0;d<nF;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]);
int from_lane = 0; for(int d=0;d<nF;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]);
int to_oidx = 0; for(int d=0;d<nT;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]);
int to_lane = 0; for(int d=0;d<nT;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]);
const vector_type* from = (const vector_type *)&from_v[from_oidx];
vector_type* to = (vector_type *)&to_v[to_oidx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], from_lane);
putlane(to[w], stmp, to_lane);
}
});
}
template<class vobj>
void ExtractSliceFast(Lattice<vobj> &To,const Lattice<vobj> & From,int slice, int orthog)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
const int words=sizeof(vobj)/sizeof(vector_type);
//////////////////////////////////////////////////////////////////////////////////////////
// checks should guarantee that the operations are local
//////////////////////////////////////////////////////////////////////////////////////////
GridBase *Fg = From.Grid();
GridBase *Tg = To.Grid();
assert(!Fg->_isCheckerBoarded);
assert(!Tg->_isCheckerBoarded);
int Nsimd = Fg->Nsimd();
int nF = Fg->_ndimension;
int nT = Tg->_ndimension;
assert(nT+1 == nF);
///////////////////////////////////////////////////////////
// do the index calc on the GPU
///////////////////////////////////////////////////////////
Coordinate f_ostride = Fg->_ostride;
Coordinate f_istride = Fg->_istride;
Coordinate f_rdimensions = Fg->_rdimensions;
Coordinate t_ostride = Tg->_ostride;
Coordinate t_istride = Tg->_istride;
Coordinate t_rdimensions = Tg->_rdimensions;
Coordinate RegionSize = Tg->_ldimensions;
size_t nsite = 1;
for(int i=0;i<nT;i++) nsite *= RegionSize[i]; // whole volume of lower dim grid
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
autoView(from_v,From,AcceleratorRead);
autoView(to_v,To,AcceleratorWrite);
accelerator_for(idx,nsite,1,{
Coordinate from_coor(nF), to_coor(nT);
Lexicographic::CoorFromIndex(to_coor,idx,RegionSize);
int j=0;
for(int i=0;i<nF;i++){
if ( i!=orthog ) {
from_coor[i] = to_coor[j];
j++;
} else {
from_coor[i] = slice;
}
}
int from_oidx = 0; for(int d=0;d<nF;d++) from_oidx+=f_ostride[d]*(from_coor[d]%f_rdimensions[d]);
int from_lane = 0; for(int d=0;d<nF;d++) from_lane+=f_istride[d]*(from_coor[d]/f_rdimensions[d]);
int to_oidx = 0; for(int d=0;d<nT;d++) to_oidx+=t_ostride[d]*(to_coor[d]%t_rdimensions[d]);
int to_lane = 0; for(int d=0;d<nT;d++) to_lane+=t_istride[d]*(to_coor[d]/t_rdimensions[d]);
const vector_type* from = (const vector_type *)&from_v[from_oidx];
vector_type* to = (vector_type *)&to_v[to_oidx];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], from_lane);
putlane(to[w], stmp, to_lane);
}
});
}
template<class vobj>
void InsertSlice(const Lattice<vobj> &lowDim,Lattice<vobj> & higherDim,int slice, int orthog)
@ -829,7 +1033,7 @@ void ExtractSlice(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slic
}
//Can I implement with local copyregion??
template<class vobj>
void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
{
@ -850,61 +1054,18 @@ void InsertSliceLocal(const Lattice<vobj> &lowDim, Lattice<vobj> & higherDim,int
assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
}
}
// the above should guarantee that the operations are local
autoView(lowDimv,lowDim,CpuRead);
autoView(higherDimv,higherDim,CpuWrite);
thread_for(idx,lg->lSites(),{
sobj s;
Coordinate lcoor(nl);
Coordinate hcoor(nh);
lg->LocalIndexToLocalCoor(idx,lcoor);
if( lcoor[orthog] == slice_lo ) {
hcoor=lcoor;
hcoor[orthog] = slice_hi;
peekLocalSite(s,lowDimv,lcoor);
pokeLocalSite(s,higherDimv,hcoor);
}
});
Coordinate sz = lg->_ldimensions;
sz[orthog]=1;
Coordinate f_ll(nl,0); f_ll[orthog]=slice_lo;
Coordinate t_ll(nh,0); t_ll[orthog]=slice_hi;
localCopyRegion(lowDim,higherDim,f_ll,t_ll,sz);
}
template<class vobj>
void ExtractSliceLocal(Lattice<vobj> &lowDim,const Lattice<vobj> & higherDim,int slice_lo,int slice_hi, int orthog)
{
typedef typename vobj::scalar_object sobj;
GridBase *lg = lowDim.Grid();
GridBase *hg = higherDim.Grid();
int nl = lg->_ndimension;
int nh = hg->_ndimension;
assert(nl == nh);
assert(orthog<nh);
assert(orthog>=0);
for(int d=0;d<nh;d++){
if ( d!=orthog ) {
assert(lg->_processors[d] == hg->_processors[d]);
assert(lg->_ldimensions[d] == hg->_ldimensions[d]);
}
}
// the above should guarantee that the operations are local
autoView(lowDimv,lowDim,CpuWrite);
autoView(higherDimv,higherDim,CpuRead);
thread_for(idx,lg->lSites(),{
sobj s;
Coordinate lcoor(nl);
Coordinate hcoor(nh);
lg->LocalIndexToLocalCoor(idx,lcoor);
if( lcoor[orthog] == slice_lo ) {
hcoor=lcoor;
hcoor[orthog] = slice_hi;
peekLocalSite(s,higherDimv,hcoor);
pokeLocalSite(s,lowDimv,lcoor);
}
});
InsertSliceLocal(higherDim,lowDim,slice_hi,slice_lo,orthog);
}
@ -930,7 +1091,7 @@ void Replicate(const Lattice<vobj> &coarse,Lattice<vobj> & fine)
Coordinate fcoor(nd);
Coordinate ccoor(nd);
for(int g=0;g<fg->gSites();g++){
for(int64_t g=0;g<fg->gSites();g++){
fg->GlobalIndexToGlobalCoor(g,fcoor);
for(int d=0;d<nd;d++){
@ -1616,5 +1777,35 @@ void Grid_unsplit(std::vector<Lattice<Vobj> > & full,Lattice<Vobj> & split)
}
}
//////////////////////////////////////////////////////
// Faster but less accurate blockProject
//////////////////////////////////////////////////////
template<class vobj,class CComplex,int nbasis,class VLattice>
inline void blockProjectFast(Lattice<iVector<CComplex,nbasis > > &coarseData,
const Lattice<vobj> &fineData,
const VLattice &Basis)
{
GridBase * fine = fineData.Grid();
GridBase * coarse= coarseData.Grid();
Lattice<iScalar<CComplex> > ip(coarse);
autoView( coarseData_ , coarseData, AcceleratorWrite);
autoView( ip_ , ip, AcceleratorWrite);
RealD t_IP=0;
RealD t_co=0;
for(int v=0;v<nbasis;v++) {
t_IP-=usecond();
blockInnerProductD(ip,Basis[v],fineData);
t_IP+=usecond();
t_co-=usecond();
accelerator_for( sc, coarse->oSites(), vobj::Nsimd(), {
convertType(coarseData_[sc](v),ip_[sc]);
});
t_co+=usecond();
}
}
NAMESPACE_END(Grid);

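blockProjectFast differs from blockProject only in skipping the per-vector blockZAXPY that subtracts each projected component from the fine field, so it is cheaper but relies on the basis already being block-orthonormal. The wrapper below is a hedged sketch, not part of the patch, showing when each path would be chosen.

// Hedged sketch (illustrative wrapper, not part of Grid).
template<class vobj, class CComplex, int nbasis, class VLattice>
void projectToCoarse(Lattice<iVector<CComplex, nbasis>> &coarse,
                     const Lattice<vobj> &fine,
                     const VLattice &Basis,
                     bool basisIsOrthonormal)
{
  if (basisIsOrthonormal) {
    blockProjectFast(coarse, fine, Basis);  // skips the stabilising ZAXPY pass
  } else {
    blockProject(coarse, fine, Basis);      // subtracts each component as it projects
  }
}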
View File

@ -26,14 +26,214 @@ Author: Peter Boyle pboyle@bnl.gov
/* END LEGAL */
#pragma once
#include<Grid/cshift/Cshift.h>
NAMESPACE_BEGIN(Grid);
//Allow the user to specify how the C-shift is performed, e.g. to respect the appropriate boundary conditions
template<typename vobj>
struct CshiftImplBase{
virtual Lattice<vobj> Cshift(const Lattice<vobj> &in, int dir, int shift) const = 0;
virtual ~CshiftImplBase(){}
};
template<typename vobj>
struct CshiftImplDefault: public CshiftImplBase<vobj>{
Lattice<vobj> Cshift(const Lattice<vobj> &in, int dir, int shift) const override{ return Grid::Cshift(in,dir,shift); }
};
template<typename Gimpl>
struct CshiftImplGauge: public CshiftImplBase<typename Gimpl::GaugeLinkField::vector_object>{
typename Gimpl::GaugeLinkField Cshift(const typename Gimpl::GaugeLinkField &in, int dir, int shift) const override{ return Gimpl::CshiftLink(in,dir,shift); }
};
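The CshiftImpl policy classes let the padded-cell exchange (the PaddedCell class later in this file) shift with boundary-condition-aware link shifts: the default forwards to the plain Cshift, while CshiftImplGauge forwards to Gimpl::CshiftLink. The helper below is a hedged sketch, not part of the patch; Gimpl stands for whatever gauge implementation the caller uses.

// Hedged sketch (illustrative helper): pad a gauge link field while
// respecting the gauge implementation's boundary conditions.
template<class Gimpl>
typename Gimpl::GaugeLinkField padLink(const PaddedCell &cell,
                                       const typename Gimpl::GaugeLinkField &U)
{
  // every internal shift goes through Gimpl::CshiftLink
  return cell.Exchange(U, CshiftImplGauge<Gimpl>());
}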
/*
*
* TODO:
* -- address elementsof vobj via thread block in Scatter/Gather
* -- overlap comms with motion in Face_exchange
*
*/
template<class vobj> inline void ScatterSlice(const cshiftVector<vobj> &buf,
Lattice<vobj> &lat,
int x,
int dim,
int offset=0)
{
const int Nsimd=vobj::Nsimd();
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
GridBase *grid = lat.Grid();
Coordinate simd = grid->_simd_layout;
int Nd = grid->Nd();
int block = grid->_slice_block[dim];
int stride = grid->_slice_stride[dim];
int nblock = grid->_slice_nblock[dim];
int rd = grid->_rdimensions[dim];
int ox = x%rd;
int ix = x/rd;
int isites = 1; for(int d=0;d<Nd;d++) if( d!=dim) isites*=simd[d];
Coordinate rsimd= simd; rsimd[dim]=1; // maybe reduce Nsimd
int rNsimd = 1; for(int d=0;d<Nd;d++) rNsimd*=rsimd[d];
int rNsimda= Nsimd/simd[dim]; // should be equal
assert(rNsimda==rNsimd);
int face_ovol=block*nblock;
// assert(buf.size()==face_ovol*rNsimd);
/*This will work GPU ONLY unless rNsimd is put in the lexico index*/
//Let's make it work on GPU and then make a special accelerator_for that
//doesn't hide the SIMD direction and keeps explicit in the threadIdx
//for cross platform
// FIXME -- can put internal indices into thread loop
auto buf_p = & buf[0];
autoView(lat_v, lat, AcceleratorWrite);
accelerator_for(ss, face_ovol/simd[dim],Nsimd,{
// scalar layout won't coalesce
#ifdef GRID_SIMT
{
int blane=acceleratorSIMTlane(Nsimd); // buffer lane
#else
for(int blane=0;blane<Nsimd;blane++) {
#endif
int olane=blane%rNsimd; // reduced lattice lane
int obit =blane/rNsimd;
///////////////////////////////////////////////////////////////
// osite -- potentially one bit from simd in the buffer: (ss<<1)|obit
///////////////////////////////////////////////////////////////
int ssp = ss*simd[dim]+obit;
int b = ssp%block;
int n = ssp/block;
int osite= b+n*stride + ox*block;
////////////////////////////////////////////
// isite -- map lane within buffer to lane within lattice
////////////////////////////////////////////
Coordinate icoor;
int lane;
Lexicographic::CoorFromIndex(icoor,olane,rsimd);
icoor[dim]=ix;
Lexicographic::IndexFromCoor(icoor,lane,simd);
///////////////////////////////////////////
// Transfer into lattice - will coalesce
///////////////////////////////////////////
// sobj obj = extractLane(blane,buf_p[ss+offset]);
// insertLane(lane,lat_v[osite],obj);
const int words=sizeof(vobj)/sizeof(vector_type);
vector_type * from = (vector_type *)&buf_p[ss+offset];
vector_type * to = (vector_type *)&lat_v[osite];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], blane);
putlane(to[w], stmp, lane);
}
}
});
}
template<class vobj> inline void GatherSlice(cshiftVector<vobj> &buf,
const Lattice<vobj> &lat,
int x,
int dim,
int offset=0)
{
const int Nsimd=vobj::Nsimd();
typedef typename vobj::scalar_object sobj;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::vector_type vector_type;
autoView(lat_v, lat, AcceleratorRead);
GridBase *grid = lat.Grid();
Coordinate simd = grid->_simd_layout;
int Nd = grid->Nd();
int block = grid->_slice_block[dim];
int stride = grid->_slice_stride[dim];
int nblock = grid->_slice_nblock[dim];
int rd = grid->_rdimensions[dim];
int ox = x%rd;
int ix = x/rd;
int isites = 1; for(int d=0;d<Nd;d++) if( d!=dim) isites*=simd[d];
Coordinate rsimd= simd; rsimd[dim]=1; // maybe reduce Nsimd
int rNsimd = 1; for(int d=0;d<Nd;d++) rNsimd*=rsimd[d];
int face_ovol=block*nblock;
// assert(buf.size()==face_ovol*rNsimd);
/*This will work GPU ONLY unless rNsimd is put in the lexico index*/
//Let's make it work on GPU and then make a special accelerator_for that
//doesn't hide the SIMD direction and keeps explicit in the threadIdx
//for cross platform
//For CPU perhaps just run a loop over Nsimd
auto buf_p = & buf[0];
accelerator_for(ss, face_ovol/simd[dim],Nsimd,{
// scalar layout won't coalesce
#ifdef GRID_SIMT
{
int blane=acceleratorSIMTlane(Nsimd); // buffer lane
#else
for(int blane=0;blane<Nsimd;blane++) {
#endif
int olane=blane%rNsimd; // reduced lattice lane
int obit =blane/rNsimd;
////////////////////////////////////////////
// osite
////////////////////////////////////////////
int ssp = ss*simd[dim]+obit;
int b = ssp%block;
int n = ssp/block;
int osite= b+n*stride + ox*block;
////////////////////////////////////////////
// isite -- map lane within buffer to lane within lattice
////////////////////////////////////////////
Coordinate icoor;
int lane;
Lexicographic::CoorFromIndex(icoor,olane,rsimd);
icoor[dim]=ix;
Lexicographic::IndexFromCoor(icoor,lane,simd);
///////////////////////////////////////////
// Take out of lattice
///////////////////////////////////////////
// sobj obj = extractLane(lane,lat_v[osite]);
// insertLane(blane,buf_p[ss+offset],obj);
const int words=sizeof(vobj)/sizeof(vector_type);
vector_type * to = (vector_type *)&buf_p[ss+offset];
vector_type * from = (vector_type *)&lat_v[osite];
scalar_type stmp;
for(int w=0;w<words;w++){
stmp = getlane(from[w], lane);
putlane(to[w], stmp, blane);
}
}
});
}
class PaddedCell {
public:
GridCartesian * unpadded_grid;
int dims;
int depth;
std::vector<GridCartesian *> grids;
~PaddedCell()
{
DeleteGrids();
@ -45,14 +245,18 @@ public:
dims=_grid->Nd();
AllocateGrids();
Coordinate local =unpadded_grid->LocalDimensions();
Coordinate procs =unpadded_grid->ProcessorGrid();
for(int d=0;d<dims;d++){
assert(local[d]>=depth);
if ( procs[d] > 1 ) assert(local[d]>=depth);
}
}
void DeleteGrids(void)
{
Coordinate processors=unpadded_grid->_processors;
for(int d=0;d<grids.size();d++){
delete grids[d];
if ( processors[d] > 1 ) {
delete grids[d];
}
}
grids.resize(0);
};
@ -63,45 +267,66 @@ public:
Coordinate processors=unpadded_grid->_processors;
Coordinate plocal =unpadded_grid->LocalDimensions();
Coordinate global(dims);
GridCartesian *old_grid = unpadded_grid;
// expand up one dim at a time
for(int d=0;d<dims;d++){
plocal[d] += 2*depth;
if ( processors[d] > 1 ) {
plocal[d] += 2*depth;
for(int d=0;d<dims;d++){
global[d] = plocal[d]*processors[d];
}
for(int d=0;d<dims;d++){
global[d] = plocal[d]*processors[d];
old_grid = new GridCartesian(global,simd,processors);
}
grids.push_back(new GridCartesian(global,simd,processors));
grids.push_back(old_grid);
}
};
template<class vobj>
inline Lattice<vobj> Extract(Lattice<vobj> &in)
inline Lattice<vobj> Extract(const Lattice<vobj> &in) const
{
Coordinate processors=unpadded_grid->_processors;
Lattice<vobj> out(unpadded_grid);
Coordinate local =unpadded_grid->LocalDimensions();
Coordinate fll(dims,depth); // depends on the MPI spread
// depends on the MPI spread
Coordinate fll(dims,depth);
Coordinate tll(dims,0); // depends on the MPI spread
for(int d=0;d<dims;d++){
if( processors[d]==1 ) fll[d]=0;
}
localCopyRegion(in,out,fll,tll,local);
return out;
}
template<class vobj>
inline Lattice<vobj> Exchange(Lattice<vobj> &in)
inline Lattice<vobj> Exchange(const Lattice<vobj> &in, const CshiftImplBase<vobj> &cshift = CshiftImplDefault<vobj>()) const
{
GridBase *old_grid = in.Grid();
int dims = old_grid->Nd();
Lattice<vobj> tmp = in;
for(int d=0;d<dims;d++){
tmp = Expand(d,tmp); // rvalue && assignment
tmp = Expand(d,tmp,cshift); // rvalue && assignment
}
return tmp;
}
template<class vobj>
inline Lattice<vobj> ExchangePeriodic(const Lattice<vobj> &in) const
{
GridBase *old_grid = in.Grid();
int dims = old_grid->Nd();
Lattice<vobj> tmp = in;
for(int d=0;d<dims;d++){
tmp = ExpandPeriodic(d,tmp); // rvalue && assignment
}
return tmp;
}
// expand up one dim at a time
template<class vobj>
inline Lattice<vobj> Expand(int dim,Lattice<vobj> &in)
inline Lattice<vobj> Expand(int dim, const Lattice<vobj> &in, const CshiftImplBase<vobj> &cshift = CshiftImplDefault<vobj>()) const
{
Coordinate processors=unpadded_grid->_processors;
GridBase *old_grid = in.Grid();
GridCartesian *new_grid = grids[dim];//These are new grids
Lattice<vobj> padded(new_grid);
@ -111,26 +336,236 @@ public:
if(dim==0) conformable(old_grid,unpadded_grid);
else conformable(old_grid,grids[dim-1]);
std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl;
// Middle bit
for(int x=0;x<local[dim];x++){
InsertSliceLocal(in,padded,x,depth+x,dim);
}
// High bit
shifted = Cshift(in,dim,depth);
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim);
}
// Low bit
shifted = Cshift(in,dim,-depth);
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,x,x,dim);
double tins=0, tshift=0;
int islocal = 0 ;
if ( processors[dim] == 1 ) islocal = 1;
if ( islocal ) {
// replace with a copy and maybe grid swizzle
// return in;??
double t = usecond();
padded = in;
tins += usecond() - t;
} else {
//////////////////////////////////////////////
// Replace sequence with
// ---------------------
// (i) Gather high face(s); start comms
// (ii) Gather low face(s); start comms
// (iii) Copy middle bit with localCopyRegion
// (iv) Complete high face(s), insert slice(s)
// (v)  Complete low face(s), insert slice(s)
//////////////////////////////////////////////
// Middle bit
double t = usecond();
for(int x=0;x<local[dim];x++){
InsertSliceLocal(in,padded,x,depth+x,dim);
}
tins += usecond() - t;
// High bit
t = usecond();
shifted = cshift.Cshift(in,dim,depth);
tshift += usecond() - t;
t=usecond();
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,local[dim]-depth+x,depth+local[dim]+x,dim);
}
tins += usecond() - t;
// Low bit
t = usecond();
shifted = cshift.Cshift(in,dim,-depth);
tshift += usecond() - t;
t = usecond();
for(int x=0;x<depth;x++){
InsertSliceLocal(shifted,padded,x,x,dim);
}
tins += usecond() - t;
}
std::cout << GridLogPerformance << "PaddedCell::Expand timings: cshift:" << tshift/1000 << "ms, insert-slice:" << tins/1000 << "ms" << std::endl;
return padded;
}
template<class vobj>
inline Lattice<vobj> ExpandPeriodic(int dim, const Lattice<vobj> &in) const
{
Coordinate processors=unpadded_grid->_processors;
GridBase *old_grid = in.Grid();
GridCartesian *new_grid = grids[dim];//These are new grids
Lattice<vobj> padded(new_grid);
// Lattice<vobj> shifted(old_grid);
Coordinate local =old_grid->LocalDimensions();
Coordinate plocal =new_grid->LocalDimensions();
if(dim==0) conformable(old_grid,unpadded_grid);
else conformable(old_grid,grids[dim-1]);
// std::cout << " dim "<<dim<<" local "<<local << " padding to "<<plocal<<std::endl;
double tins=0, tshift=0;
int islocal = 0 ;
if ( processors[dim] == 1 ) islocal = 1;
if ( islocal ) {
padded=in; // slightly different interface could avoid a copy operation
} else {
Face_exchange(in,padded,dim,depth);
return padded;
}
return padded;
}
template<class vobj>
void Face_exchange(const Lattice<vobj> &from,
Lattice<vobj> &to,
int dimension,int depth) const
{
typedef typename vobj::vector_type vector_type;
typedef typename vobj::scalar_type scalar_type;
typedef typename vobj::scalar_object sobj;
RealD t_gather=0.0;
RealD t_scatter=0.0;
RealD t_comms=0.0;
RealD t_copy=0.0;
// std::cout << GridLogMessage << "dimension " <<dimension<<std::endl;
// DumpSliceNorm(std::string("Face_exchange from"),from,dimension);
GridBase *grid=from.Grid();
GridBase *new_grid=to.Grid();
Coordinate lds = from.Grid()->_ldimensions;
Coordinate nlds= to.Grid()->_ldimensions;
Coordinate simd= from.Grid()->_simd_layout;
int ld = lds[dimension];
int nld = to.Grid()->_ldimensions[dimension];
const int Nsimd = vobj::Nsimd();
assert(depth<=lds[dimension]); // the whole halo depth must live on the neighbouring node
assert(depth>0); // A caller bug if zero
assert(ld+2*depth==nld);
////////////////////////////////////////////////////////////////////////////
// Face size and byte calculations
////////////////////////////////////////////////////////////////////////////
int buffer_size = 1;
for(int d=0;d<lds.size();d++){
if ( d!= dimension) buffer_size=buffer_size*lds[d];
}
buffer_size = buffer_size / Nsimd;
int rNsimd = Nsimd / simd[dimension];
assert( buffer_size == from.Grid()->_slice_nblock[dimension]*from.Grid()->_slice_block[dimension] / simd[dimension]);
static cshiftVector<vobj> send_buf;
static cshiftVector<vobj> recv_buf;
send_buf.resize(buffer_size*2*depth);
recv_buf.resize(buffer_size*2*depth);
std::vector<CommsRequest_t> fwd_req;
std::vector<CommsRequest_t> bwd_req;
int words = buffer_size;
int bytes = words * sizeof(vobj);
////////////////////////////////////////////////////////////////////////////
// Communication coords
////////////////////////////////////////////////////////////////////////////
int comm_proc = 1;
int xmit_to_rank;
int recv_from_rank;
grid->ShiftedRanks(dimension,comm_proc,xmit_to_rank,recv_from_rank);
////////////////////////////////////////////////////////////////////////////
// Gather all surface terms up to depth "d"
////////////////////////////////////////////////////////////////////////////
RealD t;
RealD t_tot=-usecond();
int plane=0;
for ( int d=0;d < depth ; d ++ ) {
int tag = d*1024 + dimension*2+0;
t=usecond();
GatherSlice(send_buf,from,d,dimension,plane*buffer_size); plane++;
t_gather+=usecond()-t;
t=usecond();
grid->SendToRecvFromBegin(fwd_req,
(void *)&send_buf[d*buffer_size], xmit_to_rank,
(void *)&recv_buf[d*buffer_size], recv_from_rank, bytes, tag);
t_comms+=usecond()-t;
}
for ( int d=0;d < depth ; d ++ ) {
int tag = d*1024 + dimension*2+1;
t=usecond();
GatherSlice(send_buf,from,ld-depth+d,dimension,plane*buffer_size); plane++;
t_gather+= usecond() - t;
t=usecond();
grid->SendToRecvFromBegin(bwd_req,
(void *)&send_buf[(d+depth)*buffer_size], recv_from_rank,
(void *)&recv_buf[(d+depth)*buffer_size], xmit_to_rank, bytes,tag);
t_comms+=usecond()-t;
}
////////////////////////////////////////////////////////////////////////////
// Copy interior -- overlap this with comms
////////////////////////////////////////////////////////////////////////////
int Nd = new_grid->Nd();
Coordinate LL(Nd,0);
Coordinate sz = grid->_ldimensions;
Coordinate toLL(Nd,0);
toLL[dimension]=depth;
t=usecond();
localCopyRegion(from,to,LL,toLL,sz);
t_copy= usecond() - t;
////////////////////////////////////////////////////////////////////////////
// Scatter all faces
////////////////////////////////////////////////////////////////////////////
plane=0;
t=usecond();
grid->CommsComplete(fwd_req);
t_comms+= usecond() - t;
t=usecond();
for ( int d=0;d < depth ; d ++ ) {
ScatterSlice(recv_buf,to,nld-depth+d,dimension,plane*buffer_size); plane++;
}
t_scatter= usecond() - t;
t=usecond();
grid->CommsComplete(bwd_req);
t_comms+= usecond() - t;
t=usecond();
for ( int d=0;d < depth ; d ++ ) {
ScatterSlice(recv_buf,to,d,dimension,plane*buffer_size); plane++;
}
t_scatter+= usecond() - t;
t_tot+=usecond();
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: gather :" << t_gather/1000 << "ms"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: scatter:" << t_scatter/1000 << "ms"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: copy :" << t_copy/1000 << "ms"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: comms :" << t_comms/1000 << "ms"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: total :" << t_tot/1000 << "ms"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: gather :" << depth*4.0*bytes/t_gather << "MB/s"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: scatter:" << depth*4.0*bytes/t_scatter<< "MB/s"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: comms :" << (RealD)4.0*bytes/t_comms << "MB/s"<<std::endl;
std::cout << GridLogPerformance << "PaddedCell::Expand new timings: face bytes :" << depth*bytes/1e6 << "MB"<<std::endl;
}
};
NAMESPACE_END(Grid);

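Putting the pieces together, the padded-cell workflow is: build a PaddedCell of a given depth, exchange once (ExchangePeriodic now routes through Face_exchange, i.e. GatherSlice, non-blocking comms, interior localCopyRegion and ScatterSlice, rather than repeated Cshift plus InsertSliceLocal), work on the padded field, and extract the interior. The end-to-end sketch below is hedged; the constructor argument order and all field names are assumptions.

// Hedged end-to-end sketch (names and constructor order are assumptions).
void examplePaddedStencil(GridCartesian *UGrid, const LatticeColourMatrixD &U)
{
  const int depth = 1;
  PaddedCell cell(depth, UGrid);

  // expand one dimension at a time; faces move via Face_exchange
  LatticeColourMatrixD Upad = cell.ExchangePeriodic(U);

  // ... apply a depth-1 stencil on the padded grid (cell.grids.back()) ...

  // copy the interior back onto the unpadded grid
  LatticeColourMatrixD Uout = cell.Extract(Upad);
}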
View File

@ -165,7 +165,7 @@ class BinaryIO {
* FIXME -- 128^3 x 256 x 16 will overflow.
*/
int global_site;
int64_t global_site;
Lexicographic::CoorFromIndex(coor,local_site,local_vol);
@ -175,8 +175,8 @@ class BinaryIO {
Lexicographic::IndexFromCoor(coor,global_site,global_vol);
uint32_t gsite29 = global_site%29;
uint32_t gsite31 = global_site%31;
uint64_t gsite29 = global_site%29;
uint64_t gsite31 = global_site%31;
site_crc = crc32(0,(unsigned char *)site_buf,sizeof(fobj));
// std::cout << "Site "<<local_site << " crc "<<std::hex<<site_crc<<std::dec<<std::endl;
@ -545,7 +545,9 @@ class BinaryIO {
const std::string &format,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
uint32_t &scidac_csumb)
uint32_t &scidac_csumb,
int control=BINARYIO_LEXICOGRAPHIC
)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::Realified::scalar_type word; word w=0;
@ -556,7 +558,7 @@ class BinaryIO {
std::vector<sobj> scalardata(lsites);
std::vector<fobj> iodata(lsites); // Munge, checksum, byte order in here
IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC,
IOobject(w,grid,iodata,file,offset,format,BINARYIO_READ|control,
nersc_csum,scidac_csuma,scidac_csumb);
GridStopWatch timer;
@ -582,7 +584,8 @@ class BinaryIO {
const std::string &format,
uint32_t &nersc_csum,
uint32_t &scidac_csuma,
uint32_t &scidac_csumb)
uint32_t &scidac_csumb,
int control=BINARYIO_LEXICOGRAPHIC)
{
typedef typename vobj::scalar_object sobj;
typedef typename vobj::Realified::scalar_type word; word w=0;
@ -607,7 +610,7 @@ class BinaryIO {
while (attemptsLeft >= 0)
{
grid->Barrier();
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|BINARYIO_LEXICOGRAPHIC,
IOobject(w,grid,iodata,file,offset,format,BINARYIO_WRITE|control,
nersc_csum,scidac_csuma,scidac_csumb);
if (checkWrite)
{
@ -617,7 +620,7 @@ class BinaryIO {
std::cout << GridLogMessage << "writeLatticeObject: read back object" << std::endl;
grid->Barrier();
IOobject(w,grid,ckiodata,file,ckoffset,format,BINARYIO_READ|BINARYIO_LEXICOGRAPHIC,
IOobject(w,grid,ckiodata,file,ckoffset,format,BINARYIO_READ|control,
cknersc_csum,ckscidac_csuma,ckscidac_csumb);
if ((cknersc_csum != nersc_csum) or (ckscidac_csuma != scidac_csuma) or (ckscidac_csumb != scidac_csumb))
{

View File

@ -162,8 +162,14 @@ template<class vobj> void ScidacMetaData(Lattice<vobj> & field,
{
uint32_t scidac_checksuma = stoull(scidacChecksum_.suma,0,16);
uint32_t scidac_checksumb = stoull(scidacChecksum_.sumb,0,16);
if ( scidac_csuma !=scidac_checksuma) return 0;
if ( scidac_csumb !=scidac_checksumb) return 0;
std::cout << GridLogMessage << " scidacChecksumVerify computed "<<scidac_csuma<<" expected "<<scidac_checksuma <<std::endl;
std::cout << GridLogMessage << " scidacChecksumVerify computed "<<scidac_csumb<<" expected "<<scidac_checksumb <<std::endl;
if ( scidac_csuma !=scidac_checksuma) {
return 0;
};
if ( scidac_csumb !=scidac_checksumb) {
return 0;
};
return 1;
}
@ -206,7 +212,7 @@ class GridLimeReader : public BinaryIO {
// Read a generic lattice field and verify checksum
////////////////////////////////////////////
template<class vobj>
void readLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name)
void readLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name,int control=BINARYIO_LEXICOGRAPHIC)
{
typedef typename vobj::scalar_object sobj;
scidacChecksum scidacChecksum_;
@ -238,7 +244,7 @@ class GridLimeReader : public BinaryIO {
uint64_t offset= ftello(File);
// std::cout << " ReadLatticeObject from offset "<<offset << std::endl;
BinarySimpleMunger<sobj,sobj> munge;
BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb);
BinaryIO::readLatticeObject< vobj, sobj >(field, filename, munge, offset, format,nersc_csum,scidac_csuma,scidac_csumb,control);
std::cout << GridLogMessage << "SciDAC checksum A " << std::hex << scidac_csuma << std::dec << std::endl;
std::cout << GridLogMessage << "SciDAC checksum B " << std::hex << scidac_csumb << std::dec << std::endl;
/////////////////////////////////////////////
@ -408,7 +414,7 @@ class GridLimeWriter : public BinaryIO
// in communicator used by the field.Grid()
////////////////////////////////////////////////////
template<class vobj>
void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name)
void writeLimeLatticeBinaryObject(Lattice<vobj> &field,std::string record_name,int control=BINARYIO_LEXICOGRAPHIC)
{
////////////////////////////////////////////////////////////////////
// NB: FILE and iostream are jointly writing disjoint sequences in the
@ -459,7 +465,7 @@ class GridLimeWriter : public BinaryIO
///////////////////////////////////////////
std::string format = getFormatString<vobj>();
BinarySimpleMunger<sobj,sobj> munge;
BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset1, format,nersc_csum,scidac_csuma,scidac_csumb);
BinaryIO::writeLatticeObject<vobj,sobj>(field, filename, munge, offset1, format,nersc_csum,scidac_csuma,scidac_csumb,control);
///////////////////////////////////////////
// Wind forward and close the record
@ -512,7 +518,8 @@ class ScidacWriter : public GridLimeWriter {
////////////////////////////////////////////////
template <class vobj, class userRecord>
void writeScidacFieldRecord(Lattice<vobj> &field,userRecord _userRecord,
const unsigned int recordScientificPrec = 0)
const unsigned int recordScientificPrec = 0,
int control=BINARYIO_LEXICOGRAPHIC)
{
GridBase * grid = field.Grid();
@ -534,7 +541,7 @@ class ScidacWriter : public GridLimeWriter {
writeLimeObject(0,0,_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
}
// Collective call
writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA)); // Closes message with checksum
writeLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA),control); // Closes message with checksum
}
};
@ -553,7 +560,8 @@ class ScidacReader : public GridLimeReader {
// Write generic lattice field in scidac format
////////////////////////////////////////////////
template <class vobj, class userRecord>
void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord)
void readScidacFieldRecord(Lattice<vobj> &field,userRecord &_userRecord,
int control=BINARYIO_LEXICOGRAPHIC)
{
typedef typename vobj::scalar_object sobj;
GridBase * grid = field.Grid();
@ -571,7 +579,7 @@ class ScidacReader : public GridLimeReader {
readLimeObject(header ,std::string("FieldMetaData"),std::string(GRID_FORMAT)); // Open message
readLimeObject(_userRecord,_userRecord.SerialisableClassName(),std::string(SCIDAC_RECORD_XML));
readLimeObject(_scidacRecord,_scidacRecord.SerialisableClassName(),std::string(SCIDAC_PRIVATE_RECORD_XML));
readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA));
readLimeLatticeBinaryObject(field,std::string(ILDG_BINARY_DATA),control);
}
void skipPastBinaryRecord(void) {
std::string rec_name(ILDG_BINARY_DATA);

View File

@ -126,6 +126,16 @@ typedef WilsonFermion<WilsonTwoIndexSymmetricImplD> WilsonTwoIndexSymmetricFermi
typedef WilsonFermion<WilsonTwoIndexAntiSymmetricImplF> WilsonTwoIndexAntiSymmetricFermionF;
typedef WilsonFermion<WilsonTwoIndexAntiSymmetricImplD> WilsonTwoIndexAntiSymmetricFermionD;
// Sp(2n)
typedef WilsonFermion<SpWilsonImplF> SpWilsonFermionF;
typedef WilsonFermion<SpWilsonImplD> SpWilsonFermionD;
typedef WilsonFermion<SpWilsonTwoIndexAntiSymmetricImplF> SpWilsonTwoIndexAntiSymmetricFermionF;
typedef WilsonFermion<SpWilsonTwoIndexAntiSymmetricImplD> SpWilsonTwoIndexAntiSymmetricFermionD;
typedef WilsonFermion<SpWilsonTwoIndexSymmetricImplF> SpWilsonTwoIndexSymmetricFermionF;
typedef WilsonFermion<SpWilsonTwoIndexSymmetricImplD> SpWilsonTwoIndexSymmetricFermionD;
// Twisted mass fermion
typedef WilsonTMFermion<WilsonImplD2> WilsonTMFermionD2;
typedef WilsonTMFermion<WilsonImplF> WilsonTMFermionF;

View File

@ -261,6 +261,22 @@ typedef WilsonImpl<vComplex, TwoIndexAntiSymmetricRepresentation, CoeffReal > W
typedef WilsonImpl<vComplexF, TwoIndexAntiSymmetricRepresentation, CoeffReal > WilsonTwoIndexAntiSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, TwoIndexAntiSymmetricRepresentation, CoeffReal > WilsonTwoIndexAntiSymmetricImplD; // Double
//sp 2n
typedef WilsonImpl<vComplex, SpFundamentalRepresentation, CoeffReal > SpWilsonImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpFundamentalRepresentation, CoeffReal > SpWilsonImplF; // Float
typedef WilsonImpl<vComplexD, SpFundamentalRepresentation, CoeffReal > SpWilsonImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, SpTwoIndexAntiSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexAntiSymmetricImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplR; // Real.. whichever prec
typedef WilsonImpl<vComplexF, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplF; // Float
typedef WilsonImpl<vComplexD, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonTwoIndexSymmetricImplD; // Double
typedef WilsonImpl<vComplex, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplR; // Real.. whichever prec // adj = 2indx symmetric for Sp(2N)
typedef WilsonImpl<vComplexF, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplF; // Float // adj = 2indx symmetric for Sp(2N)
typedef WilsonImpl<vComplexD, SpTwoIndexSymmetricRepresentation, CoeffReal > SpWilsonAdjImplD; // Double // adj = 2indx symmetric for Sp(2N)
NAMESPACE_END(Grid);

View File

@ -0,0 +1 @@
../WilsonCloverFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonKernelsInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonTMFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonImplD

View File

@ -0,0 +1 @@
../WilsonCloverFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonKernelsInstantiation.cc.master

View File

@ -0,0 +1 @@
../WilsonTMFermionInstantiation.cc.master

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonImplF

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexAntiSymmetricImplD

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexAntiSymmetricImplF

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexSymmetricImplD

View File

@ -0,0 +1 @@
#define IMPLEMENTATION SpWilsonTwoIndexSymmetricImplF

View File

@ -10,12 +10,18 @@ WILSON_IMPL_LIST=" \
WilsonImplF \
WilsonImplD \
WilsonImplD2 \
SpWilsonImplF \
SpWilsonImplD \
WilsonAdjImplF \
WilsonAdjImplD \
WilsonTwoIndexSymmetricImplF \
WilsonTwoIndexSymmetricImplD \
WilsonTwoIndexAntiSymmetricImplF \
WilsonTwoIndexAntiSymmetricImplD \
SpWilsonTwoIndexAntiSymmetricImplF \
SpWilsonTwoIndexAntiSymmetricImplD \
SpWilsonTwoIndexSymmetricImplF \
SpWilsonTwoIndexSymmetricImplD \
GparityWilsonImplF \
GparityWilsonImplD "

View File

@ -39,6 +39,9 @@ NAMESPACE_BEGIN(Grid);
typedef WilsonGaugeAction<PeriodicGimplR> WilsonGaugeActionR;
typedef WilsonGaugeAction<PeriodicGimplF> WilsonGaugeActionF;
typedef WilsonGaugeAction<PeriodicGimplD> WilsonGaugeActionD;
typedef WilsonGaugeAction<SpPeriodicGimplR> SpWilsonGaugeActionR;
typedef WilsonGaugeAction<SpPeriodicGimplF> SpWilsonGaugeActionF;
typedef WilsonGaugeAction<SpPeriodicGimplD> SpWilsonGaugeActionD;
typedef PlaqPlusRectangleAction<PeriodicGimplR> PlaqPlusRectangleActionR;
typedef PlaqPlusRectangleAction<PeriodicGimplF> PlaqPlusRectangleActionF;
typedef PlaqPlusRectangleAction<PeriodicGimplD> PlaqPlusRectangleActionD;

View File

@ -61,7 +61,7 @@ NAMESPACE_BEGIN(Grid);
typedef typename Impl::Field Field;
// hardcodes the exponential approximation in the template
template <class S, int Nrepresentation = Nc, int Nexp = 12 > class GaugeImplTypes {
template <class S, int Nrepresentation = Nc, int Nexp = 12, class Group = SU<Nc> > class GaugeImplTypes {
public:
typedef S Simd;
typedef typename Simd::scalar_type scalar_type;
@ -78,8 +78,6 @@ public:
typedef Lattice<SiteLink> LinkField;
typedef Lattice<SiteField> Field;
typedef SU<Nrepresentation> Group;
// Guido: we can probably separate the types from the HMC functions
// this will create two kinds of implementations
// probably confusing the users
@ -119,6 +117,7 @@ public:
//
LinkField Pmu(P.Grid());
Pmu = Zero();
for (int mu = 0; mu < Nd; mu++) {
Group::GaussianFundamentalLieAlgebraMatrix(pRNG, Pmu);
RealD scale = ::sqrt(HMC_MOMENTUM_DENOMINATOR) ;
@ -126,8 +125,12 @@ public:
PokeIndex<LorentzIndex>(P, Pmu, mu);
}
}
static inline Field projectForce(Field &P) { return Ta(P); }
static inline Field projectForce(Field &P) {
Field ret(P.Grid());
Group::taProj(P, ret);
return ret;
}
static inline void update_field(Field& P, Field& U, double ep){
//static std::chrono::duration<double> diff;
@ -137,14 +140,15 @@ public:
autoView(P_v,P,AcceleratorRead);
accelerator_for(ss, P.Grid()->oSites(),1,{
for (int mu = 0; mu < Nd; mu++) {
U_v[ss](mu) = ProjectOnGroup(Exponentiate(P_v[ss](mu), ep, Nexp) * U_v[ss](mu));
U_v[ss](mu) = Exponentiate(P_v[ss](mu), ep, Nexp) * U_v[ss](mu);
U_v[ss](mu) = Group::ProjectOnGeneralGroup(U_v[ss](mu));
}
});
//auto end = std::chrono::high_resolution_clock::now();
// diff += end - start;
// std::cout << "Time to exponentiate matrix " << diff.count() << " s\n";
}
static inline RealD FieldSquareNorm(Field& U){
LatticeComplex Hloc(U.Grid());
Hloc = Zero();
@ -157,7 +161,7 @@ public:
}
static inline void Project(Field &U) {
ProjectSUn(U);
Group::ProjectOnSpecialGroup(U);
}
static inline void HotConfiguration(GridParallelRNG &pRNG, Field &U) {
@ -171,6 +175,7 @@ public:
static inline void ColdConfiguration(GridParallelRNG &pRNG, Field &U) {
Group::ColdConfiguration(pRNG, U);
}
};
@ -178,10 +183,17 @@ typedef GaugeImplTypes<vComplex, Nc> GimplTypesR;
typedef GaugeImplTypes<vComplexF, Nc> GimplTypesF;
typedef GaugeImplTypes<vComplexD, Nc> GimplTypesD;
typedef GaugeImplTypes<vComplex, Nc, 12, Sp<Nc> > SpGimplTypesR;
typedef GaugeImplTypes<vComplexF, Nc, 12, Sp<Nc> > SpGimplTypesF;
typedef GaugeImplTypes<vComplexD, Nc, 12, Sp<Nc> > SpGimplTypesD;
typedef GaugeImplTypes<vComplex, SU<Nc>::AdjointDimension> GimplAdjointTypesR;
typedef GaugeImplTypes<vComplexF, SU<Nc>::AdjointDimension> GimplAdjointTypesF;
typedef GaugeImplTypes<vComplexD, SU<Nc>::AdjointDimension> GimplAdjointTypesD;
NAMESPACE_END(Grid);
#endif // GRID_GAUGE_IMPL_TYPES_H
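With the Group template parameter threaded through GaugeImplTypes, switching gauge group becomes purely a type choice; a short sketch of what the aliases above provide (nothing beyond the code already shown is assumed):
// equivalent to SpGimplTypesD defined above
using MySpGimpl = GaugeImplTypes<vComplexD, Nc, 12, Sp<Nc> >;
// projectForce, update_field and Project now dispatch through the Group parameter,
// i.e. to Sp<Nc>::taProj, Sp<Nc>::ProjectOnGeneralGroup and Sp<Nc>::ProjectOnSpecialGroup.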

View File

@ -176,7 +176,7 @@ public:
return PeriodicBC::CshiftLink(Link,mu,shift);
}
static inline void setDirections(std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline void setDirections(const std::vector<int> &conjDirs) { _conjDirs=conjDirs; }
static inline std::vector<int> getDirections(void) { return _conjDirs; }
static inline bool isPeriodicGaugeField(void) { return false; }
};
@ -193,6 +193,11 @@ typedef ConjugateGaugeImpl<GimplTypesR> ConjugateGimplR; // Real.. whichever pre
typedef ConjugateGaugeImpl<GimplTypesF> ConjugateGimplF; // Float
typedef ConjugateGaugeImpl<GimplTypesD> ConjugateGimplD; // Double
typedef PeriodicGaugeImpl<SpGimplTypesR> SpPeriodicGimplR; // Real.. whichever prec
typedef PeriodicGaugeImpl<SpGimplTypesF> SpPeriodicGimplF; // Float
typedef PeriodicGaugeImpl<SpGimplTypesD> SpPeriodicGimplD; // Double
NAMESPACE_END(Grid);
#endif

View File

@ -43,7 +43,7 @@ public:
private:
RealD c_plaq;
RealD c_rect;
typename WilsonLoops<Gimpl>::StapleAndRectStapleAllWorkspace workspace;
public:
PlaqPlusRectangleAction(RealD b,RealD c): c_plaq(b),c_rect(c){};
@ -79,27 +79,18 @@ public:
GridBase *grid = Umu.Grid();
std::vector<GaugeLinkField> U (Nd,grid);
std::vector<GaugeLinkField> U2(Nd,grid);
for(int mu=0;mu<Nd;mu++){
U[mu] = PeekIndex<LorentzIndex>(Umu,mu);
WilsonLoops<Gimpl>::RectStapleDouble(U2[mu],U[mu],mu);
}
std::vector<GaugeLinkField> RectStaple(Nd,grid), Staple(Nd,grid);
WilsonLoops<Gimpl>::StapleAndRectStapleAll(Staple, RectStaple, U, workspace);
GaugeLinkField dSdU_mu(grid);
GaugeLinkField staple(grid);
for (int mu=0; mu < Nd; mu++){
// Staple in direction mu
WilsonLoops<Gimpl>::Staple(staple,Umu,mu);
dSdU_mu = Ta(U[mu]*staple)*factor_p;
WilsonLoops<Gimpl>::RectStaple(Umu,staple,U2,U,mu);
dSdU_mu = dSdU_mu + Ta(U[mu]*staple)*factor_r;
dSdU_mu = Ta(U[mu]*Staple[mu])*factor_p;
dSdU_mu = dSdU_mu + Ta(U[mu]*RectStaple[mu])*factor_r;
PokeIndex<LorentzIndex>(dSdU, dSdU_mu, mu);
}

View File

@ -225,6 +225,18 @@ template <class RepresentationsPolicy,
using GenericHMCRunnerHirep =
HMCWrapperTemplate<PeriodicGimplR, Integrator, RepresentationsPolicy>;
// sp2n
template <template <typename, typename, typename> class Integrator>
using GenericSpHMCRunner = HMCWrapperTemplate<SpPeriodicGimplR, Integrator>;
template <class RepresentationsPolicy,
template <typename, typename, typename> class Integrator>
using GenericSpHMCRunnerHirep =
HMCWrapperTemplate<SpPeriodicGimplR, Integrator, RepresentationsPolicy>;
template <class Implementation, class RepresentationsPolicy,
template <typename, typename, typename> class Integrator>
using GenericHMCRunnerTemplate = HMCWrapperTemplate<Implementation, Integrator, RepresentationsPolicy>;
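A hedged sketch of driving the new symplectic runner, modelled on the existing GenericHMCRunner tests; the resource registration steps are elided and the exact calls below are assumptions rather than part of this diff:
typedef GenericSpHMCRunner<MinimumNorm2> HMCWrapper;   // Sp(2N) fundamental HMC
HMCWrapper TheHMC;
SpWilsonGaugeActionR Waction(beta);                    // built on SpPeriodicGimplR; beta is the gauge coupling
ActionLevel<HMCWrapper::Field> Level1(1);
Level1.push_back(&Waction);
TheHMC.TheAction.push_back(Level1);
TheHMC.Run();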

View File

@ -13,7 +13,7 @@ NAMESPACE_BEGIN(Grid);
* Empty since HMC updates already the fundamental representation
*/
template <int ncolour>
template <int ncolour, class group_name>
class FundamentalRep {
public:
static const int Dimension = ncolour;
@ -21,7 +21,7 @@ public:
// typdef to be used by the Representations class in HMC to get the
// types for the higher representation fields
typedef typename SU<ncolour>::LatticeMatrix LatticeMatrix;
typedef typename GaugeGroup<ncolour,group_name>::LatticeMatrix LatticeMatrix;
typedef LatticeGaugeField LatticeField;
explicit FundamentalRep(GridBase* grid) {} //do nothing
@ -45,7 +45,8 @@ public:
typedef FundamentalRep<Nc> FundamentalRepresentation;
typedef FundamentalRep<Nc,GroupName::SU> FundamentalRepresentation;
typedef FundamentalRep<Nc,GroupName::Sp> SpFundamentalRepresentation;
NAMESPACE_END(Grid);

View File

@ -20,14 +20,14 @@ NAMESPACE_BEGIN(Grid);
* in the SUnTwoIndex.h file
*/
template <int ncolour, TwoIndexSymmetry S>
template <int ncolour, TwoIndexSymmetry S, class group_name = GroupName::SU>
class TwoIndexRep {
public:
// typdef to be used by the Representations class in HMC to get the
// types for the higher representation fields
typedef typename SU_TwoIndex<ncolour, S>::LatticeTwoIndexMatrix LatticeMatrix;
typedef typename SU_TwoIndex<ncolour, S>::LatticeTwoIndexField LatticeField;
static const int Dimension = ncolour * (ncolour + S) / 2;
typedef typename GaugeGroupTwoIndex<ncolour, S, group_name>::LatticeTwoIndexMatrix LatticeMatrix;
typedef typename GaugeGroupTwoIndex<ncolour, S, group_name>::LatticeTwoIndexField LatticeField;
static const int Dimension = GaugeGroupTwoIndex<ncolour,S,group_name>::Dimension;
static const bool isFundamental = false;
LatticeField U;
@ -43,10 +43,10 @@ public:
U = Zero();
LatticeColourMatrix tmp(Uin.Grid());
Vector<typename SU<ncolour>::Matrix> eij(Dimension);
Vector<typename GaugeGroup<ncolour,group_name>::Matrix> eij(Dimension);
for (int a = 0; a < Dimension; a++)
SU_TwoIndex<ncolour, S>::base(a, eij[a]);
GaugeGroupTwoIndex<ncolour, S, group_name>::base(a, eij[a]);
for (int mu = 0; mu < Nd; mu++) {
auto Uin_mu = peekLorentz(Uin, mu);
@ -71,7 +71,7 @@ public:
out_mu = Zero();
typename SU<ncolour>::LatticeAlgebraVector h(in.Grid());
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector h(in.Grid());
projectOnAlgebra(h, in_mu, double(Nc + 2 * S)); // factor T(r)/T(fund)
FundamentalLieAlgebraMatrix(h, out_mu); // apply scale only once
pokeLorentz(out, out_mu, mu);
@ -80,20 +80,23 @@ public:
}
private:
void projectOnAlgebra(typename SU<ncolour>::LatticeAlgebraVector &h_out,
void projectOnAlgebra(typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeMatrix &in, Real scale = 1.0) const {
SU_TwoIndex<ncolour, S>::projectOnAlgebra(h_out, in, scale);
GaugeGroupTwoIndex<ncolour, S,group_name>::projectOnAlgebra(h_out, in, scale);
}
void FundamentalLieAlgebraMatrix(
typename SU<ncolour>::LatticeAlgebraVector &h,
typename SU<ncolour>::LatticeMatrix &out, Real scale = 1.0) const {
SU<ncolour>::FundamentalLieAlgebraMatrix(h, out, scale);
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h,
typename GaugeGroup<ncolour, group_name>::LatticeMatrix &out, Real scale = 1.0) const {
GaugeGroup<ncolour,group_name>::FundamentalLieAlgebraMatrix(h, out, scale);
}
};
typedef TwoIndexRep<Nc, Symmetric> TwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric> TwoIndexAntiSymmetricRepresentation;
typedef TwoIndexRep<Nc, Symmetric, GroupName::SU> TwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric, GroupName::SU> TwoIndexAntiSymmetricRepresentation;
typedef TwoIndexRep<Nc, Symmetric, GroupName::Sp> SpTwoIndexSymmetricRepresentation;
typedef TwoIndexRep<Nc, AntiSymmetric, GroupName::Sp> SpTwoIndexAntiSymmetricRepresentation;
NAMESPACE_END(Grid);
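For higher-representation HMC, the new typedefs slot into the Representations policy exactly as their SU(N) counterparts do; a hedged sketch (the Representations wrapper and integrator choice follow the existing pattern and are assumptions here):
typedef Representations<SpFundamentalRepresentation, SpTwoIndexAntiSymmetricRepresentation> TheRepresentations;
typedef GenericSpHMCRunnerHirep<TheRepresentations, MinimumNorm2> HMCWrapper;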

View File

@ -37,13 +37,14 @@ NAMESPACE_BEGIN(Grid);
// Make these members of an Impl class for BC's.
namespace PeriodicBC {
//Out(x) = Link(x)*field(x+mu)
template<class covariant,class gauge> Lattice<covariant> CovShiftForward(const Lattice<gauge> &Link,
int mu,
const Lattice<covariant> &field)
{
return Link*Cshift(field,mu,1);// moves towards negative mu
}
//Out(x) = Link^dag(x-mu)*field(x-mu)
template<class covariant,class gauge> Lattice<covariant> CovShiftBackward(const Lattice<gauge> &Link,
int mu,
const Lattice<covariant> &field)
@ -52,19 +53,19 @@ namespace PeriodicBC {
tmp = adj(Link)*field;
return Cshift(tmp,mu,-1);// moves towards positive mu
}
//Out(x) = Link^dag(x-mu)
template<class gauge> Lattice<gauge>
CovShiftIdentityBackward(const Lattice<gauge> &Link, int mu)
{
return Cshift(adj(Link), mu, -1);
}
//Out(x) = Link(x)
template<class gauge> Lattice<gauge>
CovShiftIdentityForward(const Lattice<gauge> &Link, int mu)
{
return Link;
}
//Link(x) = Link(x+mu)
template<class gauge> Lattice<gauge>
ShiftStaple(const Lattice<gauge> &Link, int mu)
{

Grid/qcd/utils/GaugeGroup.h
View File

@ -0,0 +1,470 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/utils/GaugeGroup.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_UTIL_GAUGEGROUP_H
#define QCD_UTIL_GAUGEGROUP_H
// Important detail: nvcc requires all template parameters to have names.
// This is the only reason why the second template parameter has a name.
#define ONLY_IF_SU \
typename dummy_name = group_name, \
typename named_dummy = std::enable_if_t < \
std::is_same<dummy_name, group_name>::value && \
is_su<dummy_name>::value >
#define ONLY_IF_Sp \
typename dummy_name = group_name, \
typename named_dummy = std::enable_if_t < \
std::is_same<dummy_name, group_name>::value && \
is_sp<dummy_name>::value >
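// Sketch of the intended use of these guards inside GaugeGroup (the member name below is
// illustrative only, not part of this header): a member declared as
//   template <ONLY_IF_Sp>
//   static void ProjectOmega(LatticeMatrix &U);
// participates in overload resolution only when group_name == GroupName::Sp, and the
// analogous ONLY_IF_SU guard restricts a member to the SU specialisation.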
NAMESPACE_BEGIN(Grid);
namespace GroupName {
class SU {};
class Sp {};
} // namespace GroupName
template <typename group_name>
struct is_su {
static const bool value = false;
};
template <>
struct is_su<GroupName::SU> {
static const bool value = true;
};
template <typename group_name>
struct is_sp {
static const bool value = false;
};
template <>
struct is_sp<GroupName::Sp> {
static const bool value = true;
};
template <typename group_name>
constexpr int compute_adjoint_dimension(int ncolour);
template <>
constexpr int compute_adjoint_dimension<GroupName::SU>(int ncolour) {
return ncolour * ncolour - 1;
}
template <>
constexpr int compute_adjoint_dimension<GroupName::Sp>(int ncolour) {
return ncolour / 2 * (ncolour + 1);
}
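// Quick sanity values for the two formulae (a hedged check, not in the original file):
// SU(3) has N^2 - 1 = 8 generators; Sp(4) (ncolour = 2N = 4) has N(2N+1) = 10.
static_assert(compute_adjoint_dimension<GroupName::SU>(3) == 8, "SU(3) adjoint dimension");
static_assert(compute_adjoint_dimension<GroupName::Sp>(4) == 10, "Sp(4) adjoint dimension");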
template <int ncolour, class group_name>
class GaugeGroup {
public:
static const int Dimension = ncolour;
static const int AdjointDimension =
compute_adjoint_dimension<group_name>(ncolour);
static const int AlgebraDimension =
compute_adjoint_dimension<group_name>(ncolour);
template <typename vtype>
using iSU2Matrix = iScalar<iScalar<iMatrix<vtype, 2> > >;
template <typename vtype>
using iGroupMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
template <typename vtype>
using iAlgebraVector = iScalar<iScalar<iVector<vtype, AdjointDimension> > >;
static int su2subgroups(void) { return su2subgroups(group_name()); }
//////////////////////////////////////////////////////////////////////////////////////////////////
// Types can be accessed as SU<2>::Matrix , SU<2>::vSUnMatrix,
// SU<2>::LatticeMatrix etc...
//////////////////////////////////////////////////////////////////////////////////////////////////
typedef iGroupMatrix<Complex> Matrix;
typedef iGroupMatrix<ComplexF> MatrixF;
typedef iGroupMatrix<ComplexD> MatrixD;
typedef iGroupMatrix<vComplex> vMatrix;
typedef iGroupMatrix<vComplexF> vMatrixF;
typedef iGroupMatrix<vComplexD> vMatrixD;
// For the projectors to the algebra
// these should be real...
// keeping complex for consistency with the SIMD vector types
typedef iAlgebraVector<Complex> AlgebraVector;
typedef iAlgebraVector<ComplexF> AlgebraVectorF;
typedef iAlgebraVector<ComplexD> AlgebraVectorD;
typedef iAlgebraVector<vComplex> vAlgebraVector;
typedef iAlgebraVector<vComplexF> vAlgebraVectorF;
typedef iAlgebraVector<vComplexD> vAlgebraVectorD;
typedef Lattice<vMatrix> LatticeMatrix;
typedef Lattice<vMatrixF> LatticeMatrixF;
typedef Lattice<vMatrixD> LatticeMatrixD;
typedef Lattice<vAlgebraVector> LatticeAlgebraVector;
typedef Lattice<vAlgebraVectorF> LatticeAlgebraVectorF;
typedef Lattice<vAlgebraVectorD> LatticeAlgebraVectorD;
typedef iSU2Matrix<Complex> SU2Matrix;
typedef iSU2Matrix<ComplexF> SU2MatrixF;
typedef iSU2Matrix<ComplexD> SU2MatrixD;
typedef iSU2Matrix<vComplex> vSU2Matrix;
typedef iSU2Matrix<vComplexF> vSU2MatrixF;
typedef iSU2Matrix<vComplexD> vSU2MatrixD;
typedef Lattice<vSU2Matrix> LatticeSU2Matrix;
typedef Lattice<vSU2MatrixF> LatticeSU2MatrixF;
typedef Lattice<vSU2MatrixD> LatticeSU2MatrixD;
// Private implementation details are specified in the following files:
// Grid/qcd/utils/SUn.impl.h
// Grid/qcd/utils/Sp2n.impl.h
// The public part of the interface follows below and refers to these
// private member functions.
#include <Grid/qcd/utils/SUn.impl.h>
#include <Grid/qcd/utils/Sp2n.impl.h>
public:
template <class cplx>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta) {
return generator(lieIndex, ta, group_name());
}
static void su2SubGroupIndex(int &i1, int &i2, int su2_index) {
return su2SubGroupIndex(i1, i2, su2_index, group_name());
}
static void testGenerators(void) { testGenerators(group_name()); }
static void printGenerators(void) {
for (int gen = 0; gen < AlgebraDimension; gen++) {
Matrix ta;
generator(gen, ta);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << ta << std::endl;
}
}
template <typename LatticeMatrixType>
static void LieRandomize(GridParallelRNG &pRNG, LatticeMatrixType &out,
double scale = 1.0) {
GridBase *grid = out.Grid();
typedef typename LatticeMatrixType::vector_type vector_type;
typedef iSinglet<vector_type> vTComplexType;
typedef Lattice<vTComplexType> LatticeComplexType;
typedef typename GridTypeMapper<
typename LatticeMatrixType::vector_object>::scalar_object MatrixType;
LatticeComplexType ca(grid);
LatticeMatrixType lie(grid);
LatticeMatrixType la(grid);
ComplexD ci(0.0, scale);
MatrixType ta;
lie = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
random(pRNG, ca);
ca = (ca + conjugate(ca)) * 0.5;
ca = ca - 0.5;
generator(a, ta);
la = ci * ca * ta;
lie = lie + la; // e^{i la ta}
}
taExp(lie, out);
}
static void GaussianFundamentalLieAlgebraMatrix(GridParallelRNG &pRNG,
LatticeMatrix &out,
Real scale = 1.0) {
GridBase *grid = out.Grid();
LatticeReal ca(grid);
LatticeMatrix la(grid);
Complex ci(0.0, scale);
Matrix ta;
out = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
gaussian(pRNG, ca);
generator(a, ta);
la = toComplex(ca) * ta;
out += la;
}
out *= ci;
}
static void FundamentalLieAlgebraMatrix(const LatticeAlgebraVector &h,
LatticeMatrix &out,
Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeMatrix la(grid);
Matrix ta;
out = Zero();
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
la = peekColour(h, a) * timesI(ta) * scale;
out += la;
}
}
// Projects out the algebra components of a lattice matrix (AlgebraDimension of them).
// Inverse operation: FundamentalLieAlgebraMatrix
static void projectOnAlgebra(LatticeAlgebraVector &h_out,
const LatticeMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
Matrix Ta;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, Ta);
pokeColour(h_out, -2.0 * (trace(timesI(Ta) * in)) * scale, a);
}
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r) {
return ProjectOnGeneralGroup(r, group_name());
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r) {
return ProjectOnGeneralGroup(r, group_name());
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg) {
return ProjectOnGeneralGroup(arg, group_name());
}
template <int N,class vComplex_t> // Projects onto the general groups U(N), Sp(2N)xZ2, i.e. the determinant is allowed a complex phase.
static void ProjectOnGeneralGroup(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >, Nd> > &U) {
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
Umu = ProjectOnGeneralGroup(Umu);
}
}
template <int N,class vComplex_t>
static Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu) {
return ProjectOnGeneralGroup(Umu, group_name());
}
template <int N,class vComplex_t> // Projects on SU(N), Sp(2N), with unit determinant, by first projecting on general group and then enforcing unit determinant
static void ProjectOnSpecialGroup(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu) {
Umu = ProjectOnGeneralGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for (int i = 0; i < N; i++) {
auto element = PeekIndex<ColourIndex>(Umu, N - 1, i);
element = element * det;
PokeIndex<ColourIndex>(Umu, element, Nc - 1, i);
}
}
template <int N,class vComplex_t> // reunitarise / resymplectify; previously ProjectSUn
static void ProjectOnSpecialGroup(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >, Nd> > &U) {
// Reunitarise
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
ProjectOnSpecialGroup(Umu);
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
LatticeMatrixType tmp(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
// LieRandomize(pRNG, Umu, 1.0);
// PokeIndex<LorentzIndex>(out, Umu, mu);
gaussian(pRNG,Umu);
tmp = Ta(Umu);
taExp(tmp,Umu);
ProjectOnSpecialGroup(Umu);
// ProjectSUn(Umu);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void TepidConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
LieRandomize(pRNG, Umu, 0.01);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void ColdConfiguration(GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iGroupMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
Umu = 1.0;
for (int mu = 0; mu < Nd; mu++) {
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template <typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
ColdConfiguration(out);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out) {
taProj(in, out, group_name());
}
template <typename LatticeMatrixType>
static void taExp(const LatticeMatrixType &x, LatticeMatrixType &ex) {
typedef typename LatticeMatrixType::scalar_type ComplexType;
LatticeMatrixType xn(x.Grid());
RealD nfac = 1.0;
xn = x;
ex = xn + ComplexType(1.0); // 1+x
// Do a 12th order exponentiation
for (int i = 2; i <= 12; ++i) {
nfac = nfac / RealD(i); // 1/2, 1/2.3 ...
xn = xn * x; // x2, x3,x4....
ex = ex + xn * nfac; // x2/2!, x3/3!....
}
}
};
template <int ncolour>
using SU = GaugeGroup<ncolour, GroupName::SU>;
template <int ncolour>
using Sp = GaugeGroup<ncolour, GroupName::Sp>;
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
typedef SU<5> SU5;
typedef SU<Nc> FundamentalMatrices;
typedef Sp<2> Sp2;
typedef Sp<4> Sp4;
typedef Sp<6> Sp6;
typedef Sp<8> Sp8;
template <int N,class vComplex_t>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu)
{
GaugeGroup<N,GroupName::SU>::ProjectOnSpecialGroup(Umu);
}
template <int N,class vComplex_t>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >,Nd> > &U)
{
GaugeGroup<N,GroupName::SU>::ProjectOnSpecialGroup(U);
}
template <int N,class vComplex_t>
static void ProjectSpn(Lattice<iScalar<iScalar<iMatrix<vComplex_t, N> > > > &Umu)
{
GaugeGroup<N,GroupName::Sp>::ProjectOnSpecialGroup(Umu);
}
template <int N,class vComplex_t>
static void ProjectSpn(Lattice<iVector<iScalar<iMatrix<vComplex_t, N> >,Nd> > &U)
{
GaugeGroup<N,GroupName::Sp>::ProjectOnSpecialGroup(U);
}
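// Usage sketch (not part of this header): the wrappers above keep existing call sites working,
//   ProjectSUn(U);   // SU(N): reunitarise, as before
//   ProjectSpn(U);   // Sp(2N): resymplectify via the same special-group projection
// while new code can call GaugeGroup<N, GroupName::X>::ProjectOnSpecialGroup(U) directly.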
// Explicit specialisation for SU(3).
static void ProjectSU3(Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
GridBase *grid = Umu.Grid();
const int x = 0;
const int y = 1;
const int z = 2;
// Reunitarise
Umu = ProjectOnGroup(Umu);
autoView(Umu_v, Umu, CpuWrite);
thread_for(ss, grid->oSites(), {
auto cm = Umu_v[ss];
cm()()(2, x) = adj(cm()()(0, y) * cm()()(1, z) -
cm()()(0, z) * cm()()(1, y)); // x= yz-zy
cm()()(2, y) = adj(cm()()(0, z) * cm()()(1, x) -
cm()()(0, x) * cm()()(1, z)); // y= zx-xz
cm()()(2, z) = adj(cm()()(0, x) * cm()()(1, y) -
cm()()(0, y) * cm()()(1, x)); // z= xy-yx
Umu_v[ss] = cm;
});
}
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >, Nd> > &U)
{
GridBase *grid = U.Grid();
// Reunitarise
for (int mu = 0; mu < Nd; mu++) {
auto Umu = PeekIndex<LorentzIndex>(U, mu);
Umu = ProjectOnGroup(Umu);
ProjectSU3(Umu);
PokeIndex<LorentzIndex>(U, Umu, mu);
}
}
NAMESPACE_END(Grid);
#endif

View File

@ -0,0 +1,371 @@
////////////////////////////////////////////////////////////////////////
//
// * Two index representation generators
//
// * Normalisation for the fundamental generators:
// trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
//
// base for NxN two-index (anti-)symmetric matrices,
// normalized to 1 (d_ij is the Kronecker delta)
//
// e^(ij)_{kl} = 1/sqrt(2) (d_ik d_jl +/- d_jk d_il)
//
// Then the generators are written as
//
// (iT_a)^(ij)(lk) = i * ( tr[e^(ij)^dag e^(lk) T^transp_a] +
// tr[e^(lk) e^(ij)^dag T_a] )
//
//
////////////////////////////////////////////////////////////////////////
// Authors: David Preti, Guido Cossu
#ifndef QCD_UTIL_GAUGEGROUPTWOINDEX_H
#define QCD_UTIL_GAUGEGROUPTWOINDEX_H
NAMESPACE_BEGIN(Grid);
enum TwoIndexSymmetry { Symmetric = 1, AntiSymmetric = -1 };
constexpr inline Real delta(int a, int b) { return (a == b) ? 1.0 : 0.0; }
namespace detail {
template <class cplx, int nc, TwoIndexSymmetry S>
struct baseOffDiagonalSpHelper;
template <class cplx, int nc>
struct baseOffDiagonalSpHelper<cplx, nc, AntiSymmetric> {
static const int ngroup = nc / 2;
static void baseOffDiagonalSp(int i, int j, iScalar<iScalar<iMatrix<cplx, nc> > > &eij) {
eij = Zero();
RealD tmp;
if ((i == ngroup + j) && (1 <= j) && (j < ngroup)) {
for (int k = 0; k < j+1; k++) {
if (k < j) {
tmp = 1 / sqrt(j * (j + 1));
eij()()(k, k + ngroup) = tmp;
eij()()(k + ngroup, k) = -tmp;
}
if (k == j) {
tmp = -j / sqrt(j * (j + 1));
eij()()(k, k + ngroup) = tmp;
eij()()(k + ngroup, k) = -tmp;
}
}
}
else if (i != ngroup + j) {
for (int k = 0; k < nc; k++)
for (int l = 0; l < nc; l++) {
eij()()(l, k) =
delta(i, k) * delta(j, l) - delta(j, k) * delta(i, l);
}
}
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
};
template <class cplx, int nc>
struct baseOffDiagonalSpHelper<cplx, nc, Symmetric> {
static void baseOffDiagonalSp(int i, int j, iScalar<iScalar<iMatrix<cplx, nc> > > &eij) {
eij = Zero();
for (int k = 0; k < nc; k++)
for (int l = 0; l < nc; l++)
eij()()(l, k) =
delta(i, k) * delta(j, l) + delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
};
} // closing detail namespace
template <int ncolour, TwoIndexSymmetry S, class group_name>
class GaugeGroupTwoIndex : public GaugeGroup<ncolour, group_name> {
public:
// The chosen convention is that we are taking ncolour to be N in SU<N> but 2N
// in Sp(2N). ngroup is equal to N for SU but 2N/2 = N for Sp(2N).
static_assert(std::is_same<group_name, GroupName::SU>::value or
std::is_same<group_name, GroupName::Sp>::value,
"ngroup is only implemented for SU and Sp currently.");
static const int ngroup =
std::is_same<group_name, GroupName::SU>::value ? ncolour : ncolour / 2;
static const int Dimension =
(ncolour * (ncolour + S) / 2) + (std::is_same<group_name, GroupName::Sp>::value ? (S - 1) / 2 : 0);
static const int DimensionAS =
(ncolour * (ncolour - 1) / 2) + (std::is_same<group_name, GroupName::Sp>::value ? (- 1) : 0);
static const int DimensionS =
ncolour * (ncolour + 1) / 2;
static const int NumGenerators =
GaugeGroup<ncolour, group_name>::AlgebraDimension;
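// Worked values for the convention above (ncolour = N for SU, 2N for Sp; illustrative, not in the file):
//   SU(3):  Dimension = 6 (symmetric), 3 (antisymmetric)
//   Sp(4):  Dimension = 10 (symmetric, = adjoint), 5 (antisymmetric)
// e.g. Sp(4), S = -1:  4*3/2 + (-1-1)/2 = 6 - 1 = 5; the (S-1)/2 shift removes the omega-trace part.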
template <typename vtype>
using iGroupTwoIndexMatrix = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
typedef iGroupTwoIndexMatrix<Complex> TIMatrix;
typedef iGroupTwoIndexMatrix<ComplexF> TIMatrixF;
typedef iGroupTwoIndexMatrix<ComplexD> TIMatrixD;
typedef iGroupTwoIndexMatrix<vComplex> vTIMatrix;
typedef iGroupTwoIndexMatrix<vComplexF> vTIMatrixF;
typedef iGroupTwoIndexMatrix<vComplexD> vTIMatrixD;
typedef Lattice<vTIMatrix> LatticeTwoIndexMatrix;
typedef Lattice<vTIMatrixF> LatticeTwoIndexMatrixF;
typedef Lattice<vTIMatrixD> LatticeTwoIndexMatrixD;
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
LatticeTwoIndexField;
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
LatticeTwoIndexFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
LatticeTwoIndexFieldD;
template <typename vtype>
using iGroupMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef iGroupMatrix<Complex> Matrix;
typedef iGroupMatrix<ComplexF> MatrixF;
typedef iGroupMatrix<ComplexD> MatrixD;
private:
template <class cplx>
static void baseDiagonal(int Index, iGroupMatrix<cplx> &eij) {
eij = Zero();
eij()()(Index - ncolour * (ncolour - 1) / 2,
Index - ncolour * (ncolour - 1) / 2) = 1.0;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iGroupMatrix<cplx> &eij, GroupName::SU) {
eij = Zero();
for (int k = 0; k < ncolour; k++)
for (int l = 0; l < ncolour; l++)
eij()()(l, k) =
delta(i, k) * delta(j, l) + S * delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iGroupMatrix<cplx> &eij, GroupName::Sp) {
detail::baseOffDiagonalSpHelper<cplx, ncolour, S>::baseOffDiagonalSp(i, j, eij);
}
public:
template <class cplx>
static void base(int Index, iGroupMatrix<cplx> &eij) {
// returns e^(ij)_{kl}, necessary for the change of basis U_F -> U_R
assert(Index < Dimension);
eij = Zero();
// for the linearisation of the 2 indexes
static int a[ncolour * (ncolour - 1) / 2][2]; // store the a <-> i,j
static bool filled = false;
if (!filled) {
int counter = 0;
for (int i = 1; i < ncolour; i++) {
for (int j = 0; j < i; j++) {
if (std::is_same<group_name, GroupName::Sp>::value)
{
if (j==0 && i==ngroup+j && S==-1) {
//std::cout << "skipping" << std::endl; // for Sp2n this vanishes identically.
j = j+1;
}
}
a[counter][0] = i;
a[counter][1] = j;
counter++;
}
}
filled = true;
}
if (Index < ncolour*ncolour - DimensionS)
{
baseOffDiagonal(a[Index][0], a[Index][1], eij, group_name());
} else {
baseDiagonal(Index, eij);
}
}
static void printBase(void) {
for (int gen = 0; gen < Dimension; gen++) {
Matrix tmp;
base(gen, tmp);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << tmp << std::endl;
}
}
template <class cplx>
static void generator(int Index, iGroupTwoIndexMatrix<cplx> &i2indTa) {
Vector<iGroupMatrix<cplx> > ta(NumGenerators);
Vector<iGroupMatrix<cplx> > eij(Dimension);
iGroupMatrix<cplx> tmp;
for (int a = 0; a < NumGenerators; a++)
GaugeGroup<ncolour, group_name>::generator(a, ta[a]);
for (int a = 0; a < Dimension; a++) base(a, eij[a]);
for (int a = 0; a < Dimension; a++) {
tmp = transpose(eij[a]*ta[Index]) + transpose(eij[a]) * ta[Index];
for (int b = 0; b < Dimension; b++) {
Complex iTr = TensorRemove(timesI(trace(tmp * eij[b])));
i2indTa()()(a, b) = iTr;
}
}
}
static void printGenerators(void) {
for (int gen = 0; gen < NumGenerators; gen++) {
TIMatrix i2indTa;
generator(gen, i2indTa);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << i2indTa << std::endl;
}
}
static void testGenerators(void) {
TIMatrix i2indTa, i2indTb;
std::cout << GridLogMessage << "2IndexRep - Checking if traceless"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(trace(i2indTa)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "2IndexRep - Checking if antihermitean"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(adj(i2indTa) + i2indTa) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage
<< "2IndexRep - Checking Tr[Ta*Tb]=delta(a,b)*(N +- 2)/2"
<< std::endl;
for (int a = 0; a < NumGenerators; a++) {
for (int b = 0; b < NumGenerators; b++) {
generator(a, i2indTa);
generator(b, i2indTb);
// generator returns iTa, so we need a minus sign here
Complex Tr = -TensorRemove(trace(i2indTa * i2indTb));
std::cout << GridLogMessage << "a=" << a << "b=" << b << "Tr=" << Tr
<< std::endl;
if (a == b) {
assert(real(Tr) - ((ncolour + S * 2) * 0.5) < 1e-8);
} else {
assert(real(Tr) < 1e-8);
}
assert(imag(Tr) < 1e-8);
}
}
std::cout << GridLogMessage << std::endl;
}
static void TwoIndexLieAlgebraMatrix(
const typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h,
LatticeTwoIndexMatrix &out, Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeTwoIndexMatrix la(grid);
TIMatrix i2indTa;
out = Zero();
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
la = peekColour(h, a) * i2indTa;
out += la;
}
out *= scale;
}
// Projects the algebra components
// of a lattice matrix ( of dimension ncol*ncol -1 )
static void projectOnAlgebra(
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
TIMatrix i2indTa;
Real coefficient = -2.0 / (ncolour + 2 * S) * scale;
// 2/(Nc +/- 2) for the normalization of the trace in the two index rep
for (int a = 0; a < NumGenerators; a++) {
generator(a, i2indTa);
pokeColour(h_out, real(trace(i2indTa * in)) * coefficient, a);
}
}
// a projector that keeps the generators stored to avoid the overhead of
// recomputing them
static void projector(
typename GaugeGroup<ncolour, group_name>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
// to store the generators
static std::vector<TIMatrix> i2indTa(NumGenerators);
h_out = Zero();
static bool precalculated = false;
if (!precalculated) {
precalculated = true;
for (int a = 0; a < NumGenerators; a++) generator(a, i2indTa[a]);
}
Real coefficient =
-2.0 / (ncolour + 2 * S) * scale; // 2/(Nc +/- 2) for the normalization
// of the trace in the two index rep
for (int a = 0; a < NumGenerators; a++) {
auto tmp = real(trace(i2indTa[a] * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
};
template <int ncolour, TwoIndexSymmetry S>
using SU_TwoIndex = GaugeGroupTwoIndex<ncolour, S, GroupName::SU>;
// Some useful type names
typedef SU_TwoIndex<Nc, Symmetric> TwoIndexSymmMatrices;
typedef SU_TwoIndex<Nc, AntiSymmetric> TwoIndexAntiSymmMatrices;
typedef SU_TwoIndex<2, Symmetric> SU2TwoIndexSymm;
typedef SU_TwoIndex<3, Symmetric> SU3TwoIndexSymm;
typedef SU_TwoIndex<4, Symmetric> SU4TwoIndexSymm;
typedef SU_TwoIndex<5, Symmetric> SU5TwoIndexSymm;
typedef SU_TwoIndex<2, AntiSymmetric> SU2TwoIndexAntiSymm;
typedef SU_TwoIndex<3, AntiSymmetric> SU3TwoIndexAntiSymm;
typedef SU_TwoIndex<4, AntiSymmetric> SU4TwoIndexAntiSymm;
typedef SU_TwoIndex<5, AntiSymmetric> SU5TwoIndexAntiSymm;
template <int ncolour, TwoIndexSymmetry S>
using Sp_TwoIndex = GaugeGroupTwoIndex<ncolour, S, GroupName::Sp>;
typedef Sp_TwoIndex<Nc, Symmetric> SpTwoIndexSymmMatrices;
typedef Sp_TwoIndex<Nc, AntiSymmetric> SpTwoIndexAntiSymmMatrices;
typedef Sp_TwoIndex<2, Symmetric> Sp2TwoIndexSymm;
typedef Sp_TwoIndex<4, Symmetric> Sp4TwoIndexSymm;
typedef Sp_TwoIndex<4, AntiSymmetric> Sp4TwoIndexAntiSymm;
NAMESPACE_END(Grid);
#endif

View File

@ -1,932 +0,0 @@
/*************************************************************************************
Grid physics library, www.github.com/paboyle/Grid
Source file: ./lib/qcd/utils/SUn.h
Copyright (C) 2015
Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
Author: Peter Boyle <paboyle@ph.ed.ac.uk>
Author: neo <cossu@post.kek.jp>
Author: paboyle <paboyle@ph.ed.ac.uk>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
See the full license in the file "LICENSE" in the top level distribution
directory
*************************************************************************************/
/* END LEGAL */
#ifndef QCD_UTIL_SUN_H
#define QCD_UTIL_SUN_H
NAMESPACE_BEGIN(Grid);
template<int N, class Vec>
Lattice<iScalar<iScalar<iScalar<Vec> > > > Determinant(const Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iScalar<Vec> > > > ret(grid);
typedef typename Vec::scalar_type scalar;
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<scalar, N> > > Us;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
scalar tmp= Us()()(i,j);
ComplexD ztmp(real(tmp),imag(tmp));
EigenU(i,j)=ztmp;
}}
ComplexD detD = EigenU.determinant();
typename Vec::scalar_type det(detD.real(),detD.imag());
pokeLocalSite(det,ret_v,lcoor);
});
return ret;
}
template<int N, class Vec>
static void ProjectSUn(Lattice<iScalar<iScalar<iMatrix<Vec, N> > > > &Umu)
{
Umu = ProjectOnGroup(Umu);
auto det = Determinant(Umu);
det = conjugate(det);
for(int i=0;i<N;i++){
auto element = PeekIndex<ColourIndex>(Umu,N-1,i);
element = element * det;
PokeIndex<ColourIndex>(Umu,element,Nc-1,i);
}
}
template<int N,class Vec>
static void ProjectSUn(Lattice<iVector<iScalar<iMatrix<Vec, N> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
template <int ncolour>
class SU {
public:
static const int Dimension = ncolour;
static const int AdjointDimension = ncolour * ncolour - 1;
static int su2subgroups(void) { return (ncolour * (ncolour - 1)) / 2; }
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
template <typename vtype>
using iSU2Matrix = iScalar<iScalar<iMatrix<vtype, 2> > >;
template <typename vtype>
using iSUnAlgebraVector =
iScalar<iScalar<iVector<vtype, AdjointDimension> > >;
//////////////////////////////////////////////////////////////////////////////////////////////////
// Types can be accessed as SU<2>::Matrix , SU<2>::vSUnMatrix,
// SU<2>::LatticeMatrix etc...
//////////////////////////////////////////////////////////////////////////////////////////////////
typedef iSUnMatrix<Complex> Matrix;
typedef iSUnMatrix<ComplexF> MatrixF;
typedef iSUnMatrix<ComplexD> MatrixD;
typedef iSUnMatrix<vComplex> vMatrix;
typedef iSUnMatrix<vComplexF> vMatrixF;
typedef iSUnMatrix<vComplexD> vMatrixD;
// For the projectors to the algebra
// these should be real...
// keeping complex for consistency with the SIMD vector types
typedef iSUnAlgebraVector<Complex> AlgebraVector;
typedef iSUnAlgebraVector<ComplexF> AlgebraVectorF;
typedef iSUnAlgebraVector<ComplexD> AlgebraVectorD;
typedef iSUnAlgebraVector<vComplex> vAlgebraVector;
typedef iSUnAlgebraVector<vComplexF> vAlgebraVectorF;
typedef iSUnAlgebraVector<vComplexD> vAlgebraVectorD;
typedef Lattice<vMatrix> LatticeMatrix;
typedef Lattice<vMatrixF> LatticeMatrixF;
typedef Lattice<vMatrixD> LatticeMatrixD;
typedef Lattice<vAlgebraVector> LatticeAlgebraVector;
typedef Lattice<vAlgebraVectorF> LatticeAlgebraVectorF;
typedef Lattice<vAlgebraVectorD> LatticeAlgebraVectorD;
typedef iSU2Matrix<Complex> SU2Matrix;
typedef iSU2Matrix<ComplexF> SU2MatrixF;
typedef iSU2Matrix<ComplexD> SU2MatrixD;
typedef iSU2Matrix<vComplex> vSU2Matrix;
typedef iSU2Matrix<vComplexF> vSU2MatrixF;
typedef iSU2Matrix<vComplexD> vSU2MatrixD;
typedef Lattice<vSU2Matrix> LatticeSU2Matrix;
typedef Lattice<vSU2MatrixF> LatticeSU2MatrixF;
typedef Lattice<vSU2MatrixD> LatticeSU2MatrixD;
////////////////////////////////////////////////////////////////////////
// There are N^2-1 generators for SU(N).
//
// We take a traceless hermitian generator basis as follows
//
// * Normalisation: trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
// * Off diagonal
// - pairs of rows i1,i2 behaving like pauli matrices sigma_x, sigma_y
//
// - there are (Nc-1-i1) slots for i2 on each row [ x 0 x ]
// direct count off each row
//
// - Sum of all pairs is Nc(Nc-1)/2: proof arithmetic series
//
// (Nc-1) + (Nc-2)+... 1 ==> Nc*(Nc-1)/2
// 1+ 2+ + + Nc-1
//
// - There are 2 x Nc (Nc-1)/ 2 of these = Nc^2 - Nc
//
// - We enumerate the row-col pairs.
// - for each row col pair there is a (sigma_x) and a (sigma_y) like
// generator
//
//
// t^a_ij = { in 0.. Nc(Nc-1)/2 -1} => 1/2(delta_{i,i1} delta_{j,i2} +
// delta_{i,i1} delta_{j,i2})
// t^a_ij = { in Nc(Nc-1)/2 ... Nc(Nc-1) - 1} => i/2( delta_{i,i1}
// delta_{j,i2} - i delta_{i,i1} delta_{j,i2})
//
// * Diagonal; must be traceless and normalised
// - Sequence is
// N (1,-1,0,0...)
// N (1, 1,-2,0...)
// N (1, 1, 1,-3,0...)
// N (1, 1, 1, 1,-4,0...)
//
// where 1/2 = N^2 (1+.. m^2)etc.... for the m-th diagonal generator
// NB this gives the famous SU3 result for su2 index 8
//
// N= sqrt(1/2 . 1/6 ) = 1/2 . 1/sqrt(3)
//
// ( 1 )
// ( 1 ) / sqrt(3) /2 = 1/2 lambda_8
// ( -2)
//
////////////////////////////////////////////////////////////////////////
template <class cplx>
static void generator(int lieIndex, iSUnMatrix<cplx> &ta) {
// map lie index to which type of generator
int diagIndex;
int su2Index;
int sigxy;
int NNm1 = ncolour * (ncolour - 1);
if (lieIndex >= NNm1) {
diagIndex = lieIndex - NNm1;
generatorDiagonal(diagIndex, ta);
return;
}
sigxy = lieIndex & 0x1; // even or odd
su2Index = lieIndex >> 1;
if (sigxy)
generatorSigmaY(su2Index, ta);
else
generatorSigmaX(su2Index, ta);
}
template <class cplx>
static void generatorSigmaY(int su2Index, iSUnMatrix<cplx> &ta) {
ta = Zero();
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = 1.0;
ta()()(i2, i1) = 1.0;
ta = ta * 0.5;
}
template <class cplx>
static void generatorSigmaX(int su2Index, iSUnMatrix<cplx> &ta) {
ta = Zero();
cplx i(0.0, 1.0);
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta = ta * 0.5;
}
template <class cplx>
static void generatorDiagonal(int diagIndex, iSUnMatrix<cplx> &ta) {
// diag ({1, 1, ..., 1}(k-times), -k, 0, 0, ...)
ta = Zero();
int k = diagIndex + 1; // diagIndex starts from 0
for (int i = 0; i <= diagIndex; i++) { // k iterations
ta()()(i, i) = 1.0;
}
ta()()(k, k) = -k; // indexing starts from 0
RealD nrm = 1.0 / std::sqrt(2.0 * k * (k + 1));
ta = ta * nrm;
}
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
static void su2SubGroupIndex(int &i1, int &i2, int su2_index) {
assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {
spare = spare - (ncolour - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
//////////////////////////////////////////////////////////////////////////////////////////
// Pull out a subgroup and project on to real coeffs x pauli basis
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx>
static void su2Extract(Lattice<iSinglet<vcplx> > &Determinant,
Lattice<iSU2Matrix<vcplx> > &subgroup,
const Lattice<iSUnMatrix<vcplx> > &source,
int su2_index) {
GridBase *grid(source.Grid());
conformable(subgroup, source);
conformable(subgroup, Determinant);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
autoView( subgroup_v , subgroup,AcceleratorWrite);
autoView( source_v , source,AcceleratorRead);
autoView( Determinant_v , Determinant,AcceleratorWrite);
accelerator_for(ss, grid->oSites(), 1, {
subgroup_v[ss]()()(0, 0) = source_v[ss]()()(i0, i0);
subgroup_v[ss]()()(0, 1) = source_v[ss]()()(i0, i1);
subgroup_v[ss]()()(1, 0) = source_v[ss]()()(i1, i0);
subgroup_v[ss]()()(1, 1) = source_v[ss]()()(i1, i1);
iSU2Matrix<vcplx> Sigma = subgroup_v[ss];
Sigma = Sigma - adj(Sigma) + trace(adj(Sigma));
subgroup_v[ss] = Sigma;
// this should be purely real
Determinant_v[ss] =
Sigma()()(0, 0) * Sigma()()(1, 1) - Sigma()()(0, 1) * Sigma()()(1, 0);
});
}
//////////////////////////////////////////////////////////////////////////////////////////
// Set matrix to one and insert a pauli subgroup
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx>
static void su2Insert(const Lattice<iSU2Matrix<vcplx> > &subgroup,
Lattice<iSUnMatrix<vcplx> > &dest, int su2_index) {
GridBase *grid(dest.Grid());
conformable(subgroup, dest);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
dest = 1.0; // start out with identity
autoView( dest_v , dest, AcceleratorWrite);
autoView( subgroup_v, subgroup, AcceleratorRead);
accelerator_for(ss, grid->oSites(),1,
{
dest_v[ss]()()(i0, i0) = subgroup_v[ss]()()(0, 0);
dest_v[ss]()()(i0, i1) = subgroup_v[ss]()()(0, 1);
dest_v[ss]()()(i1, i0) = subgroup_v[ss]()()(1, 0);
dest_v[ss]()()(i1, i1) = subgroup_v[ss]()()(1, 1);
});
}
///////////////////////////////////////////////
// Generate e^{ Re Tr Staple Link} dlink
//
// *** Note Staple should be appropriate linear combination between all
// staples.
// *** If already by beta pass coefficient 1.0.
// *** This routine applies the additional 1/Nc factor that comes after trace
// in action.
//
///////////////////////////////////////////////
static void SubGroupHeatBath(GridSerialRNG &sRNG, GridParallelRNG &pRNG,
RealD beta, // coeff multiplying staple in action (with no 1/Nc)
LatticeMatrix &link,
const LatticeMatrix &barestaple, // multiplied by action coeffs so th
int su2_subgroup, int nheatbath, LatticeInteger &wheremask)
{
GridBase *grid = link.Grid();
const RealD twopi = 2.0 * M_PI;
LatticeMatrix staple(grid);
staple = barestaple * (beta / ncolour);
LatticeMatrix V(grid);
V = link * staple;
// Subgroup manipulation in the lie algebra space
LatticeSU2Matrix u(grid); // Kennedy pendleton "u" real projected normalised Sigma
LatticeSU2Matrix uinv(grid);
LatticeSU2Matrix ua(grid); // a in pauli form
LatticeSU2Matrix b(grid); // rotated matrix after hb
// Some handy constant fields
LatticeComplex ones(grid);
ones = 1.0;
LatticeComplex zeros(grid);
zeros = Zero();
LatticeReal rones(grid);
rones = 1.0;
LatticeReal rzeros(grid);
rzeros = Zero();
LatticeComplex udet(grid); // determinant of real(staple)
LatticeInteger mask_true(grid);
mask_true = 1;
LatticeInteger mask_false(grid);
mask_false = 0;
/*
PLB 156 P393 (1985) (Kennedy and Pendleton)
Note: absorb "beta" into the def of sigma compared to KP paper; staple
passed to this routine has "beta" already multiplied in
Action linear in links h and of form:
beta S = beta Sum_p (1 - 1/Nc Re Tr Plaq )
Writing Sigma = 1/Nc (beta Sigma') where sum over staples is "Sigma' "
beta S = const - beta/Nc Re Tr h Sigma'
= const - Re Tr h Sigma
Decompose h and Sigma into (1, sigma_j) ; h_i real, h^2=1, Sigma_i complex
arbitrary.
Tr h Sigma = h_i Sigma_j Tr (sigma_i sigma_j) = h_i Sigma_j 2 delta_ij
Re Tr h Sigma = 2 h_j Re Sigma_j
Normalised re Sigma_j = xi u_j
With u_j a unit vector and U can be in SU(2);
Re Tr h Sigma = 2 h_j Re Sigma_j = 2 xi (h.u)
4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
xi = sqrt(Det)/2;
Write a= u h in SU(2); a has pauli decomp a_j;
Note: Product b' xi is invariant because scaling Sigma leaves
normalised vector "u" fixed; Can rescale Sigma so b' = 1.
*/
////////////////////////////////////////////////////////
// Real part of Pauli decomposition
// Note a subgroup can project to zero in cold start
////////////////////////////////////////////////////////
su2Extract(udet, u, V, su2_subgroup);
//////////////////////////////////////////////////////
// Normalising this vector if possible; else identity
//////////////////////////////////////////////////////
LatticeComplex xi(grid);
LatticeSU2Matrix lident(grid);
SU2Matrix ident = Complex(1.0);
SU2Matrix pauli1;
SU<2>::generator(0, pauli1);
SU2Matrix pauli2;
SU<2>::generator(1, pauli2);
SU2Matrix pauli3;
SU<2>::generator(2, pauli3);
pauli1 = timesI(pauli1) * 2.0;
pauli2 = timesI(pauli2) * 2.0;
pauli3 = timesI(pauli3) * 2.0;
LatticeComplex cone(grid);
LatticeReal adet(grid);
adet = abs(toReal(udet));
lident = Complex(1.0);
cone = Complex(1.0);
Real machine_epsilon = 1.0e-7;
u = where(adet > machine_epsilon, u, lident);
udet = where(adet > machine_epsilon, udet, cone);
xi = 0.5 * sqrt(udet); // 4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 0.5 * u *
pow(xi, -1.0); // u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
// Debug test for sanity
uinv = adj(u);
b = u * uinv - 1.0;
assert(norm2(b) < 1.0e-4);
/*
Measure: Haar measure dh has d^4a delta(1-|a^2|)
In polars:
da = da0 r^2 sin theta dr dtheta dphi delta( 1 - r^2 -a0^2)
= da0 r^2 sin theta dr dtheta dphi delta( (sqrt(1-a0^) - r)(sqrt(1-a0^) +
r) )
= da0 r/2 sin theta dr dtheta dphi delta( (sqrt(1-a0^) - r) )
Action factor Q(h) dh = e^-S[h] dh = e^{ xi Tr uh} dh // beta enters
through xi
= e^{2 xi (h.u)} dh
= e^{2 xi h0u0}.e^{2 xi h1u1}.e^{2 xi
h2u2}.e^{2 xi h3u3} dh
Therefore for each site, take xi for that site
i) generate |a0|<1 with dist
(1-a0^2)^0.5 e^{2 xi a0 } da0
Take alpha = 2 xi = 2 xi [ recall 2 beta/Nc unmod staple norm]; hence 2.0/Nc
factor in Chroma ]
A. Generate two uniformly distributed pseudo-random numbers R and R', R'',
R''' in the unit interval;
B. Set X = -(ln R)/alpha, X' =-(ln R')/alpha;
C. Set C = cos^2(2pi R"), with R" another uniform random number in [0,1] ;
D. Set A = XC;
E. Let d = X'+A;
F. If R'''^2 :> 1 - 0.5 d, go back to A;
G. Set a0 = 1 - d;
Note that in step D setting B ~ X - A and using B in place of A in step E will
generate a second independent a 0 value.
*/
/////////////////////////////////////////////////////////
// count the number of sites by picking "1"'s out of hat
/////////////////////////////////////////////////////////
Integer hit = 0;
LatticeReal rtmp(grid);
rtmp = where(wheremask, rones, rzeros);
RealD numSites = sum(rtmp);
RealD numAccepted;
LatticeInteger Accepted(grid);
Accepted = Zero();
LatticeInteger newlyAccepted(grid);
std::vector<LatticeReal> xr(4, grid);
std::vector<LatticeReal> a(4, grid);
LatticeReal d(grid);
d = Zero();
LatticeReal alpha(grid);
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
xi = 2.0 *xi;
alpha = toReal(xi);
do {
// A. Generate two uniformly distributed pseudo-random numbers R and R',
// R'', R''' in the unit interval;
random(pRNG, xr[0]);
random(pRNG, xr[1]);
random(pRNG, xr[2]);
random(pRNG, xr[3]);
// B. Set X = - ln R/alpha, X' = -ln R'/alpha
xr[1] = -log(xr[1]) / alpha;
xr[2] = -log(xr[2]) / alpha;
// C. Set C = cos^2(2piR'')
xr[3] = cos(xr[3] * twopi);
xr[3] = xr[3] * xr[3];
LatticeReal xrsq(grid);
// D. Set A = XC;
// E. Let d = X'+A;
xrsq = xr[2] + xr[1] * xr[3];
d = where(Accepted, d, xr[2] + xr[1] * xr[3]);
// F. If R'''^2 :> 1 - 0.5 d, go back to A;
LatticeReal thresh(grid);
thresh = 1.0 - d * 0.5;
xrsq = xr[0] * xr[0];
LatticeInteger ione(grid);
ione = 1;
LatticeInteger izero(grid);
izero = Zero();
newlyAccepted = where(xrsq < thresh, ione, izero);
Accepted = where(newlyAccepted, newlyAccepted, Accepted);
Accepted = where(wheremask, Accepted, izero);
// FIXME need an iSum for integer to avoid overload on return type??
rtmp = where(Accepted, rones, rzeros);
numAccepted = sum(rtmp);
hit++;
} while ((numAccepted < numSites) && (hit < nheatbath));
// G. Set a0 = 1 - d;
a[0] = Zero();
a[0] = where(wheremask, 1.0 - d, a[0]);
//////////////////////////////////////////
// ii) generate a_i uniform on two sphere radius (1-a0^2)^0.5
//////////////////////////////////////////
LatticeReal a123mag(grid);
a123mag = sqrt(abs(1.0 - a[0] * a[0]));
LatticeReal cos_theta(grid);
LatticeReal sin_theta(grid);
LatticeReal phi(grid);
random(pRNG, phi);
phi = phi * twopi; // uniform in [0,2pi]
random(pRNG, cos_theta);
cos_theta = (cos_theta * 2.0) - 1.0; // uniform in [-1,1]
sin_theta = sqrt(abs(1.0 - cos_theta * cos_theta));
a[1] = a123mag * sin_theta * cos(phi);
a[2] = a123mag * sin_theta * sin(phi);
a[3] = a123mag * cos_theta;
ua = toComplex(a[0]) * ident + toComplex(a[1]) * pauli1 +
toComplex(a[2]) * pauli2 + toComplex(a[3]) * pauli3;
b = 1.0;
b = where(wheremask, uinv * ua, b);
su2Insert(b, V, su2_subgroup);
// mask the assignment back based on Acceptance
link = where(Accepted, V * link, link);
//////////////////////////////
// Debug Checks
// SU2 check
LatticeSU2Matrix check(grid); // rotated matrix after hb
u = Zero();
check = ua * adj(ua) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
check = b * adj(b) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
LatticeMatrix Vcheck(grid);
Vcheck = Zero();
Vcheck = where(Accepted, V * adj(V) - 1.0, Vcheck);
// std::cout<<GridLogMessage << "SU3 check " <<norm2(Vcheck)<<std::endl;
assert(norm2(Vcheck) < 1.0e-4);
// Verify the link stays in SU(3)
// std::cout<<GridLogMessage <<"Checking the modified link"<<std::endl;
Vcheck = link * adj(link) - 1.0;
assert(norm2(Vcheck) < 1.0e-4);
/////////////////////////////////
}
static void printGenerators(void) {
for (int gen = 0; gen < AdjointDimension; gen++) {
Matrix ta;
generator(gen, ta);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << ta << std::endl;
}
}
static void testGenerators(void) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
for (int b = 0; b < AdjointDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << " " << std::endl;
assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
// reunitarise??
template <typename LatticeMatrixType>
static void LieRandomize(GridParallelRNG &pRNG, LatticeMatrixType &out, double scale = 1.0)
{
GridBase *grid = out.Grid();
typedef typename LatticeMatrixType::vector_type vector_type;
typedef iSinglet<vector_type> vTComplexType;
typedef Lattice<vTComplexType> LatticeComplexType;
typedef typename GridTypeMapper<typename LatticeMatrixType::vector_object>::scalar_object MatrixType;
LatticeComplexType ca(grid);
LatticeMatrixType lie(grid);
LatticeMatrixType la(grid);
ComplexD ci(0.0, scale);
// ComplexD cone(1.0, 0.0);
MatrixType ta;
lie = Zero();
for (int a = 0; a < AdjointDimension; a++) {
random(pRNG, ca);
ca = (ca + conjugate(ca)) * 0.5;
ca = ca - 0.5;
generator(a, ta);
la = ci * ca * ta;
lie = lie + la; // e^{i la ta}
}
taExp(lie, out);
}
static void GaussianFundamentalLieAlgebraMatrix(GridParallelRNG &pRNG,
LatticeMatrix &out,
Real scale = 1.0) {
GridBase *grid = out.Grid();
LatticeReal ca(grid);
LatticeMatrix la(grid);
Complex ci(0.0, scale);
Matrix ta;
out = Zero();
for (int a = 0; a < AdjointDimension; a++) {
gaussian(pRNG, ca);
generator(a, ta);
la = toComplex(ca) * ta;
out += la;
}
out *= ci;
}
static void FundamentalLieAlgebraMatrix(const LatticeAlgebraVector &h,
LatticeMatrix &out,
Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeMatrix la(grid);
Matrix ta;
out = Zero();
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
la = peekColour(h, a) * timesI(ta) * scale;
out += la;
}
}
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename Gimpl>
static void GaugeTransform(typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
GridBase *grid = Umu.Grid();
conformable(grid,g.Grid());
typename Gimpl::GaugeLinkField U(grid);
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U= PeekIndex<LorentzIndex>(Umu,mu);
U = g*U*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
PokeIndex<LorentzIndex>(Umu,U,mu);
}
}
template<typename Gimpl>
static void GaugeTransform( std::vector<typename Gimpl::GaugeLinkField> &U, typename Gimpl::GaugeLinkField &g){
GridBase *grid = g.Grid();
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U[mu] = g*U[mu]*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
}
}
template<typename Gimpl>
static void RandomGaugeTransform(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
LieRandomize(pRNG,g,1.0);
GaugeTransform<Gimpl>(Umu,g);
}
// Projects out the algebra components of a lattice matrix (the output vector has dimension ncol*ncol - 1)
// inverse operation: FundamentalLieAlgebraMatrix
static void projectOnAlgebra(LatticeAlgebraVector &h_out, const LatticeMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
Matrix Ta;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, Ta);
pokeColour(h_out, - 2.0 * (trace(timesI(Ta) * in)) * scale, a);
}
}
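// Consistency note (illustrative, using the normalisation Tr(T^a T^b) = 1/2 delta_ab):
// FundamentalLieAlgebraMatrix maps h -> M = scale * sum_a h_a (i T^a), while
// projectOnAlgebra maps M -> h_a = -2 scale * Tr( i T^a M ); with scale = 1 the
// round trip gives -2 Tr( i T^a . i h_b T^b ) = 2 h_b Tr( T^a T^b ) = h_a,
// so the two routines are inverses of one another as stated above.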
template <typename GaugeField>
static void HotConfiguration(GridParallelRNG &pRNG, GaugeField &out) {
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
LatticeMatrixType tmp(out.Grid());
for (int mu = 0; mu < Nd; mu++) {
// LieRandomize(pRNG, Umu, 1.0);
// PokeIndex<LorentzIndex>(out, Umu, mu);
gaussian(pRNG,Umu);
tmp = Ta(Umu);
taExp(tmp,Umu);
ProjectSUn(Umu);
PokeIndex<LorentzIndex>(out, Umu, mu);
}
}
template<typename GaugeField>
static void TepidConfiguration(GridParallelRNG &pRNG,GaugeField &out){
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
for(int mu=0;mu<Nd;mu++){
LieRandomize(pRNG,Umu,0.01);
PokeIndex<LorentzIndex>(out,Umu,mu);
}
}
template<typename GaugeField>
static void ColdConfiguration(GaugeField &out){
typedef typename GaugeField::vector_type vector_type;
typedef iSUnMatrix<vector_type> vMatrixType;
typedef Lattice<vMatrixType> LatticeMatrixType;
LatticeMatrixType Umu(out.Grid());
Umu=1.0;
for(int mu=0;mu<Nd;mu++){
PokeIndex<LorentzIndex>(out,Umu,mu);
}
}
template<typename GaugeField>
static void ColdConfiguration(GridParallelRNG &pRNG,GaugeField &out){
ColdConfiguration(out);
}
template<typename LatticeMatrixType>
static void taProj( const LatticeMatrixType &in, LatticeMatrixType &out){
out = Ta(in);
}
template <typename LatticeMatrixType>
static void taExp(const LatticeMatrixType &x, LatticeMatrixType &ex) {
typedef typename LatticeMatrixType::scalar_type ComplexType;
LatticeMatrixType xn(x.Grid());
RealD nfac = 1.0;
xn = x;
ex = xn + ComplexType(1.0); // 1+x
// Do a 12th order exponentiation
for (int i = 2; i <= 12; ++i) {
nfac = nfac / RealD(i); // 1/2, 1/2.3 ...
xn = xn * x; // x2, x3,x4....
ex = ex + xn * nfac; // x2/2!, x3/3!....
}
}
};
template<int N>
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > Inverse(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu)
{
GridBase *grid=Umu.Grid();
auto lvol = grid->lSites();
Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > ret(grid);
autoView(Umu_v,Umu,CpuRead);
autoView(ret_v,ret,CpuWrite);
thread_for(site,lvol,{
Eigen::MatrixXcd EigenU = Eigen::MatrixXcd::Zero(N,N);
Coordinate lcoor;
grid->LocalIndexToLocalCoor(site, lcoor);
iScalar<iScalar<iMatrix<ComplexD, N> > > Us;
iScalar<iScalar<iMatrix<ComplexD, N> > > Ui;
peekLocalSite(Us, Umu_v, lcoor);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
EigenU(i,j) = Us()()(i,j);
}}
Eigen::MatrixXcd EigenUinv = EigenU.inverse();
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
Ui()()(i,j) = EigenUinv(i,j);
}}
pokeLocalSite(Ui,ret_v,lcoor);
});
return ret;
}
// Explicit specialisation for SU(3).
static void
ProjectSU3 (Lattice<iScalar<iScalar<iMatrix<vComplexD, 3> > > > &Umu)
{
GridBase *grid=Umu.Grid();
const int x=0;
const int y=1;
const int z=2;
// Reunitarise
Umu = ProjectOnGroup(Umu);
autoView(Umu_v,Umu,CpuWrite);
thread_for(ss,grid->oSites(),{
auto cm = Umu_v[ss];
cm()()(2,x) = adj(cm()()(0,y)*cm()()(1,z)-cm()()(0,z)*cm()()(1,y)); //x= yz-zy
cm()()(2,y) = adj(cm()()(0,z)*cm()()(1,x)-cm()()(0,x)*cm()()(1,z)); //y= zx-xz
cm()()(2,z) = adj(cm()()(0,x)*cm()()(1,y)-cm()()(0,y)*cm()()(1,x)); //z= xy-yx
Umu_v[ss]=cm;
});
}
static void ProjectSU3(Lattice<iVector<iScalar<iMatrix<vComplexD, 3> >,Nd> > &U)
{
GridBase *grid=U.Grid();
// Reunitarise
for(int mu=0;mu<Nd;mu++){
auto Umu = PeekIndex<LorentzIndex>(U,mu);
Umu = ProjectOnGroup(Umu);
ProjectSU3(Umu);
PokeIndex<LorentzIndex>(U,Umu,mu);
}
}
typedef SU<2> SU2;
typedef SU<3> SU3;
typedef SU<4> SU4;
typedef SU<5> SU5;
typedef SU<Nc> FundamentalMatrices;
NAMESPACE_END(Grid);
#endif

Grid/qcd/utils/SUn.impl.h
@ -0,0 +1,578 @@
// This file is #included into the body of the class template definition of
// GaugeGroup. So, imagine there to be
//
// template <int ncolour, class group_name>
// class GaugeGroup {
//
// around it.
//
// Please note that the unconventional file extension makes sure that it
// doesn't get found by the scripts/filelist during bootstrapping.
private:
template <ONLY_IF_SU>
static int su2subgroups(GroupName::SU) { return (ncolour * (ncolour - 1)) / 2; }
////////////////////////////////////////////////////////////////////////
// There are N^2-1 generators for SU(N).
//
// We take a traceless hermitian generator basis as follows
//
// * Normalisation: trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
// * Off diagonal
// - pairs of rows i1,i2 behaving like Pauli matrices sigma_x, sigma_y
//
// - there are (Nc-1-i1) slots for i2 on each row [ x 0 x ]
// direct count off each row
//
// - Sum of all pairs is Nc(Nc-1)/2: proof arithmetic series
//
// (Nc-1) + (Nc-2) + ... + 1 == Nc*(Nc-1)/2
// == 1 + 2 + ... + (Nc-1)
//
// - There are 2 x Nc (Nc-1)/ 2 of these = Nc^2 - Nc
//
// - We enumerate the row-col pairs.
// - for each row col pair there is a (sigma_x) and a (sigma_y) like
// generator
//
//
// t^a_ij = { a in 0 .. Nc(Nc-1)/2 - 1 } => 1/2 ( delta_{i,i1} delta_{j,i2} +
// delta_{i,i2} delta_{j,i1} )
// t^a_ij = { a in Nc(Nc-1)/2 .. Nc(Nc-1) - 1 } => i/2 ( delta_{i,i1} delta_{j,i2} -
// delta_{i,i2} delta_{j,i1} )
// * Diagonal; must be traceless and normalised
// - Sequence is
// N (1,-1,0,0...)
// N (1, 1,-2,0...)
// N (1, 1, 1,-3,0...)
// N (1, 1, 1, 1,-4,0...)
//
// where the normalisation N satisfies 1/2 = N^2 ( 1 + ... + 1 + m^2 ) = N^2 m(m+1) for the m-th diagonal generator
// NB this reproduces the familiar SU(3) result lambda_8/2 :
//
// N= sqrt(1/2 . 1/6 ) = 1/2 . 1/sqrt(3)
//
// ( 1 )
// ( 1 ) / sqrt(3) /2 = 1/2 lambda_8
// ( -2)
//
////////////////////////////////////////////////////////////////////////
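// Illustrative worked example (SU(3), so ncolour = 3 and Nc^2 - 1 = 8), assuming
// the index convention implemented in generator() below:
// lieIndex 0,1 -> the two off-diagonal generators in the (i1,i2) = (0,1) block
// lieIndex 2,3 -> the two off-diagonal generators in the (0,2) block
// lieIndex 4,5 -> the two off-diagonal generators in the (1,2) block
// lieIndex 6 -> diag( 1,-1, 0) / 2 = lambda_3/2
// lieIndex 7 -> diag( 1, 1,-2) / (2 sqrt(3)) = lambda_8/2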
template <class cplx, ONLY_IF_SU>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta, GroupName::SU) {
// map lie index to which type of generator
int diagIndex;
int su2Index;
int sigxy;
int NNm1 = ncolour * (ncolour - 1);
if (lieIndex >= NNm1) {
diagIndex = lieIndex - NNm1;
generatorDiagonal(diagIndex, ta);
return;
}
sigxy = lieIndex & 0x1; // even or odd
su2Index = lieIndex >> 1;
if (sigxy)
generatorSigmaY(su2Index, ta);
else
generatorSigmaX(su2Index, ta);
}
template <class cplx, ONLY_IF_SU>
static void generatorSigmaY(int su2Index, iGroupMatrix<cplx> &ta) {
ta = Zero();
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = 1.0;
ta()()(i2, i1) = 1.0;
ta = ta * 0.5;
}
template <class cplx, ONLY_IF_SU>
static void generatorSigmaX(int su2Index, iGroupMatrix<cplx> &ta) {
ta = Zero();
cplx i(0.0, 1.0);
int i1, i2;
su2SubGroupIndex(i1, i2, su2Index);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta = ta * 0.5;
}
template <class cplx, ONLY_IF_SU>
static void generatorDiagonal(int diagIndex, iGroupMatrix<cplx> &ta) {
// diag ({1, 1, ..., 1}(k-times), -k, 0, 0, ...)
ta = Zero();
int k = diagIndex + 1; // diagIndex starts from 0
for (int i = 0; i <= diagIndex; i++) { // k iterations
ta()()(i, i) = 1.0;
}
ta()()(k, k) = -k; // indexing starts from 0
RealD nrm = 1.0 / std::sqrt(2.0 * k * (k + 1));
ta = ta * nrm;
}
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
static void su2SubGroupIndex(int &i1, int &i2, int su2_index, GroupName::SU) {
assert((su2_index >= 0) && (su2_index < (ncolour * (ncolour - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (ncolour - 1 - i1); i1++) {
spare = spare - (ncolour - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
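// Illustrative mapping implied by the loop above: for SU(3) the su2_index -> (i1,i2)
// pairs are 0->(0,1), 1->(0,2), 2->(1,2); for SU(4) they are
// 0->(0,1), 1->(0,2), 2->(0,3), 3->(1,2), 4->(1,3), 5->(2,3).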
public:
//////////////////////////////////////////////////////////////////////////////////////////
// Pull out a subgroup and project on to real coeffs x pauli basis
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx, ONLY_IF_SU>
static void su2Extract(Lattice<iSinglet<vcplx> > &Determinant,
Lattice<iSU2Matrix<vcplx> > &subgroup,
const Lattice<iGroupMatrix<vcplx> > &source,
int su2_index) {
GridBase *grid(source.Grid());
conformable(subgroup, source);
conformable(subgroup, Determinant);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
autoView(subgroup_v, subgroup, AcceleratorWrite);
autoView(source_v, source, AcceleratorRead);
autoView(Determinant_v, Determinant, AcceleratorWrite);
accelerator_for(ss, grid->oSites(), 1, {
subgroup_v[ss]()()(0, 0) = source_v[ss]()()(i0, i0);
subgroup_v[ss]()()(0, 1) = source_v[ss]()()(i0, i1);
subgroup_v[ss]()()(1, 0) = source_v[ss]()()(i1, i0);
subgroup_v[ss]()()(1, 1) = source_v[ss]()()(i1, i1);
iSU2Matrix<vcplx> Sigma = subgroup_v[ss];
Sigma = Sigma - adj(Sigma) + trace(adj(Sigma));
subgroup_v[ss] = Sigma;
// this should be purely real
Determinant_v[ss] =
Sigma()()(0, 0) * Sigma()()(1, 1) - Sigma()()(0, 1) * Sigma()()(1, 0);
});
}
//////////////////////////////////////////////////////////////////////////////////////////
// Set matrix to one and insert a pauli subgroup
//////////////////////////////////////////////////////////////////////////////////////////
template <class vcplx, ONLY_IF_SU>
static void su2Insert(const Lattice<iSU2Matrix<vcplx> > &subgroup,
Lattice<iGroupMatrix<vcplx> > &dest, int su2_index) {
GridBase *grid(dest.Grid());
conformable(subgroup, dest);
int i0, i1;
su2SubGroupIndex(i0, i1, su2_index);
dest = 1.0; // start out with identity
autoView(dest_v, dest, AcceleratorWrite);
autoView(subgroup_v, subgroup, AcceleratorRead);
accelerator_for(ss, grid->oSites(), 1, {
dest_v[ss]()()(i0, i0) = subgroup_v[ss]()()(0, 0);
dest_v[ss]()()(i0, i1) = subgroup_v[ss]()()(0, 1);
dest_v[ss]()()(i1, i0) = subgroup_v[ss]()()(1, 0);
dest_v[ss]()()(i1, i1) = subgroup_v[ss]()()(1, 1);
});
}
///////////////////////////////////////////////
// Generate e^{ Re Tr Staple Link} dlink
//
// *** Note Staple should be the appropriate linear combination between all
// staples.
// *** If already multiplied by beta, pass coefficient 1.0.
// *** This routine applies the additional 1/Nc factor that comes after trace
// in action.
//
///////////////////////////////////////////////
template <ONLY_IF_SU>
static void SubGroupHeatBath(
GridSerialRNG &sRNG, GridParallelRNG &pRNG,
RealD beta, // coeff multiplying staple in action (with no 1/Nc)
LatticeMatrix &link,
const LatticeMatrix &barestaple, // multiplied by action coeffs so th
int su2_subgroup, int nheatbath, LatticeInteger &wheremask) {
GridBase *grid = link.Grid();
const RealD twopi = 2.0 * M_PI;
LatticeMatrix staple(grid);
staple = barestaple * (beta / ncolour);
LatticeMatrix V(grid);
V = link * staple;
// Subgroup manipulation in the lie algebra space
LatticeSU2Matrix u(
grid); // Kennedy pendleton "u" real projected normalised Sigma
LatticeSU2Matrix uinv(grid);
LatticeSU2Matrix ua(grid); // a in pauli form
LatticeSU2Matrix b(grid); // rotated matrix after hb
// Some handy constant fields
LatticeComplex ones(grid);
ones = 1.0;
LatticeComplex zeros(grid);
zeros = Zero();
LatticeReal rones(grid);
rones = 1.0;
LatticeReal rzeros(grid);
rzeros = Zero();
LatticeComplex udet(grid); // determinant of real(staple)
LatticeInteger mask_true(grid);
mask_true = 1;
LatticeInteger mask_false(grid);
mask_false = 0;
/*
PLB 156 P393 (1985) (Kennedy and Pendleton)
Note: absorb "beta" into the def of sigma compared to KP paper; staple
passed to this routine has "beta" already multiplied in
Action linear in links h and of form:
beta S = beta Sum_p (1 - 1/Nc Re Tr Plaq )
Writing Sigma = 1/Nc (beta Sigma') where sum over staples is "Sigma' "
beta S = const - beta/Nc Re Tr h Sigma'
= const - Re Tr h Sigma
Decompose h and Sigma into (1, sigma_j) ; h_i real, h^2=1, Sigma_i complex
arbitrary.
Tr h Sigma = h_i Sigma_j Tr (sigma_i sigma_j) = h_i Sigma_j 2 delta_ij
Re Tr h Sigma = 2 h_j Re Sigma_j
Normalised: Re Sigma_j = xi u_j
with u_j a unit vector, so that u is an element of SU(2);
Re Tr h Sigma = 2 h_j Re Sigma_j = 2 xi (h.u)
4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
xi = sqrt(Det)/2;
Write a = u h in SU(2); a has Pauli decomposition a_j;
Note: the product b' xi is invariant because scaling Sigma leaves the
normalised vector "u" fixed; can rescale Sigma so b' = 1.
*/
////////////////////////////////////////////////////////
// Real part of Pauli decomposition
// Note a subgroup can project to zero in cold start
////////////////////////////////////////////////////////
su2Extract(udet, u, V, su2_subgroup);
//////////////////////////////////////////////////////
// Normalising this vector if possible; else identity
//////////////////////////////////////////////////////
LatticeComplex xi(grid);
LatticeSU2Matrix lident(grid);
SU2Matrix ident = Complex(1.0);
SU2Matrix pauli1;
GaugeGroup<2, GroupName::SU>::generator(0, pauli1);
SU2Matrix pauli2;
GaugeGroup<2, GroupName::SU>::generator(1, pauli2);
SU2Matrix pauli3;
GaugeGroup<2, GroupName::SU>::generator(2, pauli3);
pauli1 = timesI(pauli1) * 2.0;
pauli2 = timesI(pauli2) * 2.0;
pauli3 = timesI(pauli3) * 2.0;
LatticeComplex cone(grid);
LatticeReal adet(grid);
adet = abs(toReal(udet));
lident = Complex(1.0);
cone = Complex(1.0);
Real machine_epsilon = 1.0e-7;
u = where(adet > machine_epsilon, u, lident);
udet = where(adet > machine_epsilon, udet, cone);
xi = 0.5 * sqrt(udet); // 4xi^2 = Det [ Sig - Sig^dag + 1 Tr Sigdag]
u = 0.5 * u * pow(xi, -1.0); // u = 1/2xi [ Sig - Sig^dag + 1 Tr Sigdag]
// Debug test for sanity
uinv = adj(u);
b = u * uinv - 1.0;
assert(norm2(b) < 1.0e-4);
/*
Measure: Haar measure dh has d^4a delta(1-|a^2|)
In polars:
da = da0 r^2 sin theta dr dtheta dphi delta( 1 - r^2 - a0^2 )
   = da0 r^2 sin theta dr dtheta dphi delta( (sqrt(1-a0^2) - r)(sqrt(1-a0^2) + r) )
   = da0 r/2 sin theta dr dtheta dphi delta( sqrt(1-a0^2) - r )
Action factor Q(h) dh = e^-S[h] dh = e^{ xi Tr uh } dh      [ beta enters through xi ]
                      = e^{ 2 xi (h.u) } dh
                      = e^{ 2 xi h0 u0 } . e^{ 2 xi h1 u1 } . e^{ 2 xi h2 u2 } . e^{ 2 xi h3 u3 } dh
Therefore for each site, take xi for that site
i) generate |a0|<1 with dist
(1-a0^2)^0.5 e^{2 xi a0 } da0
Take alpha = 2 xi   [ recall 2 beta/Nc unmodified staple norm; hence the
2.0/Nc factor in Chroma ].
A. Generate four uniformly distributed pseudo-random numbers R, R', R'', R''' in the unit interval;
B. Set X = -(ln R)/alpha, X' = -(ln R')/alpha;
C. Set C = cos^2(2 pi R''), with R'' another uniform random number in [0,1];
D. Set A = X C;
E. Let d = X' + A;
F. If R'''^2 > 1 - 0.5 d, go back to A;
G. Set a0 = 1 - d;
Note that in step D setting B ~ X - A and using B in place of A in step E
will generate a second independent a0 value.
*/
/////////////////////////////////////////////////////////
// count the number of sites by picking "1"'s out of hat
/////////////////////////////////////////////////////////
Integer hit = 0;
LatticeReal rtmp(grid);
rtmp = where(wheremask, rones, rzeros);
RealD numSites = sum(rtmp);
RealD numAccepted;
LatticeInteger Accepted(grid);
Accepted = Zero();
LatticeInteger newlyAccepted(grid);
std::vector<LatticeReal> xr(4, grid);
std::vector<LatticeReal> a(4, grid);
LatticeReal d(grid);
d = Zero();
LatticeReal alpha(grid);
// std::cout<<GridLogMessage<<"xi "<<xi <<std::endl;
xi = 2.0 * xi;
alpha = toReal(xi);
do {
// A. Generate two uniformly distributed pseudo-random numbers R and R',
// R'', R''' in the unit interval;
random(pRNG, xr[0]);
random(pRNG, xr[1]);
random(pRNG, xr[2]);
random(pRNG, xr[3]);
// B. Set X = - ln R/alpha, X' = -ln R'/alpha
xr[1] = -log(xr[1]) / alpha;
xr[2] = -log(xr[2]) / alpha;
// C. Set C = cos^2(2piR'')
xr[3] = cos(xr[3] * twopi);
xr[3] = xr[3] * xr[3];
LatticeReal xrsq(grid);
// D. Set A = XC;
// E. Let d = X'+A;
xrsq = xr[2] + xr[1] * xr[3];
d = where(Accepted, d, xr[2] + xr[1] * xr[3]);
// F. If R'''^2 :> 1 - 0.5 d, go back to A;
LatticeReal thresh(grid);
thresh = 1.0 - d * 0.5;
xrsq = xr[0] * xr[0];
LatticeInteger ione(grid);
ione = 1;
LatticeInteger izero(grid);
izero = Zero();
newlyAccepted = where(xrsq < thresh, ione, izero);
Accepted = where(newlyAccepted, newlyAccepted, Accepted);
Accepted = where(wheremask, Accepted, izero);
// FIXME need an iSum for integer to avoid overload on return type??
rtmp = where(Accepted, rones, rzeros);
numAccepted = sum(rtmp);
hit++;
} while ((numAccepted < numSites) && (hit < nheatbath));
// G. Set a0 = 1 - d;
a[0] = Zero();
a[0] = where(wheremask, 1.0 - d, a[0]);
//////////////////////////////////////////
// ii) generate a_i uniform on two sphere radius (1-a0^2)^0.5
//////////////////////////////////////////
LatticeReal a123mag(grid);
a123mag = sqrt(abs(1.0 - a[0] * a[0]));
LatticeReal cos_theta(grid);
LatticeReal sin_theta(grid);
LatticeReal phi(grid);
random(pRNG, phi);
phi = phi * twopi; // uniform in [0,2pi]
random(pRNG, cos_theta);
cos_theta = (cos_theta * 2.0) - 1.0; // uniform in [-1,1]
sin_theta = sqrt(abs(1.0 - cos_theta * cos_theta));
a[1] = a123mag * sin_theta * cos(phi);
a[2] = a123mag * sin_theta * sin(phi);
a[3] = a123mag * cos_theta;
ua = toComplex(a[0]) * ident + toComplex(a[1]) * pauli1 +
toComplex(a[2]) * pauli2 + toComplex(a[3]) * pauli3;
b = 1.0;
b = where(wheremask, uinv * ua, b);
su2Insert(b, V, su2_subgroup);
// mask the assignment back based on Acceptance
link = where(Accepted, V * link, link);
//////////////////////////////
// Debug Checks
// SU2 check
LatticeSU2Matrix check(grid); // rotated matrix after hb
u = Zero();
check = ua * adj(ua) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
check = b * adj(b) - 1.0;
check = where(Accepted, check, u);
assert(norm2(check) < 1.0e-4);
LatticeMatrix Vcheck(grid);
Vcheck = Zero();
Vcheck = where(Accepted, V * adj(V) - 1.0, Vcheck);
// std::cout<<GridLogMessage << "SU3 check " <<norm2(Vcheck)<<std::endl;
assert(norm2(Vcheck) < 1.0e-4);
// Verify the link stays in SU(3)
// std::cout<<GridLogMessage <<"Checking the modified link"<<std::endl;
Vcheck = link * adj(link) - 1.0;
assert(norm2(Vcheck) < 1.0e-4);
/////////////////////////////////
}
template <ONLY_IF_SU>
static void testGenerators(GroupName::SU) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
for (int b = 0; b < AdjointDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AdjointDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << " " << std::endl;
assert(abs(tr) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
}
template <int N, class vtype>
static Lattice<iScalar<iScalar<iMatrix<vtype, N> > > >
ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vtype, N> > > > &Umu, GroupName::SU) {
return ProjectOnGroup(Umu);
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r, GroupName::SU) {
return ProjectOnGroup(r);
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r, GroupName::SU) {
return ProjectOnGroup(r);
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg, GroupName::SU) {
return ProjectOnGroup(arg);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out, GroupName::SU) {
out = Ta(in);
}
/*
* Fundamental rep gauge xform
*/
template<typename Fundamental,typename GaugeMat>
static void GaugeTransformFundamental( Fundamental &ferm, GaugeMat &g){
GridBase *grid = ferm._grid;
conformable(grid,g._grid);
ferm = g*ferm;
}
/*
* Adjoint rep gauge xform
*/
template<typename Gimpl>
static void GaugeTransform(typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
GridBase *grid = Umu.Grid();
conformable(grid,g.Grid());
typename Gimpl::GaugeLinkField U(grid);
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U= PeekIndex<LorentzIndex>(Umu,mu);
U = g*U*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
PokeIndex<LorentzIndex>(Umu,U,mu);
}
}
template<typename Gimpl>
static void GaugeTransform( std::vector<typename Gimpl::GaugeLinkField> &U, typename Gimpl::GaugeLinkField &g){
GridBase *grid = g.Grid();
typename Gimpl::GaugeLinkField ag(grid); ag = adj(g);
for(int mu=0;mu<Nd;mu++){
U[mu] = g*U[mu]*Gimpl::CshiftLink(ag, mu, 1); //BC-aware
}
}
template<typename Gimpl>
static void RandomGaugeTransform(GridParallelRNG &pRNG, typename Gimpl::GaugeField &Umu, typename Gimpl::GaugeLinkField &g){
LieRandomize(pRNG,g,1.0);
GaugeTransform<Gimpl>(Umu,g);
}


@ -51,6 +51,10 @@ public:
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> > LatticeAdjFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> > LatticeAdjFieldD;
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef Lattice<iScalar<iScalar<iVector<vComplex, Dimension> > > > LatticeAdjVector;
template <class cplx>
@ -58,8 +62,8 @@ public:
// returns i(T_Adj)^index necessary for the projectors
// see definitions above
iAdjTa = Zero();
Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > ta(ncolour * ncolour - 1);
typename SU<ncolour>::template iSUnMatrix<cplx> tmp;
Vector<iSUnMatrix<cplx> > ta(ncolour * ncolour - 1);
iSUnMatrix<cplx> tmp;
// FIXME not very efficient to get all the generators everytime
for (int a = 0; a < Dimension; a++) SU<ncolour>::generator(a, ta[a]);
@ -67,8 +71,7 @@ public:
for (int a = 0; a < Dimension; a++) {
tmp = ta[a] * ta[Index] - ta[Index] * ta[a];
for (int b = 0; b < (ncolour * ncolour - 1); b++) {
typename SU<ncolour>::template iSUnMatrix<cplx> tmp1 =
2.0 * tmp * ta[b]; // 2.0 from the normalization
iSUnMatrix<cplx> tmp1 = 2.0 * tmp * ta[b]; // 2.0 from the normalization
Complex iTr = TensorRemove(timesI(trace(tmp1)));
//iAdjTa()()(b, a) = iTr;
iAdjTa()()(a, b) = iTr;
@ -134,8 +137,7 @@ public:
for (int a = 0; a < Dimension; a++) {
generator(a, iTa);
LatticeComplex tmp = real(trace(iTa * in)) * coefficient;
pokeColour(h_out, tmp, a);
pokeColour(h_out, real(trace(iTa * in)) * coefficient, a);
}
}


@ -1,273 +0,0 @@
////////////////////////////////////////////////////////////////////////
//
// * Two index representation generators
//
// * Normalisation for the fundamental generators:
// trace ta tb = 1/2 delta_ab = T_F delta_ab
// T_F = 1/2 for SU(N) groups
//
//
// base for NxN two index (anti-symmetric) matrices
// normalized to 1 (d_ij is the Kronecker delta)
//
// e^(ij)_{kl} = 1 / sqrt(2) (d_ik d_jl +/- d_jk d_il)
//
// Then the generators are written as
//
// (iT_a)^(ij)(lk) = i * ( tr[e^(ij)^dag e^(lk) T^transp_a] +
// tr[e^(lk)e^(ij)^dag T_a] ) //
//
//
////////////////////////////////////////////////////////////////////////
// Authors: David Preti, Guido Cossu
#ifndef QCD_UTIL_SUN2INDEX_H
#define QCD_UTIL_SUN2INDEX_H
NAMESPACE_BEGIN(Grid);
enum TwoIndexSymmetry { Symmetric = 1, AntiSymmetric = -1 };
inline Real delta(int a, int b) { return (a == b) ? 1.0 : 0.0; }
template <int ncolour, TwoIndexSymmetry S>
class SU_TwoIndex : public SU<ncolour> {
public:
static const int Dimension = ncolour * (ncolour + S) / 2;
static const int NumGenerators = SU<ncolour>::AdjointDimension;
template <typename vtype>
using iSUnTwoIndexMatrix = iScalar<iScalar<iMatrix<vtype, Dimension> > >;
typedef iSUnTwoIndexMatrix<Complex> TIMatrix;
typedef iSUnTwoIndexMatrix<ComplexF> TIMatrixF;
typedef iSUnTwoIndexMatrix<ComplexD> TIMatrixD;
typedef iSUnTwoIndexMatrix<vComplex> vTIMatrix;
typedef iSUnTwoIndexMatrix<vComplexF> vTIMatrixF;
typedef iSUnTwoIndexMatrix<vComplexD> vTIMatrixD;
typedef Lattice<vTIMatrix> LatticeTwoIndexMatrix;
typedef Lattice<vTIMatrixF> LatticeTwoIndexMatrixF;
typedef Lattice<vTIMatrixD> LatticeTwoIndexMatrixD;
typedef Lattice<iVector<iScalar<iMatrix<vComplex, Dimension> >, Nd> >
LatticeTwoIndexField;
typedef Lattice<iVector<iScalar<iMatrix<vComplexF, Dimension> >, Nd> >
LatticeTwoIndexFieldF;
typedef Lattice<iVector<iScalar<iMatrix<vComplexD, Dimension> >, Nd> >
LatticeTwoIndexFieldD;
template <typename vtype>
using iSUnMatrix = iScalar<iScalar<iMatrix<vtype, ncolour> > >;
typedef iSUnMatrix<Complex> Matrix;
typedef iSUnMatrix<ComplexF> MatrixF;
typedef iSUnMatrix<ComplexD> MatrixD;
template <class cplx>
static void base(int Index, iSUnMatrix<cplx> &eij) {
// returns (e)^(ij)_{kl} necessary for the change of basis U_F -> U_R
assert(Index < NumGenerators);
eij = Zero();
// for the linearisation of the 2 indexes
static int a[ncolour * (ncolour - 1) / 2][2]; // store the a <-> i,j
static bool filled = false;
if (!filled) {
int counter = 0;
for (int i = 1; i < ncolour; i++) {
for (int j = 0; j < i; j++) {
a[counter][0] = i;
a[counter][1] = j;
counter++;
}
}
filled = true;
}
if (Index < ncolour * (ncolour - 1) / 2) {
baseOffDiagonal(a[Index][0], a[Index][1], eij);
} else {
baseDiagonal(Index, eij);
}
}
template <class cplx>
static void baseDiagonal(int Index, iSUnMatrix<cplx> &eij) {
eij = Zero();
eij()()(Index - ncolour * (ncolour - 1) / 2,
Index - ncolour * (ncolour - 1) / 2) = 1.0;
}
template <class cplx>
static void baseOffDiagonal(int i, int j, iSUnMatrix<cplx> &eij) {
eij = Zero();
for (int k = 0; k < ncolour; k++)
for (int l = 0; l < ncolour; l++)
eij()()(l, k) = delta(i, k) * delta(j, l) +
S * delta(j, k) * delta(i, l);
RealD nrm = 1. / std::sqrt(2.0);
eij = eij * nrm;
}
static void printBase(void) {
for (int gen = 0; gen < Dimension; gen++) {
Matrix tmp;
base(gen, tmp);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << tmp << std::endl;
}
}
template <class cplx>
static void generator(int Index, iSUnTwoIndexMatrix<cplx> &i2indTa) {
Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > ta(
ncolour * ncolour - 1);
Vector<typename SU<ncolour>::template iSUnMatrix<cplx> > eij(Dimension);
typename SU<ncolour>::template iSUnMatrix<cplx> tmp;
i2indTa = Zero();
for (int a = 0; a < ncolour * ncolour - 1; a++)
SU<ncolour>::generator(a, ta[a]);
for (int a = 0; a < Dimension; a++) base(a, eij[a]);
for (int a = 0; a < Dimension; a++) {
tmp = transpose(ta[Index]) * adj(eij[a]) + adj(eij[a]) * ta[Index];
for (int b = 0; b < Dimension; b++) {
typename SU<ncolour>::template iSUnMatrix<cplx> tmp1 =
tmp * eij[b];
Complex iTr = TensorRemove(timesI(trace(tmp1)));
i2indTa()()(a, b) = iTr;
}
}
}
static void printGenerators(void) {
for (int gen = 0; gen < ncolour * ncolour - 1; gen++) {
TIMatrix i2indTa;
generator(gen, i2indTa);
std::cout << GridLogMessage << "Nc = " << ncolour << " t_" << gen
<< std::endl;
std::cout << GridLogMessage << i2indTa << std::endl;
}
}
static void testGenerators(void) {
TIMatrix i2indTa, i2indTb;
std::cout << GridLogMessage << "2IndexRep - Checking if traceless"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(trace(i2indTa)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "2IndexRep - Checking if antihermitean"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(adj(i2indTa) + i2indTa) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage
<< "2IndexRep - Checking Tr[Ta*Tb]=delta(a,b)*(N +- 2)/2"
<< std::endl;
for (int a = 0; a < ncolour * ncolour - 1; a++) {
for (int b = 0; b < ncolour * ncolour - 1; b++) {
generator(a, i2indTa);
generator(b, i2indTb);
// generator returns iTa, so we need a minus sign here
Complex Tr = -TensorRemove(trace(i2indTa * i2indTb));
std::cout << GridLogMessage << "a=" << a << "b=" << b << "Tr=" << Tr
<< std::endl;
}
}
std::cout << GridLogMessage << std::endl;
}
static void TwoIndexLieAlgebraMatrix(
const typename SU<ncolour>::LatticeAlgebraVector &h,
LatticeTwoIndexMatrix &out, Real scale = 1.0) {
conformable(h, out);
GridBase *grid = out.Grid();
LatticeTwoIndexMatrix la(grid);
TIMatrix i2indTa;
out = Zero();
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
la = peekColour(h, a) * i2indTa;
out += la;
}
out *= scale;
}
// Projects the algebra components
// of a lattice matrix ( of dimension ncol*ncol -1 )
static void projectOnAlgebra(
typename SU<ncolour>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
h_out = Zero();
TIMatrix i2indTa;
Real coefficient = -2.0 / (ncolour + 2 * S) * scale;
// 2/(Nc +/- 2) for the normalization of the trace in the two index rep
for (int a = 0; a < ncolour * ncolour - 1; a++) {
generator(a, i2indTa);
auto tmp = real(trace(i2indTa * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
// a projector that keeps the generators stored to avoid the overhead of
// recomputing them
static void projector(typename SU<ncolour>::LatticeAlgebraVector &h_out,
const LatticeTwoIndexMatrix &in, Real scale = 1.0) {
conformable(h_out, in);
// to store the generators
static std::vector<TIMatrix> i2indTa(ncolour * ncolour -1);
h_out = Zero();
static bool precalculated = false;
if (!precalculated) {
precalculated = true;
for (int a = 0; a < ncolour * ncolour - 1; a++) generator(a, i2indTa[a]);
}
Real coefficient =
-2.0 / (ncolour + 2 * S) * scale; // 2/(Nc +/- 2) for the normalization
// of the trace in the two index rep
for (int a = 0; a < ncolour * ncolour - 1; a++) {
auto tmp = real(trace(i2indTa[a] * in)) * coefficient;
pokeColour(h_out, tmp, a);
}
}
};
// Some useful type names
typedef SU_TwoIndex<Nc, Symmetric> TwoIndexSymmMatrices;
typedef SU_TwoIndex<Nc, AntiSymmetric> TwoIndexAntiSymmMatrices;
typedef SU_TwoIndex<2, Symmetric> SU2TwoIndexSymm;
typedef SU_TwoIndex<3, Symmetric> SU3TwoIndexSymm;
typedef SU_TwoIndex<4, Symmetric> SU4TwoIndexSymm;
typedef SU_TwoIndex<5, Symmetric> SU5TwoIndexSymm;
typedef SU_TwoIndex<2, AntiSymmetric> SU2TwoIndexAntiSymm;
typedef SU_TwoIndex<3, AntiSymmetric> SU3TwoIndexAntiSymm;
typedef SU_TwoIndex<4, AntiSymmetric> SU4TwoIndexAntiSymm;
typedef SU_TwoIndex<5, AntiSymmetric> SU5TwoIndexAntiSymm;
NAMESPACE_END(Grid);
#endif

Grid/qcd/utils/Sp2n.impl.h
@ -0,0 +1,317 @@
// This file is #included into the body of the class template definition of
// GaugeGroup. So, imagine there to be
//
// template <int ncolour, class group_name>
// class GaugeGroup {
//
// around it.
//
// Please note that the unconventional file extension makes sure that it
// doesn't get found by the scripts/filelist during bootstrapping.
private:
template <ONLY_IF_Sp>
static int su2subgroups(GroupName::Sp) { return (ncolour/2 * (ncolour/2 - 1)) / 2; }
// Sp(2N) has N(2N+1) = 2N^2+N generators
//
// normalise the generators such that
// Trace ( Ta Tb) = 1/2 delta_ab
//
// N generators in the Cartan subalgebra, 2N^2 off-diagonal:
// there are 6 types named a,b,c,d and w,z
// abcd are N(N-1)/2 each while wz are N each
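// Illustrative count for Sp(4) (ncolour = 4, N = 2), following the classification above:
// N = 2 Cartan generators, N(N-1)/2 = 1 generator of each of the a,b,c,d types and
// N = 2 each of the w,z types, giving 2 + 4*1 + 2*2 = 10 = N(2N+1) generators in total.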
template <class cplx, ONLY_IF_Sp>
static void generator(int lieIndex, iGroupMatrix<cplx> &ta, GroupName::Sp) {
// map lie index into type of generators: diagonal, abcd type, wz type
const int nsp = ncolour/2;
int diagIndex;
int aIndex, bIndex, cIndex, dIndex;
int wIndex, zIndex; // a,b,c,d are N(N-1)/2 and w,z are N
const int mod = nsp * (nsp - 1) * 0.5;
const int offdiag =
2 * nsp * nsp; // number of generators not in the cartan subalgebra
const int wmod = 4 * mod;
const int zmod = wmod + nsp;
if (lieIndex >= offdiag) {
diagIndex = lieIndex - offdiag; // 0, ... ,N-1
// std::cout << GridLogMessage << "diag type " << std::endl;
generatorDiagtype(diagIndex, ta);
return;
}
if ((lieIndex >= wmod) && (lieIndex < zmod)) {
// std::cout << GridLogMessage << "w type " << std::endl;
wIndex = lieIndex - wmod; // 0, ... ,N-1
generatorWtype(wIndex, ta);
return;
}
if ((lieIndex >= zmod) && (lieIndex < offdiag)) {
// std::cout << GridLogMessage << "z type " << std::endl;
// std::cout << GridLogMessage << "lie index " << lieIndex << std::endl;
// std::cout << GridLogMessage << "z mod " << zmod << std::endl;
zIndex = lieIndex - zmod; // 0, ... ,N-1
generatorZtype(zIndex, ta);
return;
}
if (lieIndex < mod) { // atype 0, ... , N(N-1)/2=mod
// std::cout << GridLogMessage << "a type " << std::endl;
aIndex = lieIndex;
// std::cout << GridLogMessage << "a indx " << aIndex << std::endl;
generatorAtype(aIndex, ta);
return;
}
if ((lieIndex >= mod) && lieIndex < 2 * mod) { // btype mod, ... , 2mod-1
// std::cout << GridLogMessage << "b type " << std::endl;
bIndex = lieIndex - mod;
generatorBtype(bIndex, ta);
return;
}
if ((lieIndex >= 2 * mod) &&
lieIndex < 3 * mod) { // ctype 2mod, ... , 3mod-1
// std::cout << GridLogMessage << "c type " << std::endl;
cIndex = lieIndex - 2 * mod;
generatorCtype(cIndex, ta);
return;
}
if ((lieIndex >= 3 * mod) &&
lieIndex < wmod) { // dtype 3mod, ... , 4mod-1 = wmod-1
// std::cout << GridLogMessage << "d type " << std::endl;
dIndex = lieIndex - 3 * mod;
generatorDtype(dIndex, ta);
return;
}
} // end of generator
template <class cplx, ONLY_IF_Sp>
static void generatorDiagtype(int diagIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i) = - ta(i+N,i+N) = 1/2 for each i index of the cartan subalgebra
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2;
ta()()(diagIndex, diagIndex) = nrm;
ta()()(diagIndex + nsp, diagIndex + nsp) = -nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorAtype(int aIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j) = ta(j,i) = -ta(i+N,j+N) = -ta(j+N,i+N) = 1 / 2 sqrt(2)
// with i<j and i=0,...,N-2
// follows that j=i+1, ... , N-1
int i1, i2;
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, aIndex);
ta()()(i1, i2) = 1;
ta()()(i2, i1) = 1;
ta()()(i1 + nsp, i2 + nsp) = -1;
ta()()(i2 + nsp, i1 + nsp) = -1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorBtype(int bIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j) = -ta(j,i) = ta(i+N,j+N) = -ta(j+N,i+N) = i / 2 sqrt(2)
// with i<j and i=0,...,N-2
// follows that j=i+1, ... , N-1
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
cplx i(0.0, 1.0);
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, bIndex);
ta()()(i1, i2) = i;
ta()()(i2, i1) = -i;
ta()()(i1 + nsp, i2 + nsp) = i;
ta()()(i2 + nsp, i1 + nsp) = -i;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorCtype(int cIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j+N) = ta(j,i+N) = ta(i+N,j) = ta(j+N,i) = 1 / 2 sqrt(2)
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, cIndex);
ta()()(i1, i2 + nsp) = 1;
ta()()(i2, i1 + nsp) = 1;
ta()()(i1 + nsp, i2) = 1;
ta()()(i2 + nsp, i1) = 1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorDtype(int dIndex, iGroupMatrix<cplx> &ta) {
// ta(i,j+N) = ta(j,i+N) = -ta(i+N,j) = -ta(j+N,i) = i / 2 sqrt(2)
const int nsp=ncolour/2;
int i1, i2;
ta = Zero();
cplx i(0.0, 1.0);
RealD nrm = 1 / (2 * std::sqrt(2));
su2SubGroupIndex(i1, i2, dIndex);
ta()()(i1, i2 + nsp) = i;
ta()()(i2, i1 + nsp) = i;
ta()()(i1 + nsp, i2) = -i;
ta()()(i2 + nsp, i1) = -i;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorWtype(int wIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i+N) = ta(i+N,i) = 1/2
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2; // check
ta()()(wIndex, wIndex + nsp) = 1;
ta()()(wIndex + nsp, wIndex) = 1;
ta = ta * nrm;
}
template <class cplx, ONLY_IF_Sp>
static void generatorZtype(int zIndex, iGroupMatrix<cplx> &ta) {
// ta(i,i+N) = - ta(i+N,i) = i/2
const int nsp=ncolour/2;
ta = Zero();
RealD nrm = 1.0 / 2; // check
cplx i(0.0, 1.0);
ta()()(zIndex, zIndex + nsp) = i;
ta()()(zIndex + nsp, zIndex) = -i;
ta = ta * nrm;
}
////////////////////////////////////////////////////////////////////////
// Map a su2 subgroup number to the pair of rows that are non zero
////////////////////////////////////////////////////////////////////////
template <ONLY_IF_Sp>
static void su2SubGroupIndex(int &i1, int &i2, int su2_index, GroupName::Sp) {
const int nsp=ncolour/2;
assert((su2_index >= 0) && (su2_index < (nsp * (nsp - 1)) / 2));
int spare = su2_index;
for (i1 = 0; spare >= (nsp - 1 - i1); i1++) {
spare = spare - (nsp - 1 - i1); // remove the Nc-1-i1 terms
}
i2 = i1 + 1 + spare;
}
static void testGenerators(GroupName::Sp) {
Matrix ta;
Matrix tb;
std::cout << GridLogMessage
<< "Fundamental - Checking trace ta tb is 0.5 delta_ab "
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
for (int b = 0; b < AlgebraDimension; b++) {
generator(a, ta);
generator(b, tb);
Complex tr = TensorRemove(trace(ta * tb));
std::cout << GridLogMessage << "(" << a << "," << b << ") = " << tr
<< std::endl;
if (a == b) assert(abs(tr - Complex(0.5)) < 1.0e-6);
if (a != b) assert(abs(tr) < 1.0e-6);
}
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if hermitian"
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
std::cout << GridLogMessage << a << std::endl;
assert(norm2(ta - adj(ta)) < 1.0e-6);
}
std::cout << GridLogMessage << std::endl;
std::cout << GridLogMessage << "Fundamental - Checking if traceless"
<< std::endl;
for (int a = 0; a < AlgebraDimension; a++) {
generator(a, ta);
Complex tr = TensorRemove(trace(ta));
std::cout << GridLogMessage << a << std::endl;
assert(abs(tr) < 1.0e-6);
}
}
template <int N>
static Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > >
ProjectOnGeneralGroup(const Lattice<iScalar<iScalar<iMatrix<vComplexD, N> > > > &Umu, GroupName::Sp) {
return ProjectOnSpGroup(Umu);
}
template <class vtype>
accelerator_inline static iScalar<vtype> ProjectOnGeneralGroup(const iScalar<vtype> &r, GroupName::Sp) {
return ProjectOnSpGroup(r);
}
template <class vtype, int N>
accelerator_inline static iVector<vtype,N> ProjectOnGeneralGroup(const iVector<vtype,N> &r, GroupName::Sp) {
return ProjectOnSpGroup(r);
}
template <class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline static iMatrix<vtype,N> ProjectOnGeneralGroup(const iMatrix<vtype,N> &arg, GroupName::Sp) {
return ProjectOnSpGroup(arg);
}
template <typename LatticeMatrixType>
static void taProj(const LatticeMatrixType &in, LatticeMatrixType &out, GroupName::Sp) {
out = SpTa(in);
}
public:
template <ONLY_IF_Sp>
static void Omega(LatticeColourMatrixD &in) {
const int nsp=ncolour/2;
LatticeColourMatrixD OmegaLatt(in.Grid());
LatticeColourMatrixD identity(in.Grid());
ColourMatrix Omega;
OmegaLatt = Zero();
Omega = Zero();
identity = 1.;
for (int i = 0; i < nsp; i++) {
Omega()()(i, nsp + i) = 1.;
Omega()()(nsp + i, i) = -1;
}
OmegaLatt = OmegaLatt + (identity * Omega);
in = OmegaLatt;
}
template <ONLY_IF_Sp, class vtype, int N>
static void Omega(iScalar<iScalar<iMatrix<vtype, N> > > &in) {
const int nsp=ncolour/2;
iScalar<iScalar<iMatrix<vtype, N> > > Omega;
Omega = Zero();
for (int i = 0; i < nsp; i++) {
Omega()()(i, nsp + i) = 1.;
Omega()()(nsp + i, i) = -1;
}
in = Omega;
}
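// Illustrative form of the matrix built above: for Sp(4) (ncolour = 4, nsp = 2)
//
//           (  0  0  1  0 )
//   Omega = (  0  0  0  1 )
//           ( -1  0  0  0 )
//           (  0 -1  0  0 )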


@ -8,9 +8,9 @@
#include <Grid/qcd/utils/ScalarObjs.h>
// Include representations
#include <Grid/qcd/utils/SUn.h>
#include <Grid/qcd/utils/GaugeGroup.h>
#include <Grid/qcd/utils/SUnAdjoint.h>
#include <Grid/qcd/utils/SUnTwoIndex.h>
#include <Grid/qcd/utils/GaugeGroupTwoIndex.h>
// All-to-all contraction kernels that touch the
// internal lattice structure


@ -290,7 +290,7 @@ public:
}
*/
//////////////////////////////////////////////////
// the sum over all staples on each site
// the sum over all nu-oriented staples for nu != mu on each site
//////////////////////////////////////////////////
static void Staple(GaugeMat &staple, const GaugeLorentz &Umu, int mu) {
@ -300,6 +300,10 @@ public:
for (int d = 0; d < Nd; d++) {
U[d] = PeekIndex<LorentzIndex>(Umu, d);
}
Staple(staple, U, mu);
}
static void Staple(GaugeMat &staple, const std::vector<GaugeMat> &U, int mu) {
staple = Zero();
for (int nu = 0; nu < Nd; nu++) {
@ -335,6 +339,202 @@ public:
}
}
/////////////
//Staples for each direction mu, summed over nu != mu
//staple: output staples for each mu (Nd)
//U: link array (Nd)
/////////////
static void StapleAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U) {
assert(staple.size() == Nd); assert(U.size() == Nd);
for(int mu=0;mu<Nd;mu++) Staple(staple[mu], U, mu);
}
//A workspace class allowing reuse of the stencil
class WilsonLoopPaddedStencilWorkspace{
std::unique_ptr<GeneralLocalStencil> stencil;
size_t nshift;
void generateStencil(GridBase* padded_grid){
double t0 = usecond();
//Generate shift arrays
std::vector<Coordinate> shifts = this->getShifts();
nshift = shifts.size();
double t1 = usecond();
//Generate local stencil
stencil.reset(new GeneralLocalStencil(padded_grid,shifts));
double t2 = usecond();
std::cout << GridLogPerformance << " WilsonLoopPaddedWorkspace timings: coord:" << (t1-t0)/1000 << "ms, stencil:" << (t2-t1)/1000 << "ms" << std::endl;
}
public:
//Get the stencil. If not already generated, or if generated using a different Grid than in PaddedCell, it will be created on-the-fly
const GeneralLocalStencil & getStencil(const PaddedCell &pcell){
assert(pcell.depth >= this->paddingDepth());
if(!stencil || stencil->Grid() != (GridBase*)pcell.grids.back() ) generateStencil((GridBase*)pcell.grids.back());
return *stencil;
}
size_t Nshift() const{ return nshift; }
virtual std::vector<Coordinate> getShifts() const = 0;
virtual int paddingDepth() const = 0; //padding depth required
virtual ~WilsonLoopPaddedStencilWorkspace(){}
};
//This workspace allows the sharing of a common PaddedCell object between multiple stencil workspaces
class WilsonLoopPaddedWorkspace{
std::vector<WilsonLoopPaddedStencilWorkspace*> stencil_wk;
std::unique_ptr<PaddedCell> pcell;
void generatePcell(GridBase* unpadded_grid){
assert(stencil_wk.size());
int max_depth = 0;
for(auto const &s : stencil_wk) max_depth=std::max(max_depth, s->paddingDepth());
pcell.reset(new PaddedCell(max_depth, dynamic_cast<GridCartesian*>(unpadded_grid)));
}
public:
//Add a stencil definition. This should be done before the first call to retrieve a stencil object.
//Takes ownership of the pointer
void addStencil(WilsonLoopPaddedStencilWorkspace *stencil){
assert(!pcell);
stencil_wk.push_back(stencil);
}
const GeneralLocalStencil & getStencil(const size_t stencil_idx, GridBase* unpadded_grid){
if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
return stencil_wk[stencil_idx]->getStencil(*pcell);
}
const PaddedCell & getPaddedCell(GridBase* unpadded_grid){
if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
return *pcell;
}
~WilsonLoopPaddedWorkspace(){
for(auto &s : stencil_wk) delete s;
}
};
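//Illustrative usage sketch (UGrid is an assumed unpadded GridCartesian*; the stencil
//workspace classes referenced here are defined below):
//  WilsonLoopPaddedWorkspace wk;
//  wk.addStencil(new StaplePaddedAllWorkspace);      // stencil index 0, padding depth 1
//  wk.addStencil(new RectStaplePaddedAllWorkspace);  // stencil index 1, padding depth 2
//  const PaddedCell &Cell = wk.getPaddedCell(UGrid); // shared cell, padded to the max depth
//  const GeneralLocalStencil &plaqStencil = wk.getStencil(0, UGrid);
//  const GeneralLocalStencil &rectStencil = wk.getStencil(1, UGrid);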
//A workspace class allowing reuse of the stencil
class StaplePaddedAllWorkspace: public WilsonLoopPaddedStencilWorkspace{
public:
std::vector<Coordinate> getShifts() const override{
std::vector<Coordinate> shifts;
for(int mu=0;mu<Nd;mu++){
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
Coordinate shift_0(Nd,0);
Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
Coordinate shift_mnu(Nd,0); shift_mnu[nu]=-1;
Coordinate shift_mnu_pmu(Nd,0); shift_mnu_pmu[nu]=-1; shift_mnu_pmu[mu]=1;
//U_nu(x+mu)U^dag_mu(x+nu) U^dag_nu(x)
shifts.push_back(shift_0);
shifts.push_back(shift_nu);
shifts.push_back(shift_mu);
//U_nu^dag(x-nu+mu) U_mu^dag(x-nu) U_nu(x-nu)
shifts.push_back(shift_mnu);
shifts.push_back(shift_mnu);
shifts.push_back(shift_mnu_pmu);
}
}
}
return shifts;
}
int paddingDepth() const override{ return 1; }
};
//Padded cell implementation of the staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
StaplePaddedAllWorkspace wk;
StaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
}
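//Illustrative usage sketch (UGrid and Umu are assumed inputs; relies on the PaddedCell
//Extract interface used below and an Exchange method assumed to populate the halo):
//  PaddedCell Cell(1, UGrid);                          // depth-1 halo suffices for 1x1 staples
//  std::vector<GaugeMat> U(Nd, UGrid);
//  std::vector<GaugeMat> U_pad(Nd, Cell.grids.back()); // link fields on the padded grid
//  std::vector<GaugeMat> staple(Nd, UGrid);
//  for(int mu=0;mu<Nd;mu++){
//    U[mu]     = PeekIndex<LorentzIndex>(Umu, mu);
//    U_pad[mu] = Cell.Exchange(U[mu]);                 // gather the halo for each link field
//  }
//  StaplePaddedAll(staple, U_pad, Cell);               // staple[mu] = sum over nu != mu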
//Padded cell implementation of the staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
//gStencil: the precomputed generalized local stencil for the staple
static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil) {
double t0 = usecond();
assert(U_padded.size() == Nd); assert(staple.size() == Nd);
assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
assert(Cell.depth >= 1);
GridBase *ggrid = U_padded[0].Grid(); //padded cell grid
int shift_mu_off = gStencil._npoints/Nd;
//Open views to padded gauge links and keep open over mu loop
typedef LatticeView<typename GaugeMat::vector_object> GaugeViewType;
size_t vsize = Nd*sizeof(GaugeViewType);
GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);
GaugeMat gStaple(ggrid);
int outer_off = 0;
for(int mu=0;mu<Nd;mu++){
{ //view scope
autoView( gStaple_v , gStaple, AcceleratorWrite);
auto gStencil_v = gStencil.View(AcceleratorRead);
accelerator_for(ss, ggrid->oSites(), ggrid->Nsimd(), {
decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
stencil_ss = Zero();
int off = outer_off;
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
GeneralStencilEntry const* e = gStencil_v.GetEntry(off++,ss);
auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
auto U2 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U2 * U1 * U0;
e = gStencil_v.GetEntry(off++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(off++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(off++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U2 * U1 * U0;
}
}
coalescedWrite(gStaple_v[ss],stencil_ss);
}
);
} //ensure views are all closed!
staple[mu] = Cell.Extract(gStaple);
outer_off += shift_mu_off;
}//mu loop
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
free(Ug_dirs_v_host);
acceleratorFreeDevice(Ug_dirs_v);
double t1=usecond();
std::cout << GridLogPerformance << "StaplePaddedAll timing:" << (t1-t0)/1000 << "ms" << std::endl;
}
//////////////////////////////////////////////////
// the sum over all staples on each site in direction mu,nu, upper part
//////////////////////////////////////////////////
@ -707,18 +907,14 @@ public:
// the sum over all staples on each site
//////////////////////////////////////////////////
static void RectStapleDouble(GaugeMat &U2, const GaugeMat &U, int mu) {
U2 = U * Cshift(U, mu, 1);
U2 = U * Gimpl::CshiftLink(U, mu, 1);
}
////////////////////////////////////////////////////////////////////////////
// Hop by two optimisation strategy does not work nicely with Gparity. (could
// do,
// but need to track two deep where cross boundary and apply a conjugation).
// Must differentiate this in Gimpl, and use Gimpl::isPeriodicGaugeField to do
// so .
// Hop by two optimisation strategy. Use RectStapleDouble to obtain 'U2'
////////////////////////////////////////////////////////////////////////////
static void RectStapleOptimised(GaugeMat &Stap, std::vector<GaugeMat> &U2,
std::vector<GaugeMat> &U, int mu) {
static void RectStapleOptimised(GaugeMat &Stap, const std::vector<GaugeMat> &U2,
const std::vector<GaugeMat> &U, int mu) {
Stap = Zero();
@ -732,9 +928,9 @@ public:
// Up staple ___ ___
// | |
tmp = Cshift(adj(U[nu]), nu, -1);
tmp = Gimpl::CshiftLink(adj(U[nu]), nu, -1);
tmp = adj(U2[mu]) * tmp;
tmp = Cshift(tmp, mu, -2);
tmp = Gimpl::CshiftLink(tmp, mu, -2);
Staple2x1 = Gimpl::CovShiftForward(U[nu], nu, tmp);
@ -742,14 +938,14 @@ public:
// |___ ___|
//
tmp = adj(U2[mu]) * U[nu];
Staple2x1 += Gimpl::CovShiftBackward(U[nu], nu, Cshift(tmp, mu, -2));
Staple2x1 += Gimpl::CovShiftBackward(U[nu], nu, Gimpl::CshiftLink(tmp, mu, -2));
// ___ ___
// | ___|
// |___ ___|
//
Stap += Cshift(Gimpl::CovShiftForward(U[mu], mu, Staple2x1), mu, 1);
Stap += Gimpl::CshiftLink(Gimpl::CovShiftForward(U[mu], mu, Staple2x1), mu, 1);
// ___ ___
// |___ |
@ -758,7 +954,7 @@ public:
// tmp= Staple2x1* Cshift(U[mu],mu,-2);
// Stap+= Cshift(tmp,mu,1) ;
Stap += Cshift(Staple2x1, mu, 1) * Cshift(U[mu], mu, -1);
Stap += Gimpl::CshiftLink(Staple2x1, mu, 1) * Gimpl::CshiftLink(U[mu], mu, -1);
;
// --
@ -766,10 +962,10 @@ public:
//
// | |
tmp = Cshift(adj(U2[nu]), nu, -2);
tmp = Gimpl::CshiftLink(adj(U2[nu]), nu, -2);
tmp = Gimpl::CovShiftBackward(U[mu], mu, tmp);
tmp = U2[nu] * Cshift(tmp, nu, 2);
Stap += Cshift(tmp, mu, 1);
tmp = U2[nu] * Gimpl::CshiftLink(tmp, nu, 2);
Stap += Gimpl::CshiftLink(tmp, mu, 1);
// | |
//
@ -778,25 +974,12 @@ public:
tmp = Gimpl::CovShiftBackward(U[mu], mu, U2[nu]);
tmp = adj(U2[nu]) * tmp;
tmp = Cshift(tmp, nu, -2);
Stap += Cshift(tmp, mu, 1);
tmp = Gimpl::CshiftLink(tmp, nu, -2);
Stap += Gimpl::CshiftLink(tmp, mu, 1);
}
}
}
static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu) {
RectStapleUnoptimised(Stap, Umu, mu);
}
static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U,
int mu) {
if (Gimpl::isPeriodicGaugeField()) {
RectStapleOptimised(Stap, U2, U, mu);
} else {
RectStapleUnoptimised(Stap, Umu, mu);
}
}
static void RectStapleUnoptimised(GaugeMat &Stap, const GaugeLorentz &Umu,
int mu) {
GridBase *grid = Umu.Grid();
@ -895,6 +1078,288 @@ public:
}
}
static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu) {
RectStapleUnoptimised(Stap, Umu, mu);
}
static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U,
int mu) {
RectStapleOptimised(Stap, U2, U, mu);
}
//////////////////////////////////////////////////////
//Compute the rectangular staples for all orientations
//Stap : Array of staples (Nd)
//U: Gauge links in each direction (Nd)
/////////////////////////////////////////////////////
static void RectStapleAll(std::vector<GaugeMat> &Stap, const std::vector<GaugeMat> &U){
assert(Stap.size() == Nd); assert(U.size() == Nd);
std::vector<GaugeMat> U2(Nd,U[0].Grid());
for(int mu=0;mu<Nd;mu++) RectStapleDouble(U2[mu], U[mu], mu);
for(int mu=0;mu<Nd;mu++) RectStapleOptimised(Stap[mu], U2, U, mu);
}
//A workspace class allowing reuse of the stencil
class RectStaplePaddedAllWorkspace: public WilsonLoopPaddedStencilWorkspace{
public:
std::vector<Coordinate> getShifts() const override{
std::vector<Coordinate> shifts;
for (int mu = 0; mu < Nd; mu++){
for (int nu = 0; nu < Nd; nu++) {
if (nu != mu) {
auto genShift = [&](int mushift,int nushift){
Coordinate out(Nd,0); out[mu]=mushift; out[nu]=nushift; return out;
};
//tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
shifts.push_back(genShift(0,0));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(+1,+1));
shifts.push_back(genShift(+2,0));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(+1,-1));
shifts.push_back(genShift(+2,-1));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,-1));
shifts.push_back(genShift(-1,-1));
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(+1,-1));
//tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,0));
shifts.push_back(genShift(-1,+1));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(+1,0));
//tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
shifts.push_back(genShift(0,0));
shifts.push_back(genShift(0,+1));
shifts.push_back(genShift(0,+2));
shifts.push_back(genShift(+1,+1));
shifts.push_back(genShift(+1,0));
//tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
shifts.push_back(genShift(0,-1));
shifts.push_back(genShift(0,-2));
shifts.push_back(genShift(0,-2));
shifts.push_back(genShift(+1,-2));
shifts.push_back(genShift(+1,-1));
}
}
}
return shifts;
}
int paddingDepth() const override{ return 2; }
};
//Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
RectStaplePaddedAllWorkspace wk;
RectStaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
}
//Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
//staple: output staple for each mu, summed over nu != mu (Nd)
//U_padded: the gauge link fields padded out using the PaddedCell class
//Cell: the padded cell class
//gStencil: the stencil
static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil) {
double t0 = usecond();
assert(U_padded.size() == Nd); assert(staple.size() == Nd);
assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
assert(Cell.depth >= 2);
GridBase *ggrid = U_padded[0].Grid(); //padded cell grid
size_t nshift = gStencil._npoints;
int mu_off_delta = nshift / Nd;
//Open views to padded gauge links and keep open over mu loop
typedef LatticeView<typename GaugeMat::vector_object> GaugeViewType;
size_t vsize = Nd*sizeof(GaugeViewType);
GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);
GaugeMat gStaple(ggrid); //temp staple object on padded grid
int offset = 0;
for(int mu=0; mu<Nd; mu++){
{ //view scope
autoView( gStaple_v , gStaple, AcceleratorWrite);
auto gStencil_v = gStencil.View(AcceleratorRead);
accelerator_for(ss, ggrid->oSites(), ggrid->Nsimd(), {
decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
stencil_ss = Zero();
int s=offset;
for(int nu=0;nu<Nd;nu++){
if(nu != mu){
//tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
GeneralStencilEntry const* e = gStencil_v.GetEntry(s++,ss);
auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
auto U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
auto U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
e = gStencil_v.GetEntry(s++,ss);
U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
//tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
e = gStencil_v.GetEntry(s++,ss);
U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
e = gStencil_v.GetEntry(s++,ss);
U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
e = gStencil_v.GetEntry(s++,ss);
U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
}
}
coalescedWrite(gStaple_v[ss],stencil_ss);
}
);
offset += mu_off_delta;
}//kernel/view scope
staple[mu] = Cell.Extract(gStaple);
}//mu loop
for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
free(Ug_dirs_v_host);
acceleratorFreeDevice(Ug_dirs_v);
double t1 = usecond();
std::cout << GridLogPerformance << "RectStaplePaddedAll timings: " << (t1-t0)/1000 << "ms" << std::endl;
}
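For orientation, a usage sketch of the padded-cell entry point above. This is illustrative only: it assumes the gauge links U, a GridCartesian* grid, and a PaddedCell Cell of depth >= 2 (e.g. obtained from a workspace's getPaddedCell as in StapleAndRectStapleAll further down) are already set up.
  // Hypothetical call site: pad the links once, then form the rectangular staples
  CshiftImplGauge<Gimpl> cshift_impl;
  std::vector<GaugeMat> U_pad(Nd, Cell.grids.back());
  for(int mu=0;mu<Nd;mu++) U_pad[mu] = Cell.Exchange(U[mu], cshift_impl); // halo exchange onto the padded grid
  std::vector<GaugeMat> RectStap(Nd, grid);
  RectStaplePaddedAll(RectStap, U_pad, Cell); // convenience overload; builds its own workspace and stencil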
//A workspace for reusing the PaddedCell and GeneralLocalStencil objects
class StapleAndRectStapleAllWorkspace: public WilsonLoopPaddedWorkspace{
public:
StapleAndRectStapleAllWorkspace(){
this->addStencil(new StaplePaddedAllWorkspace);
this->addStencil(new RectStaplePaddedAllWorkspace);
}
};
//////////////////////////////////////////////////////
//Compute the 1x1 and 1x2 staples for all orientations
//Stap : Array of staples (Nd)
//RectStap: Array of rectangular staples (Nd)
//U: Gauge links in each direction (Nd)
/////////////////////////////////////////////////////
static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U){
StapleAndRectStapleAllWorkspace wk;
StapleAndRectStapleAll(Stap,RectStap,U,wk);
}
//////////////////////////////////////////////////////
//Compute the 1x1 and 1x2 staples for all orientations
//Stap : Array of staples (Nd)
//RectStap: Array of rectangular staples (Nd)
//U: Gauge links in each direction (Nd)
//wk: a workspace containing stored PaddedCell and GeneralLocalStencil objects to maximize reuse
/////////////////////////////////////////////////////
static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U, StapleAndRectStapleAllWorkspace &wk){
#if 0
StapleAll(Stap, U);
RectStapleAll(RectStap, U);
#else
double t0 = usecond();
GridCartesian* unpadded_grid = dynamic_cast<GridCartesian*>(U[0].Grid());
const PaddedCell &Ghost = wk.getPaddedCell(unpadded_grid);
CshiftImplGauge<Gimpl> cshift_impl;
std::vector<GaugeMat> U_pad(Nd, Ghost.grids.back());
for(int mu=0;mu<Nd;mu++) U_pad[mu] = Ghost.Exchange(U[mu], cshift_impl);
double t1 = usecond();
StaplePaddedAll(Stap, U_pad, Ghost, wk.getStencil(0,unpadded_grid) );
double t2 = usecond();
RectStaplePaddedAll(RectStap, U_pad, Ghost, wk.getStencil(1,unpadded_grid));
double t3 = usecond();
std::cout << GridLogPerformance << "StapleAndRectStapleAll timings: pad:" << (t1-t0)/1000 << "ms, staple:" << (t2-t1)/1000 << "ms, rect-staple:" << (t3-t2)/1000 << "ms" << std::endl;
#endif
}
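A usage sketch of the combined entry point; the gauge field setup is assumed, and in a real HMC loop the workspace would be kept alive across calls so the PaddedCell and both stencils are reused rather than rebuilt each time.
  StapleAndRectStapleAllWorkspace wk;       // construct once, e.g. as a member of the action/integrator
  std::vector<GaugeMat> U(Nd, grid);        // gauge links, one per direction (assumed filled elsewhere)
  std::vector<GaugeMat> Stap(Nd, grid);     // 1x1 staples, summed over nu != mu
  std::vector<GaugeMat> RectStap(Nd, grid); // 1x2 staples, summed over nu != mu
  StapleAndRectStapleAll(Stap, RectStap, U, wk);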
//////////////////////////////////////////////////
// Wilson loop of size (R1, R2), oriented in mu,nu plane
//////////////////////////////////////////////////

View File

@ -1130,6 +1130,14 @@ static_assert(sizeof(SIMD_Ftype) == sizeof(SIMD_Itype), "SIMD vector lengths inc
#endif
#endif
// Fixme need coalesced read gpermute
template<class vobj> void gpermute(vobj & inout,int perm){
vobj tmp=inout;
if (perm & 0x1 ) { permute(inout,tmp,0); tmp=inout;}
if (perm & 0x2 ) { permute(inout,tmp,1); tmp=inout;}
if (perm & 0x4 ) { permute(inout,tmp,2); tmp=inout;}
if (perm & 0x8 ) { permute(inout,tmp,3); tmp=inout;}
}
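A minimal usage sketch of the mask convention (the vector type is only an example): bit d of perm requests a permute in SIMD direction d, the same convention used by coalescedReadGeneralPermute added below.
  vColourMatrixD v = Zero();  // any Grid vector object
  gpermute(v, 0x3);           // permute in SIMD directions 0 and 1 (bits 0 and 1 set)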
NAMESPACE_END(Grid);

View File

@ -32,7 +32,12 @@ NAMESPACE_BEGIN(Grid);
struct GeneralStencilEntry {
uint64_t _offset; // 8 bytes
uint8_t _permute; // 1 byte // Horrible alignment properties
uint8_t _wrap;    // 1 byte // Horrible alignment properties
};
struct GeneralStencilEntryReordered : public GeneralStencilEntry {
uint64_t _input;
};
// Could pack to 8 + 4 + 4 = 128 bit and use
class GeneralLocalStencilView {
@ -46,7 +51,7 @@ class GeneralLocalStencilView {
accelerator_inline GeneralStencilEntry * GetEntry(int point,int osite) {
return & this->_entries_p[point+this->_npoints*osite];
}
void ViewClose(void){};
};
////////////////////////////////////////
// The Stencil Class itself
@ -61,7 +66,7 @@ protected:
public:
GridBase *Grid(void) const { return _grid; }
View_type View(void) const {
View_type View(int mode) const {
View_type accessor(*( (View_type *) this));
return accessor;
}
@ -79,60 +84,66 @@ public:
this->_entries.resize(npoints* osites);
this->_entries_p = &_entries[0];
thread_for(site, osites, {
Coordinate Coor;
Coordinate NbrCoor;
Coordinate Coor;
Coordinate NbrCoor;
for(Integer site=0;site<osites;site++){
for(Integer ii=0;ii<npoints;ii++){
Integer lex = site*npoints+ii;
GeneralStencilEntry SE;
////////////////////////////////////////////////
// Outer index of neighbour Offset calculation
////////////////////////////////////////////////
grid->oCoorFromOindex(Coor,site);
for(int d=0;d<Coor.size();d++){
int rd = grid->_rdimensions[d];
NbrCoor[d] = (Coor[d] + shifts[ii][d] + rd )%rd;
for(Integer ii=0;ii<npoints;ii++){
Integer lex = site*npoints+ii;
GeneralStencilEntry SE;
////////////////////////////////////////////////
// Outer index of neighbour Offset calculation
////////////////////////////////////////////////
grid->oCoorFromOindex(Coor,site);
for(int d=0;d<Coor.size();d++){
int rd = grid->_rdimensions[d];
NbrCoor[d] = (Coor[d] + shifts[ii][d] + rd )%rd;
}
SE._offset = grid->oIndexReduced(NbrCoor);
////////////////////////////////////////////////
// Inner index permute calculation
// Simpler version using icoor calculation
////////////////////////////////////////////////
SE._permute =0;
SE._wrap=0;
for(int d=0;d<Coor.size();d++){
int fd = grid->_fdimensions[d];
int rd = grid->_rdimensions[d];
int ld = grid->_ldimensions[d];
int ly = grid->_simd_layout[d];
assert((ly==1)||(ly==2)||(ly==grid->Nsimd()));
int shift = (shifts[ii][d]+fd)%fd; // make it strictly positive 0.. L-1
int x = Coor[d]; // x in [0... rd-1] as an oSite
if ( (x + shift)%fd != (x+shift)%ld ){
SE._wrap = 1;
}
int permute_dim = grid->PermuteDim(d);
int permute_slice=0;
if(permute_dim){
int num = shift%rd; // Slice within dest osite cell of slice zero
int wrap = shift/rd; // Number of osite local volume cells crossed through
// x+num < rd dictates whether we are in same permute state as slice 0
if ( x< rd-num ) permute_slice=wrap;
else permute_slice=(wrap+1)%ly;
}
if ( permute_slice ) {
int ptype =grid->PermuteType(d);
uint8_t mask =0x1<<ptype;
SE._permute |= mask;
}
}
////////////////////////////////////////////////
// Store in look up table
////////////////////////////////////////////////
this->_entries[lex] = SE;
}
SE._offset = grid->oIndexReduced(NbrCoor);
////////////////////////////////////////////////
// Inner index permute calculation
// Simpler version using icoor calculation
////////////////////////////////////////////////
SE._permute =0;
for(int d=0;d<Coor.size();d++){
int fd = grid->_fdimensions[d];
int rd = grid->_rdimensions[d];
int ly = grid->_simd_layout[d];
assert((ly==1)||(ly==2));
int shift = (shifts[ii][d]+fd)%fd; // make it strictly positive 0.. L-1
int x = Coor[d]; // x in [0... rd-1] as an oSite
int permute_dim = grid->PermuteDim(d);
int permute_slice=0;
if(permute_dim){
int num = shift%rd; // Slice within dest osite cell of slice zero
int wrap = shift/rd; // Number of osite local volume cells crossed through
// x+num < rd dictates whether we are in same permute state as slice 0
if ( x< rd-num ) permute_slice=wrap;
else permute_slice=(wrap+1)%ly;
}
if ( permute_slice ) {
int ptype =grid->PermuteType(d);
uint8_t mask =0x1<<ptype;
SE._permute |= mask;
}
}
////////////////////////////////////////////////
// Store in look up table
////////////////////////////////////////////////
this->_entries[lex] = SE;
}
}
});
}
};

View File

@ -32,6 +32,7 @@
#include <Grid/stencil/SimpleCompressor.h> // subdir aggregate
#include <Grid/stencil/Lebesgue.h> // subdir aggregate
#include <Grid/stencil/GeneralLocalStencil.h>
//////////////////////////////////////////////////////////////////////////////////////////
// Must not lose sight that goal is to be able to construct really efficient

View File

@ -73,6 +73,16 @@ vobj coalescedReadPermute(const vobj & __restrict__ vec,int ptype,int doperm,int
return vec;
}
}
//'perm_mask' acts as a bitmask
template<class vobj> accelerator_inline
vobj coalescedReadGeneralPermute(const vobj & __restrict__ vec,int perm_mask,int nd,int lane=0)
{
auto obj = vec, tmp = vec;
for (int d=0;d<nd;d++)
if (perm_mask & (0x1 << d)) { permute(obj,tmp,d); tmp=obj;}
return obj;
}
template<class vobj> accelerator_inline
void coalescedWrite(vobj & __restrict__ vec,const vobj & __restrict__ extracted,int lane=0)
{
@ -83,7 +93,7 @@ void coalescedWriteNonTemporal(vobj & __restrict__ vec,const vobj & __restrict__
{
vstream(vec, extracted);
}
#else
#else //==GRID_SIMT
//#ifndef GRID_SYCL
@ -166,6 +176,14 @@ typename vobj::scalar_object coalescedReadPermute(const vobj & __restrict__ vec,
return extractLane(plane,vec);
}
template<class vobj> accelerator_inline
typename vobj::scalar_object coalescedReadGeneralPermute(const vobj & __restrict__ vec,int perm_mask,int nd,int lane=acceleratorSIMTlane(vobj::Nsimd()))
{
int plane = lane;
for (int d=0;d<nd;d++)
plane = (perm_mask & (0x1 << d)) ? plane ^ (vobj::Nsimd() >> (d + 1)) : plane;
return extractLane(plane,vec);
}
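The SIMT branch above maps a permute mask to a partner lane via plane ^ (Nsimd >> (d+1)). A small standalone sketch (plain C++, no Grid types, Nsimd fixed to 8 purely for illustration) reproduces that rule so the pairing can be inspected on the host:
  #include <cstdio>
  // Mirror of the lane remapping in the SIMT coalescedReadGeneralPermute:
  // bit d of perm_mask flips the lane across a stride of Nsimd >> (d+1).
  int permuted_lane(int lane, int perm_mask, int nd, int Nsimd) {
    int plane = lane;
    for (int d = 0; d < nd; d++)
      if (perm_mask & (0x1 << d)) plane ^= (Nsimd >> (d + 1));
    return plane;
  }
  int main() {
    const int Nsimd = 8, nd = 3;
    // mask 0x1 pairs lanes 4 apart, 0x2 pairs lanes 2 apart, 0x4 pairs nearest neighbours
    for (int lane = 0; lane < Nsimd; lane++)
      printf("lane %d -> 0x1:%d 0x2:%d 0x4:%d\n", lane,
             permuted_lane(lane, 0x1, nd, Nsimd),
             permuted_lane(lane, 0x2, nd, Nsimd),
             permuted_lane(lane, 0x4, nd, Nsimd));
    return 0;
  }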
template<class vobj> accelerator_inline
void coalescedWrite(vobj & __restrict__ vec,const typename vobj::scalar_object & __restrict__ extracted,int lane=acceleratorSIMTlane(vobj::Nsimd()))
{
insertLane(lane,vec,extracted);

View File

@ -66,13 +66,61 @@ template<class vtype,int N> accelerator_inline iMatrix<vtype,N> Ta(const iMatrix
return ret;
}
template<class vtype> accelerator_inline iScalar<vtype> SpTa(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
ret._internal = SpTa(r._internal);
return ret;
}
template<class vtype,int N> accelerator_inline iVector<vtype,N> SpTa(const iVector<vtype,N>&r)
{
iVector<vtype,N> ret;
for(int i=0;i<N;i++){
ret._internal[i] = SpTa(r._internal[i]);
}
return ret;
}
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> SpTa(const iMatrix<vtype,N> &arg)
{
// Generalises Ta to Sp2n
// Applies the following projections
// P_{antihermitian} P_{antihermitian-Sp-algebra} P_{traceless}
// where the ordering matters
// P_{traceless} subtracts the trace
// P_{antihermitian-Sp-algebra} provides the block structure of the algebra based on U = exp(T) i.e. anti-hermitian generators
// P_{antihermitian} does in-adj(in) / 2
iMatrix<vtype,N> ret(arg);
double factor = (1.0/(double)N);
vtype nrm;
nrm = 0.5;
ret = arg - (trace(arg)*factor);
for(int c1=0;c1<N/2;c1++)
{
for(int c2=0;c2<N/2;c2++)
{
ret._internal[c1][c2] = nrm*(conjugate(ret._internal[c1+N/2][c2+N/2]) + ret._internal[c1][c2]); // new[up-left] = old[up-left]+old*[down-right]
ret._internal[c1][c2+N/2] = nrm*(ret._internal[c1][c2+N/2] - conjugate(ret._internal[c1+N/2][c2])); // new[up-right] = old[up-right]-old*[down-left]
}
for(int c2=N/2;c2<N;c2++)
{
ret._internal[c1+N/2][c2-N/2] = -conjugate(ret._internal[c1][c2]); // reconstructs lower blocks
ret._internal[c1+N/2][c2] = conjugate(ret._internal[c1][c2-N/2]); // from upper blocks
}
}
ret = (ret - adj(ret))*0.5;
return ret;
}
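Read as formulae, the block structure these loops enforce is (my summary of the comments above, with A the upper-left and B the upper-right N/2 x N/2 blocks):
  T = \begin{pmatrix} A & B \\ -B^{*} & A^{*} \end{pmatrix},
  \qquad A^{\dagger} = -A, \qquad B^{T} = B, \qquad \mathrm{tr}\, T = 0,
i.e. a traceless anti-hermitian matrix obeying the Sp(2n) algebra constraint.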
///////////////////////////////////////////////
// ProjectOnGroup function for scalar, vector, matrix
// Projects on orthogonal, unitary group
///////////////////////////////////////////////
template<class vtype> accelerator_inline iScalar<vtype> ProjectOnGroup(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
@ -90,10 +138,12 @@ template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnGroup(c
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
{
typedef typename iMatrix<vtype,N>::scalar_type scalar;
// need a check for the group type?
iMatrix<vtype,N> ret(arg);
vtype nrm;
vtype inner;
scalar one(1.0);
for(int c1=0;c1<N;c1++){
// Normalises row c1
@ -102,7 +152,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
@ -127,7 +177,7 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
nrm = sqrt(inner);
nrm = 1.0/nrm;
nrm = one/nrm;
for(int c2=0;c2<N;c2++)
ret._internal[c1][c2]*= nrm;
}
@ -135,6 +185,85 @@ accelerator_inline iMatrix<vtype,N> ProjectOnGroup(const iMatrix<vtype,N> &arg)
return ret;
}
// Sp(2n) analogues of the projections above
// Ta cannot be defined here for Sp(2n) because it needs the generators from the Sp class;
// it is defined in the gauge impl types
template<class vtype> accelerator_inline iScalar<vtype> ProjectOnSpGroup(const iScalar<vtype>&r)
{
iScalar<vtype> ret;
ret._internal = ProjectOnSpGroup(r._internal);
return ret;
}
template<class vtype,int N> accelerator_inline iVector<vtype,N> ProjectOnSpGroup(const iVector<vtype,N>&r)
{
iVector<vtype,N> ret;
for(int i=0;i<N;i++){
ret._internal[i] = ProjectOnSpGroup(r._internal[i]);
}
return ret;
}
// int N is 2n in Sp(2n)
template<class vtype,int N, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0 >::type * =nullptr>
accelerator_inline iMatrix<vtype,N> ProjectOnSpGroup(const iMatrix<vtype,N> &arg)
{
// need a check for the group type?
iMatrix<vtype,N> ret(arg);
vtype nrm;
vtype inner;
for(int c1=0;c1<N/2;c1++)
{
for (int b=0; b<c1; b++) // remove the b-rows from U_c1
{
decltype(ret._internal[b][b]*ret._internal[b][b]) pr;
decltype(ret._internal[b][b]*ret._internal[b][b]) prn;
zeroit(pr);
zeroit(prn);
for(int c=0; c<N; c++)
{
pr += conjugate(ret._internal[c1][c])*ret._internal[b][c]; // <U_c1 | U_b >
prn += conjugate(ret._internal[c1][c])*ret._internal[b+N/2][c]; // <U_c1 | U_{b+N} >
}
for(int c=0; c<N; c++)
{
ret._internal[c1][c] -= (conjugate(pr) * ret._internal[b][c] + conjugate(prn) * ret._internal[b+N/2][c] ); // U_c1 -= ( <U_c1 | U_b > U_b + <U_c1 | U_{b+N} > U_{b+N} )
}
}
zeroit(inner);
for(int c2=0;c2<N;c2++)
{
inner += innerProduct(ret._internal[c1][c2],ret._internal[c1][c2]);
}
nrm = sqrt(inner);
nrm = 1.0/nrm;
for(int c2=0;c2<N;c2++)
{
ret._internal[c1][c2]*= nrm;
}
for(int c2=0;c2<N/2;c2++)
{
ret._internal[c1+N/2][c2+N/2] = conjugate(ret._internal[c1][c2]); // down right in the new matrix = (up-left)* of the old matrix
}
for(int c2=N/2;c2<N;c2++)
{
ret._internal[c1+N/2][c2-N/2] = -conjugate(ret._internal[c1][c2]); // down left in the new matrix = -(up-right)* of the old matrix
}
}
return ret;
}
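In equations, the per-row update implemented above (as read from the code, with rows treated as vectors and the inner product \langle a \vert b\rangle = \sum_c a_c^{*} b_c):
  U_{c_1} \leftarrow U_{c_1} - \sum_{b<c_1}\Big(\langle U_b \vert U_{c_1}\rangle\, U_b + \langle U_{b+N/2} \vert U_{c_1}\rangle\, U_{b+N/2}\Big),
  \qquad U_{c_1} \leftarrow U_{c_1}/\lVert U_{c_1}\rVert,
after which the lower rows are rebuilt from the symplectic structure:
  U_{c_1+N/2,\; c_2+N/2} = U_{c_1,\, c_2}^{*}, \qquad
  U_{c_1+N/2,\; c_2} = -\,U_{c_1,\, c_2+N/2}^{*}, \qquad 0 \le c_2 < N/2 .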
NAMESPACE_END(Grid);
#endif

View File

@ -53,7 +53,6 @@ template<class vtype, int N> accelerator_inline iVector<vtype, N> Exponentiate(c
}
// Specialisation: Cayley-Hamilton exponential for SU(3)
#if 0
template<class vtype, typename std::enable_if< GridTypeMapper<vtype>::TensorLevel == 0>::type * =nullptr>

View File

@ -120,7 +120,7 @@ hipStream_t computeStream;
void acceleratorInit(void)
{
int nDevices = 1;
hipGetDeviceCount(&nDevices);
auto discard = hipGetDeviceCount(&nDevices);
gpu_props = new hipDeviceProp_t[nDevices];
char * localRankStr = NULL;
@ -147,7 +147,7 @@ void acceleratorInit(void)
#define GPU_PROP_FMT(canMapHostMemory,FMT) printf("AcceleratorHipInit: " #canMapHostMemory ": " FMT" \n",prop.canMapHostMemory);
#define GPU_PROP(canMapHostMemory) GPU_PROP_FMT(canMapHostMemory,"%d");
hipGetDeviceProperties(&gpu_props[i], i);
discard = hipGetDeviceProperties(&gpu_props[i], i);
hipDeviceProp_t prop;
prop = gpu_props[i];
totalDeviceMem = prop.totalGlobalMem;
@ -184,13 +184,13 @@ void acceleratorInit(void)
}
int device = rank;
#endif
hipSetDevice(device);
hipStreamCreate(&copyStream);
hipStreamCreate(&computeStream);
discard = hipSetDevice(device);
discard = hipStreamCreate(&copyStream);
discard = hipStreamCreate(&computeStream);
const int len=64;
char busid[len];
if( rank == world_rank ) {
hipDeviceGetPCIBusId(busid, len, device);
discard = hipDeviceGetPCIBusId(busid, len, device);
printf("local rank %d device %d bus id: %s\n", rank, device, busid);
}
if ( world_rank == 0 ) printf("AcceleratorHipInit: ================================================\n");

View File

@ -117,7 +117,7 @@ accelerator_inline int acceleratorSIMTlane(int Nsimd) {
#endif
} // CUDA specific
inline void cuda_mem(void)
inline void acceleratorMem(void)
{
size_t free_t,total_t,used_t;
cudaMemGetInfo(&free_t,&total_t);
@ -125,6 +125,11 @@ inline void cuda_mem(void)
std::cout << " MemoryManager : GPU used "<<used_t<<" free "<<free_t<< " total "<<total_t<<std::endl;
}
inline void cuda_mem(void)
{
acceleratorMem();
}
#define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... ) \
{ \
int nt=acceleratorThreads(); \
@ -137,6 +142,18 @@ inline void cuda_mem(void)
dim3 cu_blocks ((num1+nt-1)/nt,num2,1); \
LambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda); \
}
#define prof_accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... ) \
{ \
int nt=acceleratorThreads(); \
typedef uint64_t Iterator; \
auto lambda = [=] accelerator \
(Iterator iter1,Iterator iter2,Iterator lane) mutable { \
__VA_ARGS__; \
}; \
dim3 cu_threads(nsimd,acceleratorThreads(),1); \
dim3 cu_blocks ((num1+nt-1)/nt,num2,1); \
ProfileLambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda); \
}
#define accelerator_for6dNB(iter1, num1, \
iter2, num2, \
@ -157,6 +174,20 @@ inline void cuda_mem(void)
Lambda6Apply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,num3,num4,num5,num6,lambda); \
}
#define accelerator_for2dNB( iter1, num1, iter2, num2, nsimd, ... ) \
{ \
int nt=acceleratorThreads(); \
typedef uint64_t Iterator; \
auto lambda = [=] accelerator \
(Iterator iter1,Iterator iter2,Iterator lane) mutable { \
__VA_ARGS__; \
}; \
dim3 cu_threads(nsimd,acceleratorThreads(),1); \
dim3 cu_blocks ((num1+nt-1)/nt,num2,1); \
LambdaApply<<<cu_blocks,cu_threads,0,computeStream>>>(num1,num2,nsimd,lambda); \
}
template<typename lambda> __global__
void LambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda)
{
@ -168,6 +199,17 @@ void LambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda)
Lambda(x,y,z);
}
}
template<typename lambda> __global__
void ProfileLambdaApply(uint64_t num1, uint64_t num2, uint64_t num3, lambda Lambda)
{
// Weird permute is to make lane coalesce for large blocks
uint64_t x = threadIdx.y + blockDim.y*blockIdx.x;
uint64_t y = threadIdx.z + blockDim.z*blockIdx.y;
uint64_t z = threadIdx.x;
if ( (x < num1) && (y<num2) && (z<num3) ) {
Lambda(x,y,z);
}
}
template<typename lambda> __global__
void Lambda6Apply(uint64_t num1, uint64_t num2, uint64_t num3,
@ -208,6 +250,7 @@ inline void *acceleratorAllocShared(size_t bytes)
if( err != cudaSuccess ) {
ptr = (void *) NULL;
printf(" cudaMallocManaged failed for %d %s \n",bytes,cudaGetErrorString(err));
assert(0);
}
return ptr;
};
@ -232,6 +275,7 @@ inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes
}
inline void acceleratorCopySynchronise(void) { cudaStreamSynchronize(copyStream); };
inline int acceleratorIsCommunicable(void *ptr)
{
// int uvm=0;
@ -267,6 +311,11 @@ NAMESPACE_END(Grid);
NAMESPACE_BEGIN(Grid);
inline void acceleratorMem(void)
{
std::cout <<" SYCL acceleratorMem not implemented"<<std::endl;
}
extern cl::sycl::queue *theGridAccelerator;
extern cl::sycl::queue *theCopyAccelerator;
@ -345,6 +394,15 @@ NAMESPACE_BEGIN(Grid);
#define accelerator __host__ __device__
#define accelerator_inline __host__ __device__ inline
inline void acceleratorMem(void)
{
size_t free_t,total_t,used_t;
auto discard = hipMemGetInfo(&free_t,&total_t);
used_t=total_t-free_t;
std::cout << " MemoryManager : GPU used "<<used_t<<" free "<<free_t<< " total "<<total_t<<std::endl;
}
extern hipStream_t copyStream;
extern hipStream_t computeStream;
/*These routines define mapping from thread grid to loop & vector lane indexing */
@ -405,7 +463,7 @@ void LambdaApply(uint64_t numx, uint64_t numy, uint64_t numz, lambda Lambda)
#define accelerator_barrier(dummy) \
{ \
hipStreamSynchronize(computeStream); \
auto tmp=hipStreamSynchronize(computeStream); \
auto err = hipGetLastError(); \
if ( err != hipSuccess ) { \
printf("After hipDeviceSynchronize() : HIP error %s \n", hipGetErrorString( err )); \
@ -421,7 +479,7 @@ inline void *acceleratorAllocShared(size_t bytes)
auto err = hipMallocManaged((void **)&ptr,bytes);
if( err != hipSuccess ) {
ptr = (void *) NULL;
printf(" hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err));
fprintf(stderr," hipMallocManaged failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr);
}
return ptr;
};
@ -433,24 +491,24 @@ inline void *acceleratorAllocDevice(size_t bytes)
auto err = hipMalloc((void **)&ptr,bytes);
if( err != hipSuccess ) {
ptr = (void *) NULL;
printf(" hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err));
fprintf(stderr," hipMalloc failed for %ld %s \n",bytes,hipGetErrorString(err)); fflush(stderr);
}
return ptr;
};
inline void acceleratorFreeShared(void *ptr){ hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
inline void acceleratorFreeShared(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorFreeDevice(void *ptr){ auto discard=hipFree(ptr);};
inline void acceleratorCopyToDevice(void *from,void *to,size_t bytes) { auto discard=hipMemcpy(to,from,bytes, hipMemcpyHostToDevice);}
inline void acceleratorCopyFromDevice(void *from,void *to,size_t bytes){ auto discard=hipMemcpy(to,from,bytes, hipMemcpyDeviceToHost);}
//inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) { hipMemcpy(to,from,bytes, hipMemcpyDeviceToDevice);}
//inline void acceleratorCopySynchronise(void) { }
inline void acceleratorMemSet(void *base,int value,size_t bytes) { hipMemset(base,value,bytes);}
inline void acceleratorMemSet(void *base,int value,size_t bytes) { auto discard=hipMemset(base,value,bytes);}
inline void acceleratorCopyDeviceToDeviceAsynch(void *from,void *to,size_t bytes) // Asynch
{
hipMemcpyDtoDAsync(to,from,bytes, copyStream);
auto discard=hipMemcpyDtoDAsync(to,from,bytes, copyStream);
}
inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream); };
inline void acceleratorCopySynchronise(void) { auto discard=hipStreamSynchronize(copyStream); };
#endif
@ -460,6 +518,9 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
#if defined(GRID_SYCL) || defined(GRID_CUDA) || defined(GRID_HIP)
// FIXME -- the non-blocking nature got broken March 30 2023 by PAB
#define accelerator_forNB( iter1, num1, nsimd, ... ) accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );
#define prof_accelerator_for( iter1, num1, nsimd, ... ) \
prof_accelerator_for2dNB( iter1, num1, iter2, 1, nsimd, {__VA_ARGS__} );\
accelerator_barrier(dummy);
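A usage sketch of the profiled variant (CUDA target only, since ProfileLambdaApply is defined in the CUDA block above; the out_v/in_v views are assumed to have been opened with autoView as in the staple kernels earlier in this diff). Unlike accelerator_forNB it ends with the barrier, so the call is blocking:
  prof_accelerator_for(ss, grid->oSites(), grid->Nsimd(), {
    coalescedWrite(out_v[ss], coalescedRead(in_v[ss]));  // any site-local body
  });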
#define accelerator_for( iter, num, nsimd, ... ) \
accelerator_forNB(iter, num, nsimd, { __VA_ARGS__ } ); \
@ -473,6 +534,12 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
#endif
inline void acceleratorCopyDeviceToDevice(void *from,void *to,size_t bytes)
{
acceleratorCopyDeviceToDeviceAsynch(from,to,bytes);
acceleratorCopySynchronise();
}
//////////////////////////////////////////////
// CPU Target - No accelerator just thread instead
//////////////////////////////////////////////
@ -482,6 +549,15 @@ inline void acceleratorCopySynchronise(void) { hipStreamSynchronize(copyStream);
#undef GRID_SIMT
inline void acceleratorMem(void)
{
/*
struct rusage rusage;
getrusage( RUSAGE_SELF, &rusage );
return (size_t)rusage.ru_maxrss;
*/
std::cout <<" system acceleratorMem not implemented"<<std::endl;
}
#define accelerator
#define accelerator_inline strong_inline
@ -575,4 +651,17 @@ accelerator_inline void acceleratorFence(void)
return;
}
template<class T> void acceleratorPut(T& dev,T&host)
{
acceleratorCopyToDevice(&host,&dev,sizeof(T));
}
template<class T> T acceleratorGet(T& dev)
{
T host;
acceleratorCopyFromDevice(&dev,&host,sizeof(T));
return host;
}
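A minimal usage sketch of the Put/Get helpers (the KernelArgs struct and its fields are hypothetical; acceleratorAllocDevice/acceleratorFreeDevice are the device allocators used elsewhere in this diff):
  struct KernelArgs { double mass; int Ls; };  // hypothetical parameter block
  KernelArgs host_args{0.01, 16};
  KernelArgs *d_args = (KernelArgs *)acceleratorAllocDevice(sizeof(KernelArgs));
  acceleratorPut(*d_args, host_args);          // host -> device, sizeof(T) bytes
  KernelArgs check = acceleratorGet(*d_args);  // device -> host copy back
  acceleratorFreeDevice(d_args);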
NAMESPACE_END(Grid);

Some files were not shown because too many files have changed in this diff.